Index: third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp |
diff --git a/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp b/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp |
index 36344db0f2263eac94c7836d70103eac6972a9b6..0766eeeb786c8526b093520cfd9275cb89371367 100644 |
--- a/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp |
+++ b/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp |
@@ -32,8 +32,11 @@ |
#include "core/dom/Document.h" |
#include "core/dom/ExceptionCode.h" |
#include "core/dom/ExecutionContextTask.h" |
+#include "core/frame/LocalDOMWindow.h" |
#include "core/frame/Settings.h" |
#include "core/html/HTMLMediaElement.h" |
+#include "core/timing/DOMWindowPerformance.h" |
+#include "core/timing/Performance.h" |
#include "modules/mediastream/MediaStream.h" |
#include "modules/webaudio/AnalyserNode.h" |
#include "modules/webaudio/AudioBuffer.h" |
@@ -43,6 +46,7 @@ |
#include "modules/webaudio/AudioListener.h" |
#include "modules/webaudio/AudioNodeInput.h" |
#include "modules/webaudio/AudioNodeOutput.h" |
+#include "modules/webaudio/AudioTimestamp.h" |
#include "modules/webaudio/BiquadFilterNode.h" |
#include "modules/webaudio/ChannelMergerNode.h" |
#include "modules/webaudio/ChannelSplitterNode.h" |
@@ -560,6 +564,66 @@ void AbstractAudioContext::recordUserGestureState() |
     m_userGestureRequired = false; |
 } |
+// Maps a monotonic clock reading |seconds| onto the DOMHighResTimeStamp |
+// timeline of the window's Performance object. Returns 0.0 whenever the |
+// execution context, its window, or its Performance object is unavailable. |
+static double toPerformanceTime(ExecutionContext* context, double seconds) |
+{ |
+    if (!context) |
+        return 0.0; |
+ |
+    LocalDOMWindow* window = context->executingWindow(); |
+    if (!window) |
+        return 0.0; |
+ |
+    Performance* performance = DOMWindowPerformance::performance(*window); |
+    if (!performance) |
+        return 0.0; |
+ |
+    return performance->monotonicTimeToDOMHighResTimeStamp(seconds); |
+} |
+ |
+// Implements AudioContext.getOutputTimestamp(): reports the most recent |
+// output position both in context time (output frames divided by the |
+// destination's sample rate) and on the Performance.now() timeline. |
+// Both members are 0.0 when the destination node does not exist yet. |
+void AbstractAudioContext::getOutputTimestamp(AudioTimestamp& result) |
+{ |
+    DCHECK(isMainThread()); |
+    if (!m_destinationNode) { |
+        result.setContextTime(0.0); |
+        result.setPerformanceTime(0.0); |
+        return; |
+    } |
+ |
+    WebAudioTimestamp outputTimestamp; |
+    { |
+        // Copy the shared timestamp under the context lock; the audio |
+        // thread updates it via setWebAudioTimestamp(). |
+        AutoLocker locker(this); |
+        outputTimestamp = m_outputTimestamp; |
+    } |
+    AudioDestinationHandler& destinationHandler = m_destinationNode->audioDestinationHandler(); |
+    double contextTime = outputTimestamp.frames / static_cast<double>(destinationHandler.sampleRate()); |
+    // A zero |seconds| means no timestamp has been produced yet. |
+    double performanceTime = outputTimestamp.seconds ? toPerformanceTime(getExecutionContext(), outputTimestamp.seconds) : 0.0; |
+ |
+    result.setContextTime(contextTime); |
+    result.setPerformanceTime(performanceTime); |
+} |
+ |
+// Called on the audio thread with a new output timestamp. tryLock() keeps |
+// the real-time thread from ever blocking on the main thread; a skipped |
+// update is harmless because a fresher timestamp follows on the next call. |
+void AbstractAudioContext::setWebAudioTimestamp(const WebAudioTimestamp& timestamp) |
+{ |
+    DCHECK(isAudioThread()); |
+    if (tryLock()) { |
+        m_outputTimestamp = timestamp; |
+        unlock(); |
+    } |
+} |
+ |
String AbstractAudioContext::state() const |
{ |
// These strings had better match the strings for AudioContextState in AudioContext.idl. |