Index: third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
diff --git a/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp b/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
index 36344db0f2263eac94c7836d70103eac6972a9b6..12a663d46470477573285b1ae3e9472f89c1b96b 100644
--- a/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
+++ b/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
@@ -32,8 +32,11 @@
 #include "core/dom/Document.h"
 #include "core/dom/ExceptionCode.h"
 #include "core/dom/ExecutionContextTask.h"
+#include "core/frame/LocalDOMWindow.h"
 #include "core/frame/Settings.h"
 #include "core/html/HTMLMediaElement.h"
+#include "core/timing/DOMWindowPerformance.h"
+#include "core/timing/Performance.h"
 #include "modules/mediastream/MediaStream.h"
 #include "modules/webaudio/AnalyserNode.h"
 #include "modules/webaudio/AudioBuffer.h"
@@ -43,6 +46,7 @@
 #include "modules/webaudio/AudioListener.h"
 #include "modules/webaudio/AudioNodeInput.h"
 #include "modules/webaudio/AudioNodeOutput.h"
+#include "modules/webaudio/AudioTimestamp.h"
 #include "modules/webaudio/BiquadFilterNode.h"
 #include "modules/webaudio/ChannelMergerNode.h"
 #include "modules/webaudio/ChannelSplitterNode.h"
@@ -560,6 +564,30 @@ void AbstractAudioContext::recordUserGestureState()
     m_userGestureRequired = false;
 }
 
+static double toPerformanceTime(ExecutionContext* context, double seconds)
+{
+    LocalDOMWindow* window = context ? context->executingWindow() : nullptr;
Raymond Toy
2016/06/14 16:42:40
Why not just early return with 0.0 if the context is null?

Mikhail
2016/06/17 09:36:57
Done.
+    Performance* performance = window ? DOMWindowPerformance::performance(*window) : nullptr;
+    return performance ? performance->monotonicTimeToDOMHighResTimeStamp(seconds) : 0.0;
+}
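A possible shape of the early-return rewrite requested in the thread above (only a sketch of what the follow-up patchset might look like, not the actual revision; it reuses the same calls and headers the patch already adds):

    static double toPerformanceTime(ExecutionContext* context, double seconds)
    {
        // Return 0.0 as soon as any link in the chain is missing, instead of
        // threading ternaries through every null check.
        if (!context)
            return 0.0;
        LocalDOMWindow* window = context->executingWindow();
        if (!window)
            return 0.0;
        Performance* performance = DOMWindowPerformance::performance(*window);
        if (!performance)
            return 0.0;
        return performance->monotonicTimeToDOMHighResTimeStamp(seconds);
    }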
+
+void AbstractAudioContext::getOutputTimestamp(AudioTimestamp& result)
+{
+    if (!m_destinationNode) {
+        result.setContextTime(0.0);
+        result.setPerformanceTime(0.0);
+        return;
+    }
+    AudioDestinationHandler& destinationHandler = m_destinationNode->audioDestinationHandler();
+
+    WebAudioTimestamp outputTimestamp = destinationHandler.outputTimestamp();
+    double contextTime = outputTimestamp.frames / static_cast<double>(destinationHandler.sampleRate());
+    double performanceTime = outputTimestamp.seconds ? toPerformanceTime(getExecutionContext(), outputTimestamp.seconds) : 0.0;
+
+    result.setContextTime(contextTime);
+    result.setPerformanceTime(performanceTime);
+}
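For intuition, the two fields report one instant on two clocks: contextTime is the output frame position converted to seconds of the context's sample clock, and performanceTime re-expresses that same instant on the page's Performance clock via monotonicTimeToDOMHighResTimeStamp(). A minimal standalone illustration with made-up numbers (nothing below is part of the patch):

    #include <cstdio>

    // Hypothetical figures: the device has consumed 96000 frames at 48000 Hz.
    static double framesToContextTime(double frames, double sampleRate)
    {
        return frames / sampleRate;
    }

    int main()
    {
        std::printf("contextTime = %.1f s\n", framesToContextTime(96000, 48000)); // prints 2.0
        return 0;
    }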
+
 String AbstractAudioContext::state() const
 {
     // These strings had better match the strings for AudioContextState in AudioContext.idl.