| Index: third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
|
| diff --git a/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp b/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
|
| index fcfa28d9c3f55cbcff6d24dc9f2ce5767d40f202..089da9423c97380e6a2eb57c004c80378adf23b3 100644
|
| --- a/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
|
| +++ b/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
|
| @@ -64,6 +64,7 @@
|
| #include "modules/webaudio/ScriptProcessorNode.h"
|
| #include "modules/webaudio/StereoPannerNode.h"
|
| #include "modules/webaudio/WaveShaperNode.h"
|
| +#include "platform/Histogram.h"
|
| #include "platform/ThreadSafeFunctional.h"
|
| #include "platform/audio/IIRFilter.h"
|
| #include "public/platform/Platform.h"
|
| @@ -206,7 +207,35 @@ AudioBuffer* AbstractAudioContext::createBuffer(unsigned numberOfChannels, size_
|
| // It's ok to call createBuffer, even if the context is closed because the AudioBuffer doesn't
|
| // really "belong" to any particular context.
|
|
|
| - return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
|
| + AudioBuffer* buffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
|
| +
|
| + if (buffer) {
|
| + // Only record the data if the creation succeeded.
|
| + DEFINE_STATIC_LOCAL(SparseHistogram, audioBufferChannelsHistogram,
|
| + ("WebAudio.AudioBuffer.NumberOfChannels"));
|
| + DEFINE_STATIC_LOCAL(SparseHistogram, audioBufferLengthHistogram,
|
| + ("WebAudio.AudioBuffer.Length"));
|
| + DEFINE_STATIC_LOCAL(SparseHistogram, audioBufferSampleRateHistogram,
|
| + ("WebAudio.AudioBuffer.SampleRate"));
|
| +
|
| + audioBufferChannelsHistogram.sample(numberOfChannels);
|
| + audioBufferLengthHistogram.sample(clampTo(numberOfFrames, 0, std::numeric_limits<int>::max()));
|
| + audioBufferSampleRateHistogram.sample(static_cast<int>(sampleRate));
|
| +
|
| + // Compute the ratio of the buffer rate and the context rate so we know
|
| + // how often the buffer needs to be resampled to match the context. For
|
| + // the histogram, we multiply the ratio by 100 and round to the nearest
|
| + // integer. If the context is closed, don't record this because we
|
| + // don't have a sample rate for a closed context.
|
| + if (!isContextClosed()) {
|
| + DEFINE_STATIC_LOCAL(SparseHistogram, audioBufferSampleRateRatioHistogram,
|
| + ("WebAudio.AudioBuffer.SampleRateRatio"));
|
| + float ratio = 100 * sampleRate / this->sampleRate();
|
| + audioBufferSampleRateRatioHistogram.sample(static_cast<int>(ratio + 0.5));
|
| + }
|
| + }
|
| +
|
| + return buffer;
|
| }
|
|
|
| ScriptPromise AbstractAudioContext::decodeAudioData(ScriptState* scriptState, DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState)
|
|
|