Index: third_party/WebKit/LayoutTests/webaudio/mixing.html
diff --git a/third_party/WebKit/LayoutTests/webaudio/mixing.html b/third_party/WebKit/LayoutTests/webaudio/mixing.html
index 1d041491d6bcf0e926b0547fedaaaf7332282dfe..9d45d03334a6ad94e194853bc462dafed00b61f7 100644
--- a/third_party/WebKit/LayoutTests/webaudio/mixing.html
+++ b/third_party/WebKit/LayoutTests/webaudio/mixing.html
@@ -7,57 +7,122 @@ The result should be some laughing playing at the same time as the drumming.
 <html>
 <head>
+<script src="../resources/testharness.js"></script>
+<script src="../resources/testharnessreport.js"></script>
 <script src="resources/audit-util.js"></script>
-<script src="resources/audio-testing.js"></script>
-<script type="text/javascript" src="resources/buffer-loader.js"></script>
-
+<script src="resources/audit.js"></script>
 </head>
 <body>
 <script>
+let audit = Audit.createTaskRunner();
-window.onload = init;
-
-var sampleRate = 44100.0;
-var lengthInSeconds = 2;
-
-var context = 0;
-var bufferLoader = 0;
-
-function init() {
-  if (!window.testRunner)
-    return;
-
-  // Create offline audio context.
-  context = new OfflineAudioContext(2, sampleRate * lengthInSeconds, sampleRate);
-
-  bufferLoader = new BufferLoader(
-      context,
-      [
-        "resources/hyper-reality/br-jam-loop.wav",
-        "resources/hyper-reality/laughter.wav",
-      ],
-      finishedLoading
-  );
-
-  bufferLoader.load();
-  testRunner.waitUntilDone();
-}
+let sampleRate = 44100.0;
+let lengthInSeconds = 2;
+
+audit.define('test', (task, should) => {
+  // Create offline audio context.
+  let context =
+      new OfflineAudioContext(2, sampleRate * lengthInSeconds, sampleRate);
+
+  // Load the audio files. This also runs the test.
[hongchan, 2017/02/27 18:10:36] It also runs the test. Or you can separate this ta…
[Raymond Toy, 2017/02/27 19:02:51] Updated comment.
+  Promise
+      .all([
+        // This file is stereo
+        Audit.loadFileFromUrl('resources/hyper-reality/br-jam-loop.wav')
+            .then(response => { return context.decodeAudioData(response); }),
+        // This file is mono
+        Audit.loadFileFromUrl('resources/hyper-reality/laughter.wav')
+            .then(response => { return context.decodeAudioData(response); }),
+      ])
+      .then(audioBuffers => {
+        // Thresholds are experimentally determined. An SNR threshold of
+        // Infinity and an error threshold of 0 require the rendered output
+        // to match the expected mix exactly.
+        return runTest(context, audioBuffers, should, [
+          {snrThreshold: Infinity, errorThreshold: 0},
+          {snrThreshold: Infinity, errorThreshold: 0}
+        ]);
+      })
+      .then(() => task.done());
+});
+
+audit.run();
+
+function runTest(context, bufferList, should, testThresholds) {
+  should(bufferList.length, 'Number of decoded files').beEqualTo(2);
+
+  // Create two sources and play them at the same time.
+  let source1 = context.createBufferSource();
+  let source2 = context.createBufferSource();
+  source1.buffer = bufferList[0];
+  source2.buffer = bufferList[1];
+
+  source1.connect(context.destination);
+  source2.connect(context.destination);
+  source1.start(0);
+  source2.start(0);
+
+  // Verify the number of channels in each source and the expected result.
+  should(bufferList[0].numberOfChannels, 'Number of channels in stereo source')
+      .beEqualTo(2);
+
+  should(bufferList[1].numberOfChannels, 'Number of channels in mono source')
+      .beEqualTo(1);
+
+  return context.startRendering().then(renderedBuffer => {
+    // Test only works if we have a stereo result.
[hongchan, 2017/02/27 18:10:36] This resolver can be refactored to something like…
[Raymond Toy, 2017/02/27 19:02:51] Done.
+    should(renderedBuffer.numberOfChannels, 'Number of channels in rendered output')
+        .beEqualTo(2);
+
+    // Note: the source lengths may not match the context length. Create copies
+    // of the sources truncated or zero-filled to the rendering length.
+
+    let stereoSource = new AudioBuffer({
+      length: renderedBuffer.length,
+      numberOfChannels: 2,
+      sampleRate: context.sampleRate
+    });
+    stereoSource.copyToChannel(bufferList[0].getChannelData(0), 0);
+    stereoSource.copyToChannel(bufferList[0].getChannelData(1), 1);
+
+    let monoSource = new AudioBuffer({
+      length: renderedBuffer.length,
+      numberOfChannels: 1,
+      sampleRate: context.sampleRate
+    });
+    monoSource.copyToChannel(bufferList[1].getChannelData(0), 0);
+
+    // Compute the expected result: buffer0 is stereo and buffer1 is mono, so
+    // the result should be stereo, with the mono source implicitly up-mixed
+    // to stereo.
+    let expectedBuffer = new AudioBuffer({
+      length: renderedBuffer.length,
+      numberOfChannels: 2,
+      sampleRate: context.sampleRate
+    });
+
+    let monoData = monoSource.getChannelData(0);
+    for (let c = 0; c < expectedBuffer.numberOfChannels; ++c) {
+      let expectedData = expectedBuffer.getChannelData(c);
+      let stereoData = stereoSource.getChannelData(c);
+      for (let k = 0; k < expectedBuffer.length; ++k) {
+        expectedData[k] = stereoData[k] + monoData[k];
+      }
+    }
+
+    // Compare the rendered data with the expected data for each channel.
+    for (let k = 0; k < renderedBuffer.numberOfChannels; ++k) {
+      let actualData = renderedBuffer.getChannelData(k);
+      let expectedData = expectedBuffer.getChannelData(k);
+      let threshold = testThresholds[k];
+      let snr = 10 * Math.log10(computeSNR(actualData, expectedData));
-function finishedLoading(bufferList) {
-  // Create two sources and play them at the same time.
-  var source1 = context.createBufferSource();
-  var source2 = context.createBufferSource();
-  source1.buffer = bufferList[0];
-  source2.buffer = bufferList[1];
-
-  source1.connect(context.destination);
-  source2.connect(context.destination);
-  source1.start(0);
-  source2.start(0);
-
-  context.oncomplete = finishAudioTest;
-  context.startRendering();
+      should(snr, 'SNR for channel ' + k)
+          .beGreaterThanOrEqualTo(threshold.snrThreshold);
+      should(actualData, 'Rendered audio').beCloseToArray(expectedData, {
+        absoluteThreshold: threshold.errorThreshold
+      });
+    }
+  });
 }
 </script>
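
Note: the diff relies on two helpers that live outside this file. Audit.loadFileFromUrl() comes from resources/audit.js; the sketch below is an assumption about its behavior (all the diff guarantees is that it returns a promise whose value decodeAudioData() accepts), not the actual Chromium implementation:

function loadFileFromUrl(url) {
  // Fetch the file as an ArrayBuffer so decodeAudioData() can consume it.
  return new Promise((resolve, reject) => {
    let xhr = new XMLHttpRequest();
    xhr.open('GET', url, true);
    xhr.responseType = 'arraybuffer';
    xhr.onload = () => {
      if (xhr.status >= 200 && xhr.status < 300)
        resolve(xhr.response);
      else
        reject(new Error('Failed to load ' + url + ': status ' + xhr.status));
    };
    xhr.onerror = () => reject(new Error('Network error loading ' + url));
    xhr.send();
  });
}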
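
Likewise, computeSNR() is supplied by resources/audit-util.js and is not shown in the diff. A minimal sketch, assuming it returns the linear signal-to-noise power ratio that the test then converts to dB with 10 * Math.log10(...):

function computeSNR(actual, expected) {
  // Treat the expected mix as the signal and the sample-by-sample
  // difference from the rendered output as the noise.
  let signalPower = 0;
  let noisePower = 0;
  for (let k = 0; k < expected.length; ++k) {
    let diff = actual[k] - expected[k];
    signalPower += expected[k] * expected[k];
    noisePower += diff * diff;
  }
  return signalPower / noisePower;
}

Under this definition a bit-exact render gives noisePower = 0 and hence an SNR of Infinity, which is exactly what the snrThreshold of Infinity in the test demands.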