OLD | NEW |
---|---|
1 <!DOCTYPE html> | 1 <!DOCTYPE html> |
2 | 2 |
3 <!-- | 3 <!-- |
4 Create two sources and play them simultaneously. This tests unity-gain summing of AudioNode inputs. | 4 Create two sources and play them simultaneously. This tests unity-gain summing of AudioNode inputs. |
5 The result should be some laughing playing at the same time as the drumming. | 5 The result should be some laughing playing at the same time as the drumming. |
6 --> | 6 --> |
7 | 7 |
8 <html> | 8 <html> |
9 <head> | 9 <head> |
10 <script src="../resources/testharness.js"></script> | |
11 <script src="../resources/testharnessreport.js"></script> | |
10 <script src="resources/audit-util.js"></script> | 12 <script src="resources/audit-util.js"></script> |
11 <script src="resources/audio-testing.js"></script> | 13 <script src="resources/audit.js"></script> |
12 <script type="text/javascript" src="resources/buffer-loader.js"></script> | |
13 | |
14 </head> | 14 </head> |
15 <body> | 15 <body> |
16 | 16 |
17 <script> | 17 <script> |
18 let audit = Audit.createTaskRunner(); | |
18 | 19 |
19 window.onload = init; | 20 let sampleRate = 44100.0; |
21 let lengthInSeconds = 2; | |
20 | 22 |
21 var sampleRate = 44100.0; | 23 audit.define('test', (task, should) => { |
22 var lengthInSeconds = 2; | 24 // Create offline audio context. |
25 let context = | |
26 new OfflineAudioContext(2, sampleRate * lengthInSeconds, sampleRate); | |
23 | 27 |
24 var context = 0; | 28 // Load up audio files |
hongchan
2017/02/27 18:10:36
It also runs the test. Or you can separate this ta
Raymond Toy
2017/02/27 19:02:51
Updated comment.
| |
25 var bufferLoader = 0; | 29 Promise |
30 .all([ | |
31 // This file is stereo | |
32 Audit.loadFileFromUrl('resources/hyper-reality/br-jam-loop.wav') | |
33 .then(response => { return context.decodeAudioData(response); }), | |
34 // This file is mono | |
35 Audit.loadFileFromUrl('resources/hyper-reality/laughter.wav') | |
36 .then(response => { return context.decodeAudioData(response); }), | |
37 ]) | |
38 .then(audioBuffers => { | |
39 // Thresholds are experimentally determined | |
40 return runTest(context, audioBuffers, should, [ | |
41 {snrThreshold: Infinity, errorThreshold: 0}, | |
42 {snrThreshold: Infinity, errorThreshold: 0} | |
43 ]); | |
44 }) | |
45 .then(() => task.done()); | |
46 }); | |
26 | 47 |
27 function init() { | 48 audit.run(); |
28 if (!window.testRunner) | |
29 return; | |
30 | |
31 // Create offline audio context. | |
32 context = new OfflineAudioContext(2, sampleRate * lengthInSeconds, sampleRate); | |
33 | |
34 bufferLoader = new BufferLoader( | |
35 context, | |
36 [ | |
37 "resources/hyper-reality/br-jam-loop.wav", | |
38 "resources/hyper-reality/laughter.wav", | |
39 ], | |
40 finishedLoading | |
41 ); | |
42 | 49 |
43 bufferLoader.load(); | 50 function runTest(context, bufferList, should, testThresholds) { |
44 testRunner.waitUntilDone(); | 51 should(bufferList.length, 'Number of decoded files').beEqualTo(2); |
45 } | |
46 | 52 |
47 function finishedLoading(bufferList) { | 53 // Create two sources and play them at the same time. |
48 // Create two sources and play them at the same time. | 54 let source1 = context.createBufferSource(); |
49 var source1 = context.createBufferSource(); | 55 let source2 = context.createBufferSource(); |
50 var source2 = context.createBufferSource(); | 56 source1.buffer = bufferList[0]; |
51 source1.buffer = bufferList[0]; | 57 source2.buffer = bufferList[1]; |
52 source2.buffer = bufferList[1]; | 58 |
53 | 59 source1.connect(context.destination); |
54 source1.connect(context.destination); | 60 source2.connect(context.destination); |
55 source2.connect(context.destination); | 61 source1.start(0); |
56 source1.start(0); | 62 source2.start(0); |
57 source2.start(0); | 63 |
58 | 64 // Verify the number of channels in each source and the expected result. |
59 context.oncomplete = finishAudioTest; | 65 should(bufferList[0].numberOfChannels, 'Number of channels in stereo source') |
60 context.startRendering(); | 66 .beEqualTo(2); |
67 | |
68 should(bufferList[1].numberOfChannels, 'Number of channels in mono source') | |
69 .beEqualTo(1); | |
70 | |
71 return context.startRendering().then(renderedBuffer => { | |
72 // Test only works if we have a stereo result. | |
hongchan
2017/02/27 18:10:36
This resolver can be refactored to something like
Raymond Toy
2017/02/27 19:02:51
Done.
| |
73 should(renderedBuffer.numberOfChannels, 'Number of channels in rendered output') | |
74 .beEqualTo(2); | |
75 | |
76 // Note: the source lengths may not match the context length. Create copies | |
77 // of the sources truncated or zero-filled to the rendering length. | |
78 | |
79 let stereoSource = new AudioBuffer({ | |
80 length: renderedBuffer.length, | |
81 numberOfChannels: 2, | |
82 sampleRate: context.sampleRate | |
83 }); | |
84 stereoSource.copyToChannel(bufferList[0].getChannelData(0), 0); | |
85 stereoSource.copyToChannel(bufferList[0].getChannelData(1), 1); | |
86 | |
87 let monoSource = new AudioBuffer({ | |
88 length: renderedBuffer.length, | |
89 numberOfChannels: 1, | |
90 sampleRate: context.sampleRate | |
91 }); | |
92 monoSource.copyToChannel(bufferList[1].getChannelData(0), 0); | |
93 | |
94 // Compute the expected result. buffer0 is stereo and buffer1 is mono. The | |
95 // result should be stereo, with the mono source implicitly upmixed to | |
96 // stereo to produce the expected result. | |
97 let expectedBuffer = new AudioBuffer({ | |
98 length: renderedBuffer.length, | |
99 numberOfChannels: 2, | |
100 sampleRate: context.sampleRate | |
101 }); | |
102 | |
103 let monoData = monoSource.getChannelData(0); | |
104 for (let c = 0; c < expectedBuffer.numberOfChannels; ++c) { | |
105 let expectedData = expectedBuffer.getChannelData(c); | |
106 let stereoData = stereoSource.getChannelData(c); | |
107 for (let k = 0; k < expectedBuffer.length; ++k) { | |
108 expectedData[k] = stereoData[k] + monoData[k]; | |
109 } | |
110 } | |
111 | |
112 // Compare the rendered data with the expected data for each channel. | |
113 for (let k = 0; k < renderedBuffer.numberOfChannels; ++k) { | |
114 let actualData = renderedBuffer.getChannelData(k); | |
115 let expectedData = expectedBuffer.getChannelData(k); | |
116 let threshold = testThresholds[k]; | |
117 let snr = 10 * Math.log10(computeSNR(actualData, expectedData)); | |
118 | |
119 should(snr, 'SNR for channel ' + k) | |
120 .beGreaterThanOrEqualTo(threshold.snrThreshold); | |
121 should(actualData, 'Rendered audio').beCloseToArray(expectedData, { | |
122 absoluteThreshold: threshold.errorThreshold | |
123 }); | |
124 } | |
125 }); | |
61 } | 126 } |
62 | 127 |
63 </script> | 128 </script> |
64 | 129 |
65 </body> | 130 </body> |
66 </html> | 131 </html> |
OLD | NEW |