<!DOCTYPE html>
<!--
Create two sources and play them simultaneously. This tests unity-gain summing
of AudioNode inputs.
The result should be some laughing playing at the same time as the drumming.
-->
<html>
  <head>
    <title>
      mixing.html
    </title>
    <script src="../resources/testharness.js"></script>
    <script src="../resources/testharnessreport.js"></script>
    <script src="resources/audit-util.js"></script>
    <script src="resources/audit.js"></script>
  </head>
  <body>
    <script id="layout-test-code">
      let audit = Audit.createTaskRunner();

      let sampleRate = 44100.0;
      let lengthInSeconds = 2;

      audit.define('test', (task, should) => {
        // Create offline audio context.
        let context = new OfflineAudioContext(
            2, sampleRate * lengthInSeconds, sampleRate);

        // Load up audio files and test
        Promise
            .all([
              // This file is stereo
              Audit.loadFileFromUrl('resources/hyper-reality/br-jam-loop.wav')
                  .then(response => {
                    return context.decodeAudioData(response);
                  }),
              // This file is mono
              Audit.loadFileFromUrl('resources/hyper-reality/laughter.wav')
                  .then(response => {
                    return context.decodeAudioData(response);
                  }),
            ])
            .then(audioBuffers => {
              // Thresholds are experimentally determined
              return runTest(context, audioBuffers, should, [
                {snrThreshold: Infinity, errorThreshold: 0},
                {snrThreshold: Infinity, errorThreshold: 0}
              ]);
            })
            .then(() => task.done());
      });

      audit.run();

      // Plays both decoded buffers into the destination at t=0 and renders.
      // bufferList[0] must be stereo, bufferList[1] mono; testThresholds is
      // one {snrThreshold, errorThreshold} object per output channel.
      function runTest(context, bufferList, should, testThresholds) {
        should(bufferList.length, 'Number of decoded files').beEqualTo(2);

        // Create two sources and play them at the same time.
        let source1 = context.createBufferSource();
        let source2 = context.createBufferSource();
        source1.buffer = bufferList[0];
        source2.buffer = bufferList[1];

        source1.connect(context.destination);
        source2.connect(context.destination);
        source1.start(0);
        source2.start(0);

        // Verify the number of channels in each source and the expected
        // result.
        should(
            bufferList[0].numberOfChannels,
            'Number of channels in stereo source')
            .beEqualTo(2);

        should(
            bufferList[1].numberOfChannels, 'Number of channels in mono source')
            .beEqualTo(1);

        return context.startRendering().then(audioBuffer => {
          verifyResult(
              audioBuffer, context, bufferList, testThresholds, should);
        });
      }

      // Compares the rendered output against the sample-exact sum of the
      // stereo source and the (implicitly upmixed) mono source.
      function verifyResult(
          renderedBuffer, context, bufferList, testThresholds, should) {
        // Test only works if we have a stereo result.
        should(
            renderedBuffer.numberOfChannels,
            'Number of channels in rendered output')
            .beEqualTo(2);

        // Note: the source lengths may not match the context length. Create
        // copies of the sources truncated or zero-filled to the rendering
        // length.

        let stereoSource = new AudioBuffer({
          length: renderedBuffer.length,
          numberOfChannels: 2,
          sampleRate: context.sampleRate
        });
        stereoSource.copyToChannel(bufferList[0].getChannelData(0), 0);
        stereoSource.copyToChannel(bufferList[0].getChannelData(1), 1);

        let monoSource = new AudioBuffer({
          length: renderedBuffer.length,
          numberOfChannels: 1,
          sampleRate: context.sampleRate
        });
        monoSource.copyToChannel(bufferList[1].getChannelData(0), 0);

        // Compute the expected result buffer0 is stereo and buffer1 is mono.
        // The result should be stereo, with the mono source implicitly upmixed
        // to stereo to produce the expected result.
        let expectedBuffer = new AudioBuffer({
          length: renderedBuffer.length,
          numberOfChannels: 2,
          sampleRate: context.sampleRate
        });

        let monoData = monoSource.getChannelData(0);
        for (let c = 0; c < expectedBuffer.numberOfChannels; ++c) {
          let expectedData = expectedBuffer.getChannelData(c);
          let stereoData = stereoSource.getChannelData(c);
          for (let k = 0; k < expectedBuffer.length; ++k) {
            expectedData[k] = stereoData[k] + monoData[k];
          }
        }

        // Compare the rendered data with the expected data for each channel.
        for (let k = 0; k < renderedBuffer.numberOfChannels; ++k) {
          let actualData = renderedBuffer.getChannelData(k);
          let expectedData = expectedBuffer.getChannelData(k);
          let threshold = testThresholds[k];
          let snr = 10 * Math.log10(computeSNR(actualData, expectedData));

          should(snr, 'SNR for channel ' + k)
              .beGreaterThanOrEqualTo(threshold.snrThreshold);
          should(actualData, 'Rendered audio').beCloseToArray(expectedData, {
            absoluteThreshold: threshold.errorThreshold
          });
        }
      }
    </script>
  </body>
</html>