| OLD | NEW |
| 1 /* global self */ | 1 /* global self */ |
| 2 | 2 |
| 3 // testharness.js has the higher priority. | 3 // testharness.js has the higher priority. |
| 4 var TESTHARNESS = true; | 4 var TESTHARNESS = true; |
| 5 var JSTEST = false; | 5 var JSTEST = false; |
| 6 | 6 |
| 7 (function () { | 7 (function () { |
| 8 // Selected properies from testharness.js | 8 // Selected properies from testharness.js |
| 9 var testharnessProperties = [ | 9 var testharnessProperties = [ |
| 10 'test', 'async_test', 'promise_test', 'promise_rejects', | 10 'test', 'async_test', 'promise_test', 'promise_rejects', |
| (...skipping 27 matching lines...) Expand all Loading... |
| 38 break; | 38 break; |
| 39 } | 39 } |
| 40 } | 40 } |
| 41 | 41 |
| 42 // If both are not loaded at all, throw here. | 42 // If both are not loaded at all, throw here. |
| 43 if (!JSTEST) | 43 if (!JSTEST) |
| 44 throw new Error('Cannot proceed. No test infrastructure is loaded.'); | 44 throw new Error('Cannot proceed. No test infrastructure is loaded.'); |
| 45 })(); | 45 })(); |
| 46 | 46 |
| 47 | 47 |
| 48 | |
// Copy the character codes of string |s| into byte array |a|, one byte per
// character, starting at index |offset|.  Assumes |s| is ASCII (charCodeAt
// values are truncated to whatever fits in the destination array).
function writeString(s, a, offset) {
  var i = s.length;
  while (i--) {
    a[offset + i] = s.charCodeAt(i);
  }
}
| 54 | |
// Serialize |n| (truncated with Math.floor) as a little-endian 16-bit
// integer into byte array |a| at |a[offset]| and |a[offset + 1]|.
function writeInt16(n, a, offset) {
  var v = Math.floor(n);
  a[offset] = v & 0xFF;         // low byte first (little-endian)
  a[offset + 1] = (v >> 8) & 0xFF;
}
| 64 | |
// Serialize |n| (truncated with Math.floor) as a little-endian 32-bit
// integer into byte array |a| starting at |offset|.
function writeInt32(n, a, offset) {
  var v = Math.floor(n);
  // Emit the four bytes low-to-high.
  for (var b = 0; b < 4; ++b) {
    a[offset + b] = (v >> (8 * b)) & 0xFF;
  }
}
| 77 | |
// Interleave the samples of |audioBuffer| as clipped little-endian 16-bit
// PCM into byte array |a|, starting at byte |offset|.  Frames are written
// in order, channels interleaved within each frame.
function writeAudioBuffer(audioBuffer, a, offset) {
  var n = audioBuffer.length;
  var channels = audioBuffer.numberOfChannels;

  // Fetch each channel's data once up front; the original called
  // getChannelData(k) inside the per-sample inner loop even though the
  // result is loop-invariant.
  var channelData = [];
  for (var k = 0; k < channels; ++k)
    channelData[k] = audioBuffer.getChannelData(k);

  for (var i = 0; i < n; ++i) {
    for (var k = 0; k < channels; ++k) {
      var sample = channelData[k][i] * 32768.0;

      // Clip samples to the limitations of 16-bit.
      // If we don't do this then we'll get nasty wrap-around distortion.
      if (sample < -32768)
        sample = -32768;
      if (sample > 32767)
        sample = 32767;

      writeInt16(sample, a, offset);
      offset += 2;
    }
  }
}
| 99 | |
// Build a complete 16-bit linear-PCM RIFF/WAVE file (44-byte header plus
// interleaved sample data) from |audioBuffer| and return it as a Uint8Array.
function createWaveFileData(audioBuffer) {
  var frameLength = audioBuffer.length;
  var numberOfChannels = audioBuffer.numberOfChannels;
  var sampleRate = audioBuffer.sampleRate;
  var bitsPerSample = 16;
  var byteRate = sampleRate * numberOfChannels * bitsPerSample / 8;
  var blockAlign = numberOfChannels * bitsPerSample / 8;
  var wavDataByteLength = frameLength * numberOfChannels * 2; // 16-bit audio
  var headerByteLength = 44;
  var totalLength = headerByteLength + wavDataByteLength;

  var waveFileData = new Uint8Array(totalLength);

  var subChunk1Size = 16; // for linear PCM
  var subChunk2Size = wavDataByteLength;
  var chunkSize = 4 + (8 + subChunk1Size) + (8 + subChunk2Size);

  writeString("RIFF", waveFileData, 0);
  writeInt32(chunkSize, waveFileData, 4);
  writeString("WAVE", waveFileData, 8);
  writeString("fmt ", waveFileData, 12);

  writeInt32(subChunk1Size, waveFileData, 16);    // SubChunk1Size (4)
  writeInt16(1, waveFileData, 20);                // AudioFormat (2)
  writeInt16(numberOfChannels, waveFileData, 22); // NumChannels (2)
  writeInt32(sampleRate, waveFileData, 24);       // SampleRate (4)
  writeInt32(byteRate, waveFileData, 28);         // ByteRate (4)
  writeInt16(blockAlign, waveFileData, 32);       // BlockAlign (2)
  // BitsPerSample is a 2-byte field per the RIFF/WAVE spec.  The previous
  // writeInt32 here spilled two zero bytes into offsets 36-37; that was
  // only masked because "data" is written over them immediately below.
  writeInt16(bitsPerSample, waveFileData, 34);    // BitsPerSample (2)

  writeString("data", waveFileData, 36);
  writeInt32(subChunk2Size, waveFileData, 40);    // SubChunk2Size (4)

  // Write actual audio data starting at offset 44.
  writeAudioBuffer(audioBuffer, waveFileData, 44);

  return waveFileData;
}
| 138 | |
// Convert |audioBuffer| to the byte payload handed to the test runner.
// Currently the only supported container is a 16-bit PCM WAV file.
function createAudioData(audioBuffer) {
  return createWaveFileData(audioBuffer);
}
| 142 | |
// OfflineAudioContext completion handler: hand the rendered audio to the
// test harness as WAV data and signal that the layout test is done.
// |event| is expected to carry a |renderedBuffer| AudioBuffer.
function finishAudioTest(event) {
  testRunner.setAudioData(createAudioData(event.renderedBuffer));
  testRunner.notifyDone();
}
| 148 | |
// Compare two arrays (commonly extracted from buffer.getChannelData()) with
// constraints:
//   options.thresholdSNR: Minimum allowed SNR between the actual and expected
//     signal. The default value is 10000.
//   options.thresholdDiffULP: Maximum allowed difference between the actual
//     and expected signal in ULP(Unit in the last place). The default is 0.
//   options.thresholdDiffCount: Maximum allowed number of sample differences
//     which exceeds the threshold. The default is 0.
//   options.bitDepth: The expected result is assumed to come from an audio
//     file with this number of bits of precision. The default is 16.
// Reports results via the global testPassed()/testFailed() helpers.
function compareBuffersWithConstraints(actual, expected, options) {
  if (!options)
    options = {};

  if (actual.length !== expected.length)
    testFailed('Buffer length mismatches.');

  var maxError = -1;
  var diffCount = 0;
  var errorPosition = -1;
  var thresholdSNR = (options.thresholdSNR || 10000);

  var thresholdDiffULP = (options.thresholdDiffULP || 0);
  var thresholdDiffCount = (options.thresholdDiffCount || 0);

  // By default, the bit depth is 16.
  var bitDepth = (options.bitDepth || 16);
  var scaleFactor = Math.pow(2, bitDepth - 1);

  var noisePower = 0, signalPower = 0;

  for (var i = 0; i < actual.length; i++) {
    var diff = actual[i] - expected[i];
    noisePower += diff * diff;
    signalPower += expected[i] * expected[i];

    if (Math.abs(diff) > maxError) {
      maxError = Math.abs(diff);
      errorPosition = i;
    }

    // The reference file is a 16-bit WAV file, so we will almost never get
    // an exact match between it and the actual floating-point result.
    // Count samples that differ by more than one LSB of the reference file
    // (1/scaleFactor in normalized units).  The previous comparison against
    // scaleFactor itself (32768 for 16 bits) could never be true for
    // normalized audio, so diffCount was always 0.
    if (Math.abs(diff) > 1 / scaleFactor)
      diffCount++;
  }

  var snr = 10 * Math.log10(signalPower / noisePower);
  var maxErrorULP = maxError * scaleFactor;

  if (snr >= thresholdSNR) {
    testPassed('Exceeded SNR threshold of ' + thresholdSNR + ' dB.');
  } else {
    testFailed('Expected SNR of ' + thresholdSNR + ' dB, but actual SNR is ' +
        snr + ' dB.');
  }

  if (maxErrorULP <= thresholdDiffULP) {
    testPassed('Maximum difference below threshold of ' +
        thresholdDiffULP + ' ulp (' + bitDepth + '-bits).');
  } else {
    testFailed('Maximum difference of ' + maxErrorULP +
        ' at the index ' + errorPosition + ' exceeded threshold of ' +
        thresholdDiffULP + ' ulp (' + bitDepth + '-bits).');
  }

  if (diffCount <= thresholdDiffCount) {
    testPassed('Number of differences between results is ' +
        diffCount + ' out of ' + actual.length + '.');
  } else {
    // Previously this message printed diffCount twice; the allowed limit is
    // thresholdDiffCount.
    testFailed(diffCount + ' differences found but expected no more than ' +
        thresholdDiffCount + ' out of ' + actual.length + '.');
  }
}
| 223 | |
// Create a mono impulse (1 followed by zeros) in a buffer of length
// |sampleFrameLength| at the context's sample rate.
function createImpulseBuffer(context, sampleFrameLength) {
  var buffer = context.createBuffer(1, sampleFrameLength, context.sampleRate);
  var data = buffer.getChannelData(0);
  var length = buffer.length;

  var i = 0;
  while (i < length) {
    data[i++] = 0;
  }
  data[0] = 1;

  return buffer;
}
| 237 | |
// Create a mono buffer of |sampleFrameLength| frames holding a linear ramp
// with values 0 <= x < 1 (sample i is i / length).
function createLinearRampBuffer(context, sampleFrameLength) {
  var buffer = context.createBuffer(1, sampleFrameLength, context.sampleRate);
  var data = buffer.getChannelData(0);
  var length = buffer.length;

  for (var k = 0; k < length; ++k) {
    data[k] = k / length;
  }

  return buffer;
}
| 249 | |
// Create an AudioBuffer of length |sampleFrameLength| having a constant value
// |constantValue|.  If |constantValue| is a number, the buffer has one channel
// filled with that value.  If |constantValue| is an array, the buffer has one
// channel per array element, and channel k is filled with the k'th element.
function createConstantBuffer(context, sampleFrameLength, constantValue) {
  // Normalize the scalar case to a one-element array of channel values.
  var values = (typeof constantValue === "number") ? [constantValue]
                                                   : constantValue;
  var channels = values.length;

  var buffer = context.createBuffer(channels, sampleFrameLength,
                                    context.sampleRate);
  var length = buffer.length;

  for (var c = 0; c < channels; ++c) {
    var data = buffer.getChannelData(c);
    var value = values[c];
    for (var i = 0; i < length; ++i) {
      data[i] = value;
    }
  }

  return buffer;
}
| 277 | |
// Create a stereo impulse (both channels: 1 followed by zeros) in a buffer
// of length |sampleFrameLength|.
function createStereoImpulseBuffer(context, sampleFrameLength) {
  var buffer = context.createBuffer(2, sampleFrameLength, context.sampleRate);
  var length = buffer.length;

  for (var ch = 0; ch < 2; ++ch) {
    var data = buffer.getChannelData(ch);
    for (var i = 0; i < length; ++i) {
      data[i] = 0;
    }
    data[0] = 1;
  }

  return buffer;
}
| 294 | |
// Convert |time| (in seconds) at |sampleRate| to a whole sample-frame index,
// rounding to the nearest frame (half-frames round up).
function timeToSampleFrame(time, sampleRate) {
  var exactFrame = time * sampleRate;
  return Math.floor(exactFrame + 0.5);
}
| 299 | |
// Compute the number of sample frames consumed by noteGrainOn with the
// specified |grainOffset|, |duration|, and |sampleRate|: the difference
// between the rounded end frame and the rounded start frame.
function grainLengthInSampleFrames(grainOffset, duration, sampleRate) {
  var firstFrame = timeToSampleFrame(grainOffset, sampleRate);
  var lastFrame = timeToSampleFrame(grainOffset + duration, sampleRate);
  return lastFrame - firstFrame;
}
| 308 | |
// True if |x| is neither NaN nor an infinity.  Note this deliberately uses
// the coercing isNaN()/loose equality, so numeric strings are accepted.
function isValidNumber(x) {
  return !(isNaN(x) || x == Infinity || x == -Infinity);
}
| 313 | |
| 314 | |
| 315 // |Audit| is a task runner for web audio test. It makes asynchronous web audio | 48 // |Audit| is a task runner for web audio test. It makes asynchronous web audio |
| 316 // testing simple and manageable. | 49 // testing simple and manageable. |
| 317 // | 50 // |
| 318 // EXAMPLE: | 51 // EXAMPLE: |
| 319 // | 52 // |
| 320 // var audit = Audit.createTaskRunner(); | 53 // var audit = Audit.createTaskRunner(); |
| 321 // // Define test routine. Make sure to call done() when reached at the end. | 54 // // Define test routine. Make sure to call done() when reached at the end. |
| 322 // audit.defineTask('foo', function (done) { | 55 // audit.defineTask('foo', function (done) { |
| 323 // var context = new AudioContext(); | 56 // var context = new AudioContext(); |
| 324 // // do things | 57 // // do things |
| (...skipping 872 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1197 if (opts.hasOwnProperty('brief')) | 930 if (opts.hasOwnProperty('brief')) |
| 1198 _opts.brief = opts.brief; | 931 _opts.brief = opts.brief; |
| 1199 if (opts.hasOwnProperty('precision')) | 932 if (opts.hasOwnProperty('precision')) |
| 1200 _opts.precision = opts.precision; | 933 _opts.precision = opts.precision; |
| 1201 } | 934 } |
| 1202 | 935 |
| 1203 return new ShouldModel(desc, target, _opts); | 936 return new ShouldModel(desc, target, _opts); |
| 1204 }; | 937 }; |
| 1205 | 938 |
| 1206 })(); | 939 })(); |
| OLD | NEW |