Index: third_party/WebKit/LayoutTests/webaudio/resources/audit-util.js |
diff --git a/third_party/WebKit/LayoutTests/webaudio/resources/audit-util.js b/third_party/WebKit/LayoutTests/webaudio/resources/audit-util.js |
index 389a2e7476c325bbc1cefc2386a39db384735013..671bcf1716c5a8f1711ad09a746b787a4c781888 100644 |
--- a/third_party/WebKit/LayoutTests/webaudio/resources/audit-util.js |
+++ b/third_party/WebKit/LayoutTests/webaudio/resources/audit-util.js |
@@ -10,125 +10,125 @@ |
function writeString(s, a, offset) { |
- for (var i = 0; i < s.length; ++i) { |
- a[offset + i] = s.charCodeAt(i); |
- } |
+ for (let i = 0; i < s.length; ++i) { |
+ a[offset + i] = s.charCodeAt(i); |
+ } |
} |
function writeInt16(n, a, offset) { |
- n = Math.floor(n); |
+ n = Math.floor(n); |
- var b1 = n & 255; |
- var b2 = (n >> 8) & 255; |
+ let b1 = n & 255; |
+ let b2 = (n >> 8) & 255; |
- a[offset + 0] = b1; |
- a[offset + 1] = b2; |
+ a[offset + 0] = b1; |
+ a[offset + 1] = b2; |
} |
function writeInt32(n, a, offset) { |
- n = Math.floor(n); |
- var b1 = n & 255; |
- var b2 = (n >> 8) & 255; |
- var b3 = (n >> 16) & 255; |
- var b4 = (n >> 24) & 255; |
- |
- a[offset + 0] = b1; |
- a[offset + 1] = b2; |
- a[offset + 2] = b3; |
- a[offset + 3] = b4; |
+ n = Math.floor(n); |
+ let b1 = n & 255; |
+ let b2 = (n >> 8) & 255; |
+ let b3 = (n >> 16) & 255; |
+ let b4 = (n >> 24) & 255; |
+ |
+ a[offset + 0] = b1; |
+ a[offset + 1] = b2; |
+ a[offset + 2] = b3; |
+ a[offset + 3] = b4; |
} |
// Return the bits of the float as a 32-bit integer value. This |
// produces the raw bits; no intepretation of the value is done. |
function floatBits(f) { |
- var buf = new ArrayBuffer(4); |
+ let buf = new ArrayBuffer(4); |
(new Float32Array(buf))[0] = f; |
- var bits = (new Uint32Array(buf))[0]; |
+ let bits = (new Uint32Array(buf))[0]; |
// Return as a signed integer. |
return bits | 0; |
} |
function writeAudioBuffer(audioBuffer, a, offset, asFloat) { |
- var n = audioBuffer.length; |
- var channels = audioBuffer.numberOfChannels; |
- |
- for (var i = 0; i < n; ++i) { |
- for (var k = 0; k < channels; ++k) { |
- var buffer = audioBuffer.getChannelData(k); |
- if (asFloat) { |
- var sample = floatBits(buffer[i]); |
- writeInt32(sample, a, offset); |
- offset += 4; |
- } else { |
- var sample = buffer[i] * 32768.0; |
- |
- // Clip samples to the limitations of 16-bit. |
- // If we don't do this then we'll get nasty wrap-around distortion. |
- if (sample < -32768) |
- sample = -32768; |
- if (sample > 32767) |
- sample = 32767; |
- |
- writeInt16(sample, a, offset); |
- offset += 2; |
- } |
- } |
+ let n = audioBuffer.length; |
+ let channels = audioBuffer.numberOfChannels; |
+ |
+ for (let i = 0; i < n; ++i) { |
+ for (let k = 0; k < channels; ++k) { |
+ let buffer = audioBuffer.getChannelData(k); |
+ if (asFloat) { |
+ let sample = floatBits(buffer[i]); |
+ writeInt32(sample, a, offset); |
+ offset += 4; |
+ } else { |
+ let sample = buffer[i] * 32768.0; |
+ |
+ // Clip samples to the limitations of 16-bit. |
+ // If we don't do this then we'll get nasty wrap-around distortion. |
+ if (sample < -32768) |
+ sample = -32768; |
+ if (sample > 32767) |
+ sample = 32767; |
+ |
+ writeInt16(sample, a, offset); |
+ offset += 2; |
+ } |
} |
+ } |
} |
// See http://soundfile.sapp.org/doc/WaveFormat/ and |
// http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html |
// for a quick introduction to the WAVE PCM format. |
function createWaveFileData(audioBuffer, asFloat) { |
- var bytesPerSample = asFloat ? 4 : 2; |
- var frameLength = audioBuffer.length; |
- var numberOfChannels = audioBuffer.numberOfChannels; |
- var sampleRate = audioBuffer.sampleRate; |
- var bitsPerSample = 8 * bytesPerSample; |
- var byteRate = sampleRate * numberOfChannels * bitsPerSample/8; |
- var blockAlign = numberOfChannels * bitsPerSample/8; |
- var wavDataByteLength = frameLength * numberOfChannels * bytesPerSample; |
- var headerByteLength = 44; |
- var totalLength = headerByteLength + wavDataByteLength; |
- |
- var waveFileData = new Uint8Array(totalLength); |
- |
- var subChunk1Size = 16; // for linear PCM |
- var subChunk2Size = wavDataByteLength; |
- var chunkSize = 4 + (8 + subChunk1Size) + (8 + subChunk2Size); |
- |
- writeString("RIFF", waveFileData, 0); |
- writeInt32(chunkSize, waveFileData, 4); |
- writeString("WAVE", waveFileData, 8); |
- writeString("fmt ", waveFileData, 12); |
- |
- writeInt32(subChunk1Size, waveFileData, 16); // SubChunk1Size (4) |
- // The format tag value is 1 for integer PCM data and 3 for IEEE |
- // float data. |
- writeInt16(asFloat ? 3 : 1, waveFileData, 20); // AudioFormat (2) |
- writeInt16(numberOfChannels, waveFileData, 22); // NumChannels (2) |
- writeInt32(sampleRate, waveFileData, 24); // SampleRate (4) |
- writeInt32(byteRate, waveFileData, 28); // ByteRate (4) |
- writeInt16(blockAlign, waveFileData, 32); // BlockAlign (2) |
- writeInt32(bitsPerSample, waveFileData, 34); // BitsPerSample (4) |
- |
- writeString("data", waveFileData, 36); |
- writeInt32(subChunk2Size, waveFileData, 40); // SubChunk2Size (4) |
- |
- // Write actual audio data starting at offset 44. |
- writeAudioBuffer(audioBuffer, waveFileData, 44, asFloat); |
- |
- return waveFileData; |
+ let bytesPerSample = asFloat ? 4 : 2; |
+ let frameLength = audioBuffer.length; |
+ let numberOfChannels = audioBuffer.numberOfChannels; |
+ let sampleRate = audioBuffer.sampleRate; |
+ let bitsPerSample = 8 * bytesPerSample; |
+ let byteRate = sampleRate * numberOfChannels * bitsPerSample / 8; |
+ let blockAlign = numberOfChannels * bitsPerSample / 8; |
+ let wavDataByteLength = frameLength * numberOfChannels * bytesPerSample; |
+ let headerByteLength = 44; |
+ let totalLength = headerByteLength + wavDataByteLength; |
+ |
+ let waveFileData = new Uint8Array(totalLength); |
+ |
+ let subChunk1Size = 16; // for linear PCM |
+ let subChunk2Size = wavDataByteLength; |
+ let chunkSize = 4 + (8 + subChunk1Size) + (8 + subChunk2Size); |
+ |
+ writeString('RIFF', waveFileData, 0); |
+ writeInt32(chunkSize, waveFileData, 4); |
+ writeString('WAVE', waveFileData, 8); |
+ writeString('fmt ', waveFileData, 12); |
+ |
+ writeInt32(subChunk1Size, waveFileData, 16); // SubChunk1Size (4) |
+ // The format tag value is 1 for integer PCM data and 3 for IEEE |
+ // float data. |
+ writeInt16(asFloat ? 3 : 1, waveFileData, 20); // AudioFormat (2) |
+ writeInt16(numberOfChannels, waveFileData, 22); // NumChannels (2) |
+ writeInt32(sampleRate, waveFileData, 24); // SampleRate (4) |
+ writeInt32(byteRate, waveFileData, 28); // ByteRate (4) |
+ writeInt16(blockAlign, waveFileData, 32); // BlockAlign (2) |
+  writeInt32(bitsPerSample, waveFileData, 34);  // BitsPerSample (2-byte field; NOTE(review): 4-byte write — upper 2 bytes land at 36-37 and are overwritten by 'data' below; consider writeInt16) |
+ |
+ writeString('data', waveFileData, 36); |
+ writeInt32(subChunk2Size, waveFileData, 40); // SubChunk2Size (4) |
+ |
+ // Write actual audio data starting at offset 44. |
+ writeAudioBuffer(audioBuffer, waveFileData, 44, asFloat); |
+ |
+ return waveFileData; |
} |
function createAudioData(audioBuffer, asFloat) { |
- return createWaveFileData(audioBuffer, asFloat); |
+ return createWaveFileData(audioBuffer, asFloat); |
} |
function finishAudioTest(event) { |
- var audioData = createAudioData(event.renderedBuffer); |
- testRunner.setAudioData(audioData); |
- testRunner.notifyDone(); |
+ let audioData = createAudioData(event.renderedBuffer); |
+ testRunner.setAudioData(audioData); |
+ testRunner.notifyDone(); |
} |
// Save the given |audioBuffer| to a WAV file using the name given by |
@@ -139,27 +139,27 @@ function finishAudioTest(event) { |
// float format (full WebAudio resolution). Otherwise a 16-bit PCM |
// WAV file is produced. |
function downloadAudioBuffer(audioBuffer, filename, asFloat) { |
- // Don't download if testRunner is defined; we're running a layout |
- // test where this won't be useful in general. |
- if (window.testRunner) |
- return false; |
- // Convert the audio buffer to an array containing the WAV file |
- // contents. Then convert it to a blob that can be saved as a WAV |
- // file. |
- let wavData = createAudioData(audioBuffer, asFloat); |
- let blob = new Blob([wavData], {type: 'audio/wav'}); |
- // Manually create html tags for downloading, and simulate a click |
- // to download the file to the given file name. |
- let a = document.createElement('a'); |
- a.style.display = 'none'; |
- a.download = filename; |
- let audioURL = window.URL.createObjectURL(blob); |
- let audio = new Audio(); |
- audio.src = audioURL; |
- a.href = audioURL; |
- document.body.appendChild(a); |
- a.click(); |
- return true; |
+ // Don't download if testRunner is defined; we're running a layout |
+ // test where this won't be useful in general. |
+ if (window.testRunner) |
+ return false; |
+ // Convert the audio buffer to an array containing the WAV file |
+ // contents. Then convert it to a blob that can be saved as a WAV |
+ // file. |
+ let wavData = createAudioData(audioBuffer, asFloat); |
+ let blob = new Blob([wavData], {type: 'audio/wav'}); |
+ // Manually create html tags for downloading, and simulate a click |
+ // to download the file to the given file name. |
+ let a = document.createElement('a'); |
+ a.style.display = 'none'; |
+ a.download = filename; |
+ let audioURL = window.URL.createObjectURL(blob); |
+ let audio = new Audio(); |
+ audio.src = audioURL; |
+ a.href = audioURL; |
+ document.body.appendChild(a); |
+ a.click(); |
+ return true; |
} |
// Compare two arrays (commonly extracted from buffer.getChannelData()) with |
@@ -173,167 +173,174 @@ function downloadAudioBuffer(audioBuffer, filename, asFloat) { |
// options.bitDepth: The expected result is assumed to come from an audio |
// file with this number of bits of precision. The default is 16. |
function compareBuffersWithConstraints(should, actual, expected, options) { |
- if (!options) |
- options = {}; |
- |
- // Only print out the message if the lengths are different; the |
- // expectation is that they are the same, so don't clutter up the |
- // output. |
- if (actual.length !== expected.length) { |
- should(actual.length === expected.length, |
- "Length of actual and expected buffers should match") |
- .beTrue(); |
+ if (!options) |
+ options = {}; |
+ |
+ // Only print out the message if the lengths are different; the |
+ // expectation is that they are the same, so don't clutter up the |
+ // output. |
+ if (actual.length !== expected.length) { |
+ should( |
+ actual.length === expected.length, |
+ 'Length of actual and expected buffers should match') |
+ .beTrue(); |
+ } |
+ |
+ let maxError = -1; |
+ let diffCount = 0; |
+ let errorPosition = -1; |
+ let thresholdSNR = (options.thresholdSNR || 10000); |
+ |
+ let thresholdDiffULP = (options.thresholdDiffULP || 0); |
+ let thresholdDiffCount = (options.thresholdDiffCount || 0); |
+ |
+ // By default, the bit depth is 16. |
+ let bitDepth = (options.bitDepth || 16); |
+ let scaleFactor = Math.pow(2, bitDepth - 1); |
+ |
+ let noisePower = 0, signalPower = 0; |
+ |
+ for (let i = 0; i < actual.length; i++) { |
+ let diff = actual[i] - expected[i]; |
+ noisePower += diff * diff; |
+ signalPower += expected[i] * expected[i]; |
+ |
+ if (Math.abs(diff) > maxError) { |
+ maxError = Math.abs(diff); |
+ errorPosition = i; |
} |
- var maxError = -1; |
- var diffCount = 0; |
- var errorPosition = -1; |
- var thresholdSNR = (options.thresholdSNR || 10000); |
- |
- var thresholdDiffULP = (options.thresholdDiffULP || 0); |
- var thresholdDiffCount = (options.thresholdDiffCount || 0); |
- |
- // By default, the bit depth is 16. |
- var bitDepth = (options.bitDepth || 16); |
- var scaleFactor = Math.pow(2, bitDepth - 1); |
+ // The reference file is a 16-bit WAV file, so we will almost never get |
+ // an exact match between it and the actual floating-point result. |
+ if (Math.abs(diff) > scaleFactor) |
+ diffCount++; |
+ } |
- var noisePower = 0, signalPower = 0; |
+ let snr = 10 * Math.log10(signalPower / noisePower); |
+ let maxErrorULP = maxError * scaleFactor; |
- for (var i = 0; i < actual.length; i++) { |
- var diff = actual[i] - expected[i]; |
- noisePower += diff * diff; |
- signalPower += expected[i] * expected[i]; |
+ should(snr, 'SNR').beGreaterThanOrEqualTo(thresholdSNR); |
- if (Math.abs(diff) > maxError) { |
- maxError = Math.abs(diff); |
- errorPosition = i; |
- } |
- |
- // The reference file is a 16-bit WAV file, so we will almost never get |
- // an exact match between it and the actual floating-point result. |
- if (Math.abs(diff) > scaleFactor) |
- diffCount++; |
- } |
+ should( |
+ maxErrorULP, |
+ options.prefix + ': Maximum difference (in ulp units (' + bitDepth + |
+ '-bits))') |
+ .beLessThanOrEqualTo(thresholdDiffULP); |
- var snr = 10 * Math.log10(signalPower / noisePower); |
- var maxErrorULP = maxError * scaleFactor; |
- |
- should(snr, "SNR").beGreaterThanOrEqualTo(thresholdSNR); |
- |
- should(maxErrorULP, |
- options.prefix + ': Maximum difference (in ulp units (' + bitDepth + |
- '-bits))' |
- ).beLessThanOrEqualTo(thresholdDiffULP); |
- |
- should(diffCount, options.prefix + |
- ': Number of differences between results').beLessThanOrEqualTo( |
- thresholdDiffCount); |
+ should(diffCount, options.prefix + ': Number of differences between results') |
+ .beLessThanOrEqualTo(thresholdDiffCount); |
} |
// Create an impulse in a buffer of length sampleFrameLength |
function createImpulseBuffer(context, sampleFrameLength) { |
- var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate); |
- var n = audioBuffer.length; |
- var dataL = audioBuffer.getChannelData(0); |
+ let audioBuffer = |
+ context.createBuffer(1, sampleFrameLength, context.sampleRate); |
+ let n = audioBuffer.length; |
+ let dataL = audioBuffer.getChannelData(0); |
- for (var k = 0; k < n; ++k) { |
- dataL[k] = 0; |
- } |
- dataL[0] = 1; |
+ for (let k = 0; k < n; ++k) { |
+ dataL[k] = 0; |
+ } |
+ dataL[0] = 1; |
- return audioBuffer; |
+ return audioBuffer; |
} |
-// Create a buffer of the given length with a linear ramp having values 0 <= x < 1. |
+// Create a buffer of the given length with a linear ramp having values 0 <= x < |
+// 1. |
function createLinearRampBuffer(context, sampleFrameLength) { |
- var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate); |
- var n = audioBuffer.length; |
- var dataL = audioBuffer.getChannelData(0); |
+ let audioBuffer = |
+ context.createBuffer(1, sampleFrameLength, context.sampleRate); |
+ let n = audioBuffer.length; |
+ let dataL = audioBuffer.getChannelData(0); |
- for (var i = 0; i < n; ++i) |
- dataL[i] = i / n; |
+ for (let i = 0; i < n; ++i) |
+ dataL[i] = i / n; |
- return audioBuffer; |
+ return audioBuffer; |
} |
-// Create an AudioBuffer of length |sampleFrameLength| having a constant value |constantValue|. If |
-// |constantValue| is a number, the buffer has one channel filled with that value. If |
-// |constantValue| is an array, the buffer is created wit a number of channels equal to the length |
-// of the array, and channel k is filled with the k'th element of the |constantValue| array. |
+// Create an AudioBuffer of length |sampleFrameLength| having a constant value |
+// |constantValue|. If |constantValue| is a number, the buffer has one channel |
+// filled with that value. If |constantValue| is an array, the buffer is created |
+// with a number of channels equal to the length of the array, and channel k is |
+// filled with the k'th element of the |constantValue| array. |
function createConstantBuffer(context, sampleFrameLength, constantValue) { |
- var channels; |
- var values; |
- |
- if (typeof constantValue === "number") { |
- channels = 1; |
- values = [constantValue]; |
- } else { |
- channels = constantValue.length; |
- values = constantValue; |
- } |
- |
- var audioBuffer = context.createBuffer(channels, sampleFrameLength, context.sampleRate); |
- var n = audioBuffer.length; |
- |
- for (var c = 0; c < channels; ++c) { |
- var data = audioBuffer.getChannelData(c); |
- for (var i = 0; i < n; ++i) |
- data[i] = values[c]; |
- } |
- |
- return audioBuffer; |
+ let channels; |
+ let values; |
+ |
+ if (typeof constantValue === 'number') { |
+ channels = 1; |
+ values = [constantValue]; |
+ } else { |
+ channels = constantValue.length; |
+ values = constantValue; |
+ } |
+ |
+ let audioBuffer = |
+ context.createBuffer(channels, sampleFrameLength, context.sampleRate); |
+ let n = audioBuffer.length; |
+ |
+ for (let c = 0; c < channels; ++c) { |
+ let data = audioBuffer.getChannelData(c); |
+ for (let i = 0; i < n; ++i) |
+ data[i] = values[c]; |
+ } |
+ |
+ return audioBuffer; |
} |
// Create a stereo impulse in a buffer of length sampleFrameLength |
function createStereoImpulseBuffer(context, sampleFrameLength) { |
- var audioBuffer = context.createBuffer(2, sampleFrameLength, context.sampleRate); |
- var n = audioBuffer.length; |
- var dataL = audioBuffer.getChannelData(0); |
- var dataR = audioBuffer.getChannelData(1); |
- |
- for (var k = 0; k < n; ++k) { |
- dataL[k] = 0; |
- dataR[k] = 0; |
- } |
- dataL[0] = 1; |
- dataR[0] = 1; |
- |
- return audioBuffer; |
+ let audioBuffer = |
+ context.createBuffer(2, sampleFrameLength, context.sampleRate); |
+ let n = audioBuffer.length; |
+ let dataL = audioBuffer.getChannelData(0); |
+ let dataR = audioBuffer.getChannelData(1); |
+ |
+ for (let k = 0; k < n; ++k) { |
+ dataL[k] = 0; |
+ dataR[k] = 0; |
+ } |
+ dataL[0] = 1; |
+ dataR[0] = 1; |
+ |
+ return audioBuffer; |
} |
// Convert time (in seconds) to sample frames. |
function timeToSampleFrame(time, sampleRate) { |
- return Math.floor(0.5 + time * sampleRate); |
+ return Math.floor(0.5 + time * sampleRate); |
} |
// Compute the number of sample frames consumed by noteGrainOn with |
// the specified |grainOffset|, |duration|, and |sampleRate|. |
function grainLengthInSampleFrames(grainOffset, duration, sampleRate) { |
- var startFrame = timeToSampleFrame(grainOffset, sampleRate); |
- var endFrame = timeToSampleFrame(grainOffset + duration, sampleRate); |
+ let startFrame = timeToSampleFrame(grainOffset, sampleRate); |
+ let endFrame = timeToSampleFrame(grainOffset + duration, sampleRate); |
- return endFrame - startFrame; |
+ return endFrame - startFrame; |
} |
// True if the number is not an infinity or NaN |
function isValidNumber(x) { |
- return !isNaN(x) && (x != Infinity) && (x != -Infinity); |
+ return !isNaN(x) && (x != Infinity) && (x != -Infinity); |
} |
// Compute the (linear) signal-to-noise ratio between |actual| and |
// |expected|. The result is NOT in dB! If the |actual| and |
// |expected| have different lengths, the shorter length is used. |
function computeSNR(actual, expected) { |
- var signalPower = 0; |
- var noisePower = 0; |
+ let signalPower = 0; |
+ let noisePower = 0; |
- var length = Math.min(actual.length, expected.length); |
+ let length = Math.min(actual.length, expected.length); |
- for (var k = 0; k < length; ++k) { |
- var diff = actual[k] - expected[k]; |
- signalPower += expected[k] * expected[k]; |
- noisePower += diff * diff; |
- } |
+ for (let k = 0; k < length; ++k) { |
+ let diff = actual[k] - expected[k]; |
+ signalPower += expected[k] * expected[k]; |
+ noisePower += diff * diff; |
+ } |
- return signalPower / noisePower; |
+ return signalPower / noisePower; |
} |