<!doctype html>
<html>
<head>
<title>A disabled audio track is rendered as silence</title>
<link rel="author" title="Dominique Hazael-Massieux" href="mailto:dom@w3.org"/>
<link rel="help" href="http://dev.w3.org/2011/webrtc/editor/getusermedia.html#introduction">
<link rel="help" href="http://dev.w3.org/2011/webrtc/editor/getusermedia.html#mediastreams-as-media-elements">
</head>
<body>
<p class="instructions">When prompted, accept to share your audio stream.</p>
<h1 class="instructions">Description</h1>
<p class="instructions">This test checks that a disabled audio track in a
MediaStream is rendered as silence. It relies on the
<a href="https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html">
Web Audio API</a>.</p>

<div id='log'></div>
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<script src="/common/vendor-prefix.js" data-prefixed-objects='[{"ancestors":["navigator"], "name":"getUserMedia"}, {"ancestors":["window"], "name":"AudioContext"}]'></script>
<script>
var t = async_test("Tests that a disabled audio track in a MediaStream is rendered as silence", {timeout: 200000});
t.step(function() {
  navigator.getUserMedia({audio: true}, t.step_func(function (stream) {
    var ctx = new AudioContext();
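    // Feed the captured MediaStream into the Web Audio graph so its samples can be inspected.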
    var streamSource = ctx.createMediaStreamSource(stream);
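    // A ScriptProcessorNode exposes the raw audio buffers; for a disabled track every sample must be exactly zero.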
    var silenceDetector = ctx.createScriptProcessor(1024);
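    // Number of consecutive audio buffers to check before declaring the track silent.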
    var count = 10;
    silenceDetector.onaudioprocess = t.step_func(function (e) {
      var buffer1 = e.inputBuffer.getChannelData(0);
      var buffer2 = e.inputBuffer.getChannelData(1);
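      // Pass the input samples through to the output buffer so the node keeps producing audio.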
      var out = e.outputBuffer.getChannelData(0);
      out.set(buffer1);
      for (var i = 0; i < buffer1.length; i++) {
        assert_equals(buffer1[i], 0, "Audio buffer entry #" + i + " in channel 0 is silent");
      }
      for (var i = 0; i < buffer2.length; i++) {
        assert_equals(buffer2[i], 0, "Audio buffer entry #" + i + " in channel 1 is silent");
      }
      count--;
      if (count === 0) {
        silenceDetector.onaudioprocess = null;
        t.done();
      }
    });
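    // Disable the audio track; from this point on it must be rendered as silence.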
    stream.getAudioTracks()[0].enabled = false;
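    // Wire up the graph (stream source -> silence detector -> destination) so onaudioprocess starts firing.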
    streamSource.connect(silenceDetector);
    silenceDetector.connect(ctx.destination);
  }), t.unreached_func("getUserMedia call unexpectedly failed"));
});
</script>
</body>
</html>