OLD | NEW |
(Empty) | |
| 1 |
// Wrapper implementation of AudioContext: every member delegates to the
// underlying native object (_ptr), converting values across the boundary
// with _wrap (native -> Dart) and _unwrap (Dart -> native).
class _AudioContextImpl extends _DOMTypeBase implements AudioContext {
  _AudioContextImpl._wrap(ptr) : super._wrap(ptr);

  // Read-only context state, forwarded from the native object.
  num get currentTime() => _wrap(_ptr.currentTime);

  AudioDestinationNode get destination() => _wrap(_ptr.destination);

  AudioListener get listener() => _wrap(_ptr.listener);

  EventListener get oncomplete() => _wrap(_ptr.oncomplete);

  void set oncomplete(EventListener value) { _ptr.oncomplete = _unwrap(value); }

  num get sampleRate() => _wrap(_ptr.sampleRate);

  // Node factories: each constructs a native node and wraps it.
  RealtimeAnalyserNode createAnalyser() => _wrap(_ptr.createAnalyser());

  BiquadFilterNode createBiquadFilter() => _wrap(_ptr.createBiquadFilter());

  // Dispatches between the two createBuffer overloads:
  //   createBuffer(ArrayBuffer buffer, bool mixToMono)
  //   createBuffer(int numberOfChannels, int numberOfFrames, num sampleRate)
  // Any other argument combination is rejected.
  AudioBuffer createBuffer(var buffer_OR_numberOfChannels, var mixToMono_OR_numberOfFrames, [num sampleRate = null]) {
    if (buffer_OR_numberOfChannels is ArrayBuffer &&
        mixToMono_OR_numberOfFrames is bool &&
        sampleRate === null) {
      return _wrap(_ptr.createBuffer(_unwrap(buffer_OR_numberOfChannels), _unwrap(mixToMono_OR_numberOfFrames)));
    }
    if (!(buffer_OR_numberOfChannels is ArrayBuffer) &&
        buffer_OR_numberOfChannels is int &&
        mixToMono_OR_numberOfFrames is int) {
      return _wrap(_ptr.createBuffer(_unwrap(buffer_OR_numberOfChannels), _unwrap(mixToMono_OR_numberOfFrames), _unwrap(sampleRate)));
    }
    throw "Incorrect number or type of arguments";
  }

  AudioBufferSourceNode createBufferSource() => _wrap(_ptr.createBufferSource());

  AudioChannelMerger createChannelMerger() => _wrap(_ptr.createChannelMerger());

  AudioChannelSplitter createChannelSplitter() => _wrap(_ptr.createChannelSplitter());

  ConvolverNode createConvolver() => _wrap(_ptr.createConvolver());

  DelayNode createDelayNode() => _wrap(_ptr.createDelayNode());

  DynamicsCompressorNode createDynamicsCompressor() => _wrap(_ptr.createDynamicsCompressor());

  AudioGainNode createGainNode() => _wrap(_ptr.createGainNode());

  HighPass2FilterNode createHighPass2Filter() => _wrap(_ptr.createHighPass2Filter());

  JavaScriptAudioNode createJavaScriptNode(int bufferSize) => _wrap(_ptr.createJavaScriptNode(_unwrap(bufferSize)));

  LowPass2FilterNode createLowPass2Filter() => _wrap(_ptr.createLowPass2Filter());

  MediaElementAudioSourceNode createMediaElementSource(MediaElement mediaElement) =>
      _wrap(_ptr.createMediaElementSource(_unwrap(mediaElement)));

  AudioPannerNode createPanner() => _wrap(_ptr.createPanner());

  WaveShaperNode createWaveShaper() => _wrap(_ptr.createWaveShaper());

  // Forwards to the two- or three-argument native overload depending on
  // whether an error callback was supplied.
  void decodeAudioData(ArrayBuffer audioData, AudioBufferCallback successCallback, [AudioBufferCallback errorCallback = null]) {
    if (errorCallback === null) {
      _ptr.decodeAudioData(_unwrap(audioData), _unwrap(successCallback));
    } else {
      _ptr.decodeAudioData(_unwrap(audioData), _unwrap(successCallback), _unwrap(errorCallback));
    }
  }

  void startRendering() {
    _ptr.startRendering();
  }
}
OLD | NEW |