Index: sdk/lib/web_audio/dartium/web_audio_dartium.dart
diff --git a/sdk/lib/web_audio/dartium/web_audio_dartium.dart b/sdk/lib/web_audio/dartium/web_audio_dartium.dart
index 950707c0db9427d1640ff948948b5d206dbd4bb4..724a3f2d885112eba81e7b1a6befea55dab277a1 100644
--- a/sdk/lib/web_audio/dartium/web_audio_dartium.dart
+++ b/sdk/lib/web_audio/dartium/web_audio_dartium.dart
@@ -188,11 +188,11 @@ class AudioBufferSourceNode extends AudioSourceNode {
   int get playbackState native "AudioBufferSourceNode_playbackState_Getter";

   void start(num when, [num grainOffset, num grainDuration]) {
-    if ((when is num || when == null) && !?grainOffset && !?grainDuration) {
+    if ((when is num || when == null) && grainOffset == null && grainDuration == null) {
       _start_1(when);
       return;
     }
-    if ((when is num || when == null) && (grainOffset is num || grainOffset == null) && !?grainDuration) {
+    if ((when is num || when == null) && (grainOffset is num || grainOffset == null) && grainDuration == null) {
       _start_2(when, grainOffset);
       return;
     }
@@ -266,7 +266,7 @@ class AudioContext extends EventTarget {
     if ((buffer_OR_numberOfChannels is int || buffer_OR_numberOfChannels == null) && (mixToMono_OR_numberOfFrames is int || mixToMono_OR_numberOfFrames == null) && (sampleRate is num || sampleRate == null)) {
       return _createBuffer_1(buffer_OR_numberOfChannels, mixToMono_OR_numberOfFrames, sampleRate);
     }
-    if ((buffer_OR_numberOfChannels is ByteBuffer || buffer_OR_numberOfChannels == null) && (mixToMono_OR_numberOfFrames is bool || mixToMono_OR_numberOfFrames == null) && !?sampleRate) {
+    if ((buffer_OR_numberOfChannels is ByteBuffer || buffer_OR_numberOfChannels == null) && (mixToMono_OR_numberOfFrames is bool || mixToMono_OR_numberOfFrames == null) && sampleRate == null) {
       return _createBuffer_2(buffer_OR_numberOfChannels, mixToMono_OR_numberOfFrames);
     }
     throw new ArgumentError("Incorrect number or type of arguments");
@@ -281,7 +281,7 @@ class AudioContext extends EventTarget {
   AudioBufferSourceNode createBufferSource() native "AudioContext_createBufferSource_Callback";

   ChannelMergerNode createChannelMerger([int numberOfInputs]) {
-    if (?numberOfInputs) {
+    if (numberOfInputs != null) {
       return _createChannelMerger_1(numberOfInputs);
     }
     return _createChannelMerger_2();
@@ -292,7 +292,7 @@ class AudioContext extends EventTarget {
   ChannelMergerNode _createChannelMerger_2() native "AudioContext__createChannelMerger_2_Callback";

   ChannelSplitterNode createChannelSplitter([int numberOfOutputs]) {
-    if (?numberOfOutputs) {
+    if (numberOfOutputs != null) {
       return _createChannelSplitter_1(numberOfOutputs);
     }
     return _createChannelSplitter_2();
@@ -307,7 +307,7 @@ class AudioContext extends EventTarget {
   ConvolverNode createConvolver() native "AudioContext_createConvolver_Callback";

   DelayNode createDelay([num maxDelayTime]) {
-    if (?maxDelayTime) {
+    if (maxDelayTime != null) {
       return _createDelay_1(maxDelayTime);
     }
     return _createDelay_2();
@@ -346,10 +346,10 @@ class AudioContext extends EventTarget {
   PannerNode createPanner() native "AudioContext_createPanner_Callback";

   ScriptProcessorNode createScriptProcessor(int bufferSize, [int numberOfInputChannels, int numberOfOutputChannels]) {
-    if (?numberOfOutputChannels) {
+    if (numberOfOutputChannels != null) {
       return _createScriptProcessor_1(bufferSize, numberOfInputChannels, numberOfOutputChannels);
     }
-    if (?numberOfInputChannels) {
+    if (numberOfInputChannels != null) {
       return _createScriptProcessor_2(bufferSize, numberOfInputChannels);
     }
     return _createScriptProcessor_3(bufferSize);
@@ -493,7 +493,7 @@ class AudioNode extends NativeFieldWrapperClass1 {
       _connect_1(destination, output, input);
       return;
     }
-    if ((destination is AudioParam || destination == null) && (output is int || output == null) && !?input) {
+    if ((destination is AudioParam || destination == null) && (output is int || output == null) && input == null) {
      _connect_2(destination, output);
       return;
     }
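
For context: the ?identifier "argument definition test" operator was removed from
the Dart language, so these generated bindings now detect omitted optional
arguments with plain null checks. A minimal sketch of the dispatch pattern,
shown as a standalone version of createDelay from the diff above (the
_createDelay_* natives are the generated overload targets):

    // Optional parameters default to null when omitted, so "param != null"
    // replaces the old "?param" test for "did the caller pass this argument?".
    DelayNode createDelay([num maxDelayTime]) {
      if (maxDelayTime != null) {
        return _createDelay_1(maxDelayTime);  // argument was supplied
      }
      return _createDelay_2();                // argument was omitted
    }

One behavioral caveat: ?param was true even when the caller passed null
explicitly, while a null check cannot distinguish an explicit null from an
omitted argument, so an explicit null now dispatches to the no-argument
overload.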