Index: sdk/lib/web_audio/dartium/web_audio_dartium.dart |
diff --git a/sdk/lib/web_audio/dartium/web_audio_dartium.dart b/sdk/lib/web_audio/dartium/web_audio_dartium.dart |
index d71875ca41f14b59081b21e0a726c90a2d78f6ec..260cb0715366750f9da014cc43973f78d77502b3 100644 |
--- a/sdk/lib/web_audio/dartium/web_audio_dartium.dart |
+++ b/sdk/lib/web_audio/dartium/web_audio_dartium.dart |
@@ -196,15 +196,15 @@ class AudioBufferSourceNode extends AudioSourceNode { |
int get playbackState native "AudioBufferSourceNode_playbackState_Getter"; |
void start(num when, [num grainOffset, num grainDuration]) { |
- if ((when is num || when == null) && !?grainOffset && !?grainDuration) { |
+ if (when is num && !?grainOffset && !?grainDuration) { |
blois
2013/05/13 21:09:16
When is `when` not a num?
|
_start_1(when); |
return; |
} |
- if ((when is num || when == null) && (grainOffset is num || grainOffset == null) && !?grainDuration) { |
+ if (grainOffset is num && grainOffset != null && when is num && !?grainDuration) { |
_start_2(when, grainOffset); |
return; |
} |
- if ((when is num || when == null) && (grainOffset is num || grainOffset == null) && (grainDuration is num || grainDuration == null)) { |
+ if (grainDuration is num && grainDuration != null && grainOffset is num && grainOffset != null && when is num) { |
_start_3(when, grainOffset, grainDuration); |
return; |
} |
@@ -271,10 +271,10 @@ class AudioContext extends EventTarget { |
BiquadFilterNode createBiquadFilter() native "AudioContext_createBiquadFilter_Callback"; |
AudioBuffer createBuffer(buffer_OR_numberOfChannels, mixToMono_OR_numberOfFrames, [num sampleRate]) { |
- if ((buffer_OR_numberOfChannels is int || buffer_OR_numberOfChannels == null) && (mixToMono_OR_numberOfFrames is int || mixToMono_OR_numberOfFrames == null) && (sampleRate is num || sampleRate == null)) { |
+ if (sampleRate is num && sampleRate != null && mixToMono_OR_numberOfFrames is int && mixToMono_OR_numberOfFrames != null && buffer_OR_numberOfChannels is int && buffer_OR_numberOfChannels != null) { |
return _createBuffer_1(buffer_OR_numberOfChannels, mixToMono_OR_numberOfFrames, sampleRate); |
} |
- if ((buffer_OR_numberOfChannels is ByteBuffer || buffer_OR_numberOfChannels == null) && (mixToMono_OR_numberOfFrames is bool || mixToMono_OR_numberOfFrames == null) && !?sampleRate) { |
+ if (mixToMono_OR_numberOfFrames is bool && mixToMono_OR_numberOfFrames != null && buffer_OR_numberOfChannels is ByteBuffer && buffer_OR_numberOfChannels != null && !?sampleRate) { |
return _createBuffer_2(buffer_OR_numberOfChannels, mixToMono_OR_numberOfFrames); |
} |
throw new ArgumentError("Incorrect number or type of arguments"); |
@@ -497,11 +497,11 @@ class AudioNode extends NativeFieldWrapperClass1 { |
int get numberOfOutputs native "AudioNode_numberOfOutputs_Getter"; |
void connect(destination, int output, [int input]) { |
- if ((destination is AudioNode || destination == null) && (output is int || output == null) && (input is int || input == null)) { |
+ if (input is int && input != null && output is int && destination is AudioNode && destination != null) { |
_connect_1(destination, output, input); |
return; |
} |
- if ((destination is AudioParam || destination == null) && (output is int || output == null) && !?input) { |
+ if (output is int && destination is AudioParam && destination != null && !?input) { |
_connect_2(destination, output); |
return; |
} |