| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010 Google Inc. All rights reserved. | 2 * Copyright (C) 2010 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * | 7 * |
| 8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
| (...skipping 31 matching lines...) |
| 42 AudioParamType paramType, | 42 AudioParamType paramType, |
| 43 double defaultValue, | 43 double defaultValue, |
| 44 float minValue, | 44 float minValue, |
| 45 float maxValue) | 45 float maxValue) |
| 46 : AudioSummingJunction(context.deferredTaskHandler()), | 46 : AudioSummingJunction(context.deferredTaskHandler()), |
| 47 m_paramType(paramType), | 47 m_paramType(paramType), |
| 48 m_intrinsicValue(defaultValue), | 48 m_intrinsicValue(defaultValue), |
| 49 m_defaultValue(defaultValue), | 49 m_defaultValue(defaultValue), |
| 50 m_minValue(minValue), | 50 m_minValue(minValue), |
| 51 m_maxValue(maxValue) { | 51 m_maxValue(maxValue) { |
| 52 // The destination MUST exist because we need the destination handler for the AudioParam. | 52 // The destination MUST exist because we need the destination handler for the |
| | 53 // AudioParam. |
| 53 RELEASE_ASSERT(context.destination()); | 54 RELEASE_ASSERT(context.destination()); |
| 54 | 55 |
| 55 m_destinationHandler = &context.destination()->audioDestinationHandler(); | 56 m_destinationHandler = &context.destination()->audioDestinationHandler(); |
| 56 m_timeline.setSmoothedValue(defaultValue); | 57 m_timeline.setSmoothedValue(defaultValue); |
| 57 } | 58 } |
| 58 | 59 |
| 59 AudioDestinationHandler& AudioParamHandler::destinationHandler() const { | 60 AudioDestinationHandler& AudioParamHandler::destinationHandler() const { |
| 60 return *m_destinationHandler; | 61 return *m_destinationHandler; |
| 61 } | 62 } |
| 62 | 63 |
| 63 void AudioParamHandler::setParamType(AudioParamType paramType) { | 64 void AudioParamHandler::setParamType(AudioParamType paramType) { |
| 64 m_paramType = paramType; | 65 m_paramType = paramType; |
| 65 } | 66 } |
| 66 | 67 |
| 67 String AudioParamHandler::getParamName() const { | 68 String AudioParamHandler::getParamName() const { |
| 68 // The returned string should be the name of the node and the name of the AudioParam for | 69 // The returned string should be the name of the node and the name of the |
| 69 // that node. | 70 // AudioParam for that node. |
| 70 switch (m_paramType) { | 71 switch (m_paramType) { |
| 71 case ParamTypeAudioBufferSourcePlaybackRate: | 72 case ParamTypeAudioBufferSourcePlaybackRate: |
| 72 return "AudioBufferSource.playbackRate"; | 73 return "AudioBufferSource.playbackRate"; |
| 73 case ParamTypeAudioBufferSourceDetune: | 74 case ParamTypeAudioBufferSourceDetune: |
| 74 return "AudioBufferSource.detune"; | 75 return "AudioBufferSource.detune"; |
| 75 case ParamTypeBiquadFilterFrequency: | 76 case ParamTypeBiquadFilterFrequency: |
| 76 return "BiquadFilter.frequency"; | 77 return "BiquadFilter.frequency"; |
| 77 case ParamTypeBiquadFilterQ: | 78 case ParamTypeBiquadFilterQ: |
| 78 case ParamTypeBiquadFilterQLowpass: | 79 case ParamTypeBiquadFilterQLowpass: |
| 79 case ParamTypeBiquadFilterQHighpass: | 80 case ParamTypeBiquadFilterQHighpass: |
| 80 // We don't really need separate names for the Q parameter for lowpass and highpass filters. | 81 // We don't really need separate names for the Q parameter for lowpass and |
| 81 // The difference is only for the histograms. | 82 // highpass filters. The difference is only for the histograms. |
| 82 return "BiquadFilter.Q"; | 83 return "BiquadFilter.Q"; |
| 83 case ParamTypeBiquadFilterGain: | 84 case ParamTypeBiquadFilterGain: |
| 84 return "BiquadFilter.gain"; | 85 return "BiquadFilter.gain"; |
| 85 case ParamTypeBiquadFilterDetune: | 86 case ParamTypeBiquadFilterDetune: |
| 86 return "BiquadFilter.detune"; | 87 return "BiquadFilter.detune"; |
| 87 case ParamTypeDelayDelayTime: | 88 case ParamTypeDelayDelayTime: |
| 88 return "Delay.delayTime"; | 89 return "Delay.delayTime"; |
| 89 case ParamTypeDynamicsCompressorThreshold: | 90 case ParamTypeDynamicsCompressorThreshold: |
| 90 return "DynamicsCompressor.threshold"; | 91 return "DynamicsCompressor.threshold"; |
| 91 case ParamTypeDynamicsCompressorKnee: | 92 case ParamTypeDynamicsCompressorKnee: |
| (...skipping 72 matching lines...) |
| 164 void AudioParamHandler::setValue(float value) { | 165 void AudioParamHandler::setValue(float value) { |
| 165 setIntrinsicValue(value); | 166 setIntrinsicValue(value); |
| 166 updateHistograms(value); | 167 updateHistograms(value); |
| 167 } | 168 } |
| 168 | 169 |
| 169 float AudioParamHandler::smoothedValue() { | 170 float AudioParamHandler::smoothedValue() { |
| 170 return m_timeline.smoothedValue(); | 171 return m_timeline.smoothedValue(); |
| 171 } | 172 } |
| 172 | 173 |
| 173 bool AudioParamHandler::smooth() { | 174 bool AudioParamHandler::smooth() { |
| 174 // If values have been explicitly scheduled on the timeline, then use the exact value. | 175 // If values have been explicitly scheduled on the timeline, then use the |
| 175 // Smoothing effectively is performed by the timeline. | 176 // exact value. Smoothing effectively is performed by the timeline. |
| 176 bool useTimelineValue = false; | 177 bool useTimelineValue = false; |
| 177 float value = | 178 float value = |
| 178 m_timeline.valueForContextTime(destinationHandler(), intrinsicValue(), | 179 m_timeline.valueForContextTime(destinationHandler(), intrinsicValue(), |
| 179 useTimelineValue, minValue(), maxValue()); | 180 useTimelineValue, minValue(), maxValue()); |
| 180 | 181 |
| 181 float smoothedValue = m_timeline.smoothedValue(); | 182 float smoothedValue = m_timeline.smoothedValue(); |
| 182 if (smoothedValue == value) { | 183 if (smoothedValue == value) { |
| 183 // Smoothed value has already approached and snapped to value. | 184 // Smoothed value has already approached and snapped to value. |
| 184 setIntrinsicValue(value); | 185 setIntrinsicValue(value); |
| 185 return true; | 186 return true; |
| (...skipping 36 matching lines...) |
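Editor's note: the smooth() comments above describe the de-zippering step: when no timeline automation is active, the handler moves the smoothed value toward the target instead of jumping to it, and reports true once it has snapped. A minimal sketch of that idea, assuming an illustrative smoothing coefficient and snap threshold (the real constants live in AudioParamTimeline and are not part of this diff):

    #include <cmath>

    // Illustrative only: one first-order de-zippering step toward |target|.
    // kSmoothingConstant and kSnapThreshold are assumed values, not Blink's.
    float smoothOneStep(float smoothed, float target) {
      const float kSmoothingConstant = 0.05f;
      const float kSnapThreshold = 1e-5f;
      smoothed += (target - smoothed) * kSmoothingConstant;
      if (std::fabs(target - smoothed) < kSnapThreshold)
        smoothed = target;  // Close enough: snap, so smooth() can return true.
      return smoothed;
    }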
| 222 | 223 |
| 223 void AudioParamHandler::calculateFinalValues(float* values, | 224 void AudioParamHandler::calculateFinalValues(float* values, |
| 224 unsigned numberOfValues, | 225 unsigned numberOfValues, |
| 225 bool sampleAccurate) { | 226 bool sampleAccurate) { |
| 226 bool isGood = | 227 bool isGood = |
| 227 deferredTaskHandler().isAudioThread() && values && numberOfValues; | 228 deferredTaskHandler().isAudioThread() && values && numberOfValues; |
| 228 DCHECK(isGood); | 229 DCHECK(isGood); |
| 229 if (!isGood) | 230 if (!isGood) |
| 230 return; | 231 return; |
| 231 | 232 |
| 232 // The calculated result will be the "intrinsic" value summed with all audio-rate connections. | 233 // The calculated result will be the "intrinsic" value summed with all |
| | 234 // audio-rate connections. |
| 233 | 235 |
| 234 if (sampleAccurate) { | 236 if (sampleAccurate) { |
| 235 // Calculate sample-accurate (a-rate) intrinsic values. | 237 // Calculate sample-accurate (a-rate) intrinsic values. |
| 236 calculateTimelineValues(values, numberOfValues); | 238 calculateTimelineValues(values, numberOfValues); |
| 237 } else { | 239 } else { |
| 238 // Calculate control-rate (k-rate) intrinsic value. | 240 // Calculate control-rate (k-rate) intrinsic value. |
| 239 bool hasValue; | 241 bool hasValue; |
| 240 float value = intrinsicValue(); | 242 float value = intrinsicValue(); |
| 241 float timelineValue = m_timeline.valueForContextTime( | 243 float timelineValue = m_timeline.valueForContextTime( |
| 242 destinationHandler(), value, hasValue, minValue(), maxValue()); | 244 destinationHandler(), value, hasValue, minValue(), maxValue()); |
| 243 | 245 |
| 244 if (hasValue) | 246 if (hasValue) |
| 245 value = timelineValue; | 247 value = timelineValue; |
| 246 | 248 |
| 247 values[0] = value; | 249 values[0] = value; |
| 248 setIntrinsicValue(value); | 250 setIntrinsicValue(value); |
| 249 } | 251 } |
| 250 | 252 |
| 251 // Now sum all of the audio-rate connections together (unity-gain summing junction). | 253 // Now sum all of the audio-rate connections together (unity-gain summing |
| 252 // Note that connections would normally be mono, but we mix down to mono if necessary. | 254 // junction). Note that connections would normally be mono, but we mix down |
| | 255 // to mono if necessary. |
| 253 RefPtr<AudioBus> summingBus = AudioBus::create(1, numberOfValues, false); | 256 RefPtr<AudioBus> summingBus = AudioBus::create(1, numberOfValues, false); |
| 254 summingBus->setChannelMemory(0, values, numberOfValues); | 257 summingBus->setChannelMemory(0, values, numberOfValues); |
| 255 | 258 |
| 256 for (unsigned i = 0; i < numberOfRenderingConnections(); ++i) { | 259 for (unsigned i = 0; i < numberOfRenderingConnections(); ++i) { |
| 257 AudioNodeOutput* output = renderingOutput(i); | 260 AudioNodeOutput* output = renderingOutput(i); |
| 258 DCHECK(output); | 261 DCHECK(output); |
| 259 | 262 |
| 260 // Render audio from this output. | 263 // Render audio from this output. |
| 261 AudioBus* connectionBus = | 264 AudioBus* connectionBus = |
| 262 output->pull(0, AudioHandler::ProcessingSizeInFrames); | 265 output->pull(0, AudioHandler::ProcessingSizeInFrames); |
| (...skipping 33 matching lines...) |
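Editor's note: the loop above pulls each audio-rate connection and mixes it into the summing bus that wraps |values|. "Unity gain" means each connection is simply added, with no per-connection scaling. A sketch of that accumulation with plain arrays, assuming the connections have already been mixed down to mono (Blink does this through AudioBus, which is not shown here):

    #include <vector>

    // Illustrative only: unity-gain summing of mono connections into |values|.
    void sumConnections(float* values,
                        unsigned numberOfValues,
                        const std::vector<const float*>& connections) {
      for (const float* connection : connections) {
        for (unsigned i = 0; i < numberOfValues; ++i)
          values[i] += connection[i];  // Unity gain: plain addition.
      }
    }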
| 296 ASSERT(deferredTaskHandler().isGraphOwner()); | 299 ASSERT(deferredTaskHandler().isGraphOwner()); |
| 297 | 300 |
| 298 if (m_outputs.contains(&output)) { | 301 if (m_outputs.contains(&output)) { |
| 299 m_outputs.remove(&output); | 302 m_outputs.remove(&output); |
| 300 changedOutputs(); | 303 changedOutputs(); |
| 301 output.removeParam(*this); | 304 output.removeParam(*this); |
| 302 } | 305 } |
| 303 } | 306 } |
| 304 | 307 |
| 305 int AudioParamHandler::computeQHistogramValue(float newValue) const { | 308 int AudioParamHandler::computeQHistogramValue(float newValue) const { |
| 306 // For the Q value, assume a useful range is [0, 25] and that 0.25 dB resolution is good enough. | 309 // For the Q value, assume a useful range is [0, 25] and that 0.25 dB |
| 307 // Then, we can map the floating point Q value (in dB) to an integer just by multipling by 4 and | 310 // resolution is good enough. Then, we can map the floating point Q value (in |
| 308 // rounding. | 311 // dB) to an integer just by multipling by 4 and rounding. |
| 309 newValue = clampTo(newValue, 0.0, 25.0); | 312 newValue = clampTo(newValue, 0.0, 25.0); |
| 310 return static_cast<int>(4 * newValue + 0.5); | 313 return static_cast<int>(4 * newValue + 0.5); |
| 311 } | 314 } |
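Editor's note: the mapping above clamps Q to [0, 25] and buckets it at 0.25 resolution, so the histogram buckets run 0 through 100. A standalone restatement with two worked values (helper name is ours, for illustration only):

    #include <algorithm>
    #include <cassert>

    // Same mapping as computeQHistogramValue: clamp to [0, 25], then
    // multiply by 4 and round to get an integer bucket.
    int qToHistogramBucket(float q) {
      q = std::min(std::max(q, 0.0f), 25.0f);
      return static_cast<int>(4 * q + 0.5f);
    }

    int main() {
      assert(qToHistogramBucket(0.7f) == 3);     // 4 * 0.7 + 0.5 = 3.3 -> 3
      assert(qToHistogramBucket(30.0f) == 100);  // clamped to 25 -> bucket 100
      return 0;
    }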
| 312 | 315 |
| 313 void AudioParamHandler::updateHistograms(float newValue) { | 316 void AudioParamHandler::updateHistograms(float newValue) { |
| 314 switch (m_paramType) { | 317 switch (m_paramType) { |
| 315 case ParamTypeBiquadFilterQLowpass: { | 318 case ParamTypeBiquadFilterQLowpass: { |
| 316 // The histogram for the Q value for a lowpass biquad filter. | 319 // The histogram for the Q value for a lowpass biquad filter. |
| 317 DEFINE_STATIC_LOCAL(SparseHistogram, lowpassQHistogram, | 320 DEFINE_STATIC_LOCAL(SparseHistogram, lowpassQHistogram, |
| 318 ("WebAudio.BiquadFilter.Q.Lowpass")); | 321 ("WebAudio.BiquadFilter.Q.Lowpass")); |
| (...skipping 23 matching lines...) |
| 342 : m_handler(AudioParamHandler::create(context, | 345 : m_handler(AudioParamHandler::create(context, |
| 343 paramType, | 346 paramType, |
| 344 defaultValue, | 347 defaultValue, |
| 345 minValue, | 348 minValue, |
| 346 maxValue)), | 349 maxValue)), |
| 347 m_context(context) {} | 350 m_context(context) {} |
| 348 | 351 |
| 349 AudioParam* AudioParam::create(BaseAudioContext& context, | 352 AudioParam* AudioParam::create(BaseAudioContext& context, |
| 350 AudioParamType paramType, | 353 AudioParamType paramType, |
| 351 double defaultValue) { | 354 double defaultValue) { |
| 352 // Default nominal range is most negative float to most positive. This basically means any | 355 // Default nominal range is most negative float to most positive. This |
| 353 // value is valid, except that floating-point infinities are excluded. | 356 // basically means any value is valid, except that floating-point infinities |
| | 357 // are excluded. |
| 354 float limit = std::numeric_limits<float>::max(); | 358 float limit = std::numeric_limits<float>::max(); |
| 355 return new AudioParam(context, paramType, defaultValue, -limit, limit); | 359 return new AudioParam(context, paramType, defaultValue, -limit, limit); |
| 356 } | 360 } |
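Editor's note: with the default nominal range of [-FLT_MAX, FLT_MAX] used above, every finite float is in range, while infinities (and NaN) fall outside. A small sketch of that check; the helper name is hypothetical, not Blink's warnIfOutsideRange:

    #include <limits>

    // Illustrative only: true for any finite float, false for +/-infinity
    // and NaN, mirroring the default nominal range set in AudioParam::create.
    bool isInDefaultNominalRange(float value) {
      const float limit = std::numeric_limits<float>::max();
      return value >= -limit && value <= limit;
    }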
| 357 | 361 |
| 358 AudioParam* AudioParam::create(BaseAudioContext& context, | 362 AudioParam* AudioParam::create(BaseAudioContext& context, |
| 359 AudioParamType paramType, | 363 AudioParamType paramType, |
| 360 double defaultValue, | 364 double defaultValue, |
| 361 float minValue, | 365 float minValue, |
| 362 float maxValue) { | 366 float maxValue) { |
| 363 DCHECK_LE(minValue, maxValue); | 367 DCHECK_LE(minValue, maxValue); |
| (...skipping 51 matching lines...) |
| 415 | 419 |
| 416 AudioParam* AudioParam::linearRampToValueAtTime( | 420 AudioParam* AudioParam::linearRampToValueAtTime( |
| 417 float value, | 421 float value, |
| 418 double time, | 422 double time, |
| 419 ExceptionState& exceptionState) { | 423 ExceptionState& exceptionState) { |
| 420 warnIfOutsideRange("linearRampToValueAtTime value", value); | 424 warnIfOutsideRange("linearRampToValueAtTime value", value); |
| 421 handler().timeline().linearRampToValueAtTime( | 425 handler().timeline().linearRampToValueAtTime( |
| 422 value, time, handler().intrinsicValue(), context()->currentTime(), | 426 value, time, handler().intrinsicValue(), context()->currentTime(), |
| 423 exceptionState); | 427 exceptionState); |
| 424 | 428 |
| 425 // This is probably the best we can do for the histogram. We don't want to run the automation | 429 // This is probably the best we can do for the histogram. We don't want to |
| 426 // to get all the values and use them to update the histogram. | 430 // run the automation to get all the values and use them to update the |
| | 431 // histogram. |
| 427 handler().updateHistograms(value); | 432 handler().updateHistograms(value); |
| 428 | 433 |
| 429 return this; | 434 return this; |
| 430 } | 435 } |
| 431 | 436 |
| 432 AudioParam* AudioParam::exponentialRampToValueAtTime( | 437 AudioParam* AudioParam::exponentialRampToValueAtTime( |
| 433 float value, | 438 float value, |
| 434 double time, | 439 double time, |
| 435 ExceptionState& exceptionState) { | 440 ExceptionState& exceptionState) { |
| 436 warnIfOutsideRange("exponentialRampToValue value", value); | 441 warnIfOutsideRange("exponentialRampToValue value", value); |
| 437 handler().timeline().exponentialRampToValueAtTime( | 442 handler().timeline().exponentialRampToValueAtTime( |
| 438 value, time, handler().intrinsicValue(), context()->currentTime(), | 443 value, time, handler().intrinsicValue(), context()->currentTime(), |
| 439 exceptionState); | 444 exceptionState); |
| 440 | 445 |
| 441 // This is probably the best we can do for the histogram. We don't want to run the automation | 446 // This is probably the best we can do for the histogram. We don't want to |
| 442 // to get all the values and use them to update the histogram. | 447 // run the automation to get all the values and use them to update the |
| | 448 // histogram. |
| 443 handler().updateHistograms(value); | 449 handler().updateHistograms(value); |
| 444 | 450 |
| 445 return this; | 451 return this; |
| 446 } | 452 } |
| 447 | 453 |
| 448 AudioParam* AudioParam::setTargetAtTime(float target, | 454 AudioParam* AudioParam::setTargetAtTime(float target, |
| 449 double time, | 455 double time, |
| 450 double timeConstant, | 456 double timeConstant, |
| 451 ExceptionState& exceptionState) { | 457 ExceptionState& exceptionState) { |
| 452 warnIfOutsideRange("setTargetAtTime value", target); | 458 warnIfOutsideRange("setTargetAtTime value", target); |
| 453 handler().timeline().setTargetAtTime(target, time, timeConstant, | 459 handler().timeline().setTargetAtTime(target, time, timeConstant, |
| 454 exceptionState); | 460 exceptionState); |
| 455 | 461 |
| 456 // Don't update the histogram here. It's not clear in normal usage if the parameter value will | 462 // Don't update the histogram here. It's not clear in normal usage if the |
| 457 // actually reach |target|. | 463 // parameter value will actually reach |target|. |
| 458 return this; | 464 return this; |
| 459 } | 465 } |
| 460 | 466 |
| 461 AudioParam* AudioParam::setValueCurveAtTime(DOMFloat32Array* curve, | 467 AudioParam* AudioParam::setValueCurveAtTime(DOMFloat32Array* curve, |
| 462 double time, | 468 double time, |
| 463 double duration, | 469 double duration, |
| 464 ExceptionState& exceptionState) { | 470 ExceptionState& exceptionState) { |
| 465 float* curveData = curve->data(); | 471 float* curveData = curve->data(); |
| 466 float min = minValue(); | 472 float min = minValue(); |
| 467 float max = maxValue(); | 473 float max = maxValue(); |
| (...skipping 20 matching lines...) |
| 488 | 494 |
| 489 if (value < min || value > max) { | 495 if (value < min || value > max) { |
| 490 warnIfOutsideRange("setValueCurveAtTime value", value); | 496 warnIfOutsideRange("setValueCurveAtTime value", value); |
| 491 break; | 497 break; |
| 492 } | 498 } |
| 493 } | 499 } |
| 494 | 500 |
| 495 handler().timeline().setValueCurveAtTime(curve, time, duration, | 501 handler().timeline().setValueCurveAtTime(curve, time, duration, |
| 496 exceptionState); | 502 exceptionState); |
| 497 | 503 |
| 498 // We could update the histogram with every value in the curve, due to interpolation, we'll | 504 // We could update the histogram with every value in the curve, due to |
| 499 // probably be missing many values. So we don't update the histogram. setValueCurveAtTime is | 505 // interpolation, we'll probably be missing many values. So we don't update |
| 500 // probably a fairly rare method anyway. | 506 // the histogram. setValueCurveAtTime is probably a fairly rare method |
| | 507 // anyway. |
| 501 return this; | 508 return this; |
| 502 } | 509 } |
| 503 | 510 |
| 504 AudioParam* AudioParam::cancelScheduledValues(double startTime, | 511 AudioParam* AudioParam::cancelScheduledValues(double startTime, |
| 505 ExceptionState& exceptionState) { | 512 ExceptionState& exceptionState) { |
| 506 handler().timeline().cancelScheduledValues(startTime, exceptionState); | 513 handler().timeline().cancelScheduledValues(startTime, exceptionState); |
| 507 return this; | 514 return this; |
| 508 } | 515 } |
| 509 | 516 |
| 510 } // namespace blink | 517 } // namespace blink |