| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 98 matching lines...) |
| 109 return; | 109 return; |
| 110 } | 110 } |
| 111 | 111 |
| 112 size_t quantumFrameOffset; | 112 size_t quantumFrameOffset; |
| 113 size_t bufferFramesToProcess; | 113 size_t bufferFramesToProcess; |
| 114 | 114 |
| 115 updateSchedulingInfo(framesToProcess, | 115 updateSchedulingInfo(framesToProcess, |
| 116 outputBus, | 116 outputBus, |
| 117 quantumFrameOffset, | 117 quantumFrameOffset, |
| 118 bufferFramesToProcess); | 118 bufferFramesToProcess); |
| 119 | 119 |
| 120 if (!bufferFramesToProcess) { | 120 if (!bufferFramesToProcess) { |
| 121 outputBus->zero(); | 121 outputBus->zero(); |
| 122 return; | 122 return; |
| 123 } | 123 } |
| 124 | 124 |
| 125 for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i) | 125 for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i) |
| 126 m_destinationChannels[i] = outputBus->channel(i)->mutableData(); | 126 m_destinationChannels[i] = outputBus->channel(i)->mutableData(); |
| 127 | 127 |
| 128 // Render by reading directly from the buffer. | 128 // Render by reading directly from the buffer. |
| 129 if (!renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess)) { | 129 if (!renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess)) { |
| (...skipping 13 matching lines...) |
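For context on the scheduling block above (lines 115-123): updateSchedulingInfo() fills in quantumFrameOffset (where within the render quantum the sound starts) and bufferFramesToProcess (how many frames to render); when the latter is zero, the bus is simply zeroed. A minimal sketch of how such a split can be computed, using illustrative names (currentFrame, startFrame) that are not taken from this patch:

    #include <cstddef>

    // Illustrative only: split a render quantum between leading silence and
    // rendered frames for a source scheduled to begin at startFrame.
    void splitQuantum(size_t currentFrame, size_t startFrame, size_t framesToProcess,
                      size_t& quantumFrameOffset, size_t& framesToRender)
    {
        size_t quantumEnd = currentFrame + framesToProcess;
        if (startFrame >= quantumEnd) {
            quantumFrameOffset = framesToProcess; // source has not started yet
            framesToRender = 0;
            return;
        }
        quantumFrameOffset = startFrame > currentFrame ? startFrame - currentFrame : 0;
        framesToRender = framesToProcess - quantumFrameOffset;
    }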
| 143 | 143 |
| 144 // Returns true if we're finished. | 144 // Returns true if we're finished. |
| 145 bool AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping(AudioBus*, unsigned index, size_t framesToProcess) | 145 bool AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping(AudioBus*, unsigned index, size_t framesToProcess) |
| 146 { | 146 { |
| 147 if (!loop()) { | 147 if (!loop()) { |
| 148 // If we're not looping, then stop playing when we get to the end. | 148 // If we're not looping, then stop playing when we get to the end. |
| 149 | 149 |
| 150 if (framesToProcess > 0) { | 150 if (framesToProcess > 0) { |
| 151 // We're not looping and we've reached the end of the sample data, but we still need to provide more output, | 151 // We're not looping and we've reached the end of the sample data, but we still need to provide more output, |
| 152 // so generate silence for the remaining. | 152 // so generate silence for the remaining. |
| 153 for (unsigned i = 0; i < numberOfChannels(); ++i) | 153 for (unsigned i = 0; i < numberOfChannels(); ++i) |
| 154 memset(m_destinationChannels[i] + index, 0, sizeof(float) * framesToProcess); | 154 memset(m_destinationChannels[i] + index, 0, sizeof(float) * framesToProcess); |
| 155 } | 155 } |
| 156 | 156 |
| 157 finish(); | 157 finish(); |
| 158 return true; | 158 return true; |
| 159 } | 159 } |
| 160 return false; | 160 return false; |
| 161 } | 161 } |
| 162 | 162 |
| 163 bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames) | 163 bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames) |
| (...skipping 22 matching lines...) |
| 186 if (!isLengthGood) | 186 if (!isLengthGood) |
| 187 return false; | 187 return false; |
| 188 | 188 |
| 189 bool isOffsetGood = destinationFrameOffset <= destinationLength && destinationFrameOffset + numberOfFrames <= destinationLength; | 189 bool isOffsetGood = destinationFrameOffset <= destinationLength && destinationFrameOffset + numberOfFrames <= destinationLength; |
| 190 ASSERT(isOffsetGood); | 190 ASSERT(isOffsetGood); |
| 191 if (!isOffsetGood) | 191 if (!isOffsetGood) |
| 192 return false; | 192 return false; |
| 193 | 193 |
| 194 // Potentially zero out initial frames leading up to the offset. | 194 // Potentially zero out initial frames leading up to the offset. |
| 195 if (destinationFrameOffset) { | 195 if (destinationFrameOffset) { |
| 196 for (unsigned i = 0; i < numberOfChannels; ++i) | 196 for (unsigned i = 0; i < numberOfChannels; ++i) |
| 197 memset(m_destinationChannels[i], 0, sizeof(float) * destinationFrameOffset); | 197 memset(m_destinationChannels[i], 0, sizeof(float) * destinationFrameOffset); |
| 198 } | 198 } |
| 199 | 199 |
| 200 // Offset the pointers to the correct offset frame. | 200 // Offset the pointers to the correct offset frame. |
| 201 unsigned writeIndex = destinationFrameOffset; | 201 unsigned writeIndex = destinationFrameOffset; |
| 202 | 202 |
| 203 size_t bufferLength = buffer()->length(); | 203 size_t bufferLength = buffer()->length(); |
| 204 double bufferSampleRate = buffer()->sampleRate(); | 204 double bufferSampleRate = buffer()->sampleRate(); |
| 205 | 205 |
| 206 // Avoid converting from time to sample-frames twice by computing | 206 // Avoid converting from time to sample-frames twice by computing |
| 207 // the grain end time first before computing the sample frame. | 207 // the grain end time first before computing the sample frame. |
| 208 unsigned endFrame = m_isGrain ? AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate) : bufferLength; | 208 unsigned endFrame = m_isGrain ? AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate) : bufferLength; |
| 209 | 209 |
| 210 // This is a HACK to allow for HRTF tail-time - avoids glitch at end. | 210 // This is a HACK to allow for HRTF tail-time - avoids glitch at end. |
| 211 // FIXME: implement tailTime for each AudioNode for a more general solution to this problem. | 211 // FIXME: implement tailTime for each AudioNode for a more general solution to this problem. |
| 212 // https://bugs.webkit.org/show_bug.cgi?id=77224 | 212 // https://bugs.webkit.org/show_bug.cgi?id=77224 |
| 213 if (m_isGrain) | 213 if (m_isGrain) |
| 214 endFrame += 512; | 214 endFrame += 512; |
| 215 | 215 |
| 216 // Do some sanity checking. | 216 // Do some sanity checking. |
| 217 if (endFrame > bufferLength) | 217 if (endFrame > bufferLength) |
| 218 endFrame = bufferLength; | 218 endFrame = bufferLength; |
| 219 if (m_virtualReadIndex >= endFrame) | 219 if (m_virtualReadIndex >= endFrame) |
| (...skipping 35 matching lines...) |
| 255 && virtualDeltaFrames == floor(virtualDeltaFrames) | 255 && virtualDeltaFrames == floor(virtualDeltaFrames) |
| 256 && virtualEndFrame == floor(virtualEndFrame)) { | 256 && virtualEndFrame == floor(virtualEndFrame)) { |
| 257 unsigned readIndex = static_cast<unsigned>(virtualReadIndex); | 257 unsigned readIndex = static_cast<unsigned>(virtualReadIndex); |
| 258 unsigned deltaFrames = static_cast<unsigned>(virtualDeltaFrames); | 258 unsigned deltaFrames = static_cast<unsigned>(virtualDeltaFrames); |
| 259 endFrame = static_cast<unsigned>(virtualEndFrame); | 259 endFrame = static_cast<unsigned>(virtualEndFrame); |
| 260 while (framesToProcess > 0) { | 260 while (framesToProcess > 0) { |
| 261 int framesToEnd = endFrame - readIndex; | 261 int framesToEnd = endFrame - readIndex; |
| 262 int framesThisTime = min(framesToProcess, framesToEnd); | 262 int framesThisTime = min(framesToProcess, framesToEnd); |
| 263 framesThisTime = max(0, framesThisTime); | 263 framesThisTime = max(0, framesThisTime); |
| 264 | 264 |
| 265 for (unsigned i = 0; i < numberOfChannels; ++i) | 265 for (unsigned i = 0; i < numberOfChannels; ++i) |
| 266 memcpy(destinationChannels[i] + writeIndex, sourceChannels[i] + readIndex, sizeof(float) * framesThisTime); | 266 memcpy(destinationChannels[i] + writeIndex, sourceChannels[i] + readIndex, sizeof(float) * framesThisTime); |
| 267 | 267 |
| 268 writeIndex += framesThisTime; | 268 writeIndex += framesThisTime; |
| 269 readIndex += framesThisTime; | 269 readIndex += framesThisTime; |
| 270 framesToProcess -= framesThisTime; | 270 framesToProcess -= framesThisTime; |
| 271 | 271 |
| 272 // Wrap-around. | 272 // Wrap-around. |
| 273 if (readIndex >= endFrame) { | 273 if (readIndex >= endFrame) { |
| 274 readIndex -= deltaFrames; | 274 readIndex -= deltaFrames; |
| 275 if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess)) | 275 if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess)) |
| (...skipping 55 matching lines...) |
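The integer fast path above (lines 257-275) copies in runs up to endFrame and then wraps the read index back by deltaFrames. A self-contained, single-channel sketch of the same looping pattern, assuming deltaFrames > 0 and endFrame <= source.size() so the loop always makes progress and stays in bounds (names here are illustrative, and the non-looping finish path is omitted):

    #include <algorithm>
    #include <cstring>
    #include <vector>

    // Looped, integer-rate read: copy framesToProcess frames into destination,
    // wrapping the read index back by deltaFrames whenever it reaches endFrame.
    void loopedCopy(const std::vector<float>& source, float* destination,
                    unsigned readIndex, unsigned endFrame, unsigned deltaFrames,
                    int framesToProcess)
    {
        unsigned writeIndex = 0;
        while (framesToProcess > 0) {
            int framesToEnd = static_cast<int>(endFrame) - static_cast<int>(readIndex);
            int framesThisTime = std::max(0, std::min(framesToProcess, framesToEnd));

            std::memcpy(destination + writeIndex, source.data() + readIndex,
                        sizeof(float) * framesThisTime);

            writeIndex += framesThisTime;
            readIndex += framesThisTime;
            framesToProcess -= framesThisTime;

            if (readIndex >= endFrame)
                readIndex -= deltaFrames; // wrap back to the loop start
        }
    }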
| 331 | 331 |
| 332 void AudioBufferSourceNode::reset() | 332 void AudioBufferSourceNode::reset() |
| 333 { | 333 { |
| 334 m_virtualReadIndex = 0; | 334 m_virtualReadIndex = 0; |
| 335 m_lastGain = gain()->value(); | 335 m_lastGain = gain()->value(); |
| 336 } | 336 } |
| 337 | 337 |
| 338 bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer) | 338 bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer) |
| 339 { | 339 { |
| 340 ASSERT(isMainThread()); | 340 ASSERT(isMainThread()); |
| 341 | 341 |
| 342 // The context must be locked since changing the buffer can re-configure the number of channels that are output. | 342 // The context must be locked since changing the buffer can re-configure the number of channels that are output. |
| 343 AudioContext::AutoLocker contextLocker(context()); | 343 AudioContext::AutoLocker contextLocker(context()); |
| 344 | 344 |
| 345 // This synchronizes with process(). | 345 // This synchronizes with process(). |
| 346 MutexLocker processLocker(m_processLock); | 346 MutexLocker processLocker(m_processLock); |
| 347 | 347 |
| 348 if (buffer) { | 348 if (buffer) { |
| 349 // Do any necessary re-configuration to the buffer's number of channels. | 349 // Do any necessary re-configuration to the buffer's number of channels. |
| 350 unsigned numberOfChannels = buffer->numberOfChannels(); | 350 unsigned numberOfChannels = buffer->numberOfChannels(); |
| 351 | 351 |
| 352 if (numberOfChannels > AudioContext::maxNumberOfChannels()) | 352 if (numberOfChannels > AudioContext::maxNumberOfChannels()) |
| 353 return false; | 353 return false; |
| 354 | 354 |
| 355 output(0)->setNumberOfChannels(numberOfChannels); | 355 output(0)->setNumberOfChannels(numberOfChannels); |
| 356 | 356 |
| 357 m_sourceChannels = adoptArrayPtr(new const float* [numberOfChannels]); | 357 m_sourceChannels = adoptArrayPtr(new const float* [numberOfChannels]); |
| 358 m_destinationChannels = adoptArrayPtr(new float* [numberOfChannels]); | 358 m_destinationChannels = adoptArrayPtr(new float* [numberOfChannels]); |
| 359 | 359 |
| 360 for (unsigned i = 0; i < numberOfChannels; ++i) | 360 for (unsigned i = 0; i < numberOfChannels; ++i) |
| 361 m_sourceChannels[i] = buffer->getChannelData(i)->data(); | 361 m_sourceChannels[i] = buffer->getChannelData(i)->data(); |
| 362 } | 362 } |
| 363 | 363 |
| 364 m_virtualReadIndex = 0; | 364 m_virtualReadIndex = 0; |
| 365 m_buffer = buffer; | 365 m_buffer = buffer; |
| 366 | 366 |
| 367 return true; | 367 return true; |
| 368 } | 368 } |
| 369 | 369 |
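setBuffer() acquires two locks in a fixed order: the context lock (because the output channel count may change) and then the process lock (so process() on the audio thread never observes a half-rebuilt channel-pointer array). A sketch of that ordering with standard C++ primitives; std::mutex stands in for WebKit's AutoLocker/MutexLocker here and is an illustration only:

    #include <mutex>
    #include <vector>

    struct BufferSource {
        std::mutex contextLock; // guards graph/channel configuration
        std::mutex processLock; // synchronizes with the render thread
        std::vector<const float*> sourceChannels;

        void setChannelPointers(const std::vector<const float*>& channels)
        {
            // Always lock in the same order: configuration first, then render.
            std::lock_guard<std::mutex> contextLocker(contextLock);
            std::lock_guard<std::mutex> processLocker(processLock);
            sourceChannels = channels; // render thread never sees a partial update
        }
    };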
| 370 unsigned AudioBufferSourceNode::numberOfChannels() | 370 unsigned AudioBufferSourceNode::numberOfChannels() |
| 371 { | 371 { |
| 372 return output(0)->numberOfChannels(); | 372 return output(0)->numberOfChannels(); |
| 373 } | 373 } |
| 374 | 374 |
| 375 void AudioBufferSourceNode::startGrain(double when, double grainOffset) | 375 void AudioBufferSourceNode::startGrain(double when, double grainOffset) |
| 376 { | 376 { |
| 377 // A duration of 0 has a special meaning: calculate based on the entire buffer's duration. | 377 // A duration of 0 has a special meaning: calculate based on the entire buffer's duration. |
| 378 startGrain(when, grainOffset, 0); | 378 startGrain(when, grainOffset, 0); |
| 379 } | 379 } |
| 380 | 380 |
| 381 void AudioBufferSourceNode::startGrain(double when, double grainOffset, double grainDuration) | 381 void AudioBufferSourceNode::startGrain(double when, double grainOffset, double grainDuration) |
| 382 { | 382 { |
| 383 ASSERT(isMainThread()); | 383 ASSERT(isMainThread()); |
| 384 | 384 |
| 385 if (m_playbackState != UNSCHEDULED_STATE) | 385 if (m_playbackState != UNSCHEDULED_STATE) |
| 386 return; | 386 return; |
| 387 | 387 |
| 388 if (!buffer()) | 388 if (!buffer()) |
| 389 return; | 389 return; |
| 390 | 390 |
| 391 // Do sanity checking of grain parameters versus buffer size. | 391 // Do sanity checking of grain parameters versus buffer size. |
| 392 double bufferDuration = buffer()->duration(); | 392 double bufferDuration = buffer()->duration(); |
| 393 | 393 |
| 394 grainOffset = max(0.0, grainOffset); | 394 grainOffset = max(0.0, grainOffset); |
| 395 grainOffset = min(bufferDuration, grainOffset); | 395 grainOffset = min(bufferDuration, grainOffset); |
| 396 m_grainOffset = grainOffset; | 396 m_grainOffset = grainOffset; |
| 397 | 397 |
| 398 // Handle default/unspecified duration. | 398 // Handle default/unspecified duration. |
| 399 double maxDuration = bufferDuration - grainOffset; | 399 double maxDuration = bufferDuration - grainOffset; |
| 400 if (!grainDuration) | 400 if (!grainDuration) |
| 401 grainDuration = maxDuration; | 401 grainDuration = maxDuration; |
| 402 | 402 |
| 403 grainDuration = max(0.0, grainDuration); | 403 grainDuration = max(0.0, grainDuration); |
| 404 grainDuration = min(maxDuration, grainDuration); | 404 grainDuration = min(maxDuration, grainDuration); |
| 405 m_grainDuration = grainDuration; | 405 m_grainDuration = grainDuration; |
| 406 | 406 |
| 407 m_isGrain = true; | 407 m_isGrain = true; |
| 408 m_startTime = when; | 408 m_startTime = when; |
| 409 | 409 |
| 410 // We call timeToSampleFrame here since at playbackRate == 1 we don't want to go through linear interpolation | 410 // We call timeToSampleFrame here since at playbackRate == 1 we don't want to go through linear interpolation |
| 411 // at a sub-sample position since it will degrade the quality. | 411 // at a sub-sample position since it will degrade the quality. |
| 412 // When aligned to the sample-frame the playback will be identical to the PCM data stored in the buffer. | 412 // When aligned to the sample-frame the playback will be identical to the PCM data stored in the buffer. |
| 413 // Since playbackRate == 1 is very common, it's worth considering quality. | 413 // Since playbackRate == 1 is very common, it's worth considering quality. |
| 414 m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate()); | 414 m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate()); |
| 415 | 415 |
| 416 m_playbackState = SCHEDULED_STATE; | 416 m_playbackState = SCHEDULED_STATE; |
| 417 } | 417 } |
| 418 | 418 |
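As a worked example of the grain clamping above: for a 2.0 s buffer at 44100 Hz, startGrain(when, 1.5) clamps grainOffset to 1.5 s, defaults grainDuration to the remaining 0.5 s, and sets m_virtualReadIndex to frame 66150. That last number assumes timeToSampleFrame() rounds time * sampleRate to the nearest integer frame, which is an assumption about AudioUtilities rather than something shown in this diff:

    #include <cmath>
    #include <cstddef>

    // Assumed behaviour of AudioUtilities::timeToSampleFrame() for this example.
    size_t timeToSampleFrame(double time, double sampleRate)
    {
        return static_cast<size_t>(std::round(time * sampleRate));
    }
    // timeToSampleFrame(1.5, 44100.0) == 66150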
| 419 void AudioBufferSourceNode::noteGrainOn(double when, double grainOffset, double grainDuration) | 419 void AudioBufferSourceNode::noteGrainOn(double when, double grainOffset, double grainDuration) |
| 420 { | 420 { |
| 421 startGrain(when, grainOffset, grainDuration); | 421 startGrain(when, grainOffset, grainDuration); |
| 422 } | 422 } |
| 423 | 423 |
| 424 double AudioBufferSourceNode::totalPitchRate() | 424 double AudioBufferSourceNode::totalPitchRate() |
| 425 { | 425 { |
| 426 double dopplerRate = 1.0; | 426 double dopplerRate = 1.0; |
| 427 if (m_pannerNode) | 427 if (m_pannerNode) |
| 428 dopplerRate = m_pannerNode->dopplerRate(); | 428 dopplerRate = m_pannerNode->dopplerRate(); |
| 429 | 429 |
| 430 // Incorporate buffer's sample-rate versus AudioContext's sample-rate. | 430 // Incorporate buffer's sample-rate versus AudioContext's sample-rate. |
| 431 // Normally it's not an issue because buffers are loaded at the AudioContext's sample-rate, but we can handle it in any case. | 431 // Normally it's not an issue because buffers are loaded at the AudioContext's sample-rate, but we can handle it in any case. |
| 432 double sampleRateFactor = 1.0; | 432 double sampleRateFactor = 1.0; |
| 433 if (buffer()) | 433 if (buffer()) |
| 434 sampleRateFactor = buffer()->sampleRate() / sampleRate(); | 434 sampleRateFactor = buffer()->sampleRate() / sampleRate(); |
| 435 | 435 |
| 436 double basePitchRate = playbackRate()->value(); | 436 double basePitchRate = playbackRate()->value(); |
| 437 | 437 |
| 438 double totalRate = dopplerRate * sampleRateFactor * basePitchRate; | 438 double totalRate = dopplerRate * sampleRateFactor * basePitchRate; |
| 439 | 439 |
| 440 // Sanity check the total rate. It's very important that the resampler not get any bad rate values. | 440 // Sanity check the total rate. It's very important that the resampler not get any bad rate values. |
| 441 totalRate = max(0.0, totalRate); | 441 totalRate = max(0.0, totalRate); |
| 442 if (!totalRate) | 442 if (!totalRate) |
| 443 totalRate = 1; // zero rate is considered illegal | 443 totalRate = 1; // zero rate is considered illegal |
| 444 totalRate = min(MaxRate, totalRate); | 444 totalRate = min(MaxRate, totalRate); |
| 445 | 445 |
| 446 bool isTotalRateValid = !std::isnan(totalRate) && !std::isinf(totalRate); | 446 bool isTotalRateValid = !std::isnan(totalRate) && !std::isinf(totalRate); |
| 447 ASSERT(isTotalRateValid); | 447 ASSERT(isTotalRateValid); |
| 448 if (!isTotalRateValid) | 448 if (!isTotalRateValid) |
| 449 totalRate = 1.0; | 449 totalRate = 1.0; |
| 450 | 450 |
| 451 return totalRate; | 451 return totalRate; |
| 452 } | 452 } |
| 453 | 453 |
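A concrete example of the rate factors above: a buffer recorded at 22050 Hz played in a 44100 Hz context with playbackRate 1.0 and no doppler gives totalRate = 1.0 * (22050 / 44100) * 1.0 = 0.5, so the resampler steps through the buffer at half speed and the material keeps its original pitch. The clamping at the end of totalPitchRate() can be read as a small pure function (MaxRate's value is defined elsewhere in the file and is not shown in this diff):

    #include <algorithm>
    #include <cmath>

    // Mirrors the checks above: keep the rate positive, finite, and below maxRate.
    double sanitizeRate(double totalRate, double maxRate)
    {
        totalRate = std::max(0.0, totalRate);
        if (!totalRate)
            totalRate = 1; // a zero rate is considered illegal
        totalRate = std::min(maxRate, totalRate);
        if (std::isnan(totalRate) || std::isinf(totalRate))
            totalRate = 1.0;
        return totalRate;
    }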
| 454 bool AudioBufferSourceNode::propagatesSilence() const | 454 bool AudioBufferSourceNode::propagatesSilence() const |
| 455 { | 455 { |
| (...skipping 23 matching lines...) |
| 479 void AudioBufferSourceNode::finish() | 479 void AudioBufferSourceNode::finish() |
| 480 { | 480 { |
| 481 clearPannerNode(); | 481 clearPannerNode(); |
| 482 ASSERT(!m_pannerNode); | 482 ASSERT(!m_pannerNode); |
| 483 AudioScheduledSourceNode::finish(); | 483 AudioScheduledSourceNode::finish(); |
| 484 } | 484 } |
| 485 | 485 |
| 486 } // namespace WebCore | 486 } // namespace WebCore |
| 487 | 487 |
| 488 #endif // ENABLE(WEB_AUDIO) | 488 #endif // ENABLE(WEB_AUDIO) |