| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2011, Google Inc. All rights reserved. | 2 * Copyright (C) 2011, Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 10 matching lines...) |
| 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 23 */ | 23 */ |
| 24 | 24 |
| 25 #include "config.h" | 25 #include "config.h" |
| 26 #if ENABLE(WEB_AUDIO) | 26 #if ENABLE(WEB_AUDIO) |
| 27 #include "modules/webaudio/OfflineAudioDestinationNode.h" | 27 #include "modules/webaudio/OfflineAudioDestinationNode.h" |
| 28 | 28 |
| 29 #include "core/dom/CrossThreadTask.h" | 29 #include "core/dom/CrossThreadTask.h" |
| 30 #include "modules/webaudio/AudioContext.h" | 30 #include "modules/webaudio/AudioContext.h" |
| 31 #include "modules/webaudio/OfflineAudioContext.h" |
| 31 #include "platform/Task.h" | 32 #include "platform/Task.h" |
| 32 #include "platform/audio/AudioBus.h" | 33 #include "platform/audio/AudioBus.h" |
| 33 #include "platform/audio/HRTFDatabaseLoader.h" | 34 #include "platform/audio/HRTFDatabaseLoader.h" |
| 34 #include "public/platform/Platform.h" | 35 #include "public/platform/Platform.h" |
| 35 #include <algorithm> | 36 #include <algorithm> |
| 36 | 37 |
| 37 namespace blink { | 38 namespace blink { |
| 38 | 39 |
| 39 const size_t renderQuantumSize = 128; | 40 const size_t renderQuantumSize = 128; |
| 40 | 41 |
| 41 OfflineAudioDestinationHandler::OfflineAudioDestinationHandler(AudioNode& node, AudioBuffer* renderTarget) | 42 OfflineAudioDestinationHandler::OfflineAudioDestinationHandler(AudioNode& node, AudioBuffer* renderTarget) |
| 42 : AudioDestinationHandler(node, renderTarget->sampleRate()) | 43 : AudioDestinationHandler(node, renderTarget->sampleRate()) |
| 43 , m_renderTarget(renderTarget) | 44 , m_renderTarget(renderTarget) |
| 44 m_startedRendering(false) | 45 m_renderThread(adoptPtr(Platform::current()->createThread("offline audio renderer"))) |
| 46 , m_framesProcessed(0) |
| 47 , m_framesToProcess(0) |
| 48 , m_isRenderingStarted(false) |
| 45 { | 49 { |
| 46 m_renderBus = AudioBus::create(renderTarget->numberOfChannels(), renderQuantumSize); | 50 m_renderBus = AudioBus::create(renderTarget->numberOfChannels(), renderQuantumSize); |
| 47 } | 51 } |
| 48 | 52 |
| 49 PassRefPtr<OfflineAudioDestinationHandler> OfflineAudioDestinationHandler::create(AudioNode& node, AudioBuffer* renderTarget) | 53 PassRefPtr<OfflineAudioDestinationHandler> OfflineAudioDestinationHandler::create(AudioNode& node, AudioBuffer* renderTarget) |
| 50 { | 54 { |
| 51 return adoptRef(new OfflineAudioDestinationHandler(node, renderTarget)); | 55 return adoptRef(new OfflineAudioDestinationHandler(node, renderTarget)); |
| 52 } | 56 } |
| 53 | 57 |
| 54 OfflineAudioDestinationHandler::~OfflineAudioDestinationHandler() | 58 OfflineAudioDestinationHandler::~OfflineAudioDestinationHandler() |
| (...skipping 23 matching lines...) |
| 78 if (m_renderThread) | 82 if (m_renderThread) |
| 79 m_renderThread.clear(); | 83 m_renderThread.clear(); |
| 80 | 84 |
| 81 AudioHandler::uninitialize(); | 85 AudioHandler::uninitialize(); |
| 82 } | 86 } |
| 83 | 87 |
| 84 void OfflineAudioDestinationHandler::startRendering() | 88 void OfflineAudioDestinationHandler::startRendering() |
| 85 { | 89 { |
| 86 ASSERT(isMainThread()); | 90 ASSERT(isMainThread()); |
| 87 ASSERT(m_renderTarget); | 91 ASSERT(m_renderTarget); |
| 92 ASSERT(m_renderThread); |
| 93 |
| 88 if (!m_renderTarget) | 94 if (!m_renderTarget) |
| 89 return; | 95 return; |
| 90 | 96 |
| 91 if (!m_startedRendering) { | 97 // Rendering was not started. Starting now. |
| 92 m_startedRendering = true; | 98 if (!m_isRenderingStarted) { |
| 93 m_renderThread = adoptPtr(Platform::current()->createThread("Offline Audio Renderer")); | 99 m_renderThread->postTask(FROM_HERE, new Task(threadSafeBind(&OfflineAudioDestinationHandler::startOfflineRendering, this))); |
| 94 m_renderThread->postTask(FROM_HERE, new Task(threadSafeBind(&OfflineAudioDestinationHandler::offlineRender, PassRefPtr<OfflineAudioDestinationHandler>(this)))); | 100 m_isRenderingStarted = true; |
| 101 return; |
| 95 } | 102 } |
| 103 |
| 104 // Rendering is already started, which implicitly means we resume the |
| 105 // rendering by calling |runOfflineRendering| on m_renderThread. |
| 106 m_renderThread->postTask(FROM_HERE, threadSafeBind(&OfflineAudioDestinationHandler::runOfflineRendering, this)); |
| 96 } | 107 } |
| 97 | 108 |
| 98 void OfflineAudioDestinationHandler::stopRendering() | 109 void OfflineAudioDestinationHandler::stopRendering() |
| 99 { | 110 { |
| 100 ASSERT_NOT_REACHED(); | 111 ASSERT_NOT_REACHED(); |
| 101 } | 112 } |
| 102 | 113 |
| 103 void OfflineAudioDestinationHandler::offlineRender() | 114 size_t OfflineAudioDestinationHandler::quantizeTimeToRenderQuantum(double when) const |
| 104 { | 115 { |
| 105 offlineRenderInternal(); | 116 ASSERT(when >= 0); |
| 106 context()->handlePostRenderTasks(); | 117 |
| 118 size_t whenAsFrame = when * sampleRate(); |
| 119 return whenAsFrame - (whenAsFrame % renderQuantumSize); |
| 107 } | 120 } |
| 108 | 121 |
| 109 void OfflineAudioDestinationHandler::offlineRenderInternal() | 122 WebThread* OfflineAudioDestinationHandler::offlineRenderThread() |
| 123 { |
| 124 ASSERT(context()->isOfflineContext()); |
| 125 ASSERT(m_renderThread); |
| 126 |
| 127 return m_renderThread.get(); |
| 128 } |
| 129 |
| 130 void OfflineAudioDestinationHandler::startOfflineRendering() |
| 110 { | 131 { |
| 111 ASSERT(!isMainThread()); | 132 ASSERT(!isMainThread()); |
| 133 ASSERT(context()->isOfflineContext()); |
| 134 |
| 112 ASSERT(m_renderBus); | 135 ASSERT(m_renderBus); |
| 113 if (!m_renderBus) | 136 if (!m_renderBus) |
| 114 return; | 137 return; |
| 115 | 138 |
| 116 bool isAudioContextInitialized = context()->isInitialized(); | 139 bool isAudioContextInitialized = context()->isInitialized(); |
| 117 ASSERT(isAudioContextInitialized); | 140 ASSERT(isAudioContextInitialized); |
| 118 if (!isAudioContextInitialized) | 141 if (!isAudioContextInitialized) |
| 119 return; | 142 return; |
| 120 | 143 |
| 121 bool channelsMatch = m_renderBus->numberOfChannels() == m_renderTarget->numberOfChannels(); | 144 bool channelsMatch = m_renderBus->numberOfChannels() == m_renderTarget->numberOfChannels(); |
| 122 ASSERT(channelsMatch); | 145 ASSERT(channelsMatch); |
| 123 if (!channelsMatch) | 146 if (!channelsMatch) |
| 124 return; | 147 return; |
| 125 | 148 |
| 126 bool isRenderBusAllocated = m_renderBus->length() >= renderQuantumSize; | 149 bool isRenderBusAllocated = m_renderBus->length() >= renderQuantumSize; |
| 127 ASSERT(isRenderBusAllocated); | 150 ASSERT(isRenderBusAllocated); |
| 128 if (!isRenderBusAllocated) | 151 if (!isRenderBusAllocated) |
| 129 return; | 152 return; |
| 130 | 153 |
| 131 // Break up the render target into smaller "render quantize" sized pieces. | 154 m_framesToProcess = m_renderTarget->length(); |
| 132 // Render until we're finished. | 155 |
| 133 size_t framesToProcess = m_renderTarget->length(); | 156 // Start rendering. |
| 157 runOfflineRendering(); |
| 158 } |
| 159 |
| 160 void OfflineAudioDestinationHandler::runOfflineRendering() |
| 161 { |
| 162 ASSERT(!isMainThread()); |
| 163 ASSERT(context()->isOfflineContext()); |
| 164 |
| 134 unsigned numberOfChannels = m_renderTarget->numberOfChannels(); | 165 unsigned numberOfChannels = m_renderTarget->numberOfChannels(); |
| 135 | 166 |
| 136 unsigned n = 0; | 167 // If there is more to process and there is no suspension at the moment, |
| 137 while (framesToProcess > 0) { | 168 // continue rendering quanta. If there is a suspend scheduled at the |
| 138 // Render one render quantum. | 169 // current sample frame, stop the render loop and put the context into the |
| 170 // suspended state. Then calling OfflineAudioContext.resume() will pick up |
| 171 // the render loop again from where it was suspended. |
| 172 while (m_framesToProcess > 0 && !context()->shouldSuspendNow()) { |
| 173 |
| 174 // Render one render quantum. Note that this includes pre/post render |
| 175 // tasks from the online audio context. |
| 139 render(0, m_renderBus.get(), renderQuantumSize); | 176 render(0, m_renderBus.get(), renderQuantumSize); |
| 140 | 177 |
| 141 size_t framesAvailableToCopy = std::min(framesToProcess, renderQuantumSize); | 178 size_t framesAvailableToCopy = std::min(m_framesToProcess, renderQuantumSize); |
| 142 | 179 |
| 143 for (unsigned channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) { | 180 for (unsigned channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) { |
| 144 const float* source = m_renderBus->channel(channelIndex)->data(); | 181 const float* source = m_renderBus->channel(channelIndex)->data(); |
| 145 float* destination = m_renderTarget->getChannelData(channelIndex)->data(); | 182 float* destination = m_renderTarget->getChannelData(channelIndex)->data(); |
| 146 memcpy(destination + n, source, sizeof(float) * framesAvailableToCopy); | 183 memcpy(destination + m_framesProcessed, source, sizeof(float) * framesAvailableToCopy); |
| 147 } | 184 } |
| 148 | 185 |
| 149 n += framesAvailableToCopy; | 186 m_framesProcessed += framesAvailableToCopy; |
| 150 framesToProcess -= framesAvailableToCopy; | 187 m_framesToProcess -= framesAvailableToCopy; |
| 151 } | 188 } |
| 152 | 189 |
| 190 // Finish up the rendering loop if there is no more to process. |
| 191 if (m_framesToProcess <= 0) { |
| 192 ASSERT(m_framesToProcess == 0); |
| 193 finishOfflineRendering(); |
| 194 return; |
| 195 } |
| 196 |
| 197 // Otherwise resolve pending suspend promises. |
| 198 context()->resolvePendingSuspendPromises(); |
| 199 } |
| 200 |
| 201 void OfflineAudioDestinationHandler::finishOfflineRendering() |
| 202 { |
| 203 ASSERT(!isMainThread()); |
| 204 ASSERT(context()->isOfflineContext()); |
| 205 |
| 153 // Our work is done. Let the AudioContext know. | 206 // Our work is done. Let the AudioContext know. |
| 154 if (context()->executionContext()) | 207 if (context()->executionContext()) |
| 155 context()->executionContext()->postTask(FROM_HERE, createCrossThreadTask(&OfflineAudioDestinationHandler::notifyComplete, PassRefPtr<OfflineAudioDestinationHandler>(this))); | 208 context()->executionContext()->postTask(FROM_HERE, createCrossThreadTask(&OfflineAudioDestinationHandler::notifyComplete, this)); |
| 156 } | 209 } |
| 157 | 210 |
| 158 void OfflineAudioDestinationHandler::notifyComplete() | 211 void OfflineAudioDestinationHandler::notifyComplete() |
| 159 { | 212 { |
| 160 // The AudioContext might be gone. | 213 // The OfflineAudioContext might be gone. |
| 161 if (context()) | 214 if (context()) |
| 162 context()->fireCompletionEvent(); | 215 context()->fireCompletionEvent(); |
| 163 } | 216 } |
| 164 | 217 |
| 165 // ---------------------------------------------------------------- | 218 // ---------------------------------------------------------------- |
| 166 | 219 |
| 167 OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContext& context, AudioBuffer* renderTarget) | 220 OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContext& context, AudioBuffer* renderTarget) |
| 168 : AudioDestinationNode(context) | 221 : AudioDestinationNode(context) |
| 169 { | 222 { |
| 170 setHandler(OfflineAudioDestinationHandler::create(*this, renderTarget)); | 223 setHandler(OfflineAudioDestinationHandler::create(*this, renderTarget)); |
| 171 } | 224 } |
| 172 | 225 |
| 173 OfflineAudioDestinationNode* OfflineAudioDestinationNode::create(AudioContext* context, AudioBuffer* renderTarget) | 226 OfflineAudioDestinationNode* OfflineAudioDestinationNode::create(AudioContext* context, AudioBuffer* renderTarget) |
| 174 { | 227 { |
| 175 return new OfflineAudioDestinationNode(*context, renderTarget); | 228 return new OfflineAudioDestinationNode(*context, renderTarget); |
| 176 } | 229 } |
| 177 | 230 |
| 178 } // namespace blink | 231 } // namespace blink |
| 179 | 232 |
| 180 #endif // ENABLE(WEB_AUDIO) | 233 #endif // ENABLE(WEB_AUDIO) |
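The change this patch makes to the render path is easiest to see in isolation: the destination renders the target buffer in 128-frame quanta on a dedicated thread, stops the loop at a quantized frame boundary when a suspend is scheduled, and picks the loop back up on resume. Below is a minimal, self-contained sketch of that loop and of the frame quantization. The types and parameters (Bus, renderQuantum, shouldSuspendNow, plain std::vector channel data) are stand-ins invented for the example; this is not the Blink implementation.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <vector>

// Illustrative stand-in for the real AudioBus: one 128-frame block per channel.
struct Bus {
    std::vector<std::vector<float>> channels;
};

const size_t kRenderQuantumSize = 128;

// Round a time (in seconds) down to the nearest render-quantum boundary,
// expressed in sample frames -- the same idea as quantizeTimeToRenderQuantum().
size_t quantizeToRenderQuantum(double when, double sampleRate)
{
    size_t whenAsFrame = static_cast<size_t>(when * sampleRate);
    return whenAsFrame - (whenAsFrame % kRenderQuantumSize);
}

// Render the destination buffer one quantum at a time, copying each quantum
// into the target at the running frame offset, and stop early when a suspend
// is scheduled. Returns the number of frames written by this call.
template <typename RenderQuantumFn, typename ShouldSuspendFn>
size_t runOfflineRender(std::vector<std::vector<float>>& target,
                        size_t& framesProcessed,
                        size_t& framesToProcess,
                        RenderQuantumFn renderQuantum,
                        ShouldSuspendFn shouldSuspendNow)
{
    size_t written = 0;
    Bus bus;
    bus.channels.assign(target.size(), std::vector<float>(kRenderQuantumSize, 0.0f));

    while (framesToProcess > 0 && !shouldSuspendNow(framesProcessed)) {
        renderQuantum(bus); // produce the next 128 frames

        size_t framesAvailableToCopy = std::min(framesToProcess, kRenderQuantumSize);
        for (size_t channel = 0; channel < target.size(); ++channel) {
            std::memcpy(target[channel].data() + framesProcessed,
                        bus.channels[channel].data(),
                        sizeof(float) * framesAvailableToCopy);
        }
        framesProcessed += framesAvailableToCopy;
        framesToProcess -= framesAvailableToCopy;
        written += framesAvailableToCopy;
    }
    return written;
}

int main()
{
    const double sampleRate = 44100.0;
    const size_t length = 3 * kRenderQuantumSize; // a tiny 384-frame target
    std::vector<std::vector<float>> target(2, std::vector<float>(length, 0.0f));

    size_t framesProcessed = 0;
    size_t framesToProcess = length;
    // A suspend requested mid-quantum snaps to the 128-frame boundary.
    size_t suspendFrame = quantizeToRenderQuantum(128.5 / sampleRate, sampleRate);

    auto renderQuantum = [](Bus& bus) {
        for (auto& channel : bus.channels)
            std::fill(channel.begin(), channel.end(), 1.0f);
    };
    auto shouldSuspendNow = [&](size_t currentFrame) { return currentFrame == suspendFrame; };

    // First call renders up to the scheduled suspend; clearing the suspend and
    // calling again plays the role of resume().
    runOfflineRender(target, framesProcessed, framesToProcess, renderQuantum, shouldSuspendNow);
    suspendFrame = static_cast<size_t>(-1);
    runOfflineRender(target, framesProcessed, framesToProcess, renderQuantum, shouldSuspendNow);
    return framesToProcess == 0 ? 0 : 1;
}
```

Splitting the loop this way is what lets startRendering() simply repost runOfflineRendering to the render thread on resume: the bookkeeping lives in m_framesProcessed and m_framesToProcess rather than in loop-local state.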