| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2011, Google Inc. All rights reserved. | 2 * Copyright (C) 2011, Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 17 matching lines...) Expand all Loading... |
| 28 | 28 |
| 29 #include "modules/webaudio/OfflineAudioDestinationNode.h" | 29 #include "modules/webaudio/OfflineAudioDestinationNode.h" |
| 30 | 30 |
| 31 #include "core/platform/audio/AudioBus.h" | 31 #include "core/platform/audio/AudioBus.h" |
| 32 #include "core/platform/audio/HRTFDatabaseLoader.h" | 32 #include "core/platform/audio/HRTFDatabaseLoader.h" |
| 33 #include "modules/webaudio/AudioContext.h" | 33 #include "modules/webaudio/AudioContext.h" |
| 34 #include <algorithm> | 34 #include <algorithm> |
| 35 #include "wtf/MainThread.h" | 35 #include "wtf/MainThread.h" |
| 36 | 36 |
| 37 using namespace std; | 37 using namespace std; |
| 38 | 38 |
| 39 namespace WebCore { | 39 namespace WebCore { |
| 40 | 40 |
// Number of sample-frames rendered per render() call. 128 is presumably the
// fixed Web Audio render quantum used throughout the graph -- confirm against
// the AudioNode processing code before changing.
const size_t renderQuantumSize = 128;
| 42 | 42 |
// Constructs an offline destination that renders the graph into
// |renderTarget| rather than driving real audio hardware. The node runs at
// the target buffer's sample rate. Rendering does not begin until
// startRendering() is called.
OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContext* context, AudioBuffer* renderTarget)
    : AudioDestinationNode(context, renderTarget->sampleRate())
    , m_renderTarget(renderTarget)
    , m_renderThread(0) // No render thread until startRendering() creates one.
    , m_startedRendering(false)
{
    // Scratch bus holding one render quantum; offlineRender() copies each
    // rendered quantum from here into m_renderTarget.
    m_renderBus = AudioBus::create(renderTarget->numberOfChannels(), renderQuantumSize);
}
| 51 | 51 |
| (...skipping 22 matching lines...) Expand all Loading... |
| 74 | 74 |
| 75 AudioNode::uninitialize(); | 75 AudioNode::uninitialize(); |
| 76 } | 76 } |
| 77 | 77 |
| 78 void OfflineAudioDestinationNode::startRendering() | 78 void OfflineAudioDestinationNode::startRendering() |
| 79 { | 79 { |
| 80 ASSERT(isMainThread()); | 80 ASSERT(isMainThread()); |
| 81 ASSERT(m_renderTarget.get()); | 81 ASSERT(m_renderTarget.get()); |
| 82 if (!m_renderTarget.get()) | 82 if (!m_renderTarget.get()) |
| 83 return; | 83 return; |
| 84 | 84 |
| 85 if (!m_startedRendering) { | 85 if (!m_startedRendering) { |
| 86 m_startedRendering = true; | 86 m_startedRendering = true; |
| 87 ref(); // See corresponding deref() call in notifyCompleteDispatch(). | 87 ref(); // See corresponding deref() call in notifyCompleteDispatch(). |
| 88 m_renderThread = createThread(OfflineAudioDestinationNode::offlineRender
Entry, this, "offline renderer"); | 88 m_renderThread = createThread(OfflineAudioDestinationNode::offlineRender
Entry, this, "offline renderer"); |
| 89 } | 89 } |
| 90 } | 90 } |
| 91 | 91 |
| 92 // Do offline rendering in this thread. | 92 // Do offline rendering in this thread. |
| 93 void OfflineAudioDestinationNode::offlineRenderEntry(void* threadData) | 93 void OfflineAudioDestinationNode::offlineRenderEntry(void* threadData) |
| 94 { | 94 { |
| 95 OfflineAudioDestinationNode* destinationNode = reinterpret_cast<OfflineAudio
DestinationNode*>(threadData); | 95 OfflineAudioDestinationNode* destinationNode = reinterpret_cast<OfflineAudio
DestinationNode*>(threadData); |
| 96 ASSERT(destinationNode); | 96 ASSERT(destinationNode); |
| 97 destinationNode->offlineRender(); | 97 destinationNode->offlineRender(); |
| 98 } | 98 } |
| 99 | 99 |
| 100 void OfflineAudioDestinationNode::offlineRender() | 100 void OfflineAudioDestinationNode::offlineRender() |
| 101 { | 101 { |
| 102 ASSERT(!isMainThread()); | 102 ASSERT(!isMainThread()); |
| 103 ASSERT(m_renderBus.get()); | 103 ASSERT(m_renderBus.get()); |
| 104 if (!m_renderBus.get()) | 104 if (!m_renderBus.get()) |
| 105 return; | 105 return; |
| 106 | 106 |
| 107 bool channelsMatch = m_renderBus->numberOfChannels() == m_renderTarget->numb
erOfChannels(); | 107 bool channelsMatch = m_renderBus->numberOfChannels() == m_renderTarget->numb
erOfChannels(); |
| 108 ASSERT(channelsMatch); | 108 ASSERT(channelsMatch); |
| 109 if (!channelsMatch) | 109 if (!channelsMatch) |
| 110 return; | 110 return; |
| 111 | 111 |
| 112 bool isRenderBusAllocated = m_renderBus->length() >= renderQuantumSize; | 112 bool isRenderBusAllocated = m_renderBus->length() >= renderQuantumSize; |
| 113 ASSERT(isRenderBusAllocated); | 113 ASSERT(isRenderBusAllocated); |
| 114 if (!isRenderBusAllocated) | 114 if (!isRenderBusAllocated) |
| 115 return; | 115 return; |
| 116 | 116 |
| 117 // Synchronize with HRTFDatabaseLoader. | 117 // Synchronize with HRTFDatabaseLoader. |
| 118 // The database must be loaded before we can proceed. | 118 // The database must be loaded before we can proceed. |
| 119 HRTFDatabaseLoader* loader = context()->hrtfDatabaseLoader(); | 119 HRTFDatabaseLoader* loader = context()->hrtfDatabaseLoader(); |
| 120 ASSERT(loader); | 120 ASSERT(loader); |
| 121 if (!loader) | 121 if (!loader) |
| 122 return; | 122 return; |
| 123 | 123 |
| 124 loader->waitForLoaderThreadCompletion(); | 124 loader->waitForLoaderThreadCompletion(); |
| 125 | 125 |
| 126 // Break up the render target into smaller "render quantize" sized pieces. | 126 // Break up the render target into smaller "render quantize" sized pieces. |
| 127 // Render until we're finished. | 127 // Render until we're finished. |
| 128 size_t framesToProcess = m_renderTarget->length(); | 128 size_t framesToProcess = m_renderTarget->length(); |
| 129 unsigned numberOfChannels = m_renderTarget->numberOfChannels(); | 129 unsigned numberOfChannels = m_renderTarget->numberOfChannels(); |
| 130 | 130 |
| 131 unsigned n = 0; | 131 unsigned n = 0; |
| 132 while (framesToProcess > 0) { | 132 while (framesToProcess > 0) { |
| 133 // Render one render quantum. | 133 // Render one render quantum. |
| 134 render(0, m_renderBus.get(), renderQuantumSize); | 134 render(0, m_renderBus.get(), renderQuantumSize); |
| 135 | 135 |
| 136 size_t framesAvailableToCopy = min(framesToProcess, renderQuantumSize); | 136 size_t framesAvailableToCopy = min(framesToProcess, renderQuantumSize); |
| 137 | 137 |
| 138 for (unsigned channelIndex = 0; channelIndex < numberOfChannels; ++chann
elIndex) { | 138 for (unsigned channelIndex = 0; channelIndex < numberOfChannels; ++chann
elIndex) { |
| 139 const float* source = m_renderBus->channel(channelIndex)->data(); | 139 const float* source = m_renderBus->channel(channelIndex)->data(); |
| 140 float* destination = m_renderTarget->getChannelData(channelIndex)->d
ata(); | 140 float* destination = m_renderTarget->getChannelData(channelIndex)->d
ata(); |
| 141 memcpy(destination + n, source, sizeof(float) * framesAvailableToCop
y); | 141 memcpy(destination + n, source, sizeof(float) * framesAvailableToCop
y); |
| 142 } | 142 } |
| 143 | 143 |
| 144 n += framesAvailableToCopy; | 144 n += framesAvailableToCopy; |
| 145 framesToProcess -= framesAvailableToCopy; | 145 framesToProcess -= framesAvailableToCopy; |
| 146 } | 146 } |
| 147 | 147 |
| 148 // Our work is done. Let the AudioContext know. | 148 // Our work is done. Let the AudioContext know. |
| 149 callOnMainThread(notifyCompleteDispatch, this); | 149 callOnMainThread(notifyCompleteDispatch, this); |
| 150 } | 150 } |
| 151 | 151 |
| 152 void OfflineAudioDestinationNode::notifyCompleteDispatch(void* userData) | 152 void OfflineAudioDestinationNode::notifyCompleteDispatch(void* userData) |
| 153 { | 153 { |
| 154 OfflineAudioDestinationNode* destinationNode = static_cast<OfflineAudioDesti
nationNode*>(userData); | 154 OfflineAudioDestinationNode* destinationNode = static_cast<OfflineAudioDesti
nationNode*>(userData); |
| 155 ASSERT(destinationNode); | 155 ASSERT(destinationNode); |
| 156 if (!destinationNode) | 156 if (!destinationNode) |
| 157 return; | 157 return; |
| 158 | 158 |
| 159 destinationNode->notifyComplete(); | 159 destinationNode->notifyComplete(); |
| 160 destinationNode->deref(); | 160 destinationNode->deref(); |
| 161 } | 161 } |
| 162 | 162 |
// Tells the owning AudioContext that offline rendering has finished so it
// can fire the completion event at script.
void OfflineAudioDestinationNode::notifyComplete()
{
    context()->fireCompletionEvent();
}
| 167 | 167 |
| 168 } // namespace WebCore | 168 } // namespace WebCore |
| 169 | 169 |
| 170 #endif // ENABLE(WEB_AUDIO) | 170 #endif // ENABLE(WEB_AUDIO) |
| OLD | NEW |