Index: webkit/media/android/audio_decoder_android.cc |
diff --git a/webkit/media/android/audio_decoder_android.cc b/webkit/media/android/audio_decoder_android.cc |
index a04d36eadb6a23a04ade01d8110e15892182b6a7..11f9c7d59dae9efb87ae96517838f8dcf9d3fd6b 100644 |
--- a/webkit/media/android/audio_decoder_android.cc |
+++ b/webkit/media/android/audio_decoder_android.cc |
@@ -4,14 +4,187 @@ |
#include "webkit/media/audio_decoder.h" |
+#include <fcntl.h> |
+#include <limits.h> |
+#include <string.h> |
+#include <sys/mman.h> |
+#include <unistd.h> |
+#include <vector> |
+ |
+#include "base/callback.h" |
+#include "base/file_descriptor_posix.h" |
#include "base/logging.h" |
+#include "base/shared_memory.h" |
+#include "media/base/audio_bus.h" |
+#include "media/base/limits.h" |
+#include "third_party/WebKit/Source/Platform/chromium/public/WebAudioBus.h" |
namespace webkit_media { |
+class AudioDecoderIO { |
+ public: |
+ AudioDecoderIO(const char* data, size_t data_size); |
+ ~AudioDecoderIO(); |
+ bool ShareEncodedToProcess(base::SharedMemoryHandle* handle); |
+ bool IsValid() const { return is_valid_; } |
+ int GetReadFd() const { return pipefd_[0]; } |
acolwell 2013/04/04 20:53:52
nit: How about just replacing pipefd_ with read_fd
+ int GetWriteFd() const { return pipefd_[1]; } |
+ |
+ private: |
+ // Shared memory that will hold the encoded audio data. This is |
+ // used by MediaCodec for decoding. |
+ base::SharedMemory encoded_shared_memory_; |
+ |
+ // A pipe used to communicate with MediaCodec. MediaCodec (running |
+ // in the browser) writes the decoded PCM data to the write end, |
+ // pipefd_[1]; we read it back from pipefd_[0]. |
+ int pipefd_[2]; |
+ |
+ // True if everything was created correctly. |
+ bool is_valid_; |
+}; |
+ |
+AudioDecoderIO::AudioDecoderIO(const char* data, size_t data_size) |
+ : is_valid_(true) { |
+ pipefd_[0] = -1; |
+ pipefd_[1] = -1; |
+ |
+ // Create the shared memory and copy our data to it so that |
+ // MediaCodec can access it. |
+ encoded_shared_memory_.CreateAndMapAnonymous(data_size); |
+ |
+ if (encoded_shared_memory_.memory()) { |
+ memcpy(encoded_shared_memory_.memory(), data, data_size); |
+ |
+ // Create a pipe for passing the decoded PCM data back to us. |
+ if (pipe(pipefd_)) { |
+ // Pipe was not created |
+ pipefd_[0] = -1; |
+ pipefd_[1] = -1; |
+ is_valid_ = false; |
+ } |
+ } else { |
+ is_valid_ = false; |
+ } |
+} |
+ |
+AudioDecoderIO::~AudioDecoderIO() { |
+ // Close the read end of the pipe. The write end was wrapped with |
+ // auto_close, so it is closed once it has been sent to the browser. |
+ if (pipefd_[0] >= 0) { |
+ DVLOG(0) << "Closing read end of pipe: " << pipefd_[0]; |
+ if (close(pipefd_[0])) |
+ DVLOG(0) << "Failed to close read end!"; |
+ } |
+} |
+ |
+bool AudioDecoderIO::ShareEncodedToProcess(base::SharedMemoryHandle* handle) { |
+ return encoded_shared_memory_.ShareToProcess( |
+ base::Process::Current().handle(), |
+ handle); |
+} |
+ |
+// |
+// To decode audio data, we want to use the Android MediaCodec class. |
+// But this can't run in a sandboxed process so we need to do the |
+// decoding in the browser. To do this, we create a shared memory |
+// buffer that holds the audio data. We send a message to the browser |
+// to start the decoder using this buffer and one end of a pipe. The |
+// MediaCodec class will decode the data from the shared memory and |
+// write the pcm samples back to us over a pipe. |
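+// |
+// The data that comes back over the pipe is a small header followed |
+// by the samples; as a sketch of the wire format (it must stay in |
+// sync with WebAudioMediaCodecBridge on the browser side): |
+// |
+//   long header[4];     // { channels, sample rate, frames, is_vorbis } |
+//   int16_t samples[];  // interleaved 16-bit PCM until the pipe closes |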
bool DecodeAudioFileData(WebKit::WebAudioBus* destination_bus, const char* data, |
- size_t data_size, double sample_rate) { |
- NOTIMPLEMENTED(); |
- return false; |
+ size_t data_size, double sample_rate, |
+ const WebAudioMediaCodecRunner& runner) { |
+ AudioDecoderIO audio_decoder(data, data_size); |
+ |
+ if (!audio_decoder.IsValid()) |
+ return false; |
+ |
+ base::SharedMemoryHandle encoded_data_handle; |
+ if (!audio_decoder.ShareEncodedToProcess(&encoded_data_handle)) |
+ return false; |
+ base::FileDescriptor fd(audio_decoder.GetWriteFd(), true); |
+ |
+ DVLOG(0) << "DecodeAudioFileData: Starting MediaCodec"; |
+ |
+ // Start MediaCodec processing in the browser. It reads the encoded |
+ // audio from our shared memory (via encoded_data_handle) and writes |
+ // the decoded 16-bit PCM samples to the write end of our pipe. |
+ runner.Run(encoded_data_handle, fd); |
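+ // For testing without a browser process, a hypothetical stand-in for |
+ // the runner (assuming it is a base::Callback taking the handle and |
+ // file descriptor, as the call above implies) could look like: |
+ // |
+ //   void FakeMediaCodecRun(base::SharedMemoryHandle encoded_data, |
+ //                          base::FileDescriptor pcm_output) { |
+ //     long header[4] = {1, 44100, 0, 0};  // mono, 44.1 kHz, no frames |
+ //     write(pcm_output.fd, header, sizeof(header)); |
+ //     close(pcm_output.fd);  // EOF ends the read loop below |
+ //   } |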
+ |
+ // First, read the header: the number of channels, the sample rate, |
+ // the number of frames, and a flag indicating whether the file is an |
+ // ogg/vorbis file. This layout must be kept in sync with |
+ // WebAudioMediaCodecBridge! |
+ // |
+ // TODO(rtoy): If we know the number of samples, we can create the |
+ // destination bus directly and do the conversion directly to the |
+ // bus instead of buffering up everything before saving the data to |
+ // the bus. |
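+ // A rough sketch of that streaming approach (assuming the frame |
+ // count in the header can be trusted): |
+ // |
+ //   destination_bus->initialize(info[0], info[2], info[1]); |
+ //   // ...then scale each chunk read from the pipe directly into |
+ //   // destination_bus->channelData(channel)[frame]. |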
+ |
+ int input_fd = audio_decoder.GetReadFd(); |
+ long info[4]; |
+ |
+ DVLOG(0) << "Reading audio file info from fd " << input_fd; |
+ ssize_t nread = read(input_fd, info, sizeof(info)); |
+ DVLOG(0) << "read: " << nread << " bytes:\n" |
+ << " 0: number of channels = " << info[0] << "\n" |
+ << " 1: sample rate = " << info[1] << "\n" |
+ << " 2: number of frames = " << info[2] << "\n" |
+ << " 3: is vorbis = " << info[3]; |
+ |
+ if (nread != sizeof(info)) |
+ return false; |
+ |
+ int number_of_channels = info[0]; |
+ unsigned long expected_number_of_samples = info[2] * number_of_channels; |
+ double file_sample_rate = static_cast<double>(info[1]); |
+ bool is_vorbis = static_cast<bool>(info[3]); |
+ |
+ // Sanity-check the header values. |
+ if (number_of_channels <= 0 || |
+ number_of_channels > media::limits::kMaxChannels || |
+ file_sample_rate < media::limits::kMinSampleRate || |
+ file_sample_rate > media::limits::kMaxSampleRate) { |
+ return false; |
+ } |
+ |
+ short pipe_data[PIPE_BUF / sizeof(short)]; |
+ std::vector<short> decoded_samples; |
+ |
+ // Keep reading from the pipe until it's closed. |
+ while ((nread = read(input_fd, pipe_data, sizeof(pipe_data))) > 0) { |
+ int nsamples = nread / sizeof(short); |
+ decoded_samples.reserve(decoded_samples.size() + nsamples); |
+ for (int k = 0; k < nsamples; ++k) { |
+ decoded_samples.push_back(pipe_data[k]); |
+ } |
+ } |
+ |
+ DVLOG(0) << "Total samples read = " << decoded_samples.size(); |
+ |
+ if (!is_vorbis && |
+ expected_number_of_samples && |
+ decoded_samples.size() != expected_number_of_samples) { |
+ VLOG(0) << "Expected " << expected_number_of_samples |
+ << " but received " << decoded_samples.size(); |
+ } |
+ |
+ // Convert the samples and save them in the audio bus. |
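+ // Each 16-bit sample is divided by 32768 to map it into the |
+ // nominal [-1, 1) floating-point range used by Web Audio. |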
+ int number_of_samples = decoded_samples.size(); |
+ int number_of_frames = number_of_samples / number_of_channels; |
+ |
+ destination_bus->initialize(number_of_channels, |
+ number_of_frames, |
+ file_sample_rate); |
+ |
+ int decoded_frames = 0; |
+ // Only convert whole frames so a trailing partial frame cannot |
+ // index past the end of decoded_samples or the bus. |
+ for (int m = 0; decoded_frames < number_of_frames; m += number_of_channels) { |
+ for (int k = 0; k < number_of_channels; ++k) { |
+ destination_bus->channelData(k)[decoded_frames] = |
+ decoded_samples[m + k] / 32768.0; |
+ } |
+ ++decoded_frames; |
+ } |
+ |
+ return true; |
} |
} // namespace webkit_media |