| OLD | NEW |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/cast/sender/audio_encoder.h" | 5 #include "media/cast/sender/audio_encoder.h" |
| 6 | 6 |
| 7 #include <stdint.h> | 7 #include <stdint.h> |
| 8 | 8 |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 #include <limits> | 10 #include <limits> |
| (...skipping 68 matching lines...) |
| 79 OperationalStatus InitializationResult() const { | 79 OperationalStatus InitializationResult() const { |
| 80 return operational_status_; | 80 return operational_status_; |
| 81 } | 81 } |
| 82 | 82 |
| 83 int samples_per_frame() const { | 83 int samples_per_frame() const { |
| 84 return samples_per_frame_; | 84 return samples_per_frame_; |
| 85 } | 85 } |
| 86 | 86 |
| 87 base::TimeDelta frame_duration() const { return frame_duration_; } | 87 base::TimeDelta frame_duration() const { return frame_duration_; } |
| 88 | 88 |
| 89 void EncodeAudio(scoped_ptr<AudioBus> audio_bus, | 89 void EncodeAudio(std::unique_ptr<AudioBus> audio_bus, |
| 90 const base::TimeTicks& recorded_time) { | 90 const base::TimeTicks& recorded_time) { |
| 91 DCHECK_EQ(operational_status_, STATUS_INITIALIZED); | 91 DCHECK_EQ(operational_status_, STATUS_INITIALIZED); |
| 92 DCHECK(!recorded_time.is_null()); | 92 DCHECK(!recorded_time.is_null()); |
| 93 | 93 |
| 94 // Determine whether |recorded_time| is consistent with the amount of audio | 94 // Determine whether |recorded_time| is consistent with the amount of audio |
| 95 // data having been processed in the past. Resolve the underrun problem by | 95 // data having been processed in the past. Resolve the underrun problem by |
| 96 // dropping data from the internal buffer and skipping ahead the next | 96 // dropping data from the internal buffer and skipping ahead the next |
| 97 // frame's RTP timestamp by the estimated number of frames missed. On the | 97 // frame's RTP timestamp by the estimated number of frames missed. On the |
| 98 // other hand, don't attempt to resolve overruns: A receiver should | 98 // other hand, don't attempt to resolve overruns: A receiver should |
| 99 // gracefully deal with an excess of audio data. | 99 // gracefully deal with an excess of audio data. |
| (...skipping 32 matching lines...) |
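The comment above describes the underrun policy only in prose; the code that applies it falls in the elided lines. As a rough sketch of the arithmetic involved (assumed names and types, not the elided Chromium code), the skip-ahead could be estimated like this:

```cpp
// Hypothetical sketch: estimate how many whole frames were missed during an
// underrun and how far to advance the next frame's RTP timestamp.
#include <chrono>
#include <cstdint>

struct UnderrunSkip {
  int64_t frames_missed;          // whole frames to drop/skip
  int64_t rtp_timestamp_advance;  // RTP ticks (= samples, for audio) to add
};

UnderrunSkip EstimateUnderrunSkip(std::chrono::microseconds gap,
                                  std::chrono::microseconds frame_duration,
                                  int samples_per_frame) {
  // Integer division: only whole missed frames count toward the skip-ahead.
  const int64_t missed = gap / frame_duration;
  return {missed, missed * samples_per_frame};
}
```

Overruns, by contrast, are left for the receiver to absorb, exactly as the comment states.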
| 132 samples_per_frame_ - buffer_fill_end_, audio_bus->frames() - src_pos); | 132 samples_per_frame_ - buffer_fill_end_, audio_bus->frames() - src_pos); |
| 133 DCHECK_EQ(audio_bus->channels(), num_channels_); | 133 DCHECK_EQ(audio_bus->channels(), num_channels_); |
| 134 TransferSamplesIntoBuffer( | 134 TransferSamplesIntoBuffer( |
| 135 audio_bus.get(), src_pos, buffer_fill_end_, num_samples_to_xfer); | 135 audio_bus.get(), src_pos, buffer_fill_end_, num_samples_to_xfer); |
| 136 src_pos += num_samples_to_xfer; | 136 src_pos += num_samples_to_xfer; |
| 137 buffer_fill_end_ += num_samples_to_xfer; | 137 buffer_fill_end_ += num_samples_to_xfer; |
| 138 | 138 |
| 139 if (buffer_fill_end_ < samples_per_frame_) | 139 if (buffer_fill_end_ < samples_per_frame_) |
| 140 break; | 140 break; |
| 141 | 141 |
| 142 scoped_ptr<SenderEncodedFrame> audio_frame( | 142 std::unique_ptr<SenderEncodedFrame> audio_frame(new SenderEncodedFrame()); |
| 143 new SenderEncodedFrame()); | |
| 144 audio_frame->dependency = EncodedFrame::KEY; | 143 audio_frame->dependency = EncodedFrame::KEY; |
| 145 audio_frame->frame_id = frame_id_; | 144 audio_frame->frame_id = frame_id_; |
| 146 audio_frame->referenced_frame_id = frame_id_; | 145 audio_frame->referenced_frame_id = frame_id_; |
| 147 audio_frame->rtp_timestamp = frame_rtp_timestamp_; | 146 audio_frame->rtp_timestamp = frame_rtp_timestamp_; |
| 148 audio_frame->reference_time = frame_capture_time_; | 147 audio_frame->reference_time = frame_capture_time_; |
| 149 | 148 |
| 150 TRACE_EVENT_ASYNC_BEGIN2( | 149 TRACE_EVENT_ASYNC_BEGIN2( |
| 151 "cast.stream", | 150 "cast.stream", |
| 152 "Audio Encode", audio_frame.get(), | 151 "Audio Encode", audio_frame.get(), |
| 153 "frame_id", frame_id_, | 152 "frame_id", frame_id_, |
| (...skipping 160 matching lines...) |
| 314 static bool IsValidFrameDuration(base::TimeDelta duration) { | 313 static bool IsValidFrameDuration(base::TimeDelta duration) { |
| 315 // See https://tools.ietf.org/html/rfc6716#section-2.1.4 | 314 // See https://tools.ietf.org/html/rfc6716#section-2.1.4 |
| 316 return duration == base::TimeDelta::FromMicroseconds(2500) || | 315 return duration == base::TimeDelta::FromMicroseconds(2500) || |
| 317 duration == base::TimeDelta::FromMilliseconds(5) || | 316 duration == base::TimeDelta::FromMilliseconds(5) || |
| 318 duration == base::TimeDelta::FromMilliseconds(10) || | 317 duration == base::TimeDelta::FromMilliseconds(10) || |
| 319 duration == base::TimeDelta::FromMilliseconds(20) || | 318 duration == base::TimeDelta::FromMilliseconds(20) || |
| 320 duration == base::TimeDelta::FromMilliseconds(40) || | 319 duration == base::TimeDelta::FromMilliseconds(40) || |
| 321 duration == base::TimeDelta::FromMilliseconds(60); | 320 duration == base::TimeDelta::FromMilliseconds(60); |
| 322 } | 321 } |
| 323 | 322 |
| 324 const scoped_ptr<uint8_t[]> encoder_memory_; | 323 const std::unique_ptr<uint8_t[]> encoder_memory_; |
| 325 OpusEncoder* const opus_encoder_; | 324 OpusEncoder* const opus_encoder_; |
| 326 const scoped_ptr<float[]> buffer_; | 325 const std::unique_ptr<float[]> buffer_; |
| 327 | 326 |
| 328 // This is the recommended value, according to documentation in | 327 // This is the recommended value, according to documentation in |
| 329 // third_party/opus/src/include/opus.h, so that the Opus encoder does not | 328 // third_party/opus/src/include/opus.h, so that the Opus encoder does not |
| 330 // degrade the audio due to memory constraints. | 329 // degrade the audio due to memory constraints. |
| 331 // | 330 // |
| 332 // Note: Whereas other RTP implementations do not, the cast library is | 331 // Note: Whereas other RTP implementations do not, the cast library is |
| 333 // perfectly capable of transporting larger than MTU-sized audio frames. | 332 // perfectly capable of transporting larger than MTU-sized audio frames. |
| 334 static const int kOpusMaxPayloadSize = 4000; | 333 static const int kOpusMaxPayloadSize = 4000; |
| 335 | 334 |
| 336 DISALLOW_COPY_AND_ASSIGN(OpusImpl); | 335 DISALLOW_COPY_AND_ASSIGN(OpusImpl); |
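For context on `IsValidFrameDuration()` above: the RFC 6716 durations translate to fixed per-channel sample counts once the sample rate is known. A minimal illustration (hypothetical helper, not part of this patch):

```cpp
#include <cstdint>

// Samples per Opus frame for a sample rate (Hz) and frame duration (µs),
// covering the durations accepted by IsValidFrameDuration().
constexpr int SamplesPerOpusFrame(int sample_rate_hz, int64_t duration_us) {
  return static_cast<int>(sample_rate_hz * duration_us / 1000000);
}

static_assert(SamplesPerOpusFrame(48000, 2500) == 120, "2.5 ms @ 48 kHz");
static_assert(SamplesPerOpusFrame(48000, 10000) == 480, "10 ms @ 48 kHz");
static_assert(SamplesPerOpusFrame(48000, 60000) == 2880, "60 ms @ 48 kHz");
```

At 48 kHz, for example, a 10 ms frame is 480 samples per channel.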
| (...skipping 151 matching lines...) |
| 488 return false; | 487 return false; |
| 489 } | 488 } |
| 490 } | 489 } |
| 491 | 490 |
| 492 // This is the only location where the implementation modifies | 491 // This is the only location where the implementation modifies |
| 493 // |max_access_unit_size_|. | 492 // |max_access_unit_size_|. |
| 494 const_cast<uint32_t&>(max_access_unit_size_) = max_access_unit_size; | 493 const_cast<uint32_t&>(max_access_unit_size_) = max_access_unit_size; |
| 495 | 494 |
| 496 // Allocate a buffer to store one access unit. This is the only location | 495 // Allocate a buffer to store one access unit. This is the only location |
| 497 // where the implementation modifies |access_unit_buffer_|. | 496 // where the implementation modifies |access_unit_buffer_|. |
| 498 const_cast<scoped_ptr<uint8_t[]>&>(access_unit_buffer_) | 497 const_cast<std::unique_ptr<uint8_t[]>&>(access_unit_buffer_) |
| 499 .reset(new uint8_t[max_access_unit_size]); | 498 .reset(new uint8_t[max_access_unit_size]); |
| 500 | 499 |
| 501 // Initialize the converter ABL. Note that the buffer size has to be set | 500 // Initialize the converter ABL. Note that the buffer size has to be set |
| 502 // before every encode operation, since the field is modified to indicate | 501 // before every encode operation, since the field is modified to indicate |
| 503 // the size of the output data (on input it indicates the buffer capacity). | 502 // the size of the output data (on input it indicates the buffer capacity). |
| 504 converter_abl_.mNumberBuffers = 1; | 503 converter_abl_.mNumberBuffers = 1; |
| 505 converter_abl_.mBuffers[0].mNumberChannels = num_channels_; | 504 converter_abl_.mBuffers[0].mNumberChannels = num_channels_; |
| 506 converter_abl_.mBuffers[0].mData = access_unit_buffer_.get(); | 505 converter_abl_.mBuffers[0].mData = access_unit_buffer_.get(); |
| 507 | 506 |
| 508 // The "magic cookie" is an encoder state vector required for decoding and | 507 // The "magic cookie" is an encoder state vector required for decoding and |
| 509 // packetization. It is queried now from |converter_| then set on |file_| | 508 // packetization. It is queried now from |converter_| then set on |file_| |
| 510 // after initialization. | 509 // after initialization. |
| 511 UInt32 cookie_size; | 510 UInt32 cookie_size; |
| 512 if (AudioConverterGetPropertyInfo(converter_, | 511 if (AudioConverterGetPropertyInfo(converter_, |
| 513 kAudioConverterCompressionMagicCookie, | 512 kAudioConverterCompressionMagicCookie, |
| 514 &cookie_size, | 513 &cookie_size, |
| 515 nullptr) != noErr) { | 514 nullptr) != noErr) { |
| 516 return false; | 515 return false; |
| 517 } | 516 } |
| 518 scoped_ptr<uint8_t[]> cookie_data(new uint8_t[cookie_size]); | 517 std::unique_ptr<uint8_t[]> cookie_data(new uint8_t[cookie_size]); |
| 519 if (AudioConverterGetProperty(converter_, | 518 if (AudioConverterGetProperty(converter_, |
| 520 kAudioConverterCompressionMagicCookie, | 519 kAudioConverterCompressionMagicCookie, |
| 521 &cookie_size, | 520 &cookie_size, |
| 522 cookie_data.get()) != noErr) { | 521 cookie_data.get()) != noErr) { |
| 523 return false; | 522 return false; |
| 524 } | 523 } |
| 525 | 524 |
| 526 if (AudioFileInitializeWithCallbacks(this, | 525 if (AudioFileInitializeWithCallbacks(this, |
| 527 &FileReadCallback, | 526 &FileReadCallback, |
| 528 &FileWriteCallback, | 527 &FileWriteCallback, |
| (...skipping 156 matching lines...) |
| 685 return 0; | 684 return 0; |
| 686 } | 685 } |
| 687 | 686 |
| 688 // The AudioFile setsize callback function. | 687 // The AudioFile setsize callback function. |
| 689 static OSStatus FileSetSizeCallback(void* in_encoder, SInt64 in_size) { | 688 static OSStatus FileSetSizeCallback(void* in_encoder, SInt64 in_size) { |
| 690 return noErr; | 689 return noErr; |
| 691 } | 690 } |
| 692 | 691 |
| 693 // Buffer that holds one AAC access unit worth of samples. The input callback | 692 // Buffer that holds one AAC access unit worth of samples. The input callback |
| 694 // function provides samples from this buffer via |input_bus_| to the encoder. | 693 // function provides samples from this buffer via |input_bus_| to the encoder. |
| 695 const scoped_ptr<AudioBus> input_buffer_; | 694 const std::unique_ptr<AudioBus> input_buffer_; |
| 696 | 695 |
| 697 // Wrapper AudioBus used by the input callback function. Normally it wraps | 696 // Wrapper AudioBus used by the input callback function. Normally it wraps |
| 698 // |input_buffer_|. However, as an optimization when the client submits a | 697 // |input_buffer_|. However, as an optimization when the client submits a |
| 699 // buffer containing exactly one access unit worth of samples, the bus is | 698 // buffer containing exactly one access unit worth of samples, the bus is |
| 700 // redirected to the client buffer temporarily. We know that the base | 699 // redirected to the client buffer temporarily. We know that the base |
| 701 // implementation will call us right after to encode the buffer and thus we | 700 // implementation will call us right after to encode the buffer and thus we |
| 702 // can eliminate the copy into |input_buffer_|. | 701 // can eliminate the copy into |input_buffer_|. |
| 703 const scoped_ptr<AudioBus> input_bus_; | 702 const std::unique_ptr<AudioBus> input_bus_; |
| 704 | 703 |
| 705 // A buffer that holds one AAC access unit. Initialized in |Initialize| once | 704 // A buffer that holds one AAC access unit. Initialized in |Initialize| once |
| 706 // the maximum access unit size is known. | 705 // the maximum access unit size is known. |
| 707 const scoped_ptr<uint8_t[]> access_unit_buffer_; | 706 const std::unique_ptr<uint8_t[]> access_unit_buffer_; |
| 708 | 707 |
| 709 // The maximum size of an access unit that the encoder can emit. | 708 // The maximum size of an access unit that the encoder can emit. |
| 710 const uint32_t max_access_unit_size_; | 709 const uint32_t max_access_unit_size_; |
| 711 | 710 |
| 712 // A temporary pointer to the current output buffer. Only non-null when | 711 // A temporary pointer to the current output buffer. Only non-null when |
| 713 // writing an access unit. Accessed by the AudioFile write callback function. | 712 // writing an access unit. Accessed by the AudioFile write callback function. |
| 714 std::string* output_buffer_; | 713 std::string* output_buffer_; |
| 715 | 714 |
| 716 // The |AudioConverter| is responsible for AAC encoding. This is a Core Audio | 715 // The |AudioConverter| is responsible for AAC encoding. This is a Core Audio |
| 717 // object, not to be confused with |media::AudioConverter|. | 716 // object, not to be confused with |media::AudioConverter|. |
| (...skipping 57 matching lines...) |
| 775 out->resize(num_channels_ * samples_per_frame_ * sizeof(int16_t)); | 774 out->resize(num_channels_ * samples_per_frame_ * sizeof(int16_t)); |
| 776 const int16_t* src = buffer_.get(); | 775 const int16_t* src = buffer_.get(); |
| 777 const int16_t* const src_end = src + num_channels_ * samples_per_frame_; | 776 const int16_t* const src_end = src + num_channels_ * samples_per_frame_; |
| 778 uint16_t* dest = reinterpret_cast<uint16_t*>(&out->at(0)); | 777 uint16_t* dest = reinterpret_cast<uint16_t*>(&out->at(0)); |
| 779 for (; src < src_end; ++src, ++dest) | 778 for (; src < src_end; ++src, ++dest) |
| 780 *dest = base::HostToNet16(*src); | 779 *dest = base::HostToNet16(*src); |
| 781 return true; | 780 return true; |
| 782 } | 781 } |
| 783 | 782 |
| 784 private: | 783 private: |
| 785 const scoped_ptr<int16_t[]> buffer_; | 784 const std::unique_ptr<int16_t[]> buffer_; |
| 786 | 785 |
| 787 DISALLOW_COPY_AND_ASSIGN(Pcm16Impl); | 786 DISALLOW_COPY_AND_ASSIGN(Pcm16Impl); |
| 788 }; | 787 }; |
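The conversion loop above emits samples in network (big-endian) byte order. On a little-endian host, `base::HostToNet16()` reduces to a 16-bit byte swap; a standalone equivalent, for illustration only (not Chromium's implementation):

```cpp
#include <cstdint>

// Swap the two bytes of a 16-bit value, e.g. 0x1234 -> 0x3412.
constexpr uint16_t ByteSwap16(uint16_t v) {
  return static_cast<uint16_t>((v << 8) | (v >> 8));
}

static_assert(ByteSwap16(0x1234) == 0x3412, "big-endian wire order");
```

On a big-endian host the conversion is the identity.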
| 789 | 788 |
| 790 AudioEncoder::AudioEncoder( | 789 AudioEncoder::AudioEncoder( |
| 791 const scoped_refptr<CastEnvironment>& cast_environment, | 790 const scoped_refptr<CastEnvironment>& cast_environment, |
| 792 int num_channels, | 791 int num_channels, |
| 793 int sampling_rate, | 792 int sampling_rate, |
| 794 int bitrate, | 793 int bitrate, |
| 795 Codec codec, | 794 Codec codec, |
| (...skipping 54 matching lines...) |
| 850 | 849 |
| 851 base::TimeDelta AudioEncoder::GetFrameDuration() const { | 850 base::TimeDelta AudioEncoder::GetFrameDuration() const { |
| 852 DCHECK(insert_thread_checker_.CalledOnValidThread()); | 851 DCHECK(insert_thread_checker_.CalledOnValidThread()); |
| 853 if (InitializationResult() != STATUS_INITIALIZED) { | 852 if (InitializationResult() != STATUS_INITIALIZED) { |
| 854 NOTREACHED(); | 853 NOTREACHED(); |
| 855 return base::TimeDelta(); | 854 return base::TimeDelta(); |
| 856 } | 855 } |
| 857 return impl_->frame_duration(); | 856 return impl_->frame_duration(); |
| 858 } | 857 } |
| 859 | 858 |
| 860 void AudioEncoder::InsertAudio(scoped_ptr<AudioBus> audio_bus, | 859 void AudioEncoder::InsertAudio(std::unique_ptr<AudioBus> audio_bus, |
| 861 const base::TimeTicks& recorded_time) { | 860 const base::TimeTicks& recorded_time) { |
| 862 DCHECK(insert_thread_checker_.CalledOnValidThread()); | 861 DCHECK(insert_thread_checker_.CalledOnValidThread()); |
| 863 DCHECK(audio_bus.get()); | 862 DCHECK(audio_bus.get()); |
| 864 if (InitializationResult() != STATUS_INITIALIZED) { | 863 if (InitializationResult() != STATUS_INITIALIZED) { |
| 865 NOTREACHED(); | 864 NOTREACHED(); |
| 866 return; | 865 return; |
| 867 } | 866 } |
| 868 cast_environment_->PostTask(CastEnvironment::AUDIO, | 867 cast_environment_->PostTask(CastEnvironment::AUDIO, |
| 869 FROM_HERE, | 868 FROM_HERE, |
| 870 base::Bind(&AudioEncoder::ImplBase::EncodeAudio, | 869 base::Bind(&AudioEncoder::ImplBase::EncodeAudio, |
| 871 impl_, | 870 impl_, |
| 872 base::Passed(&audio_bus), | 871 base::Passed(&audio_bus), |
| 873 recorded_time)); | 872 recorded_time)); |
| 874 } | 873 } |
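`InsertAudio()` transfers ownership of the `AudioBus` to the audio thread by moving the `std::unique_ptr` into the posted task via `base::Passed()`. A standard-C++ analogue of that hand-off (a sketch with stand-in types, not Chromium code):

```cpp
#include <memory>
#include <utility>

struct AudioChunk {  // stand-in for media::AudioBus
  int frames = 0;
};

void EncodeOnAudioThread(std::unique_ptr<AudioChunk> chunk) {
  // ... consume |chunk| ...
}

int main() {
  auto chunk = std::make_unique<AudioChunk>();
  // Move the unique_ptr into the task object, as base::Passed(&audio_bus)
  // arranges for the task posted to CastEnvironment::AUDIO.
  auto task = [moved = std::move(chunk)]() mutable {
    EncodeOnAudioThread(std::move(moved));
  };
  task();  // Chromium's task runner would invoke this on the audio thread.
}
```

This is the same ownership hand-off the patch preserves while swapping `scoped_ptr` for `std::unique_ptr`.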
| 875 | 874 |
| 876 } // namespace cast | 875 } // namespace cast |
| 877 } // namespace media | 876 } // namespace media |