Chromium Code Reviews

Side by Side Diff: media/filters/chunk_demuxer.cc

Issue 191513002: Extract coded frame processing from SourceState into LegacyFrameProcessor (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Address PS4 nits | Created 6 years, 9 months ago
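
The diff below moves SourceState's per-append coded frame handling behind a frame processor object. For orientation, the following is a minimal sketch of that contract, reconstructed only from the call sites visible in this patch (SetSequenceMode, AddTrack with kAudioTrackId/kVideoTrackId, Reset, and ProcessFrames). The real interface lives in media/filters/frame_processor_base.h and media/filters/legacy_frame_processor.h; the stand-in types, track id values, and time representation here are assumptions, not the actual Chromium declarations.

// Hypothetical sketch of the FrameProcessorBase contract, inferred only from
// how SourceState calls it in this CL. Not the real media/ header.
#include <cstdint>
#include <deque>
#include <map>

class ChunkDemuxerStream;   // Stand-in: processed frames end up appended here.
struct StreamParserBuffer;  // Stand-in for media::StreamParserBuffer.

typedef std::deque<StreamParserBuffer*> BufferQueue;    // Parsed frames.
typedef std::map<int, BufferQueue> TextBufferQueueMap;  // Keyed by text track.

class FrameProcessorBase {
 public:
  typedef int TrackId;
  static const TrackId kAudioTrackId = -1;  // Sentinel values are assumed.
  static const TrackId kVideoTrackId = -2;

  virtual ~FrameProcessorBase() {}

  // Selects "sequence" (true) or "segments" (false) append mode.
  virtual void SetSequenceMode(bool sequence_mode) = 0;

  // Associates a track with the demuxer stream that receives its frames.
  virtual bool AddTrack(TrackId id, ChunkDemuxerStream* stream) = 0;

  // Drops partial-append state, e.g. from SourceState::Abort().
  virtual void Reset() = 0;

  // Applies the timestamp offset and append-window filtering to the parsed
  // buffers, then appends the surviving frames to the registered streams.
  // Returns false if processing fails.
  virtual bool ProcessFrames(const BufferQueue& audio_buffers,
                             const BufferQueue& video_buffers,
                             const TextBufferQueueMap& text_map,
                             int64_t append_window_start_us,
                             int64_t append_window_end_us,
                             bool* new_media_segment,
                             int64_t* timestamp_offset_us) = 0;
};

The point of the split is that SourceState keeps parser wiring and segment bookkeeping, while all per-frame MSE processing (including the future spec-compliant "sequence" mode, per the TODOs referencing crbug.com/249422 and crbug.com/333437) can be swapped behind this interface.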
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/filters/chunk_demuxer.h" 5 #include "media/filters/chunk_demuxer.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <deque>
9 #include <limits> 8 #include <limits>
10 #include <list> 9 #include <list>
11 10
12 #include "base/bind.h" 11 #include "base/bind.h"
13 #include "base/callback_helpers.h" 12 #include "base/callback_helpers.h"
14 #include "base/location.h" 13 #include "base/location.h"
15 #include "base/message_loop/message_loop_proxy.h" 14 #include "base/message_loop/message_loop_proxy.h"
16 #include "base/stl_util.h" 15 #include "base/stl_util.h"
17 #include "media/base/audio_decoder_config.h" 16 #include "media/base/audio_decoder_config.h"
18 #include "media/base/bind_to_current_loop.h" 17 #include "media/base/bind_to_current_loop.h"
19 #include "media/base/stream_parser_buffer.h" 18 #include "media/base/stream_parser_buffer.h"
20 #include "media/base/video_decoder_config.h" 19 #include "media/base/video_decoder_config.h"
20 #include "media/filters/legacy_frame_processor.h"
21 #include "media/filters/stream_parser_factory.h" 21 #include "media/filters/stream_parser_factory.h"
22 22
23 using base::TimeDelta; 23 using base::TimeDelta;
24 24
25 namespace media { 25 namespace media {
26 26
27 // List of time ranges for each SourceBuffer. 27 // List of time ranges for each SourceBuffer.
28 typedef std::list<Ranges<TimeDelta> > RangesList; 28 typedef std::list<Ranges<TimeDelta> > RangesList;
29 static Ranges<TimeDelta> ComputeIntersection(const RangesList& activeRanges, 29 static Ranges<TimeDelta> ComputeIntersection(const RangesList& activeRanges,
30 bool ended) { 30 bool ended) {
(...skipping 48 matching lines...)
79 return intersection_ranges; 79 return intersection_ranges;
80 } 80 }
81 81
82 // Contains state belonging to a source id. 82 // Contains state belonging to a source id.
83 class SourceState { 83 class SourceState {
84 public: 84 public:
85 // Callback signature used to create ChunkDemuxerStreams. 85 // Callback signature used to create ChunkDemuxerStreams.
86 typedef base::Callback<ChunkDemuxerStream*( 86 typedef base::Callback<ChunkDemuxerStream*(
87 DemuxerStream::Type)> CreateDemuxerStreamCB; 87 DemuxerStream::Type)> CreateDemuxerStreamCB;
88 88
89 // Callback signature used to notify ChunkDemuxer of timestamps
90 // that may cause the duration to be updated.
91 typedef base::Callback<void(
92 TimeDelta, ChunkDemuxerStream*)> IncreaseDurationCB;
93
94 typedef base::Callback<void( 89 typedef base::Callback<void(
95 ChunkDemuxerStream*, const TextTrackConfig&)> NewTextTrackCB; 90 ChunkDemuxerStream*, const TextTrackConfig&)> NewTextTrackCB;
96 91
97 SourceState(scoped_ptr<StreamParser> stream_parser, const LogCB& log_cb, 92 SourceState(
98 const CreateDemuxerStreamCB& create_demuxer_stream_cb, 93 scoped_ptr<StreamParser> stream_parser,
99 const IncreaseDurationCB& increase_duration_cb); 94 scoped_ptr<FrameProcessorBase> frame_processor, const LogCB& log_cb,
95 const CreateDemuxerStreamCB& create_demuxer_stream_cb);
100 96
101 ~SourceState(); 97 ~SourceState();
102 98
103 void Init(const StreamParser::InitCB& init_cb, 99 void Init(const StreamParser::InitCB& init_cb,
104 bool allow_audio, 100 bool allow_audio,
105 bool allow_video, 101 bool allow_video,
106 const StreamParser::NeedKeyCB& need_key_cb, 102 const StreamParser::NeedKeyCB& need_key_cb,
107 const NewTextTrackCB& new_text_track_cb); 103 const NewTextTrackCB& new_text_track_cb);
108 104
109 // Appends new data to the StreamParser. 105 // Appends new data to the StreamParser.
(...skipping 10 matching lines...)
120 // Aborts the current append sequence and resets the parser. 116 // Aborts the current append sequence and resets the parser.
121 void Abort(); 117 void Abort();
122 118
123 // Calls Remove(|start|, |end|, |duration|) on all 119 // Calls Remove(|start|, |end|, |duration|) on all
124 // ChunkDemuxerStreams managed by this object. 120 // ChunkDemuxerStreams managed by this object.
125 void Remove(TimeDelta start, TimeDelta end, TimeDelta duration); 121 void Remove(TimeDelta start, TimeDelta end, TimeDelta duration);
126 122
127 // Returns true if currently parsing a media segment, or false otherwise. 123 // Returns true if currently parsing a media segment, or false otherwise.
128 bool parsing_media_segment() const { return parsing_media_segment_; } 124 bool parsing_media_segment() const { return parsing_media_segment_; }
129 125
130 // Sets |sequence_mode_| to |sequence_mode|. 126 // Sets |frame_processor_|'s sequence mode to |sequence_mode|.
131 void SetSequenceMode(bool sequence_mode); 127 void SetSequenceMode(bool sequence_mode);
132 128
133 // Returns the range of buffered data in this source, capped at |duration|. 129 // Returns the range of buffered data in this source, capped at |duration|.
134 // |ended| - Set to true if end of stream has been signalled and the special 130 // |ended| - Set to true if end of stream has been signalled and the special
135 // end of stream range logic needs to be executed. 131 // end of stream range logic needs to be executed.
136 Ranges<TimeDelta> GetBufferedRanges(TimeDelta duration, bool ended) const; 132 Ranges<TimeDelta> GetBufferedRanges(TimeDelta duration, bool ended) const;
137 133
138 // Returns the highest buffered duration across all streams managed 134 // Returns the highest buffered duration across all streams managed
139 // by this object. 135 // by this object.
140 // Returns TimeDelta() if none of the streams contain buffered data. 136 // Returns TimeDelta() if none of the streams contain buffered data.
(...skipping 23 matching lines...)
164 const AudioDecoderConfig& audio_config, 160 const AudioDecoderConfig& audio_config,
165 const VideoDecoderConfig& video_config, 161 const VideoDecoderConfig& video_config,
166 const StreamParser::TextTrackConfigMap& text_configs); 162 const StreamParser::TextTrackConfigMap& text_configs);
167 163
168 // Called by the |stream_parser_| at the beginning of a new media segment. 164 // Called by the |stream_parser_| at the beginning of a new media segment.
169 void OnNewMediaSegment(); 165 void OnNewMediaSegment();
170 166
171 // Called by the |stream_parser_| at the end of a media segment. 167 // Called by the |stream_parser_| at the end of a media segment.
172 void OnEndOfMediaSegment(); 168 void OnEndOfMediaSegment();
173 169
174 // Called by the |stream_parser_| when new buffers have been parsed. It 170 // Called by the |stream_parser_| when new buffers have been parsed.
175 // applies |*timestamp_offset_during_append_| to all buffers in 171 // It processes the new buffers using |frame_processor_|, which includes
176 // |audio_buffers|, |video_buffers| and |text_map|, filters buffers against 172 // appending the processed frames to associated demuxer streams for each
177 // |append_window_[start,end]_during_append_| and then calls Append() 173 // frame's track.
178 // with the surviving modified buffers on |audio_|, |video_| and/or the text
179 // demuxer streams associated with the track numbers in |text_map|.
180 // Returns true on a successful call. Returns false if an error occurred while 174 // Returns true on a successful call. Returns false if an error occurred while
181 // processing the buffers. 175 // processing the buffers.
182 bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers, 176 bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
183 const StreamParser::BufferQueue& video_buffers, 177 const StreamParser::BufferQueue& video_buffers,
184 const StreamParser::TextBufferQueueMap& text_map); 178 const StreamParser::TextBufferQueueMap& text_map);
185 179
186 // Helper function for OnNewBuffers() when new text buffers have been parsed.
187 // It applies |*timestamp_offset_during_append_| to all buffers in |buffers|,
188 // filters the buffers against |append_window_[start,end]_during_append_| and
189 // then appends the (modified) buffers to the demuxer stream associated with
190 // the track having |text_track_id|.
191 // Returns true on a successful call. Returns false if an error occurred while
192 // processing the buffers.
193 bool OnTextBuffers(StreamParser::TrackId text_track_id,
194 const StreamParser::BufferQueue& buffers);
195
196 // Helper function that appends |buffers| to |stream| and calls
197 // |increase_duration_cb_| to potentially update the duration.
198 // Returns true if the append was successful. Returns false if
199 // |stream| is NULL or something in |buffers| caused the append to fail.
200 bool AppendAndUpdateDuration(ChunkDemuxerStream* stream,
201 const StreamParser::BufferQueue& buffers);
202
203 // Helper function that adds |*timestamp_offset_during_append_| to each buffer
204 // in |buffers|.
205 void AdjustBufferTimestamps(const StreamParser::BufferQueue& buffers);
206
207 // Filters out buffers that are outside of the append window
208 // [|append_window_start_during_append_|, |append_window_end_during_append_|).
209 // |needs_keyframe| is a pointer to the |xxx_need_keyframe_| flag
 210 // associated with the |buffers|. Its state is read and updated as
211 // this method filters |buffers|.
212 // Buffers that are inside the append window are appended to the end
213 // of |filtered_buffers|.
214 void FilterWithAppendWindow(const StreamParser::BufferQueue& buffers,
215 bool* needs_keyframe,
216 StreamParser::BufferQueue* filtered_buffers);
217
218 CreateDemuxerStreamCB create_demuxer_stream_cb_; 180 CreateDemuxerStreamCB create_demuxer_stream_cb_;
219 IncreaseDurationCB increase_duration_cb_;
220 NewTextTrackCB new_text_track_cb_; 181 NewTextTrackCB new_text_track_cb_;
221 182
222 // During Append(), if OnNewBuffers() coded frame processing updates the 183 // During Append(), if OnNewBuffers() coded frame processing updates the
223 // timestamp offset then |*timestamp_offset_during_append_| is also updated 184 // timestamp offset then |*timestamp_offset_during_append_| is also updated
224 // so Append()'s caller can know the new offset. This pointer is only non-NULL 185 // so Append()'s caller can know the new offset. This pointer is only non-NULL
225 // during the lifetime of an Append() call. 186 // during the lifetime of an Append() call.
226 TimeDelta* timestamp_offset_during_append_; 187 TimeDelta* timestamp_offset_during_append_;
227 188
228 // During Append(), coded frame processing triggered by OnNewBuffers() 189 // During Append(), coded frame processing triggered by OnNewBuffers()
229 // requires these two attributes. These are only valid during the lifetime of 190 // requires these two attributes. These are only valid during the lifetime of
230 // an Append() call. 191 // an Append() call.
231 TimeDelta append_window_start_during_append_; 192 TimeDelta append_window_start_during_append_;
232 TimeDelta append_window_end_during_append_; 193 TimeDelta append_window_end_during_append_;
233 194
234 // Tracks the mode by which appended media is processed. If true, then
235 // appended media is processed using "sequence" mode. Otherwise, appended
236 // media is processed using "segments" mode.
237 // TODO(wolenetz): Enable "sequence" mode logic. See http://crbug.com/249422
238 // and http://crbug.com/333437.
239 bool sequence_mode_;
240
241 // Set to true if the next buffers appended within the append window 195 // Set to true if the next buffers appended within the append window
242 // represent the start of a new media segment. This flag being set 196 // represent the start of a new media segment. This flag being set
243 // triggers a call to |new_segment_cb_| when the new buffers are 197 // triggers a call to |new_segment_cb_| when the new buffers are
244 // appended. The flag is set on actual media segment boundaries and 198 // appended. The flag is set on actual media segment boundaries and
245 // when the "append window" filtering causes discontinuities in the 199 // when the "append window" filtering causes discontinuities in the
246 // appended data. 200 // appended data.
247 // TODO(wolenetz/acolwell): Investigate if we need this, or if coded frame 201 // TODO(wolenetz/acolwell): Investigate if we need this, or if coded frame
248 // processing's discontinuity logic is enough. See http://crbug.com/351489. 202 // processing's discontinuity logic is enough. See http://crbug.com/351489.
249 bool new_media_segment_; 203 bool new_media_segment_;
250 204
251 // Keeps track of whether a media segment is being parsed. 205 // Keeps track of whether a media segment is being parsed.
252 bool parsing_media_segment_; 206 bool parsing_media_segment_;
253 207
254 // The object used to parse appended data. 208 // The object used to parse appended data.
255 scoped_ptr<StreamParser> stream_parser_; 209 scoped_ptr<StreamParser> stream_parser_;
256 210
257 ChunkDemuxerStream* audio_; // Not owned by |this|. 211 ChunkDemuxerStream* audio_; // Not owned by |this|.
258 bool audio_needs_keyframe_;
259
260 ChunkDemuxerStream* video_; // Not owned by |this|. 212 ChunkDemuxerStream* video_; // Not owned by |this|.
261 bool video_needs_keyframe_;
262 213
263 typedef std::map<StreamParser::TrackId, ChunkDemuxerStream*> TextStreamMap; 214 typedef std::map<StreamParser::TrackId, ChunkDemuxerStream*> TextStreamMap;
264 TextStreamMap text_stream_map_; // |this| owns the map's stream pointers. 215 TextStreamMap text_stream_map_; // |this| owns the map's stream pointers.
265 216
217 scoped_ptr<FrameProcessorBase> frame_processor_;
266 LogCB log_cb_; 218 LogCB log_cb_;
267 219
268 DISALLOW_COPY_AND_ASSIGN(SourceState); 220 DISALLOW_COPY_AND_ASSIGN(SourceState);
269 }; 221 };
270 222
271 class ChunkDemuxerStream : public DemuxerStream { 223 SourceState::SourceState(
272 public: 224 scoped_ptr<StreamParser> stream_parser,
273 typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue; 225 scoped_ptr<FrameProcessorBase> frame_processor,
274 226 const LogCB& log_cb,
275 explicit ChunkDemuxerStream(Type type); 227 const CreateDemuxerStreamCB& create_demuxer_stream_cb)
276 virtual ~ChunkDemuxerStream();
277
278 // ChunkDemuxerStream control methods.
279 void StartReturningData();
280 void AbortReads();
281 void CompletePendingReadIfPossible();
282 void Shutdown();
283
284 // SourceBufferStream manipulation methods.
285 void Seek(TimeDelta time);
286 bool IsSeekWaitingForData() const;
287
288 // Add buffers to this stream. Buffers are stored in SourceBufferStreams,
289 // which handle ordering and overlap resolution.
290 // Returns true if buffers were successfully added.
291 bool Append(const StreamParser::BufferQueue& buffers);
292
293 // Removes buffers between |start| and |end| according to the steps
294 // in the "Coded Frame Removal Algorithm" in the Media Source
295 // Extensions Spec.
 296 // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-coded-frame-removal
297 //
298 // |duration| is the current duration of the presentation. It is
299 // required by the computation outlined in the spec.
300 void Remove(TimeDelta start, TimeDelta end, TimeDelta duration);
301
302 // Signal to the stream that duration has changed to |duration|.
303 void OnSetDuration(TimeDelta duration);
304
305 // Returns the range of buffered data in this stream, capped at |duration|.
306 Ranges<TimeDelta> GetBufferedRanges(TimeDelta duration) const;
307
308 // Returns the duration of the buffered data.
309 // Returns TimeDelta() if the stream has no buffered data.
310 TimeDelta GetBufferedDuration() const;
311
312 // Signal to the stream that buffers handed in through subsequent calls to
313 // Append() belong to a media segment that starts at |start_timestamp|.
314 void OnNewMediaSegment(TimeDelta start_timestamp);
315
316 // Called when midstream config updates occur.
317 // Returns true if the new config is accepted.
318 // Returns false if the new config should trigger an error.
319 bool UpdateAudioConfig(const AudioDecoderConfig& config, const LogCB& log_cb);
320 bool UpdateVideoConfig(const VideoDecoderConfig& config, const LogCB& log_cb);
321 void UpdateTextConfig(const TextTrackConfig& config, const LogCB& log_cb);
322
323 void MarkEndOfStream();
324 void UnmarkEndOfStream();
325
326 // DemuxerStream methods.
327 virtual void Read(const ReadCB& read_cb) OVERRIDE;
328 virtual Type type() OVERRIDE;
329 virtual void EnableBitstreamConverter() OVERRIDE;
330 virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
331 virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
332
333 // Returns the text track configuration. It is an error to call this method
334 // if type() != TEXT.
335 TextTrackConfig text_track_config();
336
337 // Sets the memory limit, in bytes, on the SourceBufferStream.
338 void set_memory_limit_for_testing(int memory_limit) {
339 stream_->set_memory_limit_for_testing(memory_limit);
340 }
341
342 private:
343 enum State {
344 UNINITIALIZED,
345 RETURNING_DATA_FOR_READS,
346 RETURNING_ABORT_FOR_READS,
347 SHUTDOWN,
348 };
349
350 // Assigns |state_| to |state|
351 void ChangeState_Locked(State state);
352
353 void CompletePendingReadIfPossible_Locked();
354
355 // Specifies the type of the stream.
356 Type type_;
357
358 scoped_ptr<SourceBufferStream> stream_;
359
360 mutable base::Lock lock_;
361 State state_;
362 ReadCB read_cb_;
363
364 DISALLOW_IMPLICIT_CONSTRUCTORS(ChunkDemuxerStream);
365 };
366
367 SourceState::SourceState(scoped_ptr<StreamParser> stream_parser,
368 const LogCB& log_cb,
369 const CreateDemuxerStreamCB& create_demuxer_stream_cb,
370 const IncreaseDurationCB& increase_duration_cb)
371 : create_demuxer_stream_cb_(create_demuxer_stream_cb), 228 : create_demuxer_stream_cb_(create_demuxer_stream_cb),
372 increase_duration_cb_(increase_duration_cb),
373 timestamp_offset_during_append_(NULL), 229 timestamp_offset_during_append_(NULL),
374 sequence_mode_(false),
375 new_media_segment_(false), 230 new_media_segment_(false),
376 parsing_media_segment_(false), 231 parsing_media_segment_(false),
377 stream_parser_(stream_parser.release()), 232 stream_parser_(stream_parser.release()),
378 audio_(NULL), 233 audio_(NULL),
379 audio_needs_keyframe_(true),
380 video_(NULL), 234 video_(NULL),
381 video_needs_keyframe_(true), 235 frame_processor_(frame_processor.release()),
382 log_cb_(log_cb) { 236 log_cb_(log_cb) {
383 DCHECK(!create_demuxer_stream_cb_.is_null()); 237 DCHECK(!create_demuxer_stream_cb_.is_null());
384 DCHECK(!increase_duration_cb_.is_null()); 238 DCHECK(frame_processor_);
385 } 239 }
386 240
387 SourceState::~SourceState() { 241 SourceState::~SourceState() {
388 Shutdown(); 242 Shutdown();
389 243
390 STLDeleteValues(&text_stream_map_); 244 STLDeleteValues(&text_stream_map_);
391 } 245 }
392 246
393 void SourceState::Init(const StreamParser::InitCB& init_cb, 247 void SourceState::Init(const StreamParser::InitCB& init_cb,
394 bool allow_audio, 248 bool allow_audio,
(...skipping 14 matching lines...)
409 base::Bind(&SourceState::OnNewMediaSegment, 263 base::Bind(&SourceState::OnNewMediaSegment,
410 base::Unretained(this)), 264 base::Unretained(this)),
411 base::Bind(&SourceState::OnEndOfMediaSegment, 265 base::Bind(&SourceState::OnEndOfMediaSegment,
412 base::Unretained(this)), 266 base::Unretained(this)),
413 log_cb_); 267 log_cb_);
414 } 268 }
415 269
416 void SourceState::SetSequenceMode(bool sequence_mode) { 270 void SourceState::SetSequenceMode(bool sequence_mode) {
417 DCHECK(!parsing_media_segment_); 271 DCHECK(!parsing_media_segment_);
418 272
419 sequence_mode_ = sequence_mode; 273 frame_processor_->SetSequenceMode(sequence_mode);
420 } 274 }
421 275
422 bool SourceState::Append(const uint8* data, size_t length, 276 bool SourceState::Append(const uint8* data, size_t length,
423 TimeDelta append_window_start, 277 TimeDelta append_window_start,
424 TimeDelta append_window_end, 278 TimeDelta append_window_end,
425 TimeDelta* timestamp_offset) { 279 TimeDelta* timestamp_offset) {
426 DCHECK(timestamp_offset); 280 DCHECK(timestamp_offset);
427 DCHECK(!timestamp_offset_during_append_); 281 DCHECK(!timestamp_offset_during_append_);
428 timestamp_offset_during_append_ = timestamp_offset; 282 timestamp_offset_during_append_ = timestamp_offset;
429 append_window_start_during_append_ = append_window_start; 283 append_window_start_during_append_ = append_window_start;
430 append_window_end_during_append_ = append_window_end; 284 append_window_end_during_append_ = append_window_end;
431 285
432 // TODO(wolenetz/acolwell): Curry and pass a NewBuffersCB here bound with 286 // TODO(wolenetz/acolwell): Curry and pass a NewBuffersCB here bound with
433 // append window and timestamp offset pointer. See http://crbug.com/351454. 287 // append window and timestamp offset pointer. See http://crbug.com/351454.
434 bool err = stream_parser_->Parse(data, length); 288 bool err = stream_parser_->Parse(data, length);
435 timestamp_offset_during_append_ = NULL; 289 timestamp_offset_during_append_ = NULL;
436 return err; 290 return err;
437 } 291 }
438 292
439 void SourceState::Abort() { 293 void SourceState::Abort() {
440 stream_parser_->Flush(); 294 stream_parser_->Flush();
441 audio_needs_keyframe_ = true; 295 frame_processor_->Reset();
442 video_needs_keyframe_ = true;
443 parsing_media_segment_ = false; 296 parsing_media_segment_ = false;
444 } 297 }
445 298
446 void SourceState::Remove(TimeDelta start, TimeDelta end, TimeDelta duration) { 299 void SourceState::Remove(TimeDelta start, TimeDelta end, TimeDelta duration) {
447 if (audio_) 300 if (audio_)
448 audio_->Remove(start, end, duration); 301 audio_->Remove(start, end, duration);
449 302
450 if (video_) 303 if (video_)
451 video_->Remove(start, end, duration); 304 video_->Remove(start, end, duration);
452 305
(...skipping 166 matching lines...)
619 // NOTE: We are intentionally not checking the text tracks 472 // NOTE: We are intentionally not checking the text tracks
620 // because text tracks are discontinuous and may not have data 473 // because text tracks are discontinuous and may not have data
621 // for the seek position. This is ok and playback should not be 474 // for the seek position. This is ok and playback should not be
622 // stalled because we don't have cues. If cues, with timestamps after 475 // stalled because we don't have cues. If cues, with timestamps after
623 // the seek time, eventually arrive they will be delivered properly 476 // the seek time, eventually arrive they will be delivered properly
624 // in response to ChunkDemuxerStream::Read() calls. 477 // in response to ChunkDemuxerStream::Read() calls.
625 478
626 return false; 479 return false;
627 } 480 }
628 481
629 void SourceState::AdjustBufferTimestamps(
630 const StreamParser::BufferQueue& buffers) {
631 TimeDelta timestamp_offset = *timestamp_offset_during_append_;
632 if (timestamp_offset == TimeDelta())
633 return;
634
635 for (StreamParser::BufferQueue::const_iterator itr = buffers.begin();
636 itr != buffers.end(); ++itr) {
637 (*itr)->SetDecodeTimestamp(
638 (*itr)->GetDecodeTimestamp() + timestamp_offset);
639 (*itr)->set_timestamp((*itr)->timestamp() + timestamp_offset);
640 }
641 }
642
643 bool SourceState::OnNewConfigs( 482 bool SourceState::OnNewConfigs(
644 bool allow_audio, bool allow_video, 483 bool allow_audio, bool allow_video,
645 const AudioDecoderConfig& audio_config, 484 const AudioDecoderConfig& audio_config,
646 const VideoDecoderConfig& video_config, 485 const VideoDecoderConfig& video_config,
647 const StreamParser::TextTrackConfigMap& text_configs) { 486 const StreamParser::TextTrackConfigMap& text_configs) {
648 DVLOG(1) << "OnNewConfigs(" << allow_audio << ", " << allow_video 487 DVLOG(1) << "OnNewConfigs(" << allow_audio << ", " << allow_video
649 << ", " << audio_config.IsValidConfig() 488 << ", " << audio_config.IsValidConfig()
650 << ", " << video_config.IsValidConfig() << ")"; 489 << ", " << video_config.IsValidConfig() << ")";
651 490
652 if (!audio_config.IsValidConfig() && !video_config.IsValidConfig()) { 491 if (!audio_config.IsValidConfig() && !video_config.IsValidConfig()) {
(...skipping 25 matching lines...)
678 517
679 bool success = true; 518 bool success = true;
680 if (audio_config.IsValidConfig()) { 519 if (audio_config.IsValidConfig()) {
681 if (!audio_) { 520 if (!audio_) {
682 audio_ = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO); 521 audio_ = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO);
683 522
684 if (!audio_) { 523 if (!audio_) {
685 DVLOG(1) << "Failed to create an audio stream."; 524 DVLOG(1) << "Failed to create an audio stream.";
686 return false; 525 return false;
687 } 526 }
527
528 if (!frame_processor_->AddTrack(FrameProcessorBase::kAudioTrackId,
529 audio_)) {
530 DVLOG(1) << "Failed to add audio track to frame processor.";
531 return false;
532 }
688 } 533 }
689 534
690 success &= audio_->UpdateAudioConfig(audio_config, log_cb_); 535 success &= audio_->UpdateAudioConfig(audio_config, log_cb_);
691 } 536 }
692 537
693 if (video_config.IsValidConfig()) { 538 if (video_config.IsValidConfig()) {
694 if (!video_) { 539 if (!video_) {
695 video_ = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO); 540 video_ = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO);
696 541
697 if (!video_) { 542 if (!video_) {
698 DVLOG(1) << "Failed to create a video stream."; 543 DVLOG(1) << "Failed to create a video stream.";
699 return false; 544 return false;
700 } 545 }
546
547 if (!frame_processor_->AddTrack(FrameProcessorBase::kVideoTrackId,
548 video_)) {
549 DVLOG(1) << "Failed to add video track to frame processor.";
550 return false;
551 }
701 } 552 }
702 553
703 success &= video_->UpdateVideoConfig(video_config, log_cb_); 554 success &= video_->UpdateVideoConfig(video_config, log_cb_);
704 } 555 }
705 556
706 typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr; 557 typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr;
707 if (text_stream_map_.empty()) { 558 if (text_stream_map_.empty()) {
708 for (TextConfigItr itr = text_configs.begin(); 559 for (TextConfigItr itr = text_configs.begin();
709 itr != text_configs.end(); ++itr) { 560 itr != text_configs.end(); ++itr) {
710 ChunkDemuxerStream* const text_stream = 561 ChunkDemuxerStream* const text_stream =
711 create_demuxer_stream_cb_.Run(DemuxerStream::TEXT); 562 create_demuxer_stream_cb_.Run(DemuxerStream::TEXT);
563 if (!frame_processor_->AddTrack(itr->first, text_stream)) {
564 success &= false;
565 MEDIA_LOG(log_cb_) << "Failed to add text track ID " << itr->first
566 << " to frame processor.";
567 break;
568 }
712 text_stream->UpdateTextConfig(itr->second, log_cb_); 569 text_stream->UpdateTextConfig(itr->second, log_cb_);
713 text_stream_map_[itr->first] = text_stream; 570 text_stream_map_[itr->first] = text_stream;
714 new_text_track_cb_.Run(text_stream, itr->second); 571 new_text_track_cb_.Run(text_stream, itr->second);
715 } 572 }
716 } else { 573 } else {
717 const size_t text_count = text_stream_map_.size(); 574 const size_t text_count = text_stream_map_.size();
718 if (text_configs.size() != text_count) { 575 if (text_configs.size() != text_count) {
719 success &= false; 576 success &= false;
720 MEDIA_LOG(log_cb_) << "The number of text track configs changed."; 577 MEDIA_LOG(log_cb_) << "The number of text track configs changed.";
721 } else if (text_count == 1) { 578 } else if (text_count == 1) {
(...skipping 51 matching lines...)
773 parsing_media_segment_ = false; 630 parsing_media_segment_ = false;
774 new_media_segment_ = false; 631 new_media_segment_ = false;
775 } 632 }
776 633
777 bool SourceState::OnNewBuffers( 634 bool SourceState::OnNewBuffers(
778 const StreamParser::BufferQueue& audio_buffers, 635 const StreamParser::BufferQueue& audio_buffers,
779 const StreamParser::BufferQueue& video_buffers, 636 const StreamParser::BufferQueue& video_buffers,
780 const StreamParser::TextBufferQueueMap& text_map) { 637 const StreamParser::TextBufferQueueMap& text_map) {
781 DVLOG(2) << "OnNewBuffers()"; 638 DVLOG(2) << "OnNewBuffers()";
782 DCHECK(timestamp_offset_during_append_); 639 DCHECK(timestamp_offset_during_append_);
783 DCHECK(!audio_buffers.empty() || !video_buffers.empty() ||
784 !text_map.empty());
785 640
786 // TODO(wolenetz): DCHECK + return false if any of these buffers have UNKNOWN 641 return frame_processor_->ProcessFrames(
787 // type() in upcoming coded frame processing compliant implementation. See 642 audio_buffers, video_buffers, text_map,
788 // http://crbug.com/249422. 643 append_window_start_during_append_, append_window_end_during_append_,
789 644 &new_media_segment_, timestamp_offset_during_append_);
790 AdjustBufferTimestamps(audio_buffers);
791 AdjustBufferTimestamps(video_buffers);
792
793 StreamParser::BufferQueue filtered_audio;
794 StreamParser::BufferQueue filtered_video;
795
796 FilterWithAppendWindow(audio_buffers, &audio_needs_keyframe_,
797 &filtered_audio);
798
799 FilterWithAppendWindow(video_buffers, &video_needs_keyframe_,
800 &filtered_video);
801
802 if ((!filtered_audio.empty() || !filtered_video.empty()) &&
803 new_media_segment_) {
804 // Find the earliest timestamp in the filtered buffers and use that for the
805 // segment start timestamp.
806 TimeDelta segment_timestamp = kNoTimestamp();
807
808 if (!filtered_audio.empty())
809 segment_timestamp = filtered_audio.front()->GetDecodeTimestamp();
810
811 if (!filtered_video.empty() &&
812 (segment_timestamp == kNoTimestamp() ||
813 filtered_video.front()->GetDecodeTimestamp() < segment_timestamp)) {
814 segment_timestamp = filtered_video.front()->GetDecodeTimestamp();
815 }
816
817 new_media_segment_ = false;
818
819 if (audio_)
820 audio_->OnNewMediaSegment(segment_timestamp);
821
822 if (video_)
823 video_->OnNewMediaSegment(segment_timestamp);
824
825 for (TextStreamMap::iterator itr = text_stream_map_.begin();
826 itr != text_stream_map_.end(); ++itr) {
827 itr->second->OnNewMediaSegment(segment_timestamp);
828 }
829 }
830
831 if (!filtered_audio.empty() &&
832 !AppendAndUpdateDuration(audio_, filtered_audio)) {
833 return false;
834 }
835
836 if (!filtered_video.empty() &&
837 !AppendAndUpdateDuration(video_, filtered_video)) {
838 return false;
839 }
840
841 if (text_map.empty())
842 return true;
843
844 // Process any buffers for each of the text tracks in the map.
845 bool all_text_buffers_empty = true;
846 for (StreamParser::TextBufferQueueMap::const_iterator itr = text_map.begin();
847 itr != text_map.end();
848 ++itr) {
849 const StreamParser::BufferQueue text_buffers = itr->second;
850 if (!text_buffers.empty()) {
851 all_text_buffers_empty = false;
852 if (!OnTextBuffers(itr->first, text_buffers))
853 return false;
854 }
855 }
856
857 DCHECK(!all_text_buffers_empty);
858 return true;
859 }
860
861 bool SourceState::OnTextBuffers(
862 StreamParser::TrackId text_track_id,
863 const StreamParser::BufferQueue& buffers) {
864 DCHECK(!buffers.empty());
865
866 TextStreamMap::iterator itr = text_stream_map_.find(text_track_id);
867 if (itr == text_stream_map_.end())
868 return false;
869
870 AdjustBufferTimestamps(buffers);
871
872 StreamParser::BufferQueue filtered_buffers;
873 bool needs_keyframe = false;
874 FilterWithAppendWindow(buffers, &needs_keyframe, &filtered_buffers);
875
876 if (filtered_buffers.empty())
877 return true;
878
879 return AppendAndUpdateDuration(itr->second, filtered_buffers);
880 }
881
882 bool SourceState::AppendAndUpdateDuration(
883 ChunkDemuxerStream* stream,
884 const StreamParser::BufferQueue& buffers) {
885 DCHECK(!buffers.empty());
886
887 if (!stream || !stream->Append(buffers))
888 return false;
889
890 increase_duration_cb_.Run(buffers.back()->timestamp(), stream);
891 return true;
892 }
893
894 void SourceState::FilterWithAppendWindow(
895 const StreamParser::BufferQueue& buffers, bool* needs_keyframe,
896 StreamParser::BufferQueue* filtered_buffers) {
897 DCHECK(needs_keyframe);
898 DCHECK(filtered_buffers);
899
900 // This loop implements steps 1.9, 1.10, & 1.11 of the "Coded frame
901 // processing loop" in the Media Source Extensions spec.
902 // These steps filter out buffers that are not within the "append
903 // window" and handles resyncing on the next random access point
904 // (i.e., next keyframe) if a buffer gets dropped.
905 for (StreamParser::BufferQueue::const_iterator itr = buffers.begin();
906 itr != buffers.end(); ++itr) {
907 // Filter out buffers that are outside the append window. Anytime
908 // a buffer gets dropped we need to set |*needs_keyframe| to true
909 // because we can only resume decoding at keyframes.
910 TimeDelta presentation_timestamp = (*itr)->timestamp();
911
912 // TODO(acolwell): Change |frame_end_timestamp| value to
913 // |presentation_timestamp + (*itr)->duration()|, like the spec
914 // requires, once frame durations are actually present in all buffers.
915 TimeDelta frame_end_timestamp = presentation_timestamp;
916 if (presentation_timestamp < append_window_start_during_append_ ||
917 frame_end_timestamp > append_window_end_during_append_) {
918 DVLOG(1) << "Dropping buffer outside append window."
919 << " presentation_timestamp "
920 << presentation_timestamp.InSecondsF();
921 *needs_keyframe = true;
922
923 // This triggers a discontinuity so we need to treat the next frames
924 // appended within the append window as if they were the beginning of a
925 // new segment.
926 new_media_segment_ = true;
927 continue;
928 }
929
930 // If |*needs_keyframe| is true then filter out buffers until we
931 // encounter the next keyframe.
932 if (*needs_keyframe) {
933 if (!(*itr)->IsKeyframe()) {
934 DVLOG(1) << "Dropping non-keyframe. presentation_timestamp "
935 << presentation_timestamp.InSecondsF();
936 continue;
937 }
938
939 *needs_keyframe = false;
940 }
941
942 filtered_buffers->push_back(*itr);
943 }
944 } 645 }
945 646
946 ChunkDemuxerStream::ChunkDemuxerStream(Type type) 647 ChunkDemuxerStream::ChunkDemuxerStream(Type type)
947 : type_(type), 648 : type_(type),
948 state_(UNINITIALIZED) { 649 state_(UNINITIALIZED) {
949 } 650 }
950 651
951 void ChunkDemuxerStream::StartReturningData() { 652 void ChunkDemuxerStream::StartReturningData() {
952 DVLOG(1) << "ChunkDemuxerStream::StartReturningData()"; 653 DVLOG(1) << "ChunkDemuxerStream::StartReturningData()";
953 base::AutoLock auto_lock(lock_); 654 base::AutoLock auto_lock(lock_);
(...skipping 407 matching lines...)
1361 if ((has_audio && !source_id_audio_.empty()) || 1062 if ((has_audio && !source_id_audio_.empty()) ||
1362 (has_video && !source_id_video_.empty())) 1063 (has_video && !source_id_video_.empty()))
1363 return kReachedIdLimit; 1064 return kReachedIdLimit;
1364 1065
1365 if (has_audio) 1066 if (has_audio)
1366 source_id_audio_ = id; 1067 source_id_audio_ = id;
1367 1068
1368 if (has_video) 1069 if (has_video)
1369 source_id_video_ = id; 1070 source_id_video_ = id;
1370 1071
1072 scoped_ptr<FrameProcessorBase> frame_processor(new LegacyFrameProcessor(
1073 base::Bind(&ChunkDemuxer::IncreaseDurationIfNecessary,
1074 base::Unretained(this))));
1075
1371 scoped_ptr<SourceState> source_state( 1076 scoped_ptr<SourceState> source_state(
1372 new SourceState(stream_parser.Pass(), log_cb_, 1077 new SourceState(stream_parser.Pass(),
1078 frame_processor.Pass(), log_cb_,
1373 base::Bind(&ChunkDemuxer::CreateDemuxerStream, 1079 base::Bind(&ChunkDemuxer::CreateDemuxerStream,
1374 base::Unretained(this)),
1375 base::Bind(&ChunkDemuxer::IncreaseDurationIfNecessary,
1376 base::Unretained(this)))); 1080 base::Unretained(this))));
1377 1081
1378 SourceState::NewTextTrackCB new_text_track_cb; 1082 SourceState::NewTextTrackCB new_text_track_cb;
1379 1083
1380 if (enable_text_) { 1084 if (enable_text_) {
1381 new_text_track_cb = base::Bind(&ChunkDemuxer::OnNewTextTrack, 1085 new_text_track_cb = base::Bind(&ChunkDemuxer::OnNewTextTrack,
1382 base::Unretained(this)); 1086 base::Unretained(this));
1383 } 1087 }
1384 1088
1385 source_state->Init( 1089 source_state->Init(
(...skipping 483 matching lines...)
1869 } 1573 }
1870 1574
1871 void ChunkDemuxer::ShutdownAllStreams() { 1575 void ChunkDemuxer::ShutdownAllStreams() {
1872 for (SourceStateMap::iterator itr = source_state_map_.begin(); 1576 for (SourceStateMap::iterator itr = source_state_map_.begin();
1873 itr != source_state_map_.end(); ++itr) { 1577 itr != source_state_map_.end(); ++itr) {
1874 itr->second->Shutdown(); 1578 itr->second->Shutdown();
1875 } 1579 }
1876 } 1580 }
1877 1581
1878 } // namespace media 1582 } // namespace media
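
Taken together, the hunks above invert the old wiring: ChunkDemuxer::AddId now constructs a LegacyFrameProcessor (bound to IncreaseDurationIfNecessary) and injects it into SourceState, whose OnNewBuffers forwards the parsed buffers instead of adjusting timestamps, filtering against the append window, and appending to streams itself. A heavily condensed, hypothetical illustration of that ownership and delegation, with Chromium types replaced by stand-ins and std::unique_ptr standing in for scoped_ptr:

// Toy model of the new ownership: the demuxer builds the frame processor and
// injects it; the per-source state only delegates. All types are stand-ins.
#include <memory>
#include <utility>

struct FrameBatch {};  // Stand-in for the audio/video/text buffer queues.

class FrameProcessor {
 public:
  virtual ~FrameProcessor() = default;
  virtual bool ProcessFrames(const FrameBatch& batch,
                             bool* new_media_segment) = 0;
};

class LegacyFrameProcessor : public FrameProcessor {
 public:
  bool ProcessFrames(const FrameBatch& /*batch*/,
                     bool* new_media_segment) override {
    // The real class applies timestamp offsets, append-window filtering and
    // per-track appends; elided here.
    *new_media_segment = false;
    return true;
  }
};

class SourceState {
 public:
  explicit SourceState(std::unique_ptr<FrameProcessor> frame_processor)
      : frame_processor_(std::move(frame_processor)) {}

  // Mirrors the new SourceState::OnNewBuffers(): no local filtering, just
  // delegation to the injected processor.
  bool OnNewBuffers(const FrameBatch& batch) {
    return frame_processor_->ProcessFrames(batch, &new_media_segment_);
  }

 private:
  std::unique_ptr<FrameProcessor> frame_processor_;
  bool new_media_segment_ = true;
};

int main() {
  // Mirrors ChunkDemuxer::AddId(): build the processor, hand it off.
  SourceState source(std::make_unique<LegacyFrameProcessor>());
  return source.OnNewBuffers(FrameBatch{}) ? 0 : 1;
}

The real classes carry considerably more state (append window bounds, the timestamp offset pointer, per-track streams), but the delegation shape is the one the diff establishes.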