Diff: media/filters/frame_processor.cc

Issue 276573002: Add gapless playback support for AAC playback. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fix msvc error. Created 6 years, 6 months ago
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "media/filters/frame_processor.h"

 #include "base/stl_util.h"
 #include "media/base/buffers.h"
 #include "media/base/stream_parser_buffer.h"

(...skipping 57 matching lines...)
   }

   // 2. - 4. Are handled by the WebMediaPlayer / Pipeline / Media Element.

   // Step 5:
   update_duration_cb_.Run(group_end_timestamp_);

   return true;
 }

-bool FrameProcessor::ProcessFrame(scoped_refptr<StreamParserBuffer> frame,
-                                  base::TimeDelta append_window_start,
-                                  base::TimeDelta append_window_end,
-                                  base::TimeDelta* timestamp_offset,
-                                  bool* new_media_segment) {
+bool FrameProcessor::ProcessFrame(
+    const scoped_refptr<StreamParserBuffer>& frame,
+    base::TimeDelta append_window_start,
+    base::TimeDelta append_window_end,
+    base::TimeDelta* timestamp_offset,
+    bool* new_media_segment) {
   // Implements the loop within step 1 of the coded frame processing algorithm
   // for a single input frame per April 1, 2014 MSE spec editor's draft:
   // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
   //     media-source.html#sourcebuffer-coded-frame-processing

   while (true) {
     // 1. Loop Top: Let presentation timestamp be a double precision floating
     //    point representation of the coded frame's presentation timestamp in
     //    seconds.
     // 2. Let decode timestamp be a double precision floating point
(...skipping 61 matching lines...)
       //      true.
       SetAllTrackBuffersNeedRandomAccessPoint();

       // 4.4. Unset group start timestamp.
       group_start_timestamp_ = kNoTimestamp();
     }

     // 5. If timestampOffset is not 0, then run the following steps:
     if (*timestamp_offset != base::TimeDelta()) {
       // 5.1. Add timestampOffset to the presentation timestamp.
-      // Note: |frame| PTS is only updated if it survives processing.
+      // Note: |frame| PTS is only updated if it survives discontinuity
+      // processing.
       presentation_timestamp += *timestamp_offset;

       // 5.2. Add timestampOffset to the decode timestamp.
-      // Frame DTS is only updated if it survives processing.
+      // Frame DTS is only updated if it survives discontinuity processing.
       decode_timestamp += *timestamp_offset;
     }

     // 6. Let track buffer equal the track buffer that the coded frame will be
     //    added to.

     // Remap audio and video track types to their special singleton identifiers.
     StreamParser::TrackId track_id = kAudioTrackId;
     switch (frame->type()) {
       case DemuxerStream::AUDIO:
(...skipping 65 matching lines...)
       DVLOG(2) << __FUNCTION__
                << ": frame PTS=" << presentation_timestamp.InSecondsF()
                << " or DTS=" << decode_timestamp.InSecondsF()
                << " negative after applying timestampOffset and handling any "
                << " discontinuity";
       return false;
     }

     // 9. Let frame end timestamp equal the sum of presentation timestamp and
     //    frame duration.
-    base::TimeDelta frame_end_timestamp = presentation_timestamp +
-        frame_duration;
+    const base::TimeDelta frame_end_timestamp =
+        presentation_timestamp + frame_duration;

     // 10. If presentation timestamp is less than appendWindowStart, then set
     //     the need random access point flag to true, drop the coded frame, and
     //     jump to the top of the loop to start processing the next coded
     //     frame.
     // Note: We keep the result of partial discard of a buffer that overlaps
     //     |append_window_start| and does not end after |append_window_end|.
     // 11. If frame end timestamp is greater than appendWindowEnd, then set the
     //     need random access point flag to true, drop the coded frame, and jump
     //     to the top of the loop to start processing the next coded frame.
+    frame->set_timestamp(presentation_timestamp);
+    frame->SetDecodeTimestamp(decode_timestamp);
+    if (track_buffer->stream()->supports_partial_append_window_trimming() &&
+        HandlePartialAppendWindowTrimming(append_window_start,
+                                          append_window_end,
+                                          frame)) {
+      // If |frame| was shortened a discontinuity may exist, so treat the next
+      // frames appended as if they were the beginning of a new media segment.
+      if (frame->timestamp() != presentation_timestamp && !sequence_mode_)
+        *new_media_segment = true;
+
+      // |frame| has been partially trimmed or had preroll added.
+      decode_timestamp = frame->GetDecodeTimestamp();
+      presentation_timestamp = frame->timestamp();
+      frame_duration = frame->duration();
+
+      // The end timestamp of the frame should be unchanged.
+      DCHECK(frame_end_timestamp == presentation_timestamp + frame_duration);
+    }
+
     if (presentation_timestamp < append_window_start ||
         frame_end_timestamp > append_window_end) {
-      // See if a partial discard can be done around |append_window_start|.
-      // TODO(wolenetz): Refactor this into a base helper across legacy and
-      // new frame processors?
-      if (track_buffer->stream()->supports_partial_append_window_trimming() &&
-          presentation_timestamp < append_window_start &&
-          frame_end_timestamp > append_window_start &&
-          frame_end_timestamp <= append_window_end) {
-        DCHECK(frame->IsKeyframe());
-        DVLOG(1) << "Truncating buffer which overlaps append window start."
-                 << " presentation_timestamp "
-                 << presentation_timestamp.InSecondsF()
-                 << " append_window_start " << append_window_start.InSecondsF();
-
-        // Adjust the timestamp of this frame forward to |append_window_start|,
-        // while decreasing the duration appropriately.
-        frame->set_discard_padding(std::make_pair(
-            append_window_start - presentation_timestamp, base::TimeDelta()));
-        presentation_timestamp = append_window_start;  // |frame| updated below.
-        decode_timestamp = append_window_start;  // |frame| updated below.
-        frame_duration = frame_end_timestamp - presentation_timestamp;
-        frame->set_duration(frame_duration);
-
-        // TODO(dalecurtis): This could also be done with |append_window_end|,
-        // but is not necessary since splice frames covert the overlap there.
-      } else {
-        track_buffer->set_needs_random_access_point(true);
-        DVLOG(3) << "Dropping frame that is outside append window.";
-
-        if (!sequence_mode_) {
-          // This also triggers a discontinuity so we need to treat the next
-          // frames appended within the append window as if they were the
-          // beginning of a new segment.
-          *new_media_segment = true;
-        }
-
-        return true;
-      }
+      track_buffer->set_needs_random_access_point(true);
+      DVLOG(3) << "Dropping frame that is outside append window.";
+
+      if (!sequence_mode_) {
+        // This also triggers a discontinuity so we need to treat the next
+        // frames appended within the append window as if they were the
+        // beginning of a new segment.
+        *new_media_segment = true;
+      }
+
+      return true;
     }

     // 12. If the need random access point flag on track buffer equals true,
     //     then run the following steps:
     if (track_buffer->needs_random_access_point()) {
       // 12.1. If the coded frame is not a random access point, then drop the
       //       coded frame and jump to the top of the loop to start processing
       //       the next coded frame.
       if (!frame->IsKeyframe()) {
         DVLOG(3) << __FUNCTION__
                  << ": Dropping frame that is not a random access point";
         return true;
       }

       // 12.2. Set the need random access point flag on track buffer to false.
       track_buffer->set_needs_random_access_point(false);
     }

     // We now have a processed buffer to append to the track buffer's stream.
     // If it is the first in a new media segment or following a discontinuity,
     // notify all the track buffers' streams that a new segment is beginning.
     if (*new_media_segment) {
       *new_media_segment = false;
       NotifyNewMediaSegmentStarting(decode_timestamp);
     }

     DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, "
              << "PTS=" << presentation_timestamp.InSecondsF()
              << ", DTS=" << decode_timestamp.InSecondsF();
-    frame->set_timestamp(presentation_timestamp);
-    frame->SetDecodeTimestamp(decode_timestamp);

     // Steps 13-18:
     // TODO(wolenetz): Collect and emit more than one buffer at a time, if
     // possible. Also refactor SourceBufferStream to conform to spec GC timing.
     // See http://crbug.com/371197.
     StreamParser::BufferQueue buffer_to_append;
     buffer_to_append.push_back(frame);
     track_buffer->stream()->Append(buffer_to_append);

     // 19. Set last decode timestamp for track buffer to decode timestamp.
(...skipping 23 matching lines...)
 }

 void FrameProcessor::SetAllTrackBuffersNeedRandomAccessPoint() {
   for (TrackBufferMap::iterator itr = track_buffers_.begin();
        itr != track_buffers_.end(); ++itr) {
     itr->second->set_needs_random_access_point(true);
   }
 }

 }  // namespace media
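
For reference, the branch removed above handled front trimming inline: a keyframe that starts before appendWindowStart but ends inside the append window is kept by recording the overlap as front discard padding, moving its PTS and DTS up to appendWindowStart, and shrinking its duration so the frame end timestamp stays the same. This CL moves that bookkeeping into HandlePartialAppendWindowTrimming(), which may also attach preroll, after which ProcessFrame() simply re-reads the buffer's adjusted timestamp, decode timestamp, and duration. A minimal standalone sketch of the trimming arithmetic, using plain double seconds in place of base::TimeDelta; TrimResult and TrimToAppendWindowStart are hypothetical names, not code from this CL:

// Illustration only: the front-trimming arithmetic from the removed branch,
// with double seconds standing in for base::TimeDelta.
#include <cassert>

struct TrimResult {
  double discard_front;  // Leading output to drop; becomes discard padding.
  double pts;            // Adjusted presentation timestamp.
  double duration;       // Adjusted duration.
};

// Trims a keyframe that starts before |window_start| but ends inside the
// append window, keeping its end timestamp unchanged.
TrimResult TrimToAppendWindowStart(double pts, double duration,
                                   double window_start) {
  const double frame_end = pts + duration;
  assert(pts < window_start && frame_end > window_start);

  TrimResult result;
  result.discard_front = window_start - pts;  // Overlap with the window start.
  result.pts = window_start;                  // Frame now starts at the window.
  result.duration = frame_end - result.pts;   // End timestamp is preserved.
  return result;
}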
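
ProcessFrame() handles exactly one coded frame; the per-frame loop that drives it (the coded frame processing entry point whose tail, including the step 5 duration update, appears at the top of this diff) is outside the lines shown. A rough sketch of such a driving loop against the new const-reference signature; ProcessAllFrames is a hypothetical name and the error handling is an assumption, not code from this CL:

// Sketch only: feed each parsed buffer through ProcessFrame(). Meant to be
// read in the context of the FrameProcessor class above.
bool FrameProcessor::ProcessAllFrames(
    const StreamParser::BufferQueue& frames,
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    base::TimeDelta* timestamp_offset,
    bool* new_media_segment) {
  for (StreamParser::BufferQueue::const_iterator it = frames.begin();
       it != frames.end(); ++it) {
    // A false return (for example, a negative timestamp after applying
    // timestampOffset) aborts the whole append.
    if (!ProcessFrame(*it, append_window_start, append_window_end,
                      timestamp_offset, new_media_segment)) {
      return false;
    }
  }
  return true;
}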