OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/frame_processor.h" | 5 #include "media/filters/frame_processor.h" |
6 | 6 |
7 #include <cstdlib> | 7 #include <cstdlib> |
8 | 8 |
9 #include "base/stl_util.h" | 9 #include "base/stl_util.h" |
10 #include "media/base/buffers.h" | 10 #include "media/base/buffers.h" |
11 #include "media/base/stream_parser_buffer.h" | 11 #include "media/base/stream_parser_buffer.h" |
12 | 12 |
13 namespace media { | 13 namespace media { |
14 | 14 |
15 // Helper class to capture per-track details needed by a frame processor. Some | 15 // Helper class to capture per-track details needed by a frame processor. Some |
16 // of this information may be duplicated in the short term in the associated | 16 // of this information may be duplicated in the short term in the associated |
17 // ChunkDemuxerStream and SourceBufferStream for a track. | 17 // ChunkDemuxerStream and SourceBufferStream for a track. |
18 // This parallels each of a SourceBuffer's Track Buffers in the MSE spec at | 18 // This parallels each of a SourceBuffer's Track Buffers in the MSE spec at |
19 // http://www.w3.org/TR/media-source/#track-buffers. | 19 // http://www.w3.org/TR/media-source/#track-buffers. |
20 class MseTrackBuffer { | 20 class MseTrackBuffer { |
21 public: | 21 public: |
22 explicit MseTrackBuffer(ChunkDemuxerStream* stream); | 22 explicit MseTrackBuffer(ChunkDemuxerStream* stream); |
23 ~MseTrackBuffer(); | 23 ~MseTrackBuffer(); |
24 | 24 |
25 // Get/set |last_decode_timestamp_|. | 25 // Get/set |last_decode_timestamp_|. |
26 base::TimeDelta last_decode_timestamp() const { | 26 DecodeTimestamp last_decode_timestamp() const { |
27 return last_decode_timestamp_; | 27 return last_decode_timestamp_; |
28 } | 28 } |
29 void set_last_decode_timestamp(base::TimeDelta timestamp) { | 29 void set_last_decode_timestamp(DecodeTimestamp timestamp) { |
30 last_decode_timestamp_ = timestamp; | 30 last_decode_timestamp_ = timestamp; |
31 } | 31 } |
32 | 32 |
33 // Get/set |last_frame_duration_|. | 33 // Get/set |last_frame_duration_|. |
34 base::TimeDelta last_frame_duration() const { | 34 base::TimeDelta last_frame_duration() const { |
35 return last_frame_duration_; | 35 return last_frame_duration_; |
36 } | 36 } |
37 void set_last_frame_duration(base::TimeDelta duration) { | 37 void set_last_frame_duration(base::TimeDelta duration) { |
38 last_frame_duration_ = duration; | 38 last_frame_duration_ = duration; |
39 } | 39 } |
(...skipping 31 matching lines...)
71 void EnqueueProcessedFrame(const scoped_refptr<StreamParserBuffer>& frame); | 71 void EnqueueProcessedFrame(const scoped_refptr<StreamParserBuffer>& frame); |
72 | 72 |
73 // Appends |processed_frames_|, if not empty, to |stream_| and clears | 73 // Appends |processed_frames_|, if not empty, to |stream_| and clears |
74 // |processed_frames_|. Returns false if append failed, true otherwise. | 74 // |processed_frames_|. Returns false if append failed, true otherwise. |
75 // |processed_frames_| is cleared in both cases. | 75 // |processed_frames_| is cleared in both cases. |
76 bool FlushProcessedFrames(); | 76 bool FlushProcessedFrames(); |
77 | 77 |
78 private: | 78 private: |
79 // The decode timestamp of the last coded frame appended in the current coded | 79 // The decode timestamp of the last coded frame appended in the current coded |
80 // frame group. Initially kNoTimestamp(), meaning "unset". | 80 // frame group. Initially kNoDecodeTimestamp(), meaning "unset". |
81 base::TimeDelta last_decode_timestamp_; | 81 DecodeTimestamp last_decode_timestamp_; |
82 | 82 |
83 // The coded frame duration of the last coded frame appended in the current | 83 // The coded frame duration of the last coded frame appended in the current |
84 // coded frame group. Initially kNoTimestamp(), meaning "unset". | 84 // coded frame group. Initially kNoTimestamp(), meaning "unset". |
85 base::TimeDelta last_frame_duration_; | 85 base::TimeDelta last_frame_duration_; |
86 | 86 |
87 // The highest presentation timestamp encountered in a coded frame appended | 87 // The highest presentation timestamp encountered in a coded frame appended |
88 // in the current coded frame group. Initially kNoTimestamp(), meaning | 88 // in the current coded frame group. Initially kNoTimestamp(), meaning |
89 // "unset". | 89 // "unset". |
90 base::TimeDelta highest_presentation_timestamp_; | 90 base::TimeDelta highest_presentation_timestamp_; |
91 | 91 |
92 // Keeps track of whether the track buffer is waiting for a random access | 92 // Keeps track of whether the track buffer is waiting for a random access |
93 // point coded frame. Initially set to true to indicate that a random access | 93 // point coded frame. Initially set to true to indicate that a random access |
94 // point coded frame is needed before anything can be added to the track | 94 // point coded frame is needed before anything can be added to the track |
95 // buffer. | 95 // buffer. |
96 bool needs_random_access_point_; | 96 bool needs_random_access_point_; |
97 | 97 |
98 // Pointer to the stream associated with this track. The stream is not owned | 98 // Pointer to the stream associated with this track. The stream is not owned |
99 // by |this|. | 99 // by |this|. |
100 ChunkDemuxerStream* const stream_; | 100 ChunkDemuxerStream* const stream_; |
101 | 101 |
102 // Queue of processed frames that have not yet been appended to |stream_|. | 102 // Queue of processed frames that have not yet been appended to |stream_|. |
103 // EnqueueProcessedFrame() adds to this queue, and FlushProcessedFrames() | 103 // EnqueueProcessedFrame() adds to this queue, and FlushProcessedFrames() |
104 // clears it. | 104 // clears it. |
105 StreamParser::BufferQueue processed_frames_; | 105 StreamParser::BufferQueue processed_frames_; |
106 | 106 |
107 DISALLOW_COPY_AND_ASSIGN(MseTrackBuffer); | 107 DISALLOW_COPY_AND_ASSIGN(MseTrackBuffer); |
108 }; | 108 }; |
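Note: the change of |last_decode_timestamp_| from base::TimeDelta to DecodeTimestamp above is the heart of this patch; decode times become a distinct type that cannot be silently mixed with presentation timestamps. Below is a minimal sketch of the kind of wrapper this relies on. It is illustrative only (all names here are invented) and mirrors just the operations the new code actually exercises: a kNoDecodeTimestamp()-style "unset" sentinel, FromPresentationTime()/ToPresentationTime() conversions, comparisons, subtraction yielding a base::TimeDelta, and InSecondsF() for logging.

    // Illustrative sketch only -- not Chromium's real DecodeTimestamp, whose
    // definition comes in through this file's existing media/base includes.
    #include "base/time/time.h"

    class SketchDecodeTimestamp {
     public:
      SketchDecodeTimestamp() {}  // Zero decode time, like DecodeTimestamp().

      static SketchDecodeTimestamp FromPresentationTime(base::TimeDelta pts) {
        return SketchDecodeTimestamp(pts);
      }
      base::TimeDelta ToPresentationTime() const { return value_; }

      // Two decode timestamps differ by a plain duration.
      base::TimeDelta operator-(SketchDecodeTimestamp other) const {
        return value_ - other.value_;
      }
      bool operator==(SketchDecodeTimestamp other) const {
        return value_ == other.value_;
      }
      bool operator!=(SketchDecodeTimestamp other) const {
        return value_ != other.value_;
      }
      bool operator<(SketchDecodeTimestamp other) const {
        return value_ < other.value_;
      }

      double InSecondsF() const { return value_.InSecondsF(); }

     private:
      explicit SketchDecodeTimestamp(base::TimeDelta value) : value_(value) {}
      base::TimeDelta value_;  // Underlying representation.
    };

    // "Unset" sentinel, analogous to kNoDecodeTimestamp() in the new code.
    // Any value a real frame can never carry works for this sketch.
    inline SketchDecodeTimestamp SketchNoDecodeTimestamp() {
      return SketchDecodeTimestamp::FromPresentationTime(base::TimeDelta::Max());
    }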
109 | 109 |
110 MseTrackBuffer::MseTrackBuffer(ChunkDemuxerStream* stream) | 110 MseTrackBuffer::MseTrackBuffer(ChunkDemuxerStream* stream) |
111 : last_decode_timestamp_(kNoTimestamp()), | 111 : last_decode_timestamp_(kNoDecodeTimestamp()), |
112 last_frame_duration_(kNoTimestamp()), | 112 last_frame_duration_(kNoTimestamp()), |
113 highest_presentation_timestamp_(kNoTimestamp()), | 113 highest_presentation_timestamp_(kNoTimestamp()), |
114 needs_random_access_point_(true), | 114 needs_random_access_point_(true), |
115 stream_(stream) { | 115 stream_(stream) { |
116 DCHECK(stream_); | 116 DCHECK(stream_); |
117 } | 117 } |
118 | 118 |
119 MseTrackBuffer::~MseTrackBuffer() { | 119 MseTrackBuffer::~MseTrackBuffer() { |
120 DVLOG(2) << __FUNCTION__ << "()"; | 120 DVLOG(2) << __FUNCTION__ << "()"; |
121 } | 121 } |
122 | 122 |
123 void MseTrackBuffer::Reset() { | 123 void MseTrackBuffer::Reset() { |
124 DVLOG(2) << __FUNCTION__ << "()"; | 124 DVLOG(2) << __FUNCTION__ << "()"; |
125 | 125 |
126 last_decode_timestamp_ = kNoTimestamp(); | 126 last_decode_timestamp_ = kNoDecodeTimestamp(); |
127 last_frame_duration_ = kNoTimestamp(); | 127 last_frame_duration_ = kNoTimestamp(); |
128 highest_presentation_timestamp_ = kNoTimestamp(); | 128 highest_presentation_timestamp_ = kNoTimestamp(); |
129 needs_random_access_point_ = true; | 129 needs_random_access_point_ = true; |
130 } | 130 } |
131 | 131 |
132 void MseTrackBuffer::SetHighestPresentationTimestampIfIncreased( | 132 void MseTrackBuffer::SetHighestPresentationTimestampIfIncreased( |
133 base::TimeDelta timestamp) { | 133 base::TimeDelta timestamp) { |
134 if (highest_presentation_timestamp_ == kNoTimestamp() || | 134 if (highest_presentation_timestamp_ == kNoTimestamp() || |
135 timestamp > highest_presentation_timestamp_) { | 135 timestamp > highest_presentation_timestamp_) { |
136 highest_presentation_timestamp_ = timestamp; | 136 highest_presentation_timestamp_ = timestamp; |
(...skipping 158 matching lines...)
295 | 295 |
296 MseTrackBuffer* FrameProcessor::FindTrack(StreamParser::TrackId id) { | 296 MseTrackBuffer* FrameProcessor::FindTrack(StreamParser::TrackId id) { |
297 TrackBufferMap::iterator itr = track_buffers_.find(id); | 297 TrackBufferMap::iterator itr = track_buffers_.find(id); |
298 if (itr == track_buffers_.end()) | 298 if (itr == track_buffers_.end()) |
299 return NULL; | 299 return NULL; |
300 | 300 |
301 return itr->second; | 301 return itr->second; |
302 } | 302 } |
303 | 303 |
304 void FrameProcessor::NotifyNewMediaSegmentStarting( | 304 void FrameProcessor::NotifyNewMediaSegmentStarting( |
305 base::TimeDelta segment_timestamp) { | 305 DecodeTimestamp segment_timestamp) { |
306 DVLOG(2) << __FUNCTION__ << "(" << segment_timestamp.InSecondsF() << ")"; | 306 DVLOG(2) << __FUNCTION__ << "(" << segment_timestamp.InSecondsF() << ")"; |
307 | 307 |
308 for (TrackBufferMap::iterator itr = track_buffers_.begin(); | 308 for (TrackBufferMap::iterator itr = track_buffers_.begin(); |
309 itr != track_buffers_.end(); | 309 itr != track_buffers_.end(); |
310 ++itr) { | 310 ++itr) { |
311 itr->second->stream()->OnNewMediaSegment(segment_timestamp); | 311 itr->second->stream()->OnNewMediaSegment(segment_timestamp); |
312 } | 312 } |
313 } | 313 } |
314 | 314 |
315 bool FrameProcessor::FlushProcessedFrames() { | 315 bool FrameProcessor::FlushProcessedFrames() { |
(...skipping 67 matching lines...)
383 << " frame_end_timestamp " << frame_end_timestamp.InSecondsF() | 383 << " frame_end_timestamp " << frame_end_timestamp.InSecondsF() |
384 << " append_window_start " << append_window_start.InSecondsF(); | 384 << " append_window_start " << append_window_start.InSecondsF(); |
385 | 385 |
386 // Mark the overlapping portion of the buffer for discard. | 386 // Mark the overlapping portion of the buffer for discard. |
387 buffer->set_discard_padding(std::make_pair( | 387 buffer->set_discard_padding(std::make_pair( |
388 append_window_start - buffer->timestamp(), base::TimeDelta())); | 388 append_window_start - buffer->timestamp(), base::TimeDelta())); |
389 | 389 |
390 // Adjust the timestamp of this buffer forward to |append_window_start| and | 390 // Adjust the timestamp of this buffer forward to |append_window_start| and |
391 // decrease the duration to compensate. | 391 // decrease the duration to compensate. |
392 buffer->set_timestamp(append_window_start); | 392 buffer->set_timestamp(append_window_start); |
393 buffer->SetDecodeTimestamp(append_window_start); | 393 buffer->SetDecodeTimestamp( |
| 394 DecodeTimestamp::FromPresentationTime(append_window_start)); |
394 buffer->set_duration(frame_end_timestamp - append_window_start); | 395 buffer->set_duration(frame_end_timestamp - append_window_start); |
395 processed_buffer = true; | 396 processed_buffer = true; |
396 } | 397 } |
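Note: the start-side trim above does three coordinated things: it records how much decoded output to throw away via the discard padding, moves the buffer's PTS (and DTS) forward to |append_window_start|, and shrinks the duration so the frame still ends at the same point. A worked example with invented values, not taken from this CL:

    #include "base/time/time.h"

    // Illustrative values only: how the start-side trim rewrites a frame that
    // straddles |append_window_start|.
    void SketchFrontTrimExample() {
      // Frame: PTS = 0.90 s, duration = 0.20 s, so it ends at 1.10 s.
      base::TimeDelta pts = base::TimeDelta::FromMilliseconds(900);
      base::TimeDelta duration = base::TimeDelta::FromMilliseconds(200);
      base::TimeDelta append_window_start = base::TimeDelta::FromSeconds(1);

      base::TimeDelta frame_end = pts + duration;                 // 1.10 s
      base::TimeDelta front_discard = append_window_start - pts;  // 0.10 s to discard
      base::TimeDelta new_pts = append_window_start;              // 1.00 s
      base::TimeDelta new_duration = frame_end - new_pts;         // 0.10 s remains
      // |front_discard| is the first half of the discard_padding pair; |new_pts|
      // and |new_duration| are what set_timestamp() and set_duration() receive.
    }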
397 | 398 |
398 // See if a partial discard can be done around |append_window_end|. | 399 // See if a partial discard can be done around |append_window_end|. |
399 if (frame_end_timestamp > append_window_end) { | 400 if (frame_end_timestamp > append_window_end) { |
400 DVLOG(1) << "Truncating buffer which overlaps append window end." | 401 DVLOG(1) << "Truncating buffer which overlaps append window end." |
401 << " presentation_timestamp " << buffer->timestamp().InSecondsF() | 402 << " presentation_timestamp " << buffer->timestamp().InSecondsF() |
402 << " frame_end_timestamp " << frame_end_timestamp.InSecondsF() | 403 << " frame_end_timestamp " << frame_end_timestamp.InSecondsF() |
403 << " append_window_end " << append_window_end.InSecondsF(); | 404 << " append_window_end " << append_window_end.InSecondsF(); |
(...skipping 23 matching lines...)
427 // media-source.html#sourcebuffer-coded-frame-processing | 428 // media-source.html#sourcebuffer-coded-frame-processing |
428 | 429 |
429 while (true) { | 430 while (true) { |
430 // 1. Loop Top: Let presentation timestamp be a double precision floating | 431 // 1. Loop Top: Let presentation timestamp be a double precision floating |
431 // point representation of the coded frame's presentation timestamp in | 432 // point representation of the coded frame's presentation timestamp in |
432 // seconds. | 433 // seconds. |
433 // 2. Let decode timestamp be a double precision floating point | 434 // 2. Let decode timestamp be a double precision floating point |
434 // representation of the coded frame's decode timestamp in seconds. | 435 // representation of the coded frame's decode timestamp in seconds. |
435 // 3. Let frame duration be a double precision floating point representation | 436 // 3. Let frame duration be a double precision floating point representation |
436 // of the coded frame's duration in seconds. | 437 // of the coded frame's duration in seconds. |
437 // We use base::TimeDelta instead of double. | 438 // We use base::TimeDelta and DecodeTimestamp instead of double. |
438 base::TimeDelta presentation_timestamp = frame->timestamp(); | 439 base::TimeDelta presentation_timestamp = frame->timestamp(); |
439 base::TimeDelta decode_timestamp = frame->GetDecodeTimestamp(); | 440 DecodeTimestamp decode_timestamp = frame->GetDecodeTimestamp(); |
440 base::TimeDelta frame_duration = frame->duration(); | 441 base::TimeDelta frame_duration = frame->duration(); |
441 | 442 |
442 DVLOG(3) << __FUNCTION__ << ": Processing frame " | 443 DVLOG(3) << __FUNCTION__ << ": Processing frame " |
443 << "Type=" << frame->type() | 444 << "Type=" << frame->type() |
444 << ", TrackID=" << frame->track_id() | 445 << ", TrackID=" << frame->track_id() |
445 << ", PTS=" << presentation_timestamp.InSecondsF() | 446 << ", PTS=" << presentation_timestamp.InSecondsF() |
446 << ", DTS=" << decode_timestamp.InSecondsF() | 447 << ", DTS=" << decode_timestamp.InSecondsF() |
447 << ", DUR=" << frame_duration.InSecondsF() | 448 << ", DUR=" << frame_duration.InSecondsF() |
448 << ", RAP=" << frame->IsKeyframe(); | 449 << ", RAP=" << frame->IsKeyframe(); |
449 | 450 |
450 // Sanity check the timestamps. | 451 // Sanity check the timestamps. |
451 if (presentation_timestamp == kNoTimestamp()) { | 452 if (presentation_timestamp == kNoTimestamp()) { |
452 DVLOG(2) << __FUNCTION__ << ": Unknown frame PTS"; | 453 DVLOG(2) << __FUNCTION__ << ": Unknown frame PTS"; |
453 return false; | 454 return false; |
454 } | 455 } |
455 if (decode_timestamp == kNoTimestamp()) { | 456 if (decode_timestamp == kNoDecodeTimestamp()) { |
456 DVLOG(2) << __FUNCTION__ << ": Unknown frame DTS"; | 457 DVLOG(2) << __FUNCTION__ << ": Unknown frame DTS"; |
457 return false; | 458 return false; |
458 } | 459 } |
459 if (decode_timestamp > presentation_timestamp) { | 460 if (decode_timestamp.ToPresentationTime() > presentation_timestamp) { |
460 // TODO(wolenetz): Determine whether DTS>PTS should really be allowed. See | 461 // TODO(wolenetz): Determine whether DTS>PTS should really be allowed. See |
461 // http://crbug.com/354518. | 462 // http://crbug.com/354518. |
462 DVLOG(2) << __FUNCTION__ << ": WARNING: Frame DTS(" | 463 DVLOG(2) << __FUNCTION__ << ": WARNING: Frame DTS(" |
463 << decode_timestamp.InSecondsF() << ") > PTS(" | 464 << decode_timestamp.InSecondsF() << ") > PTS(" |
464 << presentation_timestamp.InSecondsF() << ")"; | 465 << presentation_timestamp.InSecondsF() << ")"; |
465 } | 466 } |
466 | 467 |
467 // TODO(acolwell/wolenetz): All stream parsers must emit valid (positive) | 468 // TODO(acolwell/wolenetz): All stream parsers must emit valid (positive) |
468 // frame durations. For now, we allow non-negative frame duration. | 469 // frame durations. For now, we allow non-negative frame duration. |
469 // See http://crbug.com/351166. | 470 // See http://crbug.com/351166. |
(...skipping 67 matching lines...)
537 << ", parser track id=" << frame->track_id(); | 538 << ", parser track id=" << frame->track_id(); |
538 return false; | 539 return false; |
539 } | 540 } |
540 | 541 |
541 // 7. If last decode timestamp for track buffer is set and decode timestamp | 542 // 7. If last decode timestamp for track buffer is set and decode timestamp |
542 // is less than last decode timestamp | 543 // is less than last decode timestamp |
543 // OR | 544 // OR |
544 // If last decode timestamp for track buffer is set and the difference | 545 // If last decode timestamp for track buffer is set and the difference |
545 // between decode timestamp and last decode timestamp is greater than 2 | 546 // between decode timestamp and last decode timestamp is greater than 2 |
546 // times last frame duration: | 547 // times last frame duration: |
547 base::TimeDelta last_decode_timestamp = | 548 DecodeTimestamp last_decode_timestamp = |
548 track_buffer->last_decode_timestamp(); | 549 track_buffer->last_decode_timestamp(); |
549 if (last_decode_timestamp != kNoTimestamp()) { | 550 if (last_decode_timestamp != kNoDecodeTimestamp()) { |
550 base::TimeDelta dts_delta = decode_timestamp - last_decode_timestamp; | 551 base::TimeDelta dts_delta = decode_timestamp - last_decode_timestamp; |
551 if (dts_delta < base::TimeDelta() || | 552 if (dts_delta < base::TimeDelta() || |
552 dts_delta > 2 * track_buffer->last_frame_duration()) { | 553 dts_delta > 2 * track_buffer->last_frame_duration()) { |
553 // 7.1. If mode equals "segments": Set group end timestamp to | 554 // 7.1. If mode equals "segments": Set group end timestamp to |
554 // presentation timestamp. | 555 // presentation timestamp. |
555 // If mode equals "sequence": Set group start timestamp equal to | 556 // If mode equals "sequence": Set group start timestamp equal to |
556 // the group end timestamp. | 557 // the group end timestamp. |
557 if (!sequence_mode_) { | 558 if (!sequence_mode_) { |
558 group_end_timestamp_ = presentation_timestamp; | 559 group_end_timestamp_ = presentation_timestamp; |
559 // This triggers a discontinuity so we need to treat the next frames | 560 // This triggers a discontinuity so we need to treat the next frames |
(...skipping 59 matching lines...)
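Note: step 7 above treats two cases as a discontinuity: DTS running backwards, or DTS jumping forward by more than twice the previous frame's duration. Here is that predicate factored out purely for illustration; the function name is invented, the parameters correspond to the track buffer's last_decode_timestamp() and last_frame_duration(), and the types come from this file's existing includes:

    // Illustrative only: the step 7 discontinuity test in isolation.
    bool SketchIsDiscontinuity(DecodeTimestamp dts,
                               DecodeTimestamp last_dts,
                               base::TimeDelta last_duration) {
      if (last_dts == kNoDecodeTimestamp())
        return false;  // First frame in the group; nothing to compare against.
      base::TimeDelta dts_delta = dts - last_dts;
      return dts_delta < base::TimeDelta() ||  // DTS moved backwards, or
             dts_delta > 2 * last_duration;    // jumped by more than 2x last duration.
    }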
619 return true; | 620 return true; |
620 } | 621 } |
621 | 622 |
622 // Note: This step is relocated, versus April 1 spec, to allow append window | 623 // Note: This step is relocated, versus April 1 spec, to allow append window |
623 // processing to first filter coded frames shifted by |timestamp_offset_| in | 624 // processing to first filter coded frames shifted by |timestamp_offset_| in |
624 // such a way that their PTS is negative. | 625 // such a way that their PTS is negative. |
625 // 8. If the presentation timestamp or decode timestamp is less than the | 626 // 8. If the presentation timestamp or decode timestamp is less than the |
626 // presentation start time, then run the end of stream algorithm with the | 627 // presentation start time, then run the end of stream algorithm with the |
627 // error parameter set to "decode", and abort these steps. | 628 // error parameter set to "decode", and abort these steps. |
628 DCHECK(presentation_timestamp >= base::TimeDelta()); | 629 DCHECK(presentation_timestamp >= base::TimeDelta()); |
629 if (decode_timestamp < base::TimeDelta()) { | 630 if (decode_timestamp < DecodeTimestamp()) { |
630 // B-frames may still result in negative DTS here after being shifted by | 631 // B-frames may still result in negative DTS here after being shifted by |
631 // |timestamp_offset_|. | 632 // |timestamp_offset_|. |
632 DVLOG(2) << __FUNCTION__ | 633 DVLOG(2) << __FUNCTION__ |
633 << ": frame PTS=" << presentation_timestamp.InSecondsF() | 634 << ": frame PTS=" << presentation_timestamp.InSecondsF() |
634 << " has negative DTS=" << decode_timestamp.InSecondsF() | 635 << " has negative DTS=" << decode_timestamp.InSecondsF() |
635 << " after applying timestampOffset, handling any discontinuity," | 636 << " after applying timestampOffset, handling any discontinuity," |
636 << " and filtering against append window"; | 637 << " and filtering against append window"; |
637 return false; | 638 return false; |
638 } | 639 } |
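Note: the reasoning in this block is that once |timestamp_offset_| and the append window have been applied, PTS can no longer be negative (hence the DCHECK), yet frame reordering can still leave DTS slightly below zero. A numeric illustration with invented values:

    #include "base/time/time.h"

    // Illustrative values only: why a frame can reach this point with PTS >= 0
    // but DTS < 0. With B-frame reordering, the first coded frame's DTS commonly
    // precedes its PTS by roughly one frame duration.
    void SketchNegativeDtsExample() {
      base::TimeDelta offset = base::TimeDelta::FromSeconds(-10);  // timestampOffset
      base::TimeDelta pts = base::TimeDelta::FromSecondsD(10.000) + offset;
      base::TimeDelta dts = base::TimeDelta::FromSecondsD(9.967) + offset;
      // pts ==  0.000 s: non-negative, so the DCHECK above holds.
      // dts == -0.033 s: negative, so step 8 rejects the frame even though its
      // presentation time lies inside the presentation timeline.
    }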
639 | 640 |
(...skipping 16 matching lines...)
656 // We now have a processed buffer to append to the track buffer's stream. | 657 // We now have a processed buffer to append to the track buffer's stream. |
657 // If it is the first in a new media segment or following a discontinuity, | 658 // If it is the first in a new media segment or following a discontinuity, |
658 // notify all the track buffers' streams that a new segment is beginning. | 659 // notify all the track buffers' streams that a new segment is beginning. |
659 if (*new_media_segment) { | 660 if (*new_media_segment) { |
660 // First, complete the append to track buffer streams of previous media | 661 // First, complete the append to track buffer streams of previous media |
661 // segment's frames, if any. | 662 // segment's frames, if any. |
662 if (!FlushProcessedFrames()) | 663 if (!FlushProcessedFrames()) |
663 return false; | 664 return false; |
664 | 665 |
665 *new_media_segment = false; | 666 *new_media_segment = false; |
| 667 |
| 668 // TODO(acolwell/wolenetz): This should be changed to a presentation |
| 669 // timestamp. See http://crbug.com/402502 |
666 NotifyNewMediaSegmentStarting(decode_timestamp); | 670 NotifyNewMediaSegmentStarting(decode_timestamp); |
667 } | 671 } |
668 | 672 |
669 DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, " | 673 DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, " |
670 << "PTS=" << presentation_timestamp.InSecondsF() | 674 << "PTS=" << presentation_timestamp.InSecondsF() |
671 << ", DTS=" << decode_timestamp.InSecondsF(); | 675 << ", DTS=" << decode_timestamp.InSecondsF(); |
672 | 676 |
673 // Steps 13-18: Note, we optimize by appending groups of contiguous | 677 // Steps 13-18: Note, we optimize by appending groups of contiguous |
674 // processed frames for each track buffer at end of ProcessFrames() or prior | 678 // processed frames for each track buffer at end of ProcessFrames() or prior |
675 // to NotifyNewMediaSegmentStarting(). | 679 // to NotifyNewMediaSegmentStarting(). |
(...skipping 21 matching lines...)
697 DCHECK(group_end_timestamp_ >= base::TimeDelta()); | 701 DCHECK(group_end_timestamp_ >= base::TimeDelta()); |
698 | 702 |
699 return true; | 703 return true; |
700 } | 704 } |
701 | 705 |
702 NOTREACHED(); | 706 NOTREACHED(); |
703 return false; | 707 return false; |
704 } | 708 } |
705 | 709 |
706 } // namespace media | 710 } // namespace media |