Index: media/formats/webm/webm_cluster_parser.h
diff --git a/media/formats/webm/webm_cluster_parser.h b/media/formats/webm/webm_cluster_parser.h
index 759fba9d6341800a6e58d1793435fdc03b11ef91..4fdd443cb4290dcc927df71e5007b6f0aa98b0d4 100644
--- a/media/formats/webm/webm_cluster_parser.h
+++ b/media/formats/webm/webm_cluster_parser.h
@@ -27,11 +27,14 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
   typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
   typedef std::map<TrackId, const BufferQueue> TextBufferQueueMap;

-  // Arbitrarily-chosen numbers to estimate the duration of a buffer if none is
-  // set and there is not enough information to get a better estimate.
+  // Numbers chosen to estimate the duration of a buffer if none is set and
+  // there is not enough information to get a better estimate.
   enum {
-    kDefaultAudioBufferDurationInMs = 23,  // Common 1k samples @44.1kHz
-    kDefaultVideoBufferDurationInMs = 42   // Low 24fps to reduce stalls
+    // Common 1k samples @44.1kHz
+    kDefaultAudioBufferDurationInMs = 23,
+    // Chosen to represent 16fps duration, which will prevent MSE stalls in
wolenetz 2015/04/15 02:55:23
nit: insert blank line to break the two comment+item pairs.

chcunningham 2015/04/16 18:04:16
Done.
+    // videos with frame-rates as low as 8fps.
+    kDefaultVideoBufferDurationInMs = 63
   };

   // Opus packets encode the duration and other parameters in the 5 most
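The arithmetic behind the two defaults, as a standalone sketch rather than part of the patch (the 2x gap-tolerance figure used to relate 16fps to the "as low as 8fps" claim is an assumption about the downstream fudge factor, not something this diff states):

    #include <cassert>
    #include <cmath>

    int main() {
      // Audio: a common 1024-sample frame at 44.1 kHz lasts
      // 1024 / 44100 s ~= 23.2 ms, which truncates to the 23 ms default.
      double audio_ms = 1024.0 / 44100.0 * 1000.0;
      assert(static_cast<int>(audio_ms) == 23);

      // Video: one frame at 16 fps lasts 1000 / 16 = 62.5 ms, rounded up to
      // the 63 ms default. If downstream buffering tolerates gaps of roughly
      // 2x the estimated duration, a 63 ms estimate covers frame rates down
      // to about 1000 / (2 * 63) ~= 7.9 fps, i.e. roughly 8fps.
      double video_ms = 1000.0 / 16.0;
      assert(static_cast<int>(std::ceil(video_ms)) == 63);
      return 0;
    }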
@@ -72,8 +75,8 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
   bool AddBuffer(const scoped_refptr<StreamParserBuffer>& buffer);

   // If |last_added_buffer_missing_duration_| is set, updates its duration to
-  // be non-kNoTimestamp() value of |estimated_next_frame_duration_| or an
-  // arbitrary default, then adds it to |buffers_| and unsets
+  // be non-kNoTimestamp() value of |estimated_next_frame_duration_| or a
+  // hard-coded default, then adds it to |buffers_| and unsets
   // |last_added_buffer_missing_duration_|. (This method helps stream parser
   // emit all buffers in a media segment before signaling end of segment.)
   void ApplyDurationEstimateIfNeeded();
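A simplified, standalone model of the flow that comment describes (FakeBuffer, FakeTrack, and the exact queueing details are stand-ins chosen for illustration, not the real StreamParserBuffer or track code):

    #include <deque>
    #include <memory>

    // Stand-in for StreamParserBuffer; -1 models kNoTimestamp().
    struct FakeBuffer {
      int duration_ms = -1;
    };

    struct FakeTrack {
      std::deque<std::shared_ptr<FakeBuffer>> buffers;
      std::shared_ptr<FakeBuffer> last_added_buffer_missing_duration;
      int estimated_next_frame_duration_ms = -1;  // -1 models kNoTimestamp()
      bool is_video = true;

      // Give the held buffer the best available duration, queue it, then
      // clear the "missing duration" slot, mirroring the comment above.
      void ApplyDurationEstimateIfNeeded() {
        if (!last_added_buffer_missing_duration)
          return;
        int estimate = estimated_next_frame_duration_ms != -1
                           ? estimated_next_frame_duration_ms
                           : (is_video ? 63 : 23);  // hard-coded defaults
        last_added_buffer_missing_duration->duration_ms = estimate;
        buffers.push_back(last_added_buffer_missing_duration);
        last_added_buffer_missing_duration.reset();
      }
    };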
@@ -107,6 +110,10 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
   // ApplyDurationEstimateIfNeeded().
   base::TimeDelta GetDurationEstimate();
+  // Counts the number of estimated durations used in this track. Used to
+  // prevent log spam for MEDIA_LOG()s about estimated duration.
+  int num_duration_estimates_;
+
   int track_num_;
   bool is_video_;
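One way such a counter can cap repeated messages, sketched standalone (kMaxDurationEstimateLogs and the message text are illustrative assumptions, not the patch's actual MEDIA_LOG() wording or limit):

    #include <iostream>

    // Hypothetical cap on how many "estimated duration" messages one track
    // may emit before further messages are suppressed.
    const int kMaxDurationEstimateLogs = 10;

    struct DurationEstimateLogger {
      int num_duration_estimates = 0;

      void OnDurationEstimated(int estimate_ms) {
        // Log the first few estimates, then stay quiet to avoid log spam.
        if (num_duration_estimates < kMaxDurationEstimateLogs) {
          std::cerr << "Estimated duration " << estimate_ms
                    << "ms used for a block with no explicit duration.\n";
        }
        ++num_duration_estimates;
      }
    };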
@@ -129,8 +136,10 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
   base::TimeDelta default_duration_;

   // If kNoTimestamp(), then a default value will be used. This estimate is
-  // the maximum duration seen or derived so far for this track, and is valid
-  // only if |default_duration_| is kNoTimestamp().
+  // the maximum (for video), or minimum (for audio) duration seen so far for
+  // this track, and is used only if |default_duration_| is kNoTimestamp().
+  // TODO(chcunningham): Use maximum for audio too, adding checks to disable
+  // splicing when these estimates are observed in SourceBufferStream.
   base::TimeDelta estimated_next_frame_duration_;

   LogCB log_cb_;
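The "maximum for video, minimum for audio" bookkeeping described above, modeled standalone (the ObserveDuration() hook and the simplified int-milliseconds type are assumptions for illustration, not the parser's real update path):

    #include <algorithm>

    // Stand-in for the per-track estimate; -1 models kNoTimestamp().
    struct EstimateTracker {
      bool is_video = true;
      int estimated_next_frame_duration_ms = -1;

      // Fold a newly observed (or derived) block duration into the estimate:
      // keep the maximum seen for video and the minimum seen for audio (the
      // TODO above notes audio may move to maximum once splicing can be
      // disabled for estimated durations).
      void ObserveDuration(int duration_ms) {
        if (estimated_next_frame_duration_ms == -1) {
          estimated_next_frame_duration_ms = duration_ms;
          return;
        }
        estimated_next_frame_duration_ms =
            is_video ? std::max(estimated_next_frame_duration_ms, duration_ms)
                     : std::min(estimated_next_frame_duration_ms, duration_ms);
      }
    };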