Index: net/http/http_cache_transaction.cc |
diff --git a/net/http/http_cache_transaction.cc b/net/http/http_cache_transaction.cc |
index 286a526a3edb2fa39e7101e10429cf3c15b7920a..5436ff4cef507e824a34e283833e853b9776d3cb 100644 |
--- a/net/http/http_cache_transaction.cc |
+++ b/net/http/http_cache_transaction.cc |
@@ -1382,7 +1382,20 @@ int HttpCache::Transaction::DoAddToEntry() { |
if (bypass_lock_for_test_) { |
OnAddToEntryTimeout(entry_lock_waiting_since_); |
} else { |
- const int kTimeoutSeconds = 20; |
+ // Immediately timeout and bypass the cache if we're a range request and |
+ // we're blocked by the reader/writer lock. Doing so eliminates a long |
+ // running issue, http://crbug.com/31014, where two of the same media |
+ // resources could not be played back simultaneously due to one locking |
+ // the cache entry until the entire video was downloaded. |
+ // |
+ // Bypassing the cache is not ideal, as we are now ignoring the cache |
+ // entirely for all range requests to a resource beyond the first. This is |
+ // however a much more succinct solution than the alternatives, which |
+ // would require somewhat significant changes to the http caching logic. |
+ const int kTimeoutSeconds = (partial_ && new_entry_->writer &&
+                              new_entry_->writer->range_requested_)
+                                 ? 0
+                                 : 20;

rvargas (doing something else)
2014/08/22 22:19:24
nit:
    int timeout_seconds = 20;
    if (partial_ ...)

DaleCurtis
2014/08/22 22:33:03
Done.
base::MessageLoop::current()->PostDelayedTask( |
FROM_HERE, |
base::Bind(&HttpCache::Transaction::OnAddToEntryTimeout, |