Index: net/http/http_cache_unittest.cc
diff --git a/net/http/http_cache_unittest.cc b/net/http/http_cache_unittest.cc
index 1c05602a357cc41a20923bcb0825d6c0265b2b42..1faf537c286affe39500dd35a5f529e066b8db41 100644
--- a/net/http/http_cache_unittest.cc
+++ b/net/http/http_cache_unittest.cc
@@ -147,7 +147,7 @@ void TestLoadTimingCachedResponse(const LoadTimingInfo& load_timing_info) {
   EXPECT_TRUE(load_timing_info.receive_headers_end.is_null());
 }
 
-void DeferNetworkStart(bool* defer) {
+void DeferCallback(bool* defer) {
   *defer = true;
 }
 
@@ -1478,6 +1478,158 @@ TEST(HttpCache, RangeGET_ParallelValidationNoMatch) {
   EXPECT_EQ(5, cache.disk_cache()->create_count());
 }
 
+// Tests that when a transaction goes to doom an entry that has already been
+// doomed by another transaction (one that was not part of the entry and has
+// since created a new entry), the new entry is not incorrectly doomed.
+// (crbug.com/736993)
+TEST(HttpCache, RangeGET_ParallelValidationNoMatchDoomEntry) {
+  MockHttpCache cache;
+
+  ScopedMockTransaction transaction(kRangeGET_TransactionOK);
+  MockHttpRequest request(transaction);
+
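+  // A request for the same URL that bypasses the cache. When started as the
+  // 3rd transaction below, it dooms the existing entry and creates a new one.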
+  MockTransaction dooming_transaction(kRangeGET_TransactionOK);
+  dooming_transaction.load_flags |= LOAD_BYPASS_CACHE;
+  MockHttpRequest dooming_request(dooming_transaction);
+
+  std::vector<std::unique_ptr<Context>> context_list;
+  const int kNumTransactions = 3;
+
+  for (int i = 0; i < kNumTransactions; ++i) {
+    context_list.push_back(base::MakeUnique<Context>());
+    auto& c = context_list[i];
+
+    c->result = cache.CreateTransaction(&c->trans);
+    ASSERT_THAT(c->result, IsOk());
+    EXPECT_EQ(LOAD_STATE_IDLE, c->trans->GetLoadState());
+
+    MockHttpRequest* this_request = &request;
+
+    if (i == 2)
+      this_request = &dooming_request;
+
+    if (i == 1)
+      cache.disk_cache()->SetDefer(kRangeGET_TransactionOK.url, DEFER_READ);
+
+    c->result = c->trans->Start(this_request, c->callback.callback(),
+                                NetLogWithSource());
+
+    // Continue the transactions. The 2nd transaction will pause at the cache
+    // read and the 3rd will doom the entry.
+    base::RunLoop().RunUntilIdle();
+  }
+
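+  // At this point the original entry should have been doomed by the 3rd
+  // transaction, while the 2nd transaction is still paused in its cache read.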
+  EXPECT_TRUE(
+      cache.disk_cache()->IsDiskEntryDoomed(kRangeGET_TransactionOK.url));
+
+  // Resume the cache read of the 2nd transaction, which will also lead it to
+  // doom the entry since it cannot be validated. This double dooming should
+  // not trigger an assertion failure. Pause before the 2nd transaction
+  // creates an entry so that we can check the status of the entry created by
+  // the 3rd transaction.
+  cache.disk_cache()->SetDefer(DEFER_CREATE);
+  cache.disk_cache()->ResumeDoomedEntryCacheOperation(
+      kRangeGET_TransactionOK.url);
+  base::RunLoop().RunUntilIdle();
+
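+  // The entry created by the 3rd transaction must still be alive: the 2nd
+  // transaction's doom should have applied only to the already-doomed entry,
+  // not to the new one.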
+  EXPECT_TRUE(
+      cache.disk_cache()->IsDiskEntryNotDoomed(kRangeGET_TransactionOK.url));
+
+  // Resume the entry creation by the 2nd transaction.
+  cache.disk_cache()->ResumeCacheOperation();
+  base::RunLoop().RunUntilIdle();
+
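+  // All three transactions went to the network, and each created its own
+  // cache entry; no entry was opened from the cache.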
+  EXPECT_EQ(3, cache.network_layer()->transaction_count());
+  EXPECT_EQ(0, cache.disk_cache()->open_count());
+  EXPECT_EQ(3, cache.disk_cache()->create_count());
+
+  for (auto& context : context_list) {
+    EXPECT_EQ(LOAD_STATE_IDLE, context->trans->GetLoadState());
+  }
+
+  for (auto& c : context_list) {
+    ReadAndVerifyTransaction(c->trans.get(), kRangeGET_TransactionOK);
+  }
+
+  EXPECT_EQ(3, cache.network_layer()->transaction_count());
+  EXPECT_EQ(0, cache.disk_cache()->open_count());
+  EXPECT_EQ(3, cache.disk_cache()->create_count());
+}
+
+// Same as above, but tests that the 2nd transaction does nothing when there
+// is nothing left to doom. (crbug.com/736993)
+TEST(HttpCache, RangeGET_ParallelValidationNoMatchDoomEntry1) {
+  MockHttpCache cache;
+
+  ScopedMockTransaction transaction(kRangeGET_TransactionOK);
+  MockHttpRequest request(transaction);
+
+  MockTransaction dooming_transaction(kRangeGET_TransactionOK);
+  dooming_transaction.load_flags |= LOAD_BYPASS_CACHE;
+  MockHttpRequest dooming_request(dooming_transaction);
+
+  std::vector<std::unique_ptr<Context>> context_list;
+  const int kNumTransactions = 3;
+
+  for (int i = 0; i < kNumTransactions; ++i) {
+    context_list.push_back(base::MakeUnique<Context>());
+    auto& c = context_list[i];
+
+    c->result = cache.CreateTransaction(&c->trans);
+    ASSERT_THAT(c->result, IsOk());
+    EXPECT_EQ(LOAD_STATE_IDLE, c->trans->GetLoadState());
+
+    MockHttpRequest* this_request = &request;
+
+    if (i == 2) {
+      this_request = &dooming_request;
+      cache.disk_cache()->SetDefer(DEFER_CREATE);
+    }
+
+    if (i == 1)
+      cache.disk_cache()->SetDefer(kRangeGET_TransactionOK.url, DEFER_READ);
+
+    c->result = c->trans->Start(this_request, c->callback.callback(),
+                                NetLogWithSource());
+
+    // Continue the transactions. The 2nd transaction will pause at the cache
+    // read, and the 3rd will doom the entry and then pause before creating a
+    // new one.
+    base::RunLoop().RunUntilIdle();
+  }
+
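+  // The original entry should be doomed by now, and the 3rd transaction is
+  // paused before creating its replacement, so no new entry exists yet.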
+  EXPECT_TRUE(
+      cache.disk_cache()->IsDiskEntryDoomed(kRangeGET_TransactionOK.url));
+
+  // Resume the cache read of the 2nd transaction, which will also make it try
+  // to doom the entry since it cannot be validated. The entry is already
+  // doomed and no new entry exists yet, so there is nothing left to doom, and
+  // this should not trigger an assertion failure.
+  cache.disk_cache()->ResumeDoomedEntryCacheOperation(
+      kRangeGET_TransactionOK.url);
+  base::RunLoop().RunUntilIdle();
+
+  // Resume the entry creation by the 3rd transaction.
+  cache.disk_cache()->ResumeCacheOperation();
+  base::RunLoop().RunUntilIdle();
+
+  // Note that the 2nd transaction gets added to the entry created by the 3rd
+  // transaction, so only 2 entries get created.
+  EXPECT_EQ(3, cache.network_layer()->transaction_count());
+  EXPECT_EQ(0, cache.disk_cache()->open_count());
+  EXPECT_EQ(2, cache.disk_cache()->create_count());
+
+  for (auto& context : context_list) {
+    EXPECT_EQ(LOAD_STATE_IDLE, context->trans->GetLoadState());
+  }
+
+  for (auto& c : context_list) {
+    ReadAndVerifyTransaction(c->trans.get(), kRangeGET_TransactionOK);
+  }
+
+  EXPECT_EQ(3, cache.network_layer()->transaction_count());
+  EXPECT_EQ(0, cache.disk_cache()->open_count());
+  EXPECT_EQ(2, cache.disk_cache()->create_count());
+}
+
 
 // Tests parallel validation on range requests with non-overlapping ranges.
 TEST(HttpCache, RangeGET_ParallelValidationDifferentRanges) {
   MockHttpCache cache;
@@ -2159,7 +2311,7 @@ TEST(HttpCache, SimpleGET_ParallelValidationCancelReader) {
     MockHttpRequest* this_request = &request;
     if (i == 3) {
       this_request = &validate_request;
-      c->trans->SetBeforeNetworkStartCallback(base::Bind(&DeferNetworkStart));
+      c->trans->SetBeforeNetworkStartCallback(base::Bind(&DeferCallback));
     }
 
     c->result = c->trans->Start(this_request, c->callback.callback(),
@@ -2257,7 +2409,7 @@ TEST(HttpCache, SimpleGET_ParallelValidationCancelWriter) {
     MockHttpRequest* this_request = &request;
     if (i == 2) {
       this_request = &validate_request;
-      c->trans->SetBeforeNetworkStartCallback(base::Bind(&DeferNetworkStart));
+      c->trans->SetBeforeNetworkStartCallback(base::Bind(&DeferCallback));
     }
 
     c->result = c->trans->Start(this_request, c->callback.callback(),
@@ -2395,7 +2547,7 @@ TEST(HttpCache, SimpleGET_ParallelValidationStopCaching) {
     MockHttpRequest* this_request = &request;
     if (i == 2) {
       this_request = &validate_request;
-      c->trans->SetBeforeNetworkStartCallback(base::Bind(&DeferNetworkStart));
+      c->trans->SetBeforeNetworkStartCallback(base::Bind(&DeferCallback));
     }
 
     c->result = c->trans->Start(this_request, c->callback.callback(),
@@ -2461,7 +2613,7 @@ TEST(HttpCache, SimpleGET_ParallelValidationCancelHeaders) {
     ASSERT_THAT(c->result, IsOk());
 
     if (i == 0)
-      c->trans->SetBeforeNetworkStartCallback(base::Bind(&DeferNetworkStart));
+      c->trans->SetBeforeNetworkStartCallback(base::Bind(&DeferCallback));
 
     c->result =
         c->trans->Start(&request, c->callback.callback(), NetLogWithSource());