Chromium Code Reviews

Unified Diff: net/disk_cache/disk_cache_perftest.cc

Issue 2501353002: Parallelize disk_cache_perftest.
Patch Set: compile better? Created 3 years, 11 months ago
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include <limits>
+#include <memory>
 #include <string>
 
 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/files/file_enumerator.h"
 #include "base/files/file_path.h"
 #include "base/hash.h"
 #include "base/process/process_metrics.h"
 #include "base/rand_util.h"
 #include "base/run_loop.h"
(...skipping 11 matching lines...)
 #include "net/disk_cache/disk_cache_test_base.h"
 #include "net/disk_cache/disk_cache_test_util.h"
 #include "net/disk_cache/simple/simple_backend_impl.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
 
 using base::Time;
 
 namespace {
 
+const size_t kNumEntries = 1000;
+const int kHeadersSize = 800;
+const int kBodySize = 256 * 1024 - 1;
+
+const int kChunkSize = 64 * 1024;
jkarlin 2017/01/11 16:40:47 Is this based on our actual write size?
+const int kMaxParallelOperations = 8;
jkarlin 2017/01/11 16:40:47 10 is what the ResourceScheduler maxes at per tab.
+
 size_t MaybeGetMaxFds() {
 #if defined(OS_POSIX)
   return base::GetMaxFds();
 #else
   return std::numeric_limits<size_t>::max();
 #endif
 }
 
 void MaybeSetFdLimit(unsigned int max_descriptors) {
 #if defined(OS_POSIX)
   base::SetFdLimit(max_descriptors);
 #endif
 }
 
 struct TestEntry {
   std::string key;
   int data_len;
 };
 
+enum class WhatToRead {
+  HEADERS_ONLY,
+  HEADERS_AND_BODY,
+};
+
 class DiskCachePerfTest : public DiskCacheTestWithCache {
  public:
-  DiskCachePerfTest() : saved_fd_limit_(MaybeGetMaxFds()) {
+  DiskCachePerfTest() {
     if (saved_fd_limit_ < kFdLimitForCacheTests)
       MaybeSetFdLimit(kFdLimitForCacheTests);
   }
 
   ~DiskCachePerfTest() override {
     if (saved_fd_limit_ < kFdLimitForCacheTests)
       MaybeSetFdLimit(saved_fd_limit_);
   }
 
+  const std::vector<TestEntry>& entries() const { return entries_; }
+
  protected:
-  enum class WhatToRead {
-    HEADERS_ONLY,
-    HEADERS_AND_BODY,
-  };
 
   // Helper methods for constructing tests.
-  bool TimeWrite();
-  bool TimeRead(WhatToRead what_to_read, const char* timer_message);
+  bool TimeWrites();
+  bool TimeReads(WhatToRead what_to_read, const char* timer_message);
   void ResetAndEvictSystemDiskCache();
 
+  // Callbacks used within tests for intermediate operations.
+  void WriteCallback(const net::CompletionCallback& final_callback,
+                     scoped_refptr<net::IOBuffer> headers_buffer,
+                     scoped_refptr<net::IOBuffer> body_buffer,
+                     disk_cache::Entry* cache_entry,
+                     int entry_index,
+                     size_t write_offset,
+                     int result);
+
   // Complete perf tests.
   void CacheBackendPerformance();
 
   const size_t kFdLimitForCacheTests = 8192;
 
-  const int kNumEntries = 1000;
-  const int kHeadersSize = 800;
-  const int kBodySize = 256 * 1024 - 1;
-
   std::vector<TestEntry> entries_;
 
+  size_t next_entry_ = 0;  // Which entry will be the next entry to read/write.
+  size_t pending_operations_count_ = 0;
+  int pending_result_;
+
  private:
-  const size_t saved_fd_limit_;
+  const size_t saved_fd_limit_ = MaybeGetMaxFds();
 };
-// Creates num_entries on the cache, and writes kHeaderSize bytes of metadata
-// and up to kBodySize of data to each entry.
-bool DiskCachePerfTest::TimeWrite() {
-  // TODO(gavinp): This test would be significantly more realistic if it didn't
-  // do single reads and writes. Perhaps entries should be written 64kb at a
-  // time. As well, not all entries should be created and written essentially
-  // simultaneously; some number of entries in flight at a time would be a
-  // likely better testing load.
-  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kHeadersSize));
-  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kBodySize));
-
-  CacheTestFillBuffer(buffer1->data(), kHeadersSize, false);
-  CacheTestFillBuffer(buffer2->data(), kBodySize, false);
-
-  int expected = 0;
-
-  MessageLoopHelper helper;
-  CallbackTest callback(&helper, true);
-
-  base::PerfTimeLogger timer("Write disk cache entries");
-
+class WriteHandler {
jkarlin 2017/01/11 16:40:47 Please add some docs for this class.
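(A possible class comment, read off the implementation below; the wording is a sketch, not part of the CL:)

  // WriteHandler creates the entries listed in |test->entries()| and writes
  // kHeadersSize bytes of headers plus each entry's body in kChunkSize
  // chunks, keeping up to kMaxParallelOperations operations in flight. It
  // runs |final_callback| with net::OK once every entry has been written,
  // or with the first error encountered.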
+ public:
+  WriteHandler(const DiskCachePerfTest* test,
+               disk_cache::Backend* cache,
+               net::CompletionCallback final_callback)
+      : test_(test), cache_(cache), final_callback_(final_callback) {
+    CacheTestFillBuffer(headers_buffer_->data(), kHeadersSize, false);
+    CacheTestFillBuffer(body_buffer_->data(), kChunkSize, false);
+  }
+
+  void Run();
+
+ protected:
+  void CreateNextEntry();
+
+  void CreateCallback(std::unique_ptr<disk_cache::Entry*> unique_entry_ptr,
+                      int data_len,
+                      int result);
+  void WriteDataCallback(disk_cache::Entry* entry,
+                         int next_offset,
+                         int data_len,
+                         int expected_result,
+                         int result);
+
+ private:
+  bool CheckForErrorAndCancel(int result);
+
+  const DiskCachePerfTest* test_;
+  disk_cache::Backend* cache_;
+  net::CompletionCallback final_callback_;
+
+  size_t next_entry_index_ = 0;
+  size_t pending_operations_count_ = 0;
+
+  int pending_result_ = net::OK;
+
+  scoped_refptr<net::IOBuffer> headers_buffer_ =
+      new net::IOBuffer(kHeadersSize);
+  scoped_refptr<net::IOBuffer> body_buffer_ = new net::IOBuffer(kChunkSize);
+};
jkarlin 2017/01/11 16:40:47 DISALLOW_COPY_AND_ASSIGN
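(For reference: DISALLOW_COPY_AND_ASSIGN comes from base/macros.h and deletes the copy constructor and copy assignment; the same note presumably applies to ReadHandler below. A minimal self-contained sketch, with Handler as a stand-in name:)

  #include "base/macros.h"

  class Handler {
   public:
    Handler() = default;

   private:
    DISALLOW_COPY_AND_ASSIGN(Handler);  // Copy ctor and operator= are deleted.
  };

  // Handler a; Handler b(a);  // Would now fail to compile.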
+
+void WriteHandler::Run() {
+  for (int i = 0; i < kMaxParallelOperations; ++i) {
+    ++pending_operations_count_;
+    CreateNextEntry();
+  }
+}
+
+void WriteHandler::CreateNextEntry() {
+  EXPECT_GT(kNumEntries, next_entry_index_);
+  TestEntry test_entry = test_->entries()[next_entry_index_++];
+  disk_cache::Entry** entry_ptr = new disk_cache::Entry*();
+  std::unique_ptr<disk_cache::Entry*> unique_entry_ptr(entry_ptr);
+  net::CompletionCallback callback =
+      base::Bind(&WriteHandler::CreateCallback, base::Unretained(this),
+                 base::Passed(&unique_entry_ptr), test_entry.data_len);
+  int result = cache_->CreateEntry(test_entry.key, entry_ptr, callback);
+  if (result != net::ERR_IO_PENDING)
+    callback.Run(result);
+}
+
+void WriteHandler::CreateCallback(std::unique_ptr<disk_cache::Entry*> entry_ptr,
+                                  int data_len,
+                                  int result) {
+  if (CheckForErrorAndCancel(result))
+    return;
+
+  disk_cache::Entry* entry = *(entry_ptr.get());
+
+  net::CompletionCallback callback =
+      base::Bind(&WriteHandler::WriteDataCallback, base::Unretained(this),
+                 entry, 0, data_len, kHeadersSize);
+  int new_result = entry->WriteData(0, 0, headers_buffer_.get(), kHeadersSize,
+                                    callback, false);
+  if (new_result != net::ERR_IO_PENDING)
+    callback.Run(new_result);
+}
+
+void WriteHandler::WriteDataCallback(disk_cache::Entry* entry,
+                                     int next_offset,
+                                     int data_len,
+                                     int expected_result,
+                                     int result) {
+  if (CheckForErrorAndCancel(result)) {
+    entry->Close();
+    return;
+  }
+  if (next_offset >= data_len) {
+    entry->Close();
+    if (next_entry_index_ < kNumEntries) {
+      CreateNextEntry();
+    } else {
+      --pending_operations_count_;
+      if (pending_operations_count_ == 0)
+        final_callback_.Run(net::OK);
+    }
+    return;
+  }
+
+  int write_size = std::min(kChunkSize, data_len - next_offset);
+  net::CompletionCallback callback =
+      base::Bind(&WriteHandler::WriteDataCallback, base::Unretained(this),
+                 entry, next_offset + write_size, data_len, write_size);
+  int new_result = entry->WriteData(1, next_offset, body_buffer_.get(),
+                                    write_size, callback, false);
+  if (new_result != net::ERR_IO_PENDING)
+    callback.Run(new_result);
+}
+
+bool WriteHandler::CheckForErrorAndCancel(int result) {
+  DCHECK_NE(net::ERR_IO_PENDING, result);
+  if (result != net::OK && !(result > 0))
+    pending_result_ = result;
+  if (pending_result_ != net::OK) {
+    --pending_operations_count_;
+    if (pending_operations_count_ == 0)
+      final_callback_.Run(pending_result_);
+    return true;
+  }
+  return false;
+}
+
+class ReadHandler {
+ public:
+  ReadHandler(const DiskCachePerfTest* test,
+              WhatToRead what_to_read,
+              disk_cache::Backend* cache,
+              net::CompletionCallback final_callback)
+      : test_(test),
+        what_to_read_(what_to_read),
+        cache_(cache),
+        final_callback_(final_callback) {
+    for (int i = 0; i < kMaxParallelOperations; ++i)
+      read_buffers_[i] = new net::IOBuffer(std::max(kHeadersSize, kChunkSize));
+  }
+
+  void Run();
+
+ protected:
+  void OpenNextEntry(int parallel_operation_index);
+
+  void OpenCallback(int parallel_operation_index,
+                    std::unique_ptr<disk_cache::Entry*> unique_entry_ptr,
+                    int data_len,
+                    int result);
+  void ReadDataCallback(int parallel_operation_index,
+                        disk_cache::Entry* entry,
+                        int next_offset,
+                        int data_len,
+                        int expected_result,
+                        int result);
+
+ private:
+  bool CheckForErrorAndCancel(int result);
+
+  const DiskCachePerfTest* test_;
+  const WhatToRead what_to_read_;
+
+  disk_cache::Backend* cache_;
+  net::CompletionCallback final_callback_;
+
+  size_t next_entry_index_ = 0;
+  size_t pending_operations_count_ = 0;
+
+  int pending_result_ = net::OK;
+
+  scoped_refptr<net::IOBuffer> read_buffers_[kMaxParallelOperations];
+};
jkarlin 2017/01/11 16:40:47 DISALLOW_COPY_AND_ASSIGN
+
+void ReadHandler::Run() {
+  for (int i = 0; i < kMaxParallelOperations; ++i) {
+    OpenNextEntry(pending_operations_count_);
+    ++pending_operations_count_;
+  }
+}
+
+void ReadHandler::OpenNextEntry(int parallel_operation_index) {
+  EXPECT_GT(kNumEntries, next_entry_index_);
+  TestEntry test_entry = test_->entries()[next_entry_index_++];
+  disk_cache::Entry** entry_ptr = new disk_cache::Entry*();
+  std::unique_ptr<disk_cache::Entry*> unique_entry_ptr(entry_ptr);
+  net::CompletionCallback callback =
+      base::Bind(&ReadHandler::OpenCallback, base::Unretained(this),
+                 parallel_operation_index, base::Passed(&unique_entry_ptr),
+                 test_entry.data_len);
+  int result = cache_->OpenEntry(test_entry.key, entry_ptr, callback);
+  if (result != net::ERR_IO_PENDING)
+    callback.Run(result);
+}
+
+void ReadHandler::OpenCallback(int parallel_operation_index,
+                               std::unique_ptr<disk_cache::Entry*> entry_ptr,
+                               int data_len,
+                               int result) {
+  if (CheckForErrorAndCancel(result))
+    return;
+
+  disk_cache::Entry* entry = *(entry_ptr.get());
jkarlin 2017/01/11 16:40:47 does *entry_ptr work instead of *(entry_ptr.get())?
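(It should: std::unique_ptr::operator* is defined to return *get(), so the shorter spelling is equivalent here:)

  disk_cache::Entry* entry = *entry_ptr;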
+
+  EXPECT_EQ(data_len, entry->GetDataSize(1));
jkarlin 2017/01/11 16:40:47 Please use an enum or constant for the streams in
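(A sketch of that suggestion; the names are placeholders, not from the CL. Throughout this test, stream 0 holds the headers and stream 1 the body:)

  const int kHeadersStream = 0;
  const int kBodyStream = 1;

  EXPECT_EQ(data_len, entry->GetDataSize(kBodyStream));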
+
+  net::CompletionCallback callback =
+      base::Bind(&ReadHandler::ReadDataCallback, base::Unretained(this),
+                 parallel_operation_index, entry, 0, data_len, kHeadersSize);
+  int new_result =
+      entry->ReadData(0, 0, read_buffers_[parallel_operation_index].get(),
+                      kChunkSize, callback);
+  if (new_result != net::ERR_IO_PENDING)
+    callback.Run(new_result);
+}
+
+void ReadHandler::ReadDataCallback(int parallel_operation_index,
+                                   disk_cache::Entry* entry,
+                                   int next_offset,
+                                   int data_len,
+                                   int expected_result,
+                                   int result) {
+  if (CheckForErrorAndCancel(result)) {
+    entry->Close();
+    return;
+  }
+  if (what_to_read_ == WhatToRead::HEADERS_ONLY || next_offset >= data_len) {
+    entry->Close();
+    if (next_entry_index_ < kNumEntries) {
+      OpenNextEntry(parallel_operation_index);
+    } else {
+      --pending_operations_count_;
+      if (pending_operations_count_ == 0)
+        final_callback_.Run(net::OK);
+    }
+    return;
+  }
+
+  int expected_read_size = std::min(kChunkSize, data_len - next_offset);
+  net::CompletionCallback callback =
+      base::Bind(&ReadHandler::ReadDataCallback, base::Unretained(this),
+                 parallel_operation_index, entry, next_offset + kChunkSize,
+                 data_len, expected_read_size);
+  int new_result = entry->ReadData(
+      1, next_offset, read_buffers_[parallel_operation_index].get(), kChunkSize,
+      callback);
+  if (new_result != net::ERR_IO_PENDING)
+    callback.Run(new_result);
+}
+
+bool ReadHandler::CheckForErrorAndCancel(int result) {
+  DCHECK_NE(net::ERR_IO_PENDING, result);
+  if (result != net::OK && !(result > 0))
+    pending_result_ = result;
+  if (pending_result_ != net::OK) {
+    --pending_operations_count_;
+    if (pending_operations_count_ == 0)
+      final_callback_.Run(pending_result_);
+    return true;
+  }
+  return false;
+}
+
+bool DiskCachePerfTest::TimeWrites() {
   for (int i = 0; i < kNumEntries; i++) {
     TestEntry entry;
     entry.key = GenerateKey(true);
     entry.data_len = base::RandInt(0, kBodySize);
jkarlin 2017/01/11 16:40:47 We have a very good idea of body sizes read from t
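(One hedged way to act on that: replace the uniform draw with a draw from an empirical size distribution. The helper and bucket boundaries below are hypothetical; real values would come from cache metrics:)

  // Hypothetical replacement for base::RandInt(0, kBodySize):
  int SampleBodySize() {
    static const int kBucketUpperBounds[] = {1024, 16 * 1024, kBodySize};
    int bucket = base::RandInt(0, 2);  // Pick a size bucket, then a size.
    return base::RandInt(0, kBucketUpperBounds[bucket]);
  }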
     entries_.push_back(entry);
-
-    disk_cache::Entry* cache_entry;
-    net::TestCompletionCallback cb;
-    int rv = cache_->CreateEntry(entry.key, &cache_entry, cb.callback());
-    if (net::OK != cb.GetResult(rv))
-      break;
-    int ret = cache_entry->WriteData(
-        0, 0, buffer1.get(), kHeadersSize,
-        base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false);
-    if (net::ERR_IO_PENDING == ret)
-      expected++;
-    else if (kHeadersSize != ret)
-      break;
-
-    ret = cache_entry->WriteData(
-        1, 0, buffer2.get(), entry.data_len,
-        base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false);
-    if (net::ERR_IO_PENDING == ret)
-      expected++;
-    else if (entry.data_len != ret)
-      break;
-    cache_entry->Close();
-  }
-
-  helper.WaitUntilCacheIoFinished(expected);
-  timer.Done();
-
-  return expected == helper.callbacks_called();
-}
-
-// Reads the data and metadata from each entry listed on |entries|.
-bool DiskCachePerfTest::TimeRead(WhatToRead what_to_read,
-                                 const char* timer_message) {
-  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kHeadersSize));
-  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kBodySize));
-
-  CacheTestFillBuffer(buffer1->data(), kHeadersSize, false);
-  CacheTestFillBuffer(buffer2->data(), kBodySize, false);
-
-  int expected = 0;
-
-  MessageLoopHelper helper;
-  CallbackTest callback(&helper, true);
-
+  }
+
+  net::TestCompletionCallback cb;
+
+  base::PerfTimeLogger timer("Write disk cache entries");
+
+  std::unique_ptr<WriteHandler> write_handler(
+      new WriteHandler(this, cache_.get(), cb.callback()));
+  write_handler->Run();
+  return cb.WaitForResult() == net::OK;
+}
+
+bool DiskCachePerfTest::TimeReads(WhatToRead what_to_read,
+                                  const char* timer_message) {
   base::PerfTimeLogger timer(timer_message);
 
-  for (int i = 0; i < kNumEntries; i++) {
-    disk_cache::Entry* cache_entry;
-    net::TestCompletionCallback cb;
-    int rv = cache_->OpenEntry(entries_[i].key, &cache_entry, cb.callback());
-    if (net::OK != cb.GetResult(rv))
-      break;
-    int ret = cache_entry->ReadData(
-        0, 0, buffer1.get(), kHeadersSize,
-        base::Bind(&CallbackTest::Run, base::Unretained(&callback)));
-    if (net::ERR_IO_PENDING == ret)
-      expected++;
-    else if (kHeadersSize != ret)
-      break;
-
-    if (what_to_read == WhatToRead::HEADERS_AND_BODY) {
-      ret = cache_entry->ReadData(
-          1, 0, buffer2.get(), entries_[i].data_len,
-          base::Bind(&CallbackTest::Run, base::Unretained(&callback)));
-      if (net::ERR_IO_PENDING == ret)
-        expected++;
-      else if (entries_[i].data_len != ret)
-        break;
-    }
-
-    cache_entry->Close();
-  }
-
-  helper.WaitUntilCacheIoFinished(expected);
-  timer.Done();
-
-  return (expected == helper.callbacks_called());
+  net::TestCompletionCallback cb;
+  std::unique_ptr<ReadHandler> read_handler(
+      new ReadHandler(this, what_to_read, cache_.get(), cb.callback()));
+  read_handler->Run();
+  return cb.WaitForResult() == net::OK;
 }
 
 TEST_F(DiskCachePerfTest, BlockfileHashes) {
   base::PerfTimeLogger timer("Hash disk cache keys");
   for (int i = 0; i < 300000; i++) {
     std::string key = GenerateKey(true);
     base::Hash(key);
   }
   timer.Done();
 }
(...skipping 19 matching lines...)
   }
   ASSERT_TRUE(base::EvictFileFromSystemCache(cache_path_));
 #endif
 
   DisableFirstCleanup();
   InitCache();
 }
 
 void DiskCachePerfTest::CacheBackendPerformance() {
   InitCache();
-  EXPECT_TRUE(TimeWrite());
+  EXPECT_TRUE(TimeWrites());
 
   disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
   base::RunLoop().RunUntilIdle();
 
   ResetAndEvictSystemDiskCache();
-  EXPECT_TRUE(TimeRead(WhatToRead::HEADERS_ONLY,
-                       "Read disk cache headers only (cold)"));
-  EXPECT_TRUE(TimeRead(WhatToRead::HEADERS_ONLY,
-                       "Read disk cache headers only (warm)"));
+  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_ONLY,
+                        "Read disk cache headers only (cold)"));
+  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_ONLY,
+                        "Read disk cache headers only (warm)"));
 
   disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
   base::RunLoop().RunUntilIdle();
 
   ResetAndEvictSystemDiskCache();
-  EXPECT_TRUE(
-      TimeRead(WhatToRead::HEADERS_AND_BODY, "Read disk cache entries (cold)"));
-  EXPECT_TRUE(
-      TimeRead(WhatToRead::HEADERS_AND_BODY, "Read disk cache entries (warm)"));
+  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_AND_BODY,
+                        "Read disk cache entries (cold)"));
+  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_AND_BODY,
+                        "Read disk cache entries (warm)"));
 
   disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
   base::RunLoop().RunUntilIdle();
 }
 
 TEST_F(DiskCachePerfTest, CacheBackendPerformance) {
   CacheBackendPerformance();
 }
 
 TEST_F(DiskCachePerfTest, SimpleCacheBackendPerformance) {
(...skipping 34 matching lines...)
     files.DeleteBlock(address[entry], false);
     EXPECT_TRUE(
         files.CreateBlock(disk_cache::RANKINGS, block_size, &address[entry]));
   }
 
   timer2.Done();
   base::RunLoop().RunUntilIdle();
 }
 
 }  // namespace