OLD | NEW |
---|---|
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits> | 5 #include <limits> |
6 #include <memory> | |
6 #include <string> | 7 #include <string> |
7 | 8 |
8 #include "base/bind.h" | 9 #include "base/bind.h" |
9 #include "base/bind_helpers.h" | 10 #include "base/bind_helpers.h" |
10 #include "base/files/file_enumerator.h" | 11 #include "base/files/file_enumerator.h" |
11 #include "base/files/file_path.h" | 12 #include "base/files/file_path.h" |
12 #include "base/hash.h" | 13 #include "base/hash.h" |
13 #include "base/process/process_metrics.h" | 14 #include "base/process/process_metrics.h" |
14 #include "base/rand_util.h" | 15 #include "base/rand_util.h" |
15 #include "base/run_loop.h" | 16 #include "base/run_loop.h" |
(...skipping 11 matching lines...) |
27 #include "net/disk_cache/disk_cache_test_base.h" | 28 #include "net/disk_cache/disk_cache_test_base.h" |
28 #include "net/disk_cache/disk_cache_test_util.h" | 29 #include "net/disk_cache/disk_cache_test_util.h" |
29 #include "net/disk_cache/simple/simple_backend_impl.h" | 30 #include "net/disk_cache/simple/simple_backend_impl.h" |
30 #include "testing/gtest/include/gtest/gtest.h" | 31 #include "testing/gtest/include/gtest/gtest.h" |
31 #include "testing/platform_test.h" | 32 #include "testing/platform_test.h" |
32 | 33 |
33 using base::Time; | 34 using base::Time; |
34 | 35 |
35 namespace { | 36 namespace { |
36 | 37 |
38 const size_t kNumEntries = 1000; | |
39 const int kHeadersSize = 800; | |
Maks Orlovich 2017/05/18 15:45:36: This is likely way too small, at least per SimpleCache …
40 | |
41 const int kBodySize = 256 * 1024 - 1; | |
Maks Orlovich 2017/05/18 15:45:36: This, OTOH, is way too big unless we are optimizing …
42 | |
43 // As of 2017-01-12, this is the typical IPC size used for | |
44 const int kChunkSize = 64 * 1024; | |
Maks Orlovich 2017/05/18 15:45:36: Don't know where it comes from, but it's clearly unfinished.
45 | |
46 // As of 2017-01-12, this is a typical per-tab limit on HTTP connections. | |
47 const int kMaxParallelOperations = 10; | |
48 | |
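Editor's note: the remarks above on kHeadersSize, kBodySize, and kChunkSize all point at retuning constants rather than restructuring the test. Purely hypothetical values in the directions the reviewer indicates (larger headers, smaller bodies, HttpCache-sized chunks) might look like:

    const int kHeadersSize = 2000;        // closer to real HTTP response headers than 800
    const int kBodySize = 72 * 1024 - 1;  // still spans several chunks, without 256 KiB bodies
    const int kChunkSize = 32 * 1024;     // nearer the sizes HttpCache reads/writes per call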
37 size_t MaybeGetMaxFds() { | 49 size_t MaybeGetMaxFds() { |
38 #if defined(OS_POSIX) | 50 #if defined(OS_POSIX) |
39 return base::GetMaxFds(); | 51 return base::GetMaxFds(); |
40 #else | 52 #else |
41 return std::numeric_limits<size_t>::max(); | 53 return std::numeric_limits<size_t>::max(); |
42 #endif | 54 #endif |
43 } | 55 } |
44 | 56 |
45 void MaybeSetFdLimit(unsigned int max_descriptors) { | 57 void MaybeSetFdLimit(unsigned int max_descriptors) { |
46 #if defined(OS_POSIX) | 58 #if defined(OS_POSIX) |
47 base::SetFdLimit(max_descriptors); | 59 base::SetFdLimit(max_descriptors); |
48 #endif | 60 #endif |
49 } | 61 } |
50 | 62 |
51 struct TestEntry { | 63 struct TestEntry { |
52 std::string key; | 64 std::string key; |
53 int data_len; | 65 int data_len; |
54 }; | 66 }; |
55 | 67 |
68 enum class WhatToRead { | |
69 HEADERS_ONLY, | |
70 HEADERS_AND_BODY, | |
71 }; | |
72 | |
56 class DiskCachePerfTest : public DiskCacheTestWithCache { | 73 class DiskCachePerfTest : public DiskCacheTestWithCache { |
57 public: | 74 public: |
58 DiskCachePerfTest() : saved_fd_limit_(MaybeGetMaxFds()) { | 75 DiskCachePerfTest() { |
59 if (saved_fd_limit_ < kFdLimitForCacheTests) | 76 if (saved_fd_limit_ < kFdLimitForCacheTests) |
60 MaybeSetFdLimit(kFdLimitForCacheTests); | 77 MaybeSetFdLimit(kFdLimitForCacheTests); |
61 } | 78 } |
62 | 79 |
63 ~DiskCachePerfTest() override { | 80 ~DiskCachePerfTest() override { |
64 if (saved_fd_limit_ < kFdLimitForCacheTests) | 81 if (saved_fd_limit_ < kFdLimitForCacheTests) |
65 MaybeSetFdLimit(saved_fd_limit_); | 82 MaybeSetFdLimit(saved_fd_limit_); |
66 } | 83 } |
67 | 84 |
85 const std::vector<TestEntry>& entries() const { return entries_; } | |
86 | |
68 protected: | 87 protected: |
69 enum class WhatToRead { | |
70 HEADERS_ONLY, | |
71 HEADERS_AND_BODY, | |
72 }; | |
73 | 88 |
74 // Helper methods for constructing tests. | 89 // Helper methods for constructing tests. |
75 bool TimeWrite(); | 90 bool TimeWrites(); |
76 bool TimeRead(WhatToRead what_to_read, const char* timer_message); | 91 bool TimeReads(WhatToRead what_to_read, const char* timer_message); |
77 void ResetAndEvictSystemDiskCache(); | 92 void ResetAndEvictSystemDiskCache(); |
78 | 93 |
94 // Callbacks used within tests for intermediate operations. | |
95 void WriteCallback(const net::CompletionCallback& final_callback, | |
96 scoped_refptr<net::IOBuffer> headers_buffer, | |
97 scoped_refptr<net::IOBuffer> body_buffer, | |
98 disk_cache::Entry* cache_entry, | |
99 int entry_index, | |
100 size_t write_offset, | |
101 int result); | |
102 | |
79 // Complete perf tests. | 103 // Complete perf tests. |
80 void CacheBackendPerformance(); | 104 void CacheBackendPerformance(); |
81 | 105 |
82 const size_t kFdLimitForCacheTests = 8192; | 106 const size_t kFdLimitForCacheTests = 8192; |
83 | 107 |
84 const int kNumEntries = 1000; | |
85 const int kHeadersSize = 800; | |
86 const int kBodySize = 256 * 1024 - 1; | |
87 | |
88 std::vector<TestEntry> entries_; | 108 std::vector<TestEntry> entries_; |
89 | 109 |
110 size_t next_entry_ = 0; // Which entry will be the next entry to read/write. | |
111 size_t pending_operations_count_ = 0; | |
112 int pending_result_; | |
Maks Orlovich 2017/05/18 15:45:36: These 3 fields are probably dead?
113 | |
90 private: | 114 private: |
91 const size_t saved_fd_limit_; | 115 const size_t saved_fd_limit_ = MaybeGetMaxFds(); |
92 }; | 116 }; |
93 | 117 |
94 // Creates num_entries on the cache, and writes kHeaderSize bytes of metadata | 118 class WriteHandler { |
95 // and up to kBodySize of data to each entry. | 119 public: |
96 bool DiskCachePerfTest::TimeWrite() { | 120 WriteHandler(const DiskCachePerfTest* test, |
97 // TODO(gavinp): This test would be significantly more realistic if it didn't | 121 disk_cache::Backend* cache, |
98 // do single reads and writes. Perhaps entries should be written 64kb at a | 122 net::CompletionCallback final_callback) |
99 // time. As well, not all entries should be created and written essentially | 123 : test_(test), cache_(cache), final_callback_(final_callback) { |
100 // simultaneously; some number of entries in flight at a time would be a | 124 CacheTestFillBuffer(headers_buffer_->data(), kHeadersSize, false); |
101 // likely better testing load. | 125 CacheTestFillBuffer(body_buffer_->data(), kChunkSize, false); |
102 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kHeadersSize)); | 126 } |
103 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kBodySize)); | 127 |
104 | 128 void Run(); |
105 CacheTestFillBuffer(buffer1->data(), kHeadersSize, false); | 129 |
106 CacheTestFillBuffer(buffer2->data(), kBodySize, false); | 130 protected: |
107 | 131 void CreateNextEntry(); |
108 int expected = 0; | 132 |
109 | 133 void CreateCallback(std::unique_ptr<disk_cache::Entry*> unique_entry_ptr, |
110 MessageLoopHelper helper; | 134 int data_len, |
111 CallbackTest callback(&helper, true); | 135 int result); |
112 | 136 void WriteDataCallback(disk_cache::Entry* entry, |
113 base::PerfTimeLogger timer("Write disk cache entries"); | 137 int next_offset, |
114 | 138 int data_len, |
115 for (int i = 0; i < kNumEntries; i++) { | 139 int expected_result, |
140 int result); | |
141 | |
142 private: | |
143 bool CheckForErrorAndCancel(int result); | |
144 | |
145 const DiskCachePerfTest* test_; | |
146 disk_cache::Backend* cache_; | |
147 net::CompletionCallback final_callback_; | |
148 | |
149 size_t next_entry_index_ = 0; | |
150 size_t pending_operations_count_ = 0; | |
151 | |
152 int pending_result_ = net::OK; | |
153 | |
154 scoped_refptr<net::IOBuffer> headers_buffer_ = | |
155 new net::IOBuffer(kHeadersSize); | |
156 scoped_refptr<net::IOBuffer> body_buffer_ = new net::IOBuffer(kChunkSize); | |
157 }; | |
158 | |
159 void WriteHandler::Run() { | |
160 for (int i = 0; i < kMaxParallelOperations; ++i) { | |
161 ++pending_operations_count_; | |
162 CreateNextEntry(); | |
163 } | |
164 } | |
165 | |
166 void WriteHandler::CreateNextEntry() { | |
167 EXPECT_GT(kNumEntries, next_entry_index_); | |
Maks Orlovich 2017/05/18 15:45:36: ASSERT_GT, given the out-of-bounds access if this fails.
168 TestEntry test_entry = test_->entries()[next_entry_index_++]; | |
169 disk_cache::Entry** entry_ptr = new disk_cache::Entry*(); | |
170 std::unique_ptr<disk_cache::Entry*> unique_entry_ptr(entry_ptr); | |
171 net::CompletionCallback callback = | |
172 base::Bind(&WriteHandler::CreateCallback, base::Unretained(this), | |
173 base::Passed(&unique_entry_ptr), test_entry.data_len); | |
174 int result = cache_->CreateEntry(test_entry.key, entry_ptr, callback); | |
175 if (result != net::ERR_IO_PENDING) | |
176 callback.Run(result); | |
177 } | |
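Editor's sketch of the ASSERT_GT hardening suggested above (the same applies to ReadHandler::OpenNextEntry below). gtest's ASSERT_* macros return from the enclosing void function on failure, so the out-of-bounds entries() access can no longer be reached:

    void WriteHandler::CreateNextEntry() {
      // Aborts this method (and fails the test) instead of indexing past the end.
      ASSERT_GT(kNumEntries, next_entry_index_);
      TestEntry test_entry = test_->entries()[next_entry_index_++];
      // ... rest unchanged ...
    }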
178 | |
179 void WriteHandler::CreateCallback(std::unique_ptr<disk_cache::Entry*> entry_ptr, | |
180 int data_len, | |
181 int result) { | |
182 if (CheckForErrorAndCancel(result)) | |
183 return; | |
184 | |
185 disk_cache::Entry* entry = *entry_ptr; | |
186 | |
187 net::CompletionCallback callback = | |
188 base::Bind(&WriteHandler::WriteDataCallback, base::Unretained(this), | |
189 entry, 0, data_len, kHeadersSize); | |
190 int new_result = entry->WriteData(0, 0, headers_buffer_.get(), kHeadersSize, | |
191 callback, false); | |
192 if (new_result != net::ERR_IO_PENDING) | |
193 callback.Run(new_result); | |
194 } | |
195 | |
196 void WriteHandler::WriteDataCallback(disk_cache::Entry* entry, | |
197 int next_offset, | |
198 int data_len, | |
199 int expected_result, | |
200 int result) { | |
201 if (CheckForErrorAndCancel(result)) { | |
202 entry->Close(); | |
203 return; | |
204 } | |
205 if (next_offset >= data_len) { | |
206 entry->Close(); | |
207 if (next_entry_index_ < kNumEntries) { | |
208 CreateNextEntry(); | |
209 } else { | |
210 --pending_operations_count_; | |
211 if (pending_operations_count_ == 0) | |
212 final_callback_.Run(net::OK); | |
213 } | |
214 return; | |
215 } | |
216 | |
217 int write_size = std::min(kChunkSize, data_len - next_offset); | |
218 net::CompletionCallback callback = | |
219 base::Bind(&WriteHandler::WriteDataCallback, base::Unretained(this), | |
220 entry, next_offset + write_size, data_len, write_size); | |
221 int new_result = entry->WriteData(1, next_offset, body_buffer_.get(), | |
222 write_size, callback, false); | |
Maks Orlovich 2017/05/18 15:45:36: ", true), since I think HttpCache uses truncating writes.
223 if (new_result != net::ERR_IO_PENDING) | |
224 callback.Run(new_result); | |
225 } | |
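Editor's sketch of the truncating-write suggestion above; the final WriteData() parameter is the truncate flag, and passing true would exercise the same write path HttpCache takes in production:

    int new_result = entry->WriteData(1, next_offset, body_buffer_.get(),
                                      write_size, callback, /*truncate=*/true);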
226 | |
227 bool WriteHandler::CheckForErrorAndCancel(int result) { | |
228 DCHECK_NE(net::ERR_IO_PENDING, result); | |
229 if (result != net::OK && !(result > 0)) | |
230 pending_result_ = result; | |
231 if (pending_result_ != net::OK) { | |
232 --pending_operations_count_; | |
233 if (pending_operations_count_ == 0) | |
234 final_callback_.Run(pending_result_); | |
235 return true; | |
236 } | |
237 return false; | |
238 } | |
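Editor's note: since net::OK is 0 and positive results are byte counts, the condition "result != net::OK && !(result > 0)" reduces to "result < 0". A behavior-preserving, hypothetical simplification (equally applicable to ReadHandler::CheckForErrorAndCancel below):

    if (result < 0)  // only negative net error codes latch a failure
      pending_result_ = result;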
239 | |
240 class ReadHandler { | |
241 public: | |
242 ReadHandler(const DiskCachePerfTest* test, | |
243 WhatToRead what_to_read, | |
244 disk_cache::Backend* cache, | |
245 net::CompletionCallback final_callback) | |
246 : test_(test), | |
247 what_to_read_(what_to_read), | |
248 cache_(cache), | |
249 final_callback_(final_callback) { | |
250 for (int i = 0; i < kMaxParallelOperations; ++i) | |
251 read_buffers_[i] = new net::IOBuffer(std::max(kHeadersSize, kChunkSize)); | |
252 } | |
253 | |
254 void Run(); | |
255 | |
256 protected: | |
257 void OpenNextEntry(int parallel_operation_index); | |
258 | |
259 void OpenCallback(int parallel_operation_index, | |
260 std::unique_ptr<disk_cache::Entry*> unique_entry_ptr, | |
261 int data_len, | |
262 int result); | |
263 void ReadDataCallback(int parallel_operation_index, | |
264 disk_cache::Entry* entry, | |
265 int next_offset, | |
266 int data_len, | |
267 int expected_result, | |
268 int result); | |
269 | |
270 private: | |
271 bool CheckForErrorAndCancel(int result); | |
272 | |
273 const DiskCachePerfTest* test_; | |
274 const WhatToRead what_to_read_; | |
275 | |
276 disk_cache::Backend* cache_; | |
277 net::CompletionCallback final_callback_; | |
278 | |
279 size_t next_entry_index_ = 0; | |
280 size_t pending_operations_count_ = 0; | |
281 | |
282 int pending_result_ = net::OK; | |
283 | |
284 scoped_refptr<net::IOBuffer> read_buffers_[kMaxParallelOperations]; | |
285 }; | |
286 | |
287 void ReadHandler::Run() { | |
288 for (int i = 0; i < kMaxParallelOperations; ++i) { | |
289 OpenNextEntry(pending_operations_count_); | |
290 ++pending_operations_count_; | |
291 } | |
292 } | |
293 | |
294 void ReadHandler::OpenNextEntry(int parallel_operation_index) { | |
295 EXPECT_GT(kNumEntries, next_entry_index_); | |
Maks Orlovich 2017/05/18 15:45:36: ASSERT_GT?
296 TestEntry test_entry = test_->entries()[next_entry_index_++]; | |
297 disk_cache::Entry** entry_ptr = new disk_cache::Entry*(); | |
298 std::unique_ptr<disk_cache::Entry*> unique_entry_ptr(entry_ptr); | |
299 net::CompletionCallback callback = | |
300 base::Bind(&ReadHandler::OpenCallback, base::Unretained(this), | |
301 parallel_operation_index, base::Passed(&unique_entry_ptr), | |
302 test_entry.data_len); | |
303 int result = cache_->OpenEntry(test_entry.key, entry_ptr, callback); | |
304 if (result != net::ERR_IO_PENDING) | |
305 callback.Run(result); | |
306 } | |
307 | |
308 void ReadHandler::OpenCallback(int parallel_operation_index, | |
309 std::unique_ptr<disk_cache::Entry*> entry_ptr, | |
310 int data_len, | |
311 int result) { | |
312 if (CheckForErrorAndCancel(result)) | |
313 return; | |
314 | |
315 disk_cache::Entry* entry = *(entry_ptr.get()); | |
316 | |
317 EXPECT_EQ(data_len, entry->GetDataSize(1)); | |
318 | |
319 net::CompletionCallback callback = | |
320 base::Bind(&ReadHandler::ReadDataCallback, base::Unretained(this), | |
321 parallel_operation_index, entry, 0, data_len, kHeadersSize); | |
322 int new_result = | |
323 entry->ReadData(0, 0, read_buffers_[parallel_operation_index].get(), | |
324 kChunkSize, callback); | |
325 if (new_result != net::ERR_IO_PENDING) | |
326 callback.Run(new_result); | |
327 } | |
328 | |
329 void ReadHandler::ReadDataCallback(int parallel_operation_index, | |
330 disk_cache::Entry* entry, | |
331 int next_offset, | |
332 int data_len, | |
333 int expected_result, | |
334 int result) { | |
335 if (CheckForErrorAndCancel(result)) { | |
336 entry->Close(); | |
337 return; | |
338 } | |
339 if (what_to_read_ == WhatToRead::HEADERS_ONLY || next_offset >= data_len) { | |
340 entry->Close(); | |
341 if (next_entry_index_ < kNumEntries) { | |
342 OpenNextEntry(parallel_operation_index); | |
343 } else { | |
344 --pending_operations_count_; | |
345 if (pending_operations_count_ == 0) | |
346 final_callback_.Run(net::OK); | |
347 } | |
348 return; | |
349 } | |
350 | |
351 int expected_read_size = std::min(kChunkSize, data_len - next_offset); | |
352 net::CompletionCallback callback = | |
353 base::Bind(&ReadHandler::ReadDataCallback, base::Unretained(this), | |
354 parallel_operation_index, entry, next_offset + kChunkSize, | |
355 data_len, expected_read_size); | |
356 int new_result = entry->ReadData( | |
357 1, next_offset, read_buffers_[parallel_operation_index].get(), kChunkSize, | |
358 callback); | |
359 if (new_result != net::ERR_IO_PENDING) | |
360 callback.Run(new_result); | |
361 } | |
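Editor's note: expected_result is threaded through both handlers' data callbacks but never compared against result, so a short read or write currently passes silently. A minimal, hypothetical check after the error test at the top of each callback:

    EXPECT_EQ(expected_result, result);  // flag short reads/writes, currently unchecked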
362 | |
363 bool ReadHandler::CheckForErrorAndCancel(int result) { | |
364 DCHECK_NE(net::ERR_IO_PENDING, result); | |
365 if (result != net::OK && !(result > 0)) | |
366 pending_result_ = result; | |
367 if (pending_result_ != net::OK) { | |
368 --pending_operations_count_; | |
369 if (pending_operations_count_ == 0) | |
370 final_callback_.Run(pending_result_); | |
371 return true; | |
372 } | |
373 return false; | |
374 } | |
375 | |
376 bool DiskCachePerfTest::TimeWrites() { | |
377 for (size_t i = 0; i < kNumEntries; i++) { | |
116 TestEntry entry; | 378 TestEntry entry; |
117 entry.key = GenerateKey(true); | 379 entry.key = GenerateKey(true); |
118 entry.data_len = base::RandInt(0, kBodySize); | 380 entry.data_len = base::RandInt(0, kBodySize); |
119 entries_.push_back(entry); | 381 entries_.push_back(entry); |
120 | 382 } |
121 disk_cache::Entry* cache_entry; | 383 |
122 net::TestCompletionCallback cb; | 384 net::TestCompletionCallback cb; |
123 int rv = cache_->CreateEntry(entry.key, &cache_entry, cb.callback()); | 385 |
124 if (net::OK != cb.GetResult(rv)) | 386 base::PerfTimeLogger timer("Write disk cache entries"); |
125 break; | 387 |
126 int ret = cache_entry->WriteData( | 388 std::unique_ptr<WriteHandler> write_handler( |
127 0, 0, buffer1.get(), kHeadersSize, | 389 new WriteHandler(this, cache_.get(), cb.callback())); |
Maks Orlovich 2017/05/18 15:45:36: Just allocate on stack? Heap allocation seems pointless.
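Editor's sketch of the stack allocation suggested above; cb.WaitForResult() does not return until the handler runs its final callback, so a stack-allocated handler lives long enough (the same applies to ReadHandler in TimeReads):

    WriteHandler write_handler(this, cache_.get(), cb.callback());
    write_handler.Run();
    return cb.WaitForResult() == net::OK;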
128 base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false); | 390 write_handler->Run(); |
129 if (net::ERR_IO_PENDING == ret) | 391 return cb.WaitForResult() == net::OK; |
130 expected++; | 392 } |
131 else if (kHeadersSize != ret) | 393 |
132 break; | 394 bool DiskCachePerfTest::TimeReads(WhatToRead what_to_read, |
133 | 395 const char* timer_message) { |
134 ret = cache_entry->WriteData( | |
135 1, 0, buffer2.get(), entry.data_len, | |
136 base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false); | |
137 if (net::ERR_IO_PENDING == ret) | |
138 expected++; | |
139 else if (entry.data_len != ret) | |
140 break; | |
141 cache_entry->Close(); | |
142 } | |
143 | |
144 helper.WaitUntilCacheIoFinished(expected); | |
145 timer.Done(); | |
146 | |
147 return expected == helper.callbacks_called(); | |
148 } | |
149 | |
150 // Reads the data and metadata from each entry listed on |entries|. | |
151 bool DiskCachePerfTest::TimeRead(WhatToRead what_to_read, | |
152 const char* timer_message) { | |
153 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kHeadersSize)); | |
154 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kBodySize)); | |
155 | |
156 CacheTestFillBuffer(buffer1->data(), kHeadersSize, false); | |
157 CacheTestFillBuffer(buffer2->data(), kBodySize, false); | |
158 | |
159 int expected = 0; | |
160 | |
161 MessageLoopHelper helper; | |
162 CallbackTest callback(&helper, true); | |
163 | |
164 base::PerfTimeLogger timer(timer_message); | 396 base::PerfTimeLogger timer(timer_message); |
165 | 397 |
166 for (int i = 0; i < kNumEntries; i++) { | 398 net::TestCompletionCallback cb; |
167 disk_cache::Entry* cache_entry; | 399 std::unique_ptr<ReadHandler> read_handler( |
168 net::TestCompletionCallback cb; | 400 new ReadHandler(this, what_to_read, cache_.get(), cb.callback())); |
Maks Orlovich 2017/05/18 15:45:36: Likewise.
169 int rv = cache_->OpenEntry(entries_[i].key, &cache_entry, cb.callback()); | 401 read_handler->Run(); |
170 if (net::OK != cb.GetResult(rv)) | 402 return cb.WaitForResult() == net::OK; |
171 break; | |
172 int ret = cache_entry->ReadData( | |
173 0, 0, buffer1.get(), kHeadersSize, | |
174 base::Bind(&CallbackTest::Run, base::Unretained(&callback))); | |
175 if (net::ERR_IO_PENDING == ret) | |
176 expected++; | |
177 else if (kHeadersSize != ret) | |
178 break; | |
179 | |
180 if (what_to_read == WhatToRead::HEADERS_AND_BODY) { | |
181 ret = cache_entry->ReadData( | |
182 1, 0, buffer2.get(), entries_[i].data_len, | |
183 base::Bind(&CallbackTest::Run, base::Unretained(&callback))); | |
184 if (net::ERR_IO_PENDING == ret) | |
185 expected++; | |
186 else if (entries_[i].data_len != ret) | |
187 break; | |
188 } | |
189 | |
190 cache_entry->Close(); | |
191 } | |
192 | |
193 helper.WaitUntilCacheIoFinished(expected); | |
194 timer.Done(); | |
195 | |
196 return (expected == helper.callbacks_called()); | |
197 } | 403 } |
198 | 404 |
199 TEST_F(DiskCachePerfTest, BlockfileHashes) { | 405 TEST_F(DiskCachePerfTest, BlockfileHashes) { |
200 base::PerfTimeLogger timer("Hash disk cache keys"); | 406 base::PerfTimeLogger timer("Hash disk cache keys"); |
201 for (int i = 0; i < 300000; i++) { | 407 for (int i = 0; i < 300000; i++) { |
202 std::string key = GenerateKey(true); | 408 std::string key = GenerateKey(true); |
203 base::Hash(key); | 409 base::Hash(key); |
204 } | 410 } |
205 timer.Done(); | 411 timer.Done(); |
206 } | 412 } |
(...skipping 19 matching lines...) |
226 } | 432 } |
227 ASSERT_TRUE(base::EvictFileFromSystemCache(cache_path_)); | 433 ASSERT_TRUE(base::EvictFileFromSystemCache(cache_path_)); |
228 #endif | 434 #endif |
229 | 435 |
230 DisableFirstCleanup(); | 436 DisableFirstCleanup(); |
231 InitCache(); | 437 InitCache(); |
232 } | 438 } |
233 | 439 |
234 void DiskCachePerfTest::CacheBackendPerformance() { | 440 void DiskCachePerfTest::CacheBackendPerformance() { |
235 InitCache(); | 441 InitCache(); |
236 EXPECT_TRUE(TimeWrite()); | 442 EXPECT_TRUE(TimeWrites()); |
237 | 443 |
238 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting(); | 444 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting(); |
239 base::RunLoop().RunUntilIdle(); | 445 base::RunLoop().RunUntilIdle(); |
240 | 446 |
241 ResetAndEvictSystemDiskCache(); | 447 ResetAndEvictSystemDiskCache(); |
242 EXPECT_TRUE(TimeRead(WhatToRead::HEADERS_ONLY, | 448 EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_ONLY, |
243 "Read disk cache headers only (cold)")); | 449 "Read disk cache headers only (cold)")); |
244 EXPECT_TRUE(TimeRead(WhatToRead::HEADERS_ONLY, | 450 EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_ONLY, |
245 "Read disk cache headers only (warm)")); | 451 "Read disk cache headers only (warm)")); |
246 | 452 |
247 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting(); | 453 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting(); |
248 base::RunLoop().RunUntilIdle(); | 454 base::RunLoop().RunUntilIdle(); |
249 | 455 |
250 ResetAndEvictSystemDiskCache(); | 456 ResetAndEvictSystemDiskCache(); |
251 EXPECT_TRUE( | 457 EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_AND_BODY, |
252 TimeRead(WhatToRead::HEADERS_AND_BODY, "Read disk cache entries (cold)")); | 458 "Read disk cache entries (cold)")); |
253 EXPECT_TRUE( | 459 EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_AND_BODY, |
254 TimeRead(WhatToRead::HEADERS_AND_BODY, "Read disk cache entries (warm)")); | 460 "Read disk cache entries (warm)")); |
255 | 461 |
256 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting(); | 462 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting(); |
257 base::RunLoop().RunUntilIdle(); | 463 base::RunLoop().RunUntilIdle(); |
258 } | 464 } |
259 | 465 |
260 TEST_F(DiskCachePerfTest, CacheBackendPerformance) { | 466 TEST_F(DiskCachePerfTest, CacheBackendPerformance) { |
261 CacheBackendPerformance(); | 467 CacheBackendPerformance(); |
262 } | 468 } |
263 | 469 |
264 TEST_F(DiskCachePerfTest, SimpleCacheBackendPerformance) { | 470 TEST_F(DiskCachePerfTest, SimpleCacheBackendPerformance) { |
(...skipping 34 matching lines...) |
299 files.DeleteBlock(address[entry], false); | 505 files.DeleteBlock(address[entry], false); |
300 EXPECT_TRUE( | 506 EXPECT_TRUE( |
301 files.CreateBlock(disk_cache::RANKINGS, block_size, &address[entry])); | 507 files.CreateBlock(disk_cache::RANKINGS, block_size, &address[entry])); |
302 } | 508 } |
303 | 509 |
304 timer2.Done(); | 510 timer2.Done(); |
305 base::RunLoop().RunUntilIdle(); | 511 base::RunLoop().RunUntilIdle(); |
306 } | 512 } |
307 | 513 |
308 } // namespace | 514 } // namespace |