Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(19)

Side by Side Diff: net/disk_cache/entry_unittest.cc

Issue 992733002: Remove //net (except for Android test stuff) and sdch (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: Created 5 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « net/disk_cache/disk_cache_test_util.cc ('k') | net/disk_cache/memory/mem_backend_impl.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/basictypes.h"
6 #include "base/bind.h"
7 #include "base/bind_helpers.h"
8 #include "base/files/file.h"
9 #include "base/files/file_util.h"
10 #include "base/strings/string_util.h"
11 #include "base/strings/stringprintf.h"
12 #include "base/threading/platform_thread.h"
13 #include "net/base/completion_callback.h"
14 #include "net/base/io_buffer.h"
15 #include "net/base/net_errors.h"
16 #include "net/base/test_completion_callback.h"
17 #include "net/disk_cache/blockfile/backend_impl.h"
18 #include "net/disk_cache/blockfile/entry_impl.h"
19 #include "net/disk_cache/disk_cache_test_base.h"
20 #include "net/disk_cache/disk_cache_test_util.h"
21 #include "net/disk_cache/memory/mem_entry_impl.h"
22 #include "net/disk_cache/simple/simple_entry_format.h"
23 #include "net/disk_cache/simple/simple_entry_impl.h"
24 #include "net/disk_cache/simple/simple_synchronous_entry.h"
25 #include "net/disk_cache/simple/simple_test_util.h"
26 #include "net/disk_cache/simple/simple_util.h"
27 #include "testing/gtest/include/gtest/gtest.h"
28
29 using base::Time;
30 using disk_cache::ScopedEntryPtr;
31
// Tests that can run with different types of caches.
class DiskCacheEntryTest : public DiskCacheTestWithCache {
 public:
  // Bodies executed on the cache's background thread (via RunTaskForTest);
  // they perform synchronous, null-callback IO against |entry|.
  void InternalSyncIOBackground(disk_cache::Entry* entry);
  void ExternalSyncIOBackground(disk_cache::Entry* entry);

 protected:
  // Reusable test bodies. The individual TEST_F wrappers configure the cache
  // flavor (blockfile, memory-only, simple, app cache, shader cache, ...)
  // before calling these. Where present, |stream_index| selects which data
  // stream of the entry is exercised.
  void InternalSyncIO();
  void InternalAsyncIO();
  void ExternalSyncIO();
  void ExternalAsyncIO();
  void ReleaseBuffer(int stream_index);
  void StreamAccess();
  void GetKey();
  void GetTimes(int stream_index);
  void GrowData(int stream_index);
  void TruncateData(int stream_index);
  void ZeroLengthIO(int stream_index);
  void Buffering();
  void SizeAtCreate();
  void SizeChanges(int stream_index);
  void ReuseEntry(int size, int stream_index);
  void InvalidData(int stream_index);
  void ReadWriteDestroyBuffer(int stream_index);
  void DoomNormalEntry();
  void DoomEntryNextToOpenEntry();
  void DoomedEntry(int stream_index);
  void BasicSparseIO();
  void HugeSparseIO();
  void GetAvailableRange();
  void CouldBeSparse();
  void UpdateSparseEntry();
  void DoomSparseEntry();
  void PartialSparseEntry();
  // Helpers specific to the simple cache backend.
  bool SimpleCacheMakeBadChecksumEntry(const std::string& key, int* data_size);
  bool SimpleCacheThirdStreamFileExists(const char* key);
  void SyncDoomEntry(const char* key);
};
70
// This part of the test runs on the background thread.
// Performs synchronous (null-callback) reads and writes on streams 0 and 1
// of |entry|, checking returned byte counts, round-tripped contents, hole
// zero-filling and the final stream size.
void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) {
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  // Reading a brand new entry yields zero bytes.
  EXPECT_EQ(
      0,
      entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  // Write all 10 bytes of the buffer to stream 0.
  EXPECT_EQ(10,
            entry->WriteData(
                0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  // Read it back and make sure the contents survived the round trip.
  EXPECT_EQ(
      10,
      entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  memset(buffer3->data(), 0, kSize3);  // buffer3 doubles as an all-zeros ref.
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  // Write 5000 bytes at offset 1500 on stream 1; the hole [0, 1500) is
  // expected to read back as zeros.
  EXPECT_EQ(
      5000,
      entry->WriteData(
          1, 1500, buffer2.get(), kSize2, net::CompletionCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  // Reading 11 bytes into the written block returns only the 4989 bytes that
  // exist past that offset.
  EXPECT_EQ(4989,
            entry->ReadData(
                1, 1511, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(
      5000,
      entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
  // The first 1500 bytes were never written, so they compare equal to zeros.
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  // Stream 1 currently holds 6500 bytes; reads are clipped to the data size.
  EXPECT_EQ(1500,
            entry->ReadData(
                1, 5000, buffer2.get(), kSize2, net::CompletionCallback()));

  // Reading at the end of the data returns zero bytes.
  EXPECT_EQ(0,
            entry->ReadData(
                1, 6500, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_EQ(
      6500,
      entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
  // Overwrite with 8192 bytes (non-truncating, but it extends past the old
  // 6500-byte end) and verify the new size.
  EXPECT_EQ(8192,
            entry->WriteData(
                1, 0, buffer3.get(), 8192, net::CompletionCallback(), false));
  EXPECT_EQ(
      8192,
      entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
  EXPECT_EQ(8192, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(
      0, 0, NULL, 0, net::CompletionCallback(), true));
  EXPECT_EQ(0, entry->WriteData(
      1, 0, NULL, 0, net::CompletionCallback(), true));
}
133
134 // We need to support synchronous IO even though it is not a supported operation
135 // from the point of view of the disk cache's public interface, because we use
136 // it internally, not just by a few tests, but as part of the implementation
137 // (see sparse_control.cc, for example).
138 void DiskCacheEntryTest::InternalSyncIO() {
139 disk_cache::Entry* entry = NULL;
140 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
141 ASSERT_TRUE(NULL != entry);
142
143 // The bulk of the test runs from within the callback, on the cache thread.
144 RunTaskForTest(base::Bind(&DiskCacheEntryTest::InternalSyncIOBackground,
145 base::Unretained(this),
146 entry));
147
148
149 entry->Doom();
150 entry->Close();
151 FlushQueueForTest();
152 EXPECT_EQ(0, cache_->GetEntryCount());
153 }
154
// Synchronous internal-block IO against the default blockfile cache.
TEST_F(DiskCacheEntryTest, InternalSyncIO) {
  InitCache();
  InternalSyncIO();
}

// Same checks against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalSyncIO();
}
165
166 void DiskCacheEntryTest::InternalAsyncIO() {
167 disk_cache::Entry* entry = NULL;
168 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
169 ASSERT_TRUE(NULL != entry);
170
171 // Avoid using internal buffers for the test. We have to write something to
172 // the entry and close it so that we flush the internal buffer to disk. After
173 // that, IO operations will be really hitting the disk. We don't care about
174 // the content, so just extending the entry is enough (all extensions zero-
175 // fill any holes).
176 EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, NULL, 0, false));
177 EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, NULL, 0, false));
178 entry->Close();
179 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
180
181 MessageLoopHelper helper;
182 // Let's verify that each IO goes to the right callback object.
183 CallbackTest callback1(&helper, false);
184 CallbackTest callback2(&helper, false);
185 CallbackTest callback3(&helper, false);
186 CallbackTest callback4(&helper, false);
187 CallbackTest callback5(&helper, false);
188 CallbackTest callback6(&helper, false);
189 CallbackTest callback7(&helper, false);
190 CallbackTest callback8(&helper, false);
191 CallbackTest callback9(&helper, false);
192 CallbackTest callback10(&helper, false);
193 CallbackTest callback11(&helper, false);
194 CallbackTest callback12(&helper, false);
195 CallbackTest callback13(&helper, false);
196
197 const int kSize1 = 10;
198 const int kSize2 = 5000;
199 const int kSize3 = 10000;
200 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
201 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
202 scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
203 CacheTestFillBuffer(buffer1->data(), kSize1, false);
204 CacheTestFillBuffer(buffer2->data(), kSize2, false);
205 CacheTestFillBuffer(buffer3->data(), kSize3, false);
206
207 EXPECT_EQ(0,
208 entry->ReadData(
209 0,
210 15 * 1024,
211 buffer1.get(),
212 kSize1,
213 base::Bind(&CallbackTest::Run, base::Unretained(&callback1))));
214 base::strlcpy(buffer1->data(), "the data", kSize1);
215 int expected = 0;
216 int ret = entry->WriteData(
217 0,
218 0,
219 buffer1.get(),
220 kSize1,
221 base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
222 false);
223 EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
224 if (net::ERR_IO_PENDING == ret)
225 expected++;
226
227 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
228 memset(buffer2->data(), 0, kSize2);
229 ret = entry->ReadData(
230 0,
231 0,
232 buffer2.get(),
233 kSize1,
234 base::Bind(&CallbackTest::Run, base::Unretained(&callback3)));
235 EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
236 if (net::ERR_IO_PENDING == ret)
237 expected++;
238
239 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
240 EXPECT_STREQ("the data", buffer2->data());
241
242 base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
243 ret = entry->WriteData(
244 1,
245 1500,
246 buffer2.get(),
247 kSize2,
248 base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
249 true);
250 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
251 if (net::ERR_IO_PENDING == ret)
252 expected++;
253
254 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
255 memset(buffer3->data(), 0, kSize3);
256 ret = entry->ReadData(
257 1,
258 1511,
259 buffer3.get(),
260 kSize2,
261 base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
262 EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
263 if (net::ERR_IO_PENDING == ret)
264 expected++;
265
266 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
267 EXPECT_STREQ("big data goes here", buffer3->data());
268 ret = entry->ReadData(
269 1,
270 0,
271 buffer2.get(),
272 kSize2,
273 base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
274 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
275 if (net::ERR_IO_PENDING == ret)
276 expected++;
277
278 memset(buffer3->data(), 0, kSize3);
279
280 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
281 EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
282 ret = entry->ReadData(
283 1,
284 5000,
285 buffer2.get(),
286 kSize2,
287 base::Bind(&CallbackTest::Run, base::Unretained(&callback7)));
288 EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
289 if (net::ERR_IO_PENDING == ret)
290 expected++;
291
292 ret = entry->ReadData(
293 1,
294 0,
295 buffer3.get(),
296 kSize3,
297 base::Bind(&CallbackTest::Run, base::Unretained(&callback9)));
298 EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
299 if (net::ERR_IO_PENDING == ret)
300 expected++;
301
302 ret = entry->WriteData(
303 1,
304 0,
305 buffer3.get(),
306 8192,
307 base::Bind(&CallbackTest::Run, base::Unretained(&callback10)),
308 true);
309 EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
310 if (net::ERR_IO_PENDING == ret)
311 expected++;
312
313 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
314 ret = entry->ReadData(
315 1,
316 0,
317 buffer3.get(),
318 kSize3,
319 base::Bind(&CallbackTest::Run, base::Unretained(&callback11)));
320 EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
321 if (net::ERR_IO_PENDING == ret)
322 expected++;
323
324 EXPECT_EQ(8192, entry->GetDataSize(1));
325
326 ret = entry->ReadData(
327 0,
328 0,
329 buffer1.get(),
330 kSize1,
331 base::Bind(&CallbackTest::Run, base::Unretained(&callback12)));
332 EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
333 if (net::ERR_IO_PENDING == ret)
334 expected++;
335
336 ret = entry->ReadData(
337 1,
338 0,
339 buffer2.get(),
340 kSize2,
341 base::Bind(&CallbackTest::Run, base::Unretained(&callback13)));
342 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
343 if (net::ERR_IO_PENDING == ret)
344 expected++;
345
346 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
347
348 EXPECT_FALSE(helper.callback_reused_error());
349
350 entry->Doom();
351 entry->Close();
352 FlushQueueForTest();
353 EXPECT_EQ(0, cache_->GetEntryCount());
354 }
355
// Asynchronous internal-block IO against the default blockfile cache.
TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
  InitCache();
  InternalAsyncIO();
}

// Same checks against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalAsyncIO();
}
366
// This part of the test runs on the background thread.
// Same shape as InternalSyncIOBackground, but with buffers (17000/25000
// bytes) sized to exercise the large-data ("external") storage path.
void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) {
  const int kSize1 = 17000;
  const int kSize2 = 25000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  // Round-trip 17000 bytes through stream 0.
  EXPECT_EQ(17000,
            entry->WriteData(
                0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(
      17000,
      entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  // Write 25000 bytes at offset 10000 on stream 1; [0, 10000) is a
  // zero-filled hole and the stream grows to 35000 bytes.
  EXPECT_EQ(
      25000,
      entry->WriteData(
          1, 10000, buffer2.get(), kSize2, net::CompletionCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  // Reading 11 bytes into the written block returns the remaining 24989.
  EXPECT_EQ(24989,
            entry->ReadData(
                1, 10011, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(
      25000,
      entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
  // Reads are clipped at the current data size (35000 bytes).
  EXPECT_EQ(5000,
            entry->ReadData(
                1, 30000, buffer2.get(), kSize2, net::CompletionCallback()));

  // Reading at the end of the data returns zero bytes.
  EXPECT_EQ(0,
            entry->ReadData(
                1, 35000, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_EQ(
      17000,
      entry->ReadData(1, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  // Overwrite the tail: 17000 bytes at offset 20000 leaves 37000 total.
  EXPECT_EQ(
      17000,
      entry->WriteData(
          1, 20000, buffer1.get(), kSize1, net::CompletionCallback(), false));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(
      0, 0, NULL, 0, net::CompletionCallback(), true));
  EXPECT_EQ(0, entry->WriteData(
      1, 0, NULL, 0, net::CompletionCallback(), true));
}
420
421 void DiskCacheEntryTest::ExternalSyncIO() {
422 disk_cache::Entry* entry;
423 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
424
425 // The bulk of the test runs from within the callback, on the cache thread.
426 RunTaskForTest(base::Bind(&DiskCacheEntryTest::ExternalSyncIOBackground,
427 base::Unretained(this),
428 entry));
429
430 entry->Doom();
431 entry->Close();
432 FlushQueueForTest();
433 EXPECT_EQ(0, cache_->GetEntryCount());
434 }
435
// Synchronous large-data IO against the default blockfile cache.
TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
  InitCache();
  ExternalSyncIO();
}

// Same, with the backend's internal buffering disabled (kNoBuffering).
TEST_F(DiskCacheEntryTest, ExternalSyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalSyncIO();
}

// Same checks against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalSyncIO();
}
452
// Asynchronous version of the large-data IO test. Every operation may
// complete synchronously or return ERR_IO_PENDING; pending completions are
// tallied in |expected| and awaited via MessageLoopHelper, and each
// operation gets its own CallbackTest so cross-wired completions show up as
// callback_reused_error().
void DiskCacheEntryTest::ExternalAsyncIO() {
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));

  // Number of asynchronous completions still outstanding.
  int expected = 0;

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);

  const int kSize1 = 17000;
  const int kSize2 = 25000;
  const int kSize3 = 25000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  // Write 17000 bytes to stream 0.
  int ret = entry->WriteData(
      0,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback1)),
      false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  // Read it back and verify the contents.
  memset(buffer2->data(), 0, kSize1);
  ret = entry->ReadData(
      0,
      0,
      buffer2.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  // Write 25000 bytes at offset 10000 on stream 1; [0, 10000) becomes a
  // zero-filled hole.
  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry->WriteData(
      1,
      10000,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback3)),
      false);
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  // Reading 11 bytes into the written block returns the remaining 24989.
  memset(buffer3->data(), 0, kSize3);
  ret = entry->ReadData(
      1,
      10011,
      buffer3.get(),
      kSize3,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback4)));
  EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry->ReadData(
      1,
      0,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  // The hole reads back as zeros.
  memset(buffer3->data(), 0, kSize3);
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));
  // Reads clip at the current data size (35000 bytes).
  ret = entry->ReadData(
      1,
      30000,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  // Reading at the end of the data is expected to complete synchronously
  // with zero bytes.
  EXPECT_EQ(0,
            entry->ReadData(
                1,
                35000,
                buffer2.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback7))));
  ret = entry->ReadData(
      1,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback8)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  // Overwrite the tail: 17000 bytes at offset 20000 leaves 37000 total.
  ret = entry->WriteData(
      1,
      20000,
      buffer3.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback9)),
      false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // No completion may have been delivered to the wrong callback object.
  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
595
// Asynchronous large-data IO against the default blockfile cache.
TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
  InitCache();
  ExternalAsyncIO();
}

// Same, with the backend's internal buffering disabled (kNoBuffering).
TEST_F(DiskCacheEntryTest, ExternalAsyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalAsyncIO();
}

// Same checks against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalAsyncIO();
}
612
613 // Tests that IOBuffers are not referenced after IO completes.
614 void DiskCacheEntryTest::ReleaseBuffer(int stream_index) {
615 disk_cache::Entry* entry = NULL;
616 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
617 ASSERT_TRUE(NULL != entry);
618
619 const int kBufferSize = 1024;
620 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
621 CacheTestFillBuffer(buffer->data(), kBufferSize, false);
622
623 net::ReleaseBufferCompletionCallback cb(buffer.get());
624 int rv = entry->WriteData(
625 stream_index, 0, buffer.get(), kBufferSize, cb.callback(), false);
626 EXPECT_EQ(kBufferSize, cb.GetResult(rv));
627 entry->Close();
628 }
629
// Blockfile cache, with buffering disabled so the write really completes
// against the backing store.
TEST_F(DiskCacheEntryTest, ReleaseBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ReleaseBuffer(0);
}

// Same check against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyReleaseBuffer) {
  SetMemoryOnlyMode();
  InitCache();
  ReleaseBuffer(0);
}
641
642 void DiskCacheEntryTest::StreamAccess() {
643 disk_cache::Entry* entry = NULL;
644 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
645 ASSERT_TRUE(NULL != entry);
646
647 const int kBufferSize = 1024;
648 const int kNumStreams = 3;
649 scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
650 for (int i = 0; i < kNumStreams; i++) {
651 reference_buffers[i] = new net::IOBuffer(kBufferSize);
652 CacheTestFillBuffer(reference_buffers[i]->data(), kBufferSize, false);
653 }
654 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kBufferSize));
655 for (int i = 0; i < kNumStreams; i++) {
656 EXPECT_EQ(
657 kBufferSize,
658 WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false));
659 memset(buffer1->data(), 0, kBufferSize);
660 EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
661 EXPECT_EQ(
662 0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
663 }
664 EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
665 ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
666 entry->Close();
667
668 // Open the entry and read it in chunks, including a read past the end.
669 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
670 ASSERT_TRUE(NULL != entry);
671 const int kReadBufferSize = 600;
672 const int kFinalReadSize = kBufferSize - kReadBufferSize;
673 static_assert(kFinalReadSize < kReadBufferSize,
674 "should be exactly two reads");
675 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize));
676 for (int i = 0; i < kNumStreams; i++) {
677 memset(buffer2->data(), 0, kReadBufferSize);
678 EXPECT_EQ(kReadBufferSize,
679 ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
680 EXPECT_EQ(
681 0,
682 memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));
683
684 memset(buffer2->data(), 0, kReadBufferSize);
685 EXPECT_EQ(
686 kFinalReadSize,
687 ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize));
688 EXPECT_EQ(0,
689 memcmp(reference_buffers[i]->data() + kReadBufferSize,
690 buffer2->data(),
691 kFinalReadSize));
692 }
693
694 entry->Close();
695 }
696
// Per-stream isolation on the default blockfile cache.
TEST_F(DiskCacheEntryTest, StreamAccess) {
  InitCache();
  StreamAccess();
}

// Same checks against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
  SetMemoryOnlyMode();
  InitCache();
  StreamAccess();
}
707
// Verifies that Entry::GetKey() returns exactly the key used at creation,
// for keys of several sizes: short, 1000 bytes, 3000 bytes, 19999 bytes and
// 16KB (0x4000 bytes).
void DiskCacheEntryTest::GetKey() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(key, entry->GetKey()) << "short key";
  entry->Close();

  // Seed rand() so CacheTestFillBuffer() produces randomized key bytes.
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);
  char key_buffer[20000];

  // NOTE(review): the 'true' argument to CacheTestFillBuffer presumably
  // avoids embedded NUL bytes so the buffer forms a valid C string — confirm
  // in disk_cache_test_util.
  CacheTestFillBuffer(key_buffer, 3000, true);
  key_buffer[1000] = '\0';  // Truncate to a 1000-character key.

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
  entry->Close();

  key_buffer[1000] = 'p';   // Un-truncate; the key is now 3000 characters.
  key_buffer[3000] = '\0';
  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
  key_buffer[19999] = '\0';  // 19999-character key.

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "long key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, 0x4000, true);
  key_buffer[0x4000] = '\0';  // Exactly 16KB of key data.

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
  entry->Close();
}
750
// Key retrieval on the default blockfile cache.
TEST_F(DiskCacheEntryTest, GetKey) {
  InitCache();
  GetKey();
}

// Same checks against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
  SetMemoryOnlyMode();
  InitCache();
  GetKey();
}
761
// Checks how an entry's timestamps react to writes and reads. Per the
// expectations below: APP_CACHE entries keep their original times across
// both writes and reads, SHADER_CACHE entries are not updated by reads, and
// the default cache refreshes last-used on read and last-modified on write.
void DiskCacheEntryTest::GetTimes(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;

  Time t1 = Time::Now();
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  // A fresh entry carries the creation time for both values.
  EXPECT_TRUE(entry->GetLastModified() >= t1);
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t2 = Time::Now();
  EXPECT_TRUE(t2 > t1);
  EXPECT_EQ(0, WriteData(entry, stream_index, 200, NULL, 0, false));
  if (type_ == net::APP_CACHE) {
    // AppCache entries keep their original modification time across writes.
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else {
    EXPECT_TRUE(entry->GetLastModified() >= t2);
  }
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t3 = Time::Now();
  EXPECT_TRUE(t3 > t2);
  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 0, buffer.get(), kSize));
  if (type_ == net::APP_CACHE) {
    // Reads don't touch the times either.
    EXPECT_TRUE(entry->GetLastUsed() < t2);
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else if (type_ == net::SHADER_CACHE) {
    // Shader cache entries are not time-updated by reads.
    EXPECT_TRUE(entry->GetLastUsed() < t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  } else {
    // Regular caches refresh last-used on read; last-modified is unchanged.
    EXPECT_TRUE(entry->GetLastUsed() >= t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  }
  entry->Close();
}
800
// Timestamp behavior on the default blockfile cache.
TEST_F(DiskCacheEntryTest, GetTimes) {
  InitCache();
  GetTimes(0);
}

// Timestamp behavior on the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGetTimes) {
  SetMemoryOnlyMode();
  InitCache();
  GetTimes(0);
}

// AppCache mode: times are not updated by normal access.
TEST_F(DiskCacheEntryTest, AppCacheGetTimes) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  GetTimes(0);
}

// Shader cache mode: reads do not update times.
TEST_F(DiskCacheEntryTest, ShaderCacheGetTimes) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  GetTimes(0);
}
823
// Grows a stream through increasing sizes (10 -> 2000 -> 20000 bytes, then a
// zero-length extending write to 45500), checking contents and
// GetDataSize() at each step, both while the entry stays open and after
// closing/reopening it.
void DiskCacheEntryTest::GrowData(int stream_index) {
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  memset(buffer2->data(), 0, kSize);

  // Start with a tiny 10-byte write and verify the round trip.
  base::strlcpy(buffer1->data(), "the data", kSize);
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, ReadData(entry, stream_index, 0, buffer2.get(), 10));
  EXPECT_STREQ("the data", buffer2->data());
  EXPECT_EQ(10, entry->GetDataSize(stream_index));

  // Grow to 2000 bytes while the entry stays open.
  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));

  // Grow to 20000 bytes.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Repeat the same growth, but reopening the entry between steps.
  memset(buffer2->data(), 0, kSize);
  std::string key2("Second key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, entry->GetDataSize(stream_index));
  entry->Close();

  // Go from an internal address to a bigger block size.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
  entry->Close();
  memset(buffer2->data(), 0, kSize);

  // Go from an internal address to an external one.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Double check the size from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));

  // Now extend the entry without actual data.
  EXPECT_EQ(0, WriteData(entry, stream_index, 45500, buffer1.get(), 0, false));
  entry->Close();

  // And check again from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(45500, entry->GetDataSize(stream_index));
  entry->Close();
}
893
// Growth across size classes on the default blockfile cache.
TEST_F(DiskCacheEntryTest, GrowData) {
  InitCache();
  GrowData(0);
}

// Same, with the backend's internal buffering disabled (kNoBuffering).
TEST_F(DiskCacheEntryTest, GrowDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  GrowData(0);
}

// Same checks against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
  SetMemoryOnlyMode();
  InitCache();
  GrowData(0);
}
910
// Shrinks a stream in several ways — truncating writes, zero-length
// truncating writes, truncation of an externally-stored stream, and
// shrinking back from an external file to an internal block — verifying
// GetDataSize() and preserved data at each step.
void DiskCacheEntryTest::TruncateData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple truncation:
  EXPECT_EQ(200, WriteData(entry, stream_index, 0, buffer1.get(), 200, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  // A shorter non-truncating write leaves the size alone...
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  // ...while a truncating one shrinks the stream.
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, true));
  EXPECT_EQ(100, entry->GetDataSize(stream_index));
  // Zero-length truncating writes cut the stream at the given offset.
  EXPECT_EQ(0, WriteData(entry, stream_index, 50, buffer1.get(), 0, true));
  EXPECT_EQ(50, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Go to an external file.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), 20000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
  memset(buffer2->data(), 0, kSize2);

  // External file truncation
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, true));
  EXPECT_EQ(18000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 17500, buffer1.get(), 0, true));
  EXPECT_EQ(17500, entry->GetDataSize(stream_index));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer2.get(), 600));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
  EXPECT_EQ(1000, ReadData(entry, stream_index, 0, buffer2.get(), 1000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
      << "Preserves previous data";

  // Go from external file to zero length.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));

  entry->Close();
}
975
976 TEST_F(DiskCacheEntryTest, TruncateData) {
977 InitCache();
978 TruncateData(0);
979 }
980
981 TEST_F(DiskCacheEntryTest, TruncateDataNoBuffer) {
982 InitCache();
983 cache_impl_->SetFlags(disk_cache::kNoBuffering);
984 TruncateData(0);
985 }
986
987 TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
988 SetMemoryOnlyMode();
989 InitCache();
990 TruncateData(0);
991 }
992
993 void DiskCacheEntryTest::ZeroLengthIO(int stream_index) {
994 std::string key("the first key");
995 disk_cache::Entry* entry;
996 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
997
998 EXPECT_EQ(0, ReadData(entry, stream_index, 0, NULL, 0));
999 EXPECT_EQ(0, WriteData(entry, stream_index, 0, NULL, 0, false));
1000
1001 // This write should extend the entry.
1002 EXPECT_EQ(0, WriteData(entry, stream_index, 1000, NULL, 0, false));
1003 EXPECT_EQ(0, ReadData(entry, stream_index, 500, NULL, 0));
1004 EXPECT_EQ(0, ReadData(entry, stream_index, 2000, NULL, 0));
1005 EXPECT_EQ(1000, entry->GetDataSize(stream_index));
1006
1007 EXPECT_EQ(0, WriteData(entry, stream_index, 100000, NULL, 0, true));
1008 EXPECT_EQ(0, ReadData(entry, stream_index, 50000, NULL, 0));
1009 EXPECT_EQ(100000, entry->GetDataSize(stream_index));
1010
1011 // Let's verify the actual content.
1012 const int kSize = 20;
1013 const char zeros[kSize] = {};
1014 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1015
1016 CacheTestFillBuffer(buffer->data(), kSize, false);
1017 EXPECT_EQ(kSize, ReadData(entry, stream_index, 500, buffer.get(), kSize));
1018 EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
1019
1020 CacheTestFillBuffer(buffer->data(), kSize, false);
1021 EXPECT_EQ(kSize, ReadData(entry, stream_index, 5000, buffer.get(), kSize));
1022 EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
1023
1024 CacheTestFillBuffer(buffer->data(), kSize, false);
1025 EXPECT_EQ(kSize, ReadData(entry, stream_index, 50000, buffer.get(), kSize));
1026 EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
1027
1028 entry->Close();
1029 }
1030
1031 TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
1032 InitCache();
1033 ZeroLengthIO(0);
1034 }
1035
1036 TEST_F(DiskCacheEntryTest, ZeroLengthIONoBuffer) {
1037 InitCache();
1038 cache_impl_->SetFlags(disk_cache::kNoBuffering);
1039 ZeroLengthIO(0);
1040 }
1041
1042 TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
1043 SetMemoryOnlyMode();
1044 InitCache();
1045 ZeroLengthIO(0);
1046 }
1047
1048 // Tests that we handle the content correctly when buffering, a feature of the
1049 // standard cache that permits fast responses to certain reads.
1050 void DiskCacheEntryTest::Buffering() {
1051 std::string key("the first key");
1052 disk_cache::Entry* entry;
1053 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1054
1055 const int kSize = 200;
1056 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1057 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
1058 CacheTestFillBuffer(buffer1->data(), kSize, true);
1059 CacheTestFillBuffer(buffer2->data(), kSize, true);
1060
1061 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
1062 entry->Close();
1063
1064 // Write a little more and read what we wrote before.
1065 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1066 EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
1067 EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
1068 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1069
1070 // Now go to an external file.
1071 EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
1072 entry->Close();
1073
1074 // Write something else and verify old data.
1075 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1076 EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
1077 CacheTestFillBuffer(buffer2->data(), kSize, true);
1078 EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
1079 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1080 CacheTestFillBuffer(buffer2->data(), kSize, true);
1081 EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
1082 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1083 CacheTestFillBuffer(buffer2->data(), kSize, true);
1084 EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
1085 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1086
1087 // Extend the file some more.
1088 EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
1089 entry->Close();
1090
1091 // And now make sure that we can deal with data in both places (ram/disk).
1092 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1093 EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));
1094
1095 // We should not overwrite the data at 18000 with this.
1096 EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
1097 CacheTestFillBuffer(buffer2->data(), kSize, true);
1098 EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
1099 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1100 CacheTestFillBuffer(buffer2->data(), kSize, true);
1101 EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
1102 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1103
1104 EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
1105 CacheTestFillBuffer(buffer2->data(), kSize, true);
1106 EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
1107 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));
1108
1109 CacheTestFillBuffer(buffer2->data(), kSize, true);
1110 EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
1111 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));
1112
1113 // Extend the file again and read before without closing the entry.
1114 EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
1115 EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
1116 CacheTestFillBuffer(buffer2->data(), kSize, true);
1117 EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
1118 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1119 CacheTestFillBuffer(buffer2->data(), kSize, true);
1120 EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
1121 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1122
1123 entry->Close();
1124 }
1125
1126 TEST_F(DiskCacheEntryTest, Buffering) {
1127 InitCache();
1128 Buffering();
1129 }
1130
1131 TEST_F(DiskCacheEntryTest, BufferingNoBuffer) {
1132 InitCache();
1133 cache_impl_->SetFlags(disk_cache::kNoBuffering);
1134 Buffering();
1135 }
1136
1137 // Checks that entries are zero length when created.
1138 void DiskCacheEntryTest::SizeAtCreate() {
1139 const char key[] = "the first key";
1140 disk_cache::Entry* entry;
1141 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1142
1143 const int kNumStreams = 3;
1144 for (int i = 0; i < kNumStreams; ++i)
1145 EXPECT_EQ(0, entry->GetDataSize(i));
1146 entry->Close();
1147 }
1148
1149 TEST_F(DiskCacheEntryTest, SizeAtCreate) {
1150 InitCache();
1151 SizeAtCreate();
1152 }
1153
1154 TEST_F(DiskCacheEntryTest, MemoryOnlySizeAtCreate) {
1155 SetMemoryOnlyMode();
1156 InitCache();
1157 SizeAtCreate();
1158 }
1159
1160 // Some extra tests to make sure that buffering works properly when changing
1161 // the entry size.
1162 void DiskCacheEntryTest::SizeChanges(int stream_index) {
1163 std::string key("the first key");
1164 disk_cache::Entry* entry;
1165 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1166
1167 const int kSize = 200;
1168 const char zeros[kSize] = {};
1169 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1170 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
1171 CacheTestFillBuffer(buffer1->data(), kSize, true);
1172 CacheTestFillBuffer(buffer2->data(), kSize, true);
1173
1174 EXPECT_EQ(kSize,
1175 WriteData(entry, stream_index, 0, buffer1.get(), kSize, true));
1176 EXPECT_EQ(kSize,
1177 WriteData(entry, stream_index, 17000, buffer1.get(), kSize, true));
1178 EXPECT_EQ(kSize,
1179 WriteData(entry, stream_index, 23000, buffer1.get(), kSize, true));
1180 entry->Close();
1181
1182 // Extend the file and read between the old size and the new write.
1183 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1184 EXPECT_EQ(23000 + kSize, entry->GetDataSize(stream_index));
1185 EXPECT_EQ(kSize,
1186 WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
1187 EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
1188 EXPECT_EQ(kSize, ReadData(entry, stream_index, 24000, buffer2.get(), kSize));
1189 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));
1190
1191 // Read at the end of the old file size.
1192 EXPECT_EQ(
1193 kSize,
1194 ReadData(entry, stream_index, 23000 + kSize - 35, buffer2.get(), kSize));
1195 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));
1196
1197 // Read slightly before the last write.
1198 CacheTestFillBuffer(buffer2->data(), kSize, true);
1199 EXPECT_EQ(kSize, ReadData(entry, stream_index, 24900, buffer2.get(), kSize));
1200 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1201 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1202
1203 // Extend the entry a little more.
1204 EXPECT_EQ(kSize,
1205 WriteData(entry, stream_index, 26000, buffer1.get(), kSize, true));
1206 EXPECT_EQ(26000 + kSize, entry->GetDataSize(stream_index));
1207 CacheTestFillBuffer(buffer2->data(), kSize, true);
1208 EXPECT_EQ(kSize, ReadData(entry, stream_index, 25900, buffer2.get(), kSize));
1209 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1210 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1211
1212 // And now reduce the size.
1213 EXPECT_EQ(kSize,
1214 WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
1215 EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
1216 EXPECT_EQ(
1217 28,
1218 ReadData(entry, stream_index, 25000 + kSize - 28, buffer2.get(), kSize));
1219 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));
1220
1221 // Reduce the size with a buffer that is not extending the size.
1222 EXPECT_EQ(kSize,
1223 WriteData(entry, stream_index, 24000, buffer1.get(), kSize, false));
1224 EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
1225 EXPECT_EQ(kSize,
1226 WriteData(entry, stream_index, 24500, buffer1.get(), kSize, true));
1227 EXPECT_EQ(24500 + kSize, entry->GetDataSize(stream_index));
1228 EXPECT_EQ(kSize, ReadData(entry, stream_index, 23900, buffer2.get(), kSize));
1229 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1230 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1231
1232 // And now reduce the size below the old size.
1233 EXPECT_EQ(kSize,
1234 WriteData(entry, stream_index, 19000, buffer1.get(), kSize, true));
1235 EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));
1236 EXPECT_EQ(kSize, ReadData(entry, stream_index, 18900, buffer2.get(), kSize));
1237 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1238 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1239
1240 // Verify that the actual file is truncated.
1241 entry->Close();
1242 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1243 EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));
1244
1245 // Extend the newly opened file with a zero length write, expect zero fill.
1246 EXPECT_EQ(
1247 0,
1248 WriteData(entry, stream_index, 20000 + kSize, buffer1.get(), 0, false));
1249 EXPECT_EQ(kSize,
1250 ReadData(entry, stream_index, 19000 + kSize, buffer1.get(), kSize));
1251 EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));
1252
1253 entry->Close();
1254 }
1255
1256 TEST_F(DiskCacheEntryTest, SizeChanges) {
1257 InitCache();
1258 SizeChanges(1);
1259 }
1260
1261 TEST_F(DiskCacheEntryTest, SizeChangesNoBuffer) {
1262 InitCache();
1263 cache_impl_->SetFlags(disk_cache::kNoBuffering);
1264 SizeChanges(1);
1265 }
1266
1267 // Write more than the total cache capacity but to a single entry. |size| is the
1268 // amount of bytes to write each time.
1269 void DiskCacheEntryTest::ReuseEntry(int size, int stream_index) {
1270 std::string key1("the first key");
1271 disk_cache::Entry* entry;
1272 ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
1273
1274 entry->Close();
1275 std::string key2("the second key");
1276 ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
1277
1278 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
1279 CacheTestFillBuffer(buffer->data(), size, false);
1280
1281 for (int i = 0; i < 15; i++) {
1282 EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer.get(), 0, true));
1283 EXPECT_EQ(size,
1284 WriteData(entry, stream_index, 0, buffer.get(), size, false));
1285 entry->Close();
1286 ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
1287 }
1288
1289 entry->Close();
1290 ASSERT_EQ(net::OK, OpenEntry(key1, &entry)) << "have not evicted this entry";
1291 entry->Close();
1292 }
1293
1294 TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
1295 SetMaxSize(200 * 1024);
1296 InitCache();
1297 ReuseEntry(20 * 1024, 0);
1298 }
1299
1300 TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
1301 SetMemoryOnlyMode();
1302 SetMaxSize(200 * 1024);
1303 InitCache();
1304 ReuseEntry(20 * 1024, 0);
1305 }
1306
1307 TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
1308 SetMaxSize(100 * 1024);
1309 InitCache();
1310 ReuseEntry(10 * 1024, 0);
1311 }
1312
1313 TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
1314 SetMemoryOnlyMode();
1315 SetMaxSize(100 * 1024);
1316 InitCache();
1317 ReuseEntry(10 * 1024, 0);
1318 }
1319
1320 // Reading somewhere that was not written should return zeros.
1321 void DiskCacheEntryTest::InvalidData(int stream_index) {
1322 std::string key("the first key");
1323 disk_cache::Entry* entry;
1324 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1325
1326 const int kSize1 = 20000;
1327 const int kSize2 = 20000;
1328 const int kSize3 = 20000;
1329 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
1330 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
1331 scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
1332
1333 CacheTestFillBuffer(buffer1->data(), kSize1, false);
1334 memset(buffer2->data(), 0, kSize2);
1335
1336 // Simple data grow:
1337 EXPECT_EQ(200,
1338 WriteData(entry, stream_index, 400, buffer1.get(), 200, false));
1339 EXPECT_EQ(600, entry->GetDataSize(stream_index));
1340 EXPECT_EQ(100, ReadData(entry, stream_index, 300, buffer3.get(), 100));
1341 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
1342 entry->Close();
1343 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1344
1345 // The entry is now on disk. Load it and extend it.
1346 EXPECT_EQ(200,
1347 WriteData(entry, stream_index, 800, buffer1.get(), 200, false));
1348 EXPECT_EQ(1000, entry->GetDataSize(stream_index));
1349 EXPECT_EQ(100, ReadData(entry, stream_index, 700, buffer3.get(), 100));
1350 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
1351 entry->Close();
1352 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1353
1354 // This time using truncate.
1355 EXPECT_EQ(200,
1356 WriteData(entry, stream_index, 1800, buffer1.get(), 200, true));
1357 EXPECT_EQ(2000, entry->GetDataSize(stream_index));
1358 EXPECT_EQ(100, ReadData(entry, stream_index, 1500, buffer3.get(), 100));
1359 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
1360
1361 // Go to an external file.
1362 EXPECT_EQ(200,
1363 WriteData(entry, stream_index, 19800, buffer1.get(), 200, false));
1364 EXPECT_EQ(20000, entry->GetDataSize(stream_index));
1365 EXPECT_EQ(4000, ReadData(entry, stream_index, 14000, buffer3.get(), 4000));
1366 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));
1367
1368 // And back to an internal block.
1369 EXPECT_EQ(600,
1370 WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
1371 EXPECT_EQ(1600, entry->GetDataSize(stream_index));
1372 EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer3.get(), 600));
1373 EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));
1374
1375 // Extend it again.
1376 EXPECT_EQ(600,
1377 WriteData(entry, stream_index, 2000, buffer1.get(), 600, false));
1378 EXPECT_EQ(2600, entry->GetDataSize(stream_index));
1379 EXPECT_EQ(200, ReadData(entry, stream_index, 1800, buffer3.get(), 200));
1380 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
1381
1382 // And again (with truncation flag).
1383 EXPECT_EQ(600,
1384 WriteData(entry, stream_index, 3000, buffer1.get(), 600, true));
1385 EXPECT_EQ(3600, entry->GetDataSize(stream_index));
1386 EXPECT_EQ(200, ReadData(entry, stream_index, 2800, buffer3.get(), 200));
1387 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
1388
1389 entry->Close();
1390 }
1391
1392 TEST_F(DiskCacheEntryTest, InvalidData) {
1393 InitCache();
1394 InvalidData(0);
1395 }
1396
1397 TEST_F(DiskCacheEntryTest, InvalidDataNoBuffer) {
1398 InitCache();
1399 cache_impl_->SetFlags(disk_cache::kNoBuffering);
1400 InvalidData(0);
1401 }
1402
1403 TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
1404 SetMemoryOnlyMode();
1405 InitCache();
1406 InvalidData(0);
1407 }
1408
1409 // Tests that the cache preserves the buffer of an IO operation.
1410 void DiskCacheEntryTest::ReadWriteDestroyBuffer(int stream_index) {
1411 std::string key("the first key");
1412 disk_cache::Entry* entry;
1413 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1414
1415 const int kSize = 200;
1416 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1417 CacheTestFillBuffer(buffer->data(), kSize, false);
1418
1419 net::TestCompletionCallback cb;
1420 EXPECT_EQ(net::ERR_IO_PENDING,
1421 entry->WriteData(
1422 stream_index, 0, buffer.get(), kSize, cb.callback(), false));
1423
1424 // Release our reference to the buffer.
1425 buffer = NULL;
1426 EXPECT_EQ(kSize, cb.WaitForResult());
1427
1428 // And now test with a Read().
1429 buffer = new net::IOBuffer(kSize);
1430 CacheTestFillBuffer(buffer->data(), kSize, false);
1431
1432 EXPECT_EQ(
1433 net::ERR_IO_PENDING,
1434 entry->ReadData(stream_index, 0, buffer.get(), kSize, cb.callback()));
1435 buffer = NULL;
1436 EXPECT_EQ(kSize, cb.WaitForResult());
1437
1438 entry->Close();
1439 }
1440
1441 TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
1442 InitCache();
1443 ReadWriteDestroyBuffer(0);
1444 }
1445
1446 void DiskCacheEntryTest::DoomNormalEntry() {
1447 std::string key("the first key");
1448 disk_cache::Entry* entry;
1449 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1450 entry->Doom();
1451 entry->Close();
1452
1453 const int kSize = 20000;
1454 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1455 CacheTestFillBuffer(buffer->data(), kSize, true);
1456 buffer->data()[19999] = '\0';
1457
1458 key = buffer->data();
1459 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1460 EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1461 EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
1462 entry->Doom();
1463 entry->Close();
1464
1465 FlushQueueForTest();
1466 EXPECT_EQ(0, cache_->GetEntryCount());
1467 }
1468
1469 TEST_F(DiskCacheEntryTest, DoomEntry) {
1470 InitCache();
1471 DoomNormalEntry();
1472 }
1473
1474 TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
1475 SetMemoryOnlyMode();
1476 InitCache();
1477 DoomNormalEntry();
1478 }
1479
1480 // Tests dooming an entry that's linked to an open entry.
1481 void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
1482 disk_cache::Entry* entry1;
1483 disk_cache::Entry* entry2;
1484 ASSERT_EQ(net::OK, CreateEntry("fixed", &entry1));
1485 entry1->Close();
1486 ASSERT_EQ(net::OK, CreateEntry("foo", &entry1));
1487 entry1->Close();
1488 ASSERT_EQ(net::OK, CreateEntry("bar", &entry1));
1489 entry1->Close();
1490
1491 ASSERT_EQ(net::OK, OpenEntry("foo", &entry1));
1492 ASSERT_EQ(net::OK, OpenEntry("bar", &entry2));
1493 entry2->Doom();
1494 entry2->Close();
1495
1496 ASSERT_EQ(net::OK, OpenEntry("foo", &entry2));
1497 entry2->Doom();
1498 entry2->Close();
1499 entry1->Close();
1500
1501 ASSERT_EQ(net::OK, OpenEntry("fixed", &entry1));
1502 entry1->Close();
1503 }
1504
1505 TEST_F(DiskCacheEntryTest, DoomEntryNextToOpenEntry) {
1506 InitCache();
1507 DoomEntryNextToOpenEntry();
1508 }
1509
1510 TEST_F(DiskCacheEntryTest, NewEvictionDoomEntryNextToOpenEntry) {
1511 SetNewEviction();
1512 InitCache();
1513 DoomEntryNextToOpenEntry();
1514 }
1515
1516 TEST_F(DiskCacheEntryTest, AppCacheDoomEntryNextToOpenEntry) {
1517 SetCacheType(net::APP_CACHE);
1518 InitCache();
1519 DoomEntryNextToOpenEntry();
1520 }
1521
1522 // Verify that basic operations work as expected with doomed entries.
1523 void DiskCacheEntryTest::DoomedEntry(int stream_index) {
1524 std::string key("the first key");
1525 disk_cache::Entry* entry;
1526 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1527 entry->Doom();
1528
1529 FlushQueueForTest();
1530 EXPECT_EQ(0, cache_->GetEntryCount());
1531 Time initial = Time::Now();
1532 AddDelay();
1533
1534 const int kSize1 = 2000;
1535 const int kSize2 = 2000;
1536 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
1537 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
1538 CacheTestFillBuffer(buffer1->data(), kSize1, false);
1539 memset(buffer2->data(), 0, kSize2);
1540
1541 EXPECT_EQ(2000,
1542 WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
1543 EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
1544 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
1545 EXPECT_EQ(key, entry->GetKey());
1546 EXPECT_TRUE(initial < entry->GetLastModified());
1547 EXPECT_TRUE(initial < entry->GetLastUsed());
1548
1549 entry->Close();
1550 }
1551
1552 TEST_F(DiskCacheEntryTest, DoomedEntry) {
1553 InitCache();
1554 DoomedEntry(0);
1555 }
1556
1557 TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
1558 SetMemoryOnlyMode();
1559 InitCache();
1560 DoomedEntry(0);
1561 }
1562
1563 // Tests that we discard entries if the data is missing.
1564 TEST_F(DiskCacheEntryTest, MissingData) {
1565 InitCache();
1566
1567 std::string key("the first key");
1568 disk_cache::Entry* entry;
1569 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1570
1571 // Write to an external file.
1572 const int kSize = 20000;
1573 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1574 CacheTestFillBuffer(buffer->data(), kSize, false);
1575 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1576 entry->Close();
1577 FlushQueueForTest();
1578
1579 disk_cache::Addr address(0x80000001);
1580 base::FilePath name = cache_impl_->GetFileName(address);
1581 EXPECT_TRUE(base::DeleteFile(name, false));
1582
1583 // Attempt to read the data.
1584 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1585 EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
1586 ReadData(entry, 0, 0, buffer.get(), kSize));
1587 entry->Close();
1588
1589 // The entry should be gone.
1590 ASSERT_NE(net::OK, OpenEntry(key, &entry));
1591 }
1592
1593 // Test that child entries in a memory cache backend are not visible from
1594 // enumerations.
1595 TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
1596 SetMemoryOnlyMode();
1597 InitCache();
1598
1599 const int kSize = 4096;
1600 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1601 CacheTestFillBuffer(buf->data(), kSize, false);
1602
1603 std::string key("the first key");
1604 disk_cache::Entry* parent_entry;
1605 ASSERT_EQ(net::OK, CreateEntry(key, &parent_entry));
1606
1607 // Writes to the parent entry.
1608 EXPECT_EQ(kSize,
1609 parent_entry->WriteSparseData(
1610 0, buf.get(), kSize, net::CompletionCallback()));
1611
1612 // This write creates a child entry and writes to it.
1613 EXPECT_EQ(kSize,
1614 parent_entry->WriteSparseData(
1615 8192, buf.get(), kSize, net::CompletionCallback()));
1616
1617 parent_entry->Close();
1618
1619 // Perform the enumerations.
1620 scoped_ptr<TestIterator> iter = CreateIterator();
1621 disk_cache::Entry* entry = NULL;
1622 int count = 0;
1623 while (iter->OpenNextEntry(&entry) == net::OK) {
1624 ASSERT_TRUE(entry != NULL);
1625 ++count;
1626 disk_cache::MemEntryImpl* mem_entry =
1627 reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
1628 EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
1629 mem_entry->Close();
1630 }
1631 EXPECT_EQ(1, count);
1632 }
1633
1634 // Writes |buf_1| to offset and reads it back as |buf_2|.
1635 void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
1636 net::IOBuffer* buf_1, int size, net::IOBuffer* buf_2) {
1637 net::TestCompletionCallback cb;
1638
1639 memset(buf_2->data(), 0, size);
1640 int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1641 EXPECT_EQ(0, cb.GetResult(ret));
1642
1643 ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
1644 EXPECT_EQ(size, cb.GetResult(ret));
1645
1646 ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1647 EXPECT_EQ(size, cb.GetResult(ret));
1648
1649 EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
1650 }
1651
1652 // Reads |size| bytes from |entry| at |offset| and verifies that they are the
1653 // same as the content of the provided |buffer|.
1654 void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
1655 int size) {
1656 net::TestCompletionCallback cb;
1657
1658 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(size));
1659 memset(buf_1->data(), 0, size);
1660 int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
1661 EXPECT_EQ(size, cb.GetResult(ret));
1662 EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
1663 }
1664
1665 void DiskCacheEntryTest::BasicSparseIO() {
1666 std::string key("the first key");
1667 disk_cache::Entry* entry;
1668 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1669
1670 const int kSize = 2048;
1671 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1672 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1673 CacheTestFillBuffer(buf_1->data(), kSize, false);
1674
1675 // Write at offset 0.
1676 VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());
1677
1678 // Write at offset 0x400000 (4 MB).
1679 VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());
1680
1681 // Write at offset 0x800000000 (32 GB).
1682 VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());
1683
1684 entry->Close();
1685
1686 // Check everything again.
1687 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1688 VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
1689 VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
1690 VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
1691 entry->Close();
1692 }
1693
1694 TEST_F(DiskCacheEntryTest, BasicSparseIO) {
1695 InitCache();
1696 BasicSparseIO();
1697 }
1698
1699 TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseIO) {
1700 SetMemoryOnlyMode();
1701 InitCache();
1702 BasicSparseIO();
1703 }
1704
1705 void DiskCacheEntryTest::HugeSparseIO() {
1706 std::string key("the first key");
1707 disk_cache::Entry* entry;
1708 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1709
1710 // Write 1.2 MB so that we cover multiple entries.
1711 const int kSize = 1200 * 1024;
1712 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1713 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1714 CacheTestFillBuffer(buf_1->data(), kSize, false);
1715
1716 // Write at offset 0x20F0000 (33 MB - 64 KB).
1717 VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
1718 entry->Close();
1719
1720 // Check it again.
1721 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1722 VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
1723 entry->Close();
1724 }
1725
1726 TEST_F(DiskCacheEntryTest, HugeSparseIO) {
1727 InitCache();
1728 HugeSparseIO();
1729 }
1730
1731 TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseIO) {
1732 SetMemoryOnlyMode();
1733 InitCache();
1734 HugeSparseIO();
1735 }
1736
1737 void DiskCacheEntryTest::GetAvailableRange() {
1738 std::string key("the first key");
1739 disk_cache::Entry* entry;
1740 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1741
1742 const int kSize = 16 * 1024;
1743 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1744 CacheTestFillBuffer(buf->data(), kSize, false);
1745
1746 // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
1747 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
1748 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));
1749
1750 // We stop at the first empty block.
1751 int64 start;
1752 net::TestCompletionCallback cb;
1753 int rv = entry->GetAvailableRange(
1754 0x20F0000, kSize * 2, &start, cb.callback());
1755 EXPECT_EQ(kSize, cb.GetResult(rv));
1756 EXPECT_EQ(0x20F0000, start);
1757
1758 start = 0;
1759 rv = entry->GetAvailableRange(0, kSize, &start, cb.callback());
1760 EXPECT_EQ(0, cb.GetResult(rv));
1761 rv = entry->GetAvailableRange(
1762 0x20F0000 - kSize, kSize, &start, cb.callback());
1763 EXPECT_EQ(0, cb.GetResult(rv));
1764 rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback());
1765 EXPECT_EQ(kSize, cb.GetResult(rv));
1766 EXPECT_EQ(0x20F0000, start);
1767
1768 // We should be able to Read based on the results of GetAvailableRange.
1769 start = -1;
1770 rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback());
1771 EXPECT_EQ(0, cb.GetResult(rv));
1772 rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback());
1773 EXPECT_EQ(0, cb.GetResult(rv));
1774
1775 start = 0;
1776 rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback());
1777 EXPECT_EQ(0x2000, cb.GetResult(rv));
1778 EXPECT_EQ(0x20F2000, start);
1779 EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize));
1780
1781 // Make sure that we respect the |len| argument.
1782 start = 0;
1783 rv = entry->GetAvailableRange(
1784 0x20F0001 - kSize, kSize, &start, cb.callback());
1785 EXPECT_EQ(1, cb.GetResult(rv));
1786 EXPECT_EQ(0x20F0000, start);
1787
1788 entry->Close();
1789 }
1790
1791 TEST_F(DiskCacheEntryTest, GetAvailableRange) {
1792 InitCache();
1793 GetAvailableRange();
1794 }
1795
1796 TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
1797 SetMemoryOnlyMode();
1798 InitCache();
1799 GetAvailableRange();
1800 }
1801
1802 // Tests that non-sequential writes that are not aligned with the minimum sparse
1803 // data granularity (1024 bytes) do in fact result in dropped data.
TEST_F(DiskCacheEntryTest, SparseWriteDropped) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 180;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Do small writes (180 bytes) that get increasingly close to a 1024-byte
  // boundary. All data should be dropped until a boundary is crossed, at which
  // point the data after the boundary is saved (at least for a while).
  int offset = 1024 - 500;
  int rv = 0;
  net::TestCompletionCallback cb;
  int64 start;
  for (int i = 0; i < 5; i++) {
    // Check result of last GetAvailableRange.
    EXPECT_EQ(0, rv);

    rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));

    // Nothing should be stored just before |offset|.
    rv = entry->GetAvailableRange(offset - 100, kSize, &start, cb.callback());
    EXPECT_EQ(0, cb.GetResult(rv));

    rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
    rv = cb.GetResult(rv);
    if (!rv) {
      // No range reported at |offset|; reading back must also return no data.
      rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
      EXPECT_EQ(0, cb.GetResult(rv));
      rv = 0;
    }
    offset += 1024 * i + 100;
  }

  // The last write started 100 bytes below a boundary, so there should be 80
  // bytes after the boundary.
  EXPECT_EQ(80, rv);
  EXPECT_EQ(1024 * 7, start);
  rv = entry->ReadSparseData(start, buf_2.get(), kSize, cb.callback());
  EXPECT_EQ(80, cb.GetResult(rv));
  EXPECT_EQ(0, memcmp(buf_1.get()->data() + 100, buf_2.get()->data(), 80));

  // And even that part is dropped when another write changes the offset.
  offset = start;
  rv = entry->WriteSparseData(0, buf_1.get(), kSize, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));

  rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  entry->Close();
}
1859
1860 // Tests that small sequential writes are not dropped.
1861 TEST_F(DiskCacheEntryTest, SparseSquentialWriteNotDropped) {
1862 InitCache();
1863 std::string key("the first key");
1864 disk_cache::Entry* entry;
1865 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1866
1867 const int kSize = 180;
1868 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1869 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1870 CacheTestFillBuffer(buf_1->data(), kSize, false);
1871
1872 // Any starting offset is fine as long as it is 1024-bytes aligned.
1873 int rv = 0;
1874 net::TestCompletionCallback cb;
1875 int64 start;
1876 int64 offset = 1024 * 11;
1877 for (; offset < 20000; offset += kSize) {
1878 rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
1879 EXPECT_EQ(kSize, cb.GetResult(rv));
1880
1881 rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
1882 EXPECT_EQ(kSize, cb.GetResult(rv));
1883 EXPECT_EQ(offset, start);
1884
1885 rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
1886 EXPECT_EQ(kSize, cb.GetResult(rv));
1887 EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));
1888 }
1889
1890 entry->Close();
1891 FlushQueueForTest();
1892
1893 // Verify again the last write made.
1894 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1895 offset -= kSize;
1896 rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
1897 EXPECT_EQ(kSize, cb.GetResult(rv));
1898 EXPECT_EQ(offset, start);
1899
1900 rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
1901 EXPECT_EQ(kSize, cb.GetResult(rv));
1902 EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));
1903
1904 entry->Close();
1905 }
1906
// Verifies that CouldBeSparse() is true only for entries holding sparse data,
// both while the entry is open and after a close/reopen cycle.
void DiskCacheEntryTest::CouldBeSparse() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));

  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // The sparse state must survive closing and reopening the entry.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // Now verify a regular entry.
  key.assign("another key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());

  // Regular writes to all three streams must not mark the entry sparse.
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));

  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();
}
1942
// CouldBeSparse() coverage for the default (blockfile) backend.
TEST_F(DiskCacheEntryTest, CouldBeSparse) {
  InitCache();
  CouldBeSparse();
}
1947
// CouldBeSparse() coverage for the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryCouldBeSparse) {
  SetMemoryOnlyMode();
  InitCache();
  CouldBeSparse();
}
1953
// Exercises sparse reads/writes at offsets that are not aligned to the
// in-memory backend's internal block size, verifying data round-trips intact.
TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // This loop writes back to back starting from offset 0 and 9000.
  for (int i = 0; i < kSize; i += 1024) {
    // buf_3 is a non-owning view into buf_1 at offset |i|.
    scoped_refptr<net::WrappedIOBuffer> buf_3(
        new net::WrappedIOBuffer(buf_1->data() + i));
    VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
    VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
  }

  // Make sure we have data written.
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);

  // This tests a large write that spans 3 entries from a misaligned offset.
  VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());

  entry->Close();
}
1984
// Checks GetAvailableRange() results for the in-memory backend when the
// stored ranges start and end at misaligned offsets.
TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  disk_cache::Entry* entry;
  std::string key("the first key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Writes in the middle of an entry.
  EXPECT_EQ(
      1024,
      entry->WriteSparseData(0, buf.get(), 1024, net::CompletionCallback()));
  EXPECT_EQ(
      1024,
      entry->WriteSparseData(5120, buf.get(), 1024, net::CompletionCallback()));
  EXPECT_EQ(1024,
            entry->WriteSparseData(
                10000, buf.get(), 1024, net::CompletionCallback()));

  // Writes in the middle of an entry and spans 2 child entries.
  EXPECT_EQ(8192,
            entry->WriteSparseData(
                50000, buf.get(), 8192, net::CompletionCallback()));

  int64 start;
  net::TestCompletionCallback cb;
  // Test that we stop at a discontinuous child at the second block.
  int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(0, start);

  // Test that number of bytes is reported correctly when we start from the
  // middle of a filled region.
  rv = entry->GetAvailableRange(512, 10000, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(512, start);

  // Test that we found bytes in the child of next block.
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that the desired length is respected. It starts within a filled
  // region.
  rv = entry->GetAvailableRange(5500, 512, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(5500, start);

  // Test that the desired length is respected. It starts before a filled
  // region.
  rv = entry->GetAvailableRange(5000, 620, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that multiple blocks are scanned.
  rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback());
  EXPECT_EQ(8192, cb.GetResult(rv));
  EXPECT_EQ(50000, start);

  entry->Close();
}
2050
// Writes sparse data to an entry across a close/reopen, then checks the
// backend's total entry count. The blockfile backend is expected to report
// one extra entry (presumably a sparse child entry; see CleanupSparseEntry
// below, which manipulates such children directly).
void DiskCacheEntryTest::UpdateSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
  entry1->Close();

  // Write at offset 2048.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());

  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("the second key", &entry2));

  entry1->Close();
  entry2->Close();
  FlushQueueForTest();
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(3, cache_->GetEntryCount());
}
2080
// UpdateSparseEntry() coverage for the blockfile backend in media-cache mode.
TEST_F(DiskCacheEntryTest, UpdateSparseEntry) {
  SetCacheType(net::MEDIA_CACHE);
  InitCache();
  UpdateSparseEntry();
}
2086
// UpdateSparseEntry() coverage for the in-memory backend in media-cache mode.
TEST_F(DiskCacheEntryTest, MemoryOnlyUpdateSparseEntry) {
  SetMemoryOnlyMode();
  SetCacheType(net::MEDIA_CACHE);
  InitCache();
  UpdateSparseEntry();
}
2093
// Dooms two sparse entries -- one while still open, one after it is fully
// saved -- and verifies the cache ends up empty. Timing-sensitive on disk
// backends; see the comments below about the extra wait.
void DiskCacheEntryTest::DoomSparseEntry() {
  std::string key1("the first key");
  std::string key2("the second key");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry1));
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
    // Keep the second map under the default size.
    if (i < 9)
      EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));

    // Offsets grow geometrically, spreading the writes over a wide range.
    offset *= 4;
  }

  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(15, cache_->GetEntryCount());

  // Doom the first entry while it's still open.
  entry1->Doom();
  entry1->Close();
  entry2->Close();

  // Doom the second entry after it's fully saved.
  EXPECT_EQ(net::OK, DoomEntry(key2));

  // Make sure we do all needed work. This may fail for entry2 if between Close
  // and DoomEntry the system decides to remove all traces of the file from the
  // system cache so we don't see that there is pending IO.
  base::MessageLoop::current()->RunUntilIdle();

  if (memory_only_) {
    EXPECT_EQ(0, cache_->GetEntryCount());
  } else {
    if (5 == cache_->GetEntryCount()) {
      // Most likely we are waiting for the result of reading the sparse info
      // (it's always async on Posix so it is easy to miss). Unfortunately we
      // don't have any signal to watch for so we can only wait.
      base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
      base::MessageLoop::current()->RunUntilIdle();
    }
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}
2147
// DoomSparseEntry() coverage for the blockfile backend, running cache
// operations on the current thread.
TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
  UseCurrentThread();
  InitCache();
  DoomSparseEntry();
}
2153
// DoomSparseEntry() coverage for the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomSparseEntry();
}
2159
2160 // A CompletionCallback wrapper that deletes the cache from within the callback.
2161 // The way a CompletionCallback works means that all tasks (even new ones)
2162 // are executed by the message loop before returning to the caller so the only
2163 // way to simulate a race is to execute what we want on the callback.
class SparseTestCompletionCallback: public net::TestCompletionCallback {
 public:
  // Takes ownership of |cache| so it can be destroyed from the callback.
  explicit SparseTestCompletionCallback(scoped_ptr<disk_cache::Backend> cache)
      : cache_(cache.Pass()) {
  }

 private:
  // Destroys the backend before recording the result, simulating a backend
  // deletion racing with the completion of the operation.
  void SetResult(int result) override {
    cache_.reset();
    TestCompletionCallback::SetResult(result);
  }

  scoped_ptr<disk_cache::Backend> cache_;
  DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback);
};
2179
2180 // Tests that we don't crash when the backend is deleted while we are working
2181 // deleting the sub-entries of a sparse entry.
TEST_F(DiskCacheEntryTest, DoomSparseEntry2) {
  UseCurrentThread();
  InitCache();
  std::string key("the key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    offset *= 4;
  }
  EXPECT_EQ(9, cache_->GetEntryCount());

  entry->Close();
  // Hand backend ownership to the callback; it will delete the backend from
  // inside SetResult() while DoomEntry() is still completing.
  disk_cache::Backend* cache = cache_.get();
  SparseTestCompletionCallback cb(cache_.Pass());
  int rv = cache->DoomEntry(key, cb.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);
  EXPECT_EQ(net::OK, cb.WaitForResult());
}
2210
// Exercises sparse I/O that is not aligned to the sparse block size. The
// expected values differ between backends because the blockfile backend
// rounds to 1KB blocks while the memory/simple backends keep byte ranges.
void DiskCacheEntryTest::PartialSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // We should be able to deal with IO that is not aligned to the block size
  // of a sparse entry, at least to write a big range without leaving holes.
  const int kSize = 4 * 1024;
  const int kSmallSize = 128;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // The first write is just to extend the entry. The third write occupies
  // a 1KB block partially, it may not be written internally depending on the
  // implementation.
  EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
  EXPECT_EQ(kSmallSize,
            WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buf2(new net::IOBuffer(kSize));
  memset(buf2->data(), 0, kSize);
  // The hole at 8000 must read as empty.
  EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));

  // Only the tail of the [500, 500 + kSize) write overlaps [kSize, 2*kSize).
  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));

  // This read should not change anything.
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
  else
    EXPECT_EQ(0, ReadSparseData(entry, 24000, buf2.get(), kSize));

  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));

  int rv;
  int64 start;
  net::TestCompletionCallback cb;
  if (memory_only_ || simple_cache_mode_) {
    rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
    EXPECT_EQ(100, cb.GetResult(rv));
    EXPECT_EQ(500, start);
  } else {
    rv = entry->GetAvailableRange(0, 2048, &start, cb.callback());
    EXPECT_EQ(1024, cb.GetResult(rv));
    EXPECT_EQ(1024, start);
  }
  rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(kSize, start);
  rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(3616, cb.GetResult(rv));
  else
    EXPECT_EQ(3072, cb.GetResult(rv));

  EXPECT_EQ(20 * 1024, start);

  // 1. Query before a filled 1KB block.
  // 2. Query within a filled 1KB block.
  // 3. Query beyond a filled 1KB block.
  if (memory_only_ || simple_cache_mode_) {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3496, cb.GetResult(rv));
    EXPECT_EQ(20000, start);
  } else {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3016, cb.GetResult(rv));
    EXPECT_EQ(20480, start);
  }
  rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback());
  EXPECT_EQ(1523, cb.GetResult(rv));
  EXPECT_EQ(3073, start);
  rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  EXPECT_EQ(4600, start);

  // Now make another write and verify that there is no hole in between.
  EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv));
  EXPECT_EQ(1024, start);
  EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));

  entry->Close();
}
2303
// PartialSparseEntry() coverage for the blockfile backend.
TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
  InitCache();
  PartialSparseEntry();
}
2308
// PartialSparseEntry() coverage for the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  PartialSparseEntry();
}
2314
2315 // Tests that corrupt sparse children are removed automatically.
TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // Three writes ~1MB apart create three sparse children plus the parent.
  const int k1Meg = 1024 * 1024;
  EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  entry->Close();
  EXPECT_EQ(4, cache_->GetEntryCount());

  // Collect the keys of two child entries by iterating the cache.
  scoped_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  std::string child_key[2];
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(entry != NULL);
    // Writing to an entry will alter the LRU list and invalidate the iterator.
    if (entry->GetKey() != key && count < 2)
      child_key[count++] = entry->GetKey();
    entry->Close();
  }
  for (int i = 0; i < 2; i++) {
    ASSERT_EQ(net::OK, OpenEntry(child_key[i], &entry));
    // Overwrite the header's magic and signature.
    EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
    entry->Close();
  }

  EXPECT_EQ(4, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Two children should be gone. One while reading and one while writing.
  EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));

  // We never touched this one.
  EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
  entry->Close();

  // We re-created one of the corrupt children.
  EXPECT_EQ(3, cache_->GetEntryCount());
}
2365
// Verifies CancelSparseIO() plus ReadyForSparseIO() notifications while a
// large sparse write is in flight. Inherently racy against the write's
// completion, hence the have_result() checks.
TEST_F(DiskCacheEntryTest, CancelSparseIO) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 40 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // This will open and write two "real" entries.
  net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
  int rv = entry->WriteSparseData(
      1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);

  int64 offset = 0;
  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  rv = cb5.GetResult(rv);
  if (!cb1.have_result()) {
    // We may or may not have finished writing to the entry. If we have not,
    // we cannot start another operation at this time.
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
  }

  // We cancel the pending operation, and register multiple notifications.
  entry->CancelSparseIO();
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb2.callback()));
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb3.callback()));
  entry->CancelSparseIO();  // Should be a no op at this point.
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb4.callback()));

  if (!cb1.have_result()) {
    // While cancellation is pending, new sparse operations are rejected.
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->ReadSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
  }

  // Now see if we receive all notifications. Note that we should not be able
  // to write everything (unless the timing of the system is really weird).
  rv = cb1.WaitForResult();
  EXPECT_TRUE(rv == 4096 || rv == kSize);
  EXPECT_EQ(net::OK, cb2.WaitForResult());
  EXPECT_EQ(net::OK, cb3.WaitForResult());
  EXPECT_EQ(net::OK, cb4.WaitForResult());

  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  EXPECT_EQ(0, cb5.GetResult(rv));
  entry->Close();
}
2420
2421 // Tests that we perform sanity checks on an entry's key. Note that there are
2422 // other tests that exercise sanity checks by using saved corrupt files.
TEST_F(DiskCacheEntryTest, KeySanityCheck) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Reach into the blockfile entry's on-disk record to corrupt it directly.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // We have reserved space for a short key (one block), let's say that the key
  // takes more than one block, and remove the NULLs after the actual key.
  store->key_len = 800;
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}
2446
// InternalAsyncIO() coverage for the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheInternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  InternalAsyncIO();
}
2452
// ExternalAsyncIO() coverage for the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheExternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  ExternalAsyncIO();
}
2458
// ReleaseBuffer() coverage for every stream of the simple cache; the cache is
// emptied between iterations so each stream starts from a clean slate.
TEST_F(DiskCacheEntryTest, SimpleCacheReleaseBuffer) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReleaseBuffer(i);
  }
}
2467
// StreamAccess() coverage for the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheStreamAccess) {
  SetSimpleCacheMode();
  InitCache();
  StreamAccess();
}
2473
// GetKey() coverage for the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheGetKey) {
  SetSimpleCacheMode();
  InitCache();
  GetKey();
}
2479
// GetTimes() coverage for every stream of the simple cache.
TEST_F(DiskCacheEntryTest, SimpleCacheGetTimes) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    GetTimes(i);
  }
}
2488
// GrowData() coverage for every stream of the simple cache.
TEST_F(DiskCacheEntryTest, SimpleCacheGrowData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    GrowData(i);
  }
}
2497
// TruncateData() coverage for every stream of the simple cache.
TEST_F(DiskCacheEntryTest, SimpleCacheTruncateData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    TruncateData(i);
  }
}
2506
// ZeroLengthIO() coverage for every stream of the simple cache.
TEST_F(DiskCacheEntryTest, SimpleCacheZeroLengthIO) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ZeroLengthIO(i);
  }
}
2515
// SizeAtCreate() coverage for the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheSizeAtCreate) {
  SetSimpleCacheMode();
  InitCache();
  SizeAtCreate();
}
2521
// ReuseEntry() with a 20KB payload (external-sized relative to the 200KB
// cache limit), run against every stream of the simple cache.
TEST_F(DiskCacheEntryTest, SimpleCacheReuseExternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(200 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReuseEntry(20 * 1024, i);
  }
}
2531
// ReuseEntry() with a 10KB payload (internal-sized relative to the 100KB
// cache limit), run against every stream of the simple cache.
TEST_F(DiskCacheEntryTest, SimpleCacheReuseInternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(100 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReuseEntry(10 * 1024, i);
  }
}
2541
// SizeChanges() coverage for every stream of the simple cache.
TEST_F(DiskCacheEntryTest, SimpleCacheSizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    SizeChanges(i);
  }
}
2550
// InvalidData() coverage for every stream of the simple cache.
TEST_F(DiskCacheEntryTest, SimpleCacheInvalidData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    InvalidData(i);
  }
}
2559
TEST_F(DiskCacheEntryTest, SimpleCacheReadWriteDestroyBuffer) {
  // Proving that the test works well with optimistic operations enabled is
  // subtle, instead run only in APP_CACHE mode to disable optimistic
  // operations. Stream 0 always uses optimistic operations, so the test is not
  // run on stream 0.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  for (int i = 1; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReadWriteDestroyBuffer(i);
  }
}
2573
// DoomNormalEntry() coverage for the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomNormalEntry();
}
2579
// DoomEntryNextToOpenEntry() coverage for the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntryNextToOpenEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomEntryNextToOpenEntry();
}
2585
// DoomedEntry() coverage for the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomedEntry) {
  SetSimpleCacheMode();
  InitCache();
  // Stream 2 is excluded because the implementation does not support writing to
  // it on a doomed entry, if it was previously lazily omitted.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount - 1; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    DoomedEntry(i);
  }
}
2596
2597 // Creates an entry with corrupted last byte in stream 0.
2598 // Requires SimpleCacheMode.
// Creates an entry keyed |key|, writes a short payload to stream 1, then
// flips the last data byte directly in the backing file so the stored
// checksum no longer matches. On success, returns true and stores the
// payload size in |*data_size|; returns false if any step fails.
bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const std::string& key,
                                                         int* data_size) {
  disk_cache::Entry* entry = NULL;

  if (CreateEntry(key, &entry) != net::OK || !entry) {
    LOG(ERROR) << "Could not create entry";
    return false;
  }

  const char data[] = "this is very good data";
  const int kDataSize = arraysize(data);
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
  base::strlcpy(buffer->data(), data, kDataSize);

  EXPECT_EQ(kDataSize, WriteData(entry, 1, 0, buffer.get(), kDataSize, false));
  entry->Close();
  entry = NULL;

  // Corrupt the last byte of the data.
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  base::File entry_file0(entry_file0_path,
                         base::File::FLAG_WRITE | base::File::FLAG_OPEN);
  if (!entry_file0.IsValid())
    return false;

  // Offset of the last payload byte within the on-disk file layout
  // (header, then key, then data).
  int64 file_offset =
      sizeof(disk_cache::SimpleFileHeader) + key.size() + kDataSize - 2;
  EXPECT_EQ(1, entry_file0.Write(file_offset, "X", 1));
  *data_size = kDataSize;
  return true;
}
2631
2632 // Tests that the simple cache can detect entries that have bad data.
TEST_F(DiskCacheEntryTest, SimpleCacheBadChecksum) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));

  disk_cache::Entry* entry = NULL;

  // Open the entry.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  // Reading the corrupted stream must surface the checksum mismatch.
  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
}
2653
2654 // Tests that an entry that has had an IO error occur can still be Doomed().
TEST_F(DiskCacheEntryTest, SimpleCacheErrorThenDoom) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));

  disk_cache::Entry* entry = NULL;

  // Open the entry, forcing an IO error.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));

  entry->Doom();  // Should not crash.
}
2677
2678 bool TruncatePath(const base::FilePath& file_path, int64 length) {
2679 base::File file(file_path, base::File::FLAG_WRITE | base::File::FLAG_OPEN);
2680 if (!file.IsValid())
2681 return false;
2682 return file.SetLength(length);
2683 }
2684
TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  EXPECT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // Force the entry to flush to disk, so subsequent platform file operations
  // succeed.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  entry->Close();
  entry = NULL;

  // Truncate the file such that the length isn't sufficient to have an EOF
  // record.
  int kTruncationBytes = -implicit_cast<int>(sizeof(disk_cache::SimpleFileEOF));
  const base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  const int64 invalid_size =
      disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
                                                             kTruncationBytes);
  EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
  // Opening the truncated entry must fail rather than read a bogus record.
  EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}
2716
2717 TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsBasic) {
2718 // Test sequence:
2719 // Create, Write, Read, Close.
2720 SetCacheType(net::APP_CACHE); // APP_CACHE doesn't use optimistic operations.
2721 SetSimpleCacheMode();
2722 InitCache();
2723 disk_cache::Entry* const null_entry = NULL;
2724 
2725 disk_cache::Entry* entry = NULL;
2726 EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
2727 ASSERT_NE(null_entry, entry);
2728 ScopedEntryPtr entry_closer(entry);
2729 
// Write 10 bytes to data stream 1 and read them back through the blocking
// test helpers, checking both calls report the full buffer size.
2730 const int kBufferSize = 10;
2731 scoped_refptr<net::IOBufferWithSize> write_buffer(
2732 new net::IOBufferWithSize(kBufferSize));
2733 CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
2734 EXPECT_EQ(
2735 write_buffer->size(),
2736 WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));
2737 
2738 scoped_refptr<net::IOBufferWithSize> read_buffer(
2739 new net::IOBufferWithSize(kBufferSize));
2740 EXPECT_EQ(read_buffer->size(),
2741 ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
2742 }
2743
2744 TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsDontBlock) {
2745 // Test sequence:
2746 // Create, Write, Close.
2747 SetCacheType(net::APP_CACHE); // APP_CACHE doesn't use optimistic operations.
2748 SetSimpleCacheMode();
2749 InitCache();
2750 disk_cache::Entry* const null_entry = NULL;
2751 
2752 MessageLoopHelper helper;
2753 CallbackTest create_callback(&helper, false);
2754 
2755 int expected_callback_runs = 0;
2756 const int kBufferSize = 10;
2757 scoped_refptr<net::IOBufferWithSize> write_buffer(
2758 new net::IOBufferWithSize(kBufferSize));
2759 
2760 disk_cache::Entry* entry = NULL;
2761 EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
2762 ASSERT_NE(null_entry, entry);
2763 ScopedEntryPtr entry_closer(entry);
2764 
2765 CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
2766 CallbackTest write_callback(&helper, false);
// In APP_CACHE mode the write must not complete synchronously: it returns
// ERR_IO_PENDING and signals completion via the callback instead.
2767 int ret = entry->WriteData(
2768 1,
2769 0,
2770 write_buffer.get(),
2771 write_buffer->size(),
2772 base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
2773 false);
2774 ASSERT_EQ(net::ERR_IO_PENDING, ret);
2775 helper.WaitUntilCacheIoFinished(++expected_callback_runs);
2776 }
2777
2778 TEST_F(DiskCacheEntryTest,
2779 SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting) {
2780 // Test sequence:
2781 // Create, Write, Read, Close.
2782 SetCacheType(net::APP_CACHE); // APP_CACHE doesn't use optimistic operations.
2783 SetSimpleCacheMode();
2784 InitCache();
2785 disk_cache::Entry* const null_entry = NULL;
2786 MessageLoopHelper helper;
2787 
2788 disk_cache::Entry* entry = NULL;
2789 // Note that |entry| is only set once CreateEntry() completed which is why we
2790 // have to wait (i.e. use the helper CreateEntry() function).
2791 EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
2792 ASSERT_NE(null_entry, entry);
2793 ScopedEntryPtr entry_closer(entry);
2794 
2795 const int kBufferSize = 10;
2796 scoped_refptr<net::IOBufferWithSize> write_buffer(
2797 new net::IOBufferWithSize(kBufferSize));
2798 CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
2799 CallbackTest write_callback(&helper, false);
// Issue the write and the read back-to-back without waiting in between; both
// must queue (ERR_IO_PENDING) and complete in order.
2800 int ret = entry->WriteData(
2801 1,
2802 0,
2803 write_buffer.get(),
2804 write_buffer->size(),
2805 base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
2806 false);
2807 EXPECT_EQ(net::ERR_IO_PENDING, ret);
2808 int expected_callback_runs = 1;
2809 
2810 scoped_refptr<net::IOBufferWithSize> read_buffer(
2811 new net::IOBufferWithSize(kBufferSize));
2812 CallbackTest read_callback(&helper, false);
2813 ret = entry->ReadData(
2814 1,
2815 0,
2816 read_buffer.get(),
2817 read_buffer->size(),
2818 base::Bind(&CallbackTest::Run, base::Unretained(&read_callback)));
2819 EXPECT_EQ(net::ERR_IO_PENDING, ret);
2820 ++expected_callback_runs;
2821 
// Wait for both callbacks at once, then verify the read observed the write.
2822 helper.WaitUntilCacheIoFinished(expected_callback_runs);
2823 ASSERT_EQ(read_buffer->size(), write_buffer->size());
2824 EXPECT_EQ(
2825 0,
2826 memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
2827 }
2828
2829 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
2830 // Test sequence:
2831 // Create, Write, Read, Write, Read, Close.
2832 SetSimpleCacheMode();
2833 InitCache();
2834 disk_cache::Entry* null = NULL;
2835 const char key[] = "the first key";
2836 
2837 MessageLoopHelper helper;
2838 CallbackTest callback1(&helper, false);
2839 CallbackTest callback2(&helper, false);
2840 CallbackTest callback3(&helper, false);
2841 CallbackTest callback4(&helper, false);
2842 CallbackTest callback5(&helper, false);
2843 
// |expected| counts how many callbacks must run before the waits below.
2844 int expected = 0;
2845 const int kSize1 = 10;
2846 const int kSize2 = 20;
2847 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2848 scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
2849 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
2850 scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
2851 CacheTestFillBuffer(buffer1->data(), kSize1, false);
2852 CacheTestFillBuffer(buffer2->data(), kSize2, false);
2853 
2854 disk_cache::Entry* entry = NULL;
2855 // Create is optimistic, must return OK.
2856 ASSERT_EQ(net::OK,
2857 cache_->CreateEntry(key, &entry,
2858 base::Bind(&CallbackTest::Run,
2859 base::Unretained(&callback1))));
2860 EXPECT_NE(null, entry);
2861 ScopedEntryPtr entry_closer(entry);
2862 
2863 // This write may or may not be optimistic (it depends if the previous
2864 // optimistic create already finished by the time we call the write here).
2865 int ret = entry->WriteData(
2866 1,
2867 0,
2868 buffer1.get(),
2869 kSize1,
2870 base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
2871 false);
2872 EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
2873 if (net::ERR_IO_PENDING == ret)
2874 expected++;
2875 
2876 // This Read must not be optimistic, since we don't support that yet.
2877 EXPECT_EQ(net::ERR_IO_PENDING,
2878 entry->ReadData(
2879 1,
2880 0,
2881 buffer1_read.get(),
2882 kSize1,
2883 base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
2884 expected++;
2885 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
2886 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
2887 
2888 // At this point after waiting, the pending operations queue on the entry
2889 // should be empty, so the next Write operation must run as optimistic.
2890 EXPECT_EQ(kSize2,
2891 entry->WriteData(
2892 1,
2893 0,
2894 buffer2.get(),
2895 kSize2,
2896 base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
2897 false));
2898 
2899 // Lets do another read so we block until both the write and the read
2900 // operation finishes and we can then test for HasOneRef() below.
2901 EXPECT_EQ(net::ERR_IO_PENDING,
2902 entry->ReadData(
2903 1,
2904 0,
2905 buffer2_read.get(),
2906 kSize2,
2907 base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
2908 expected++;
2909 
2910 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
2911 EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));
2912 
2913 // Check that we are not leaking.
2914 EXPECT_NE(entry, null);
2915 EXPECT_TRUE(
2916 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2917 }
2918
2919 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
2920 // Test sequence:
2921 // Create, Open, Close, Close.
2922 SetSimpleCacheMode();
2923 InitCache();
2924 disk_cache::Entry* null = NULL;
2925 const char key[] = "the first key";
2926 
2927 MessageLoopHelper helper;
2928 CallbackTest callback1(&helper, false);
2929 CallbackTest callback2(&helper, false);
2930 
// The create returns OK synchronously (optimistic), while the subsequent
// open of the same key must queue behind it and complete asynchronously.
2931 disk_cache::Entry* entry = NULL;
2932 ASSERT_EQ(net::OK,
2933 cache_->CreateEntry(key, &entry,
2934 base::Bind(&CallbackTest::Run,
2935 base::Unretained(&callback1))));
2936 EXPECT_NE(null, entry);
2937 ScopedEntryPtr entry_closer(entry);
2938 
2939 disk_cache::Entry* entry2 = NULL;
2940 ASSERT_EQ(net::ERR_IO_PENDING,
2941 cache_->OpenEntry(key, &entry2,
2942 base::Bind(&CallbackTest::Run,
2943 base::Unretained(&callback2))));
2944 ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));
2945 
// Both handles must refer to the same in-memory entry object.
2946 EXPECT_NE(null, entry2);
2947 EXPECT_EQ(entry, entry2);
2948 
2949 // We have to call close twice, since we called create and open above.
2950 entry->Close();
2951 
2952 // Check that we are not leaking.
2953 EXPECT_TRUE(
2954 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2955 }
2956
2957 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
2958 // Test sequence:
2959 // Create, Close, Open, Close.
2960 SetSimpleCacheMode();
2961 InitCache();
2962 disk_cache::Entry* null = NULL;
2963 const char key[] = "the first key";
2964 
// Optimistic create completes synchronously even with a null callback.
2965 disk_cache::Entry* entry = NULL;
2966 ASSERT_EQ(net::OK,
2967 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2968 EXPECT_NE(null, entry);
2969 entry->Close();
2970 
2971 net::TestCompletionCallback cb;
2972 disk_cache::Entry* entry2 = NULL;
2973 ASSERT_EQ(net::ERR_IO_PENDING,
2974 cache_->OpenEntry(key, &entry2, cb.callback()));
2975 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
2976 ScopedEntryPtr entry_closer(entry2);
2977 
// Reopening before the close fully settles yields the same entry object.
2978 EXPECT_NE(null, entry2);
2979 EXPECT_EQ(entry, entry2);
2980 
2981 // Check that we are not leaking.
2982 EXPECT_TRUE(
2983 static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
2984 }
2985
2986 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
2987 // Test sequence:
2988 // Create, Close, Write, Open, Open, Close, Write, Read, Close.
2989 SetSimpleCacheMode();
2990 InitCache();
2991 disk_cache::Entry* null = NULL;
2992 const char key[] = "the first key";
2993 
2994 net::TestCompletionCallback cb;
2995 const int kSize1 = 10;
2996 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2997 CacheTestFillBuffer(buffer1->data(), kSize1, false);
2998 disk_cache::Entry* entry = NULL;
2999 
3000 ASSERT_EQ(net::OK,
3001 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3002 EXPECT_NE(null, entry);
3003 entry->Close();
3004 
3005 // Lets do a Write so we block until both the Close and the Write
3006 // operation finishes. Write must fail since we are writing in a closed entry.
3007 EXPECT_EQ(
3008 net::ERR_IO_PENDING,
3009 entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
3010 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));
3011 
3012 // Finish running the pending tasks so that we fully complete the close
3013 // operation and destroy the entry object.
3014 base::MessageLoop::current()->RunUntilIdle();
3015 
3016 // At this point the |entry| must have been destroyed, and called
3017 // RemoveSelfFromBackend().
3018 disk_cache::Entry* entry2 = NULL;
3019 ASSERT_EQ(net::ERR_IO_PENDING,
3020 cache_->OpenEntry(key, &entry2, cb.callback()));
3021 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
3022 EXPECT_NE(null, entry2);
3023 
// A second open of the same key must return the already-open entry object.
3024 disk_cache::Entry* entry3 = NULL;
3025 ASSERT_EQ(net::ERR_IO_PENDING,
3026 cache_->OpenEntry(key, &entry3, cb.callback()));
3027 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
3028 EXPECT_NE(null, entry3);
3029 EXPECT_EQ(entry2, entry3);
3030 entry3->Close();
3031 
3032 // The previous Close doesn't actually close the entry since we opened it
3033 // twice, so the next Write operation must succeed and it must be able to
3034 // perform it optimistically, since there is no operation running on this
3035 // entry.
3036 EXPECT_EQ(kSize1,
3037 entry2->WriteData(
3038 1, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
3039 
3040 // Lets do another read so we block until both the write and the read
3041 // operation finishes and we can then test for HasOneRef() below.
3042 EXPECT_EQ(net::ERR_IO_PENDING,
3043 entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
3044 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3045 
3046 // Check that we are not leaking.
3047 EXPECT_TRUE(
3048 static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
3049 entry2->Close();
3050 }
3051
3052 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic5) {
3053 // Test sequence:
3054 // Create, Doom, Write, Read, Close.
3055 SetSimpleCacheMode();
3056 InitCache();
3057 disk_cache::Entry* null = NULL;
3058 const char key[] = "the first key";
3059 
3060 net::TestCompletionCallback cb;
3061 const int kSize1 = 10;
3062 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
3063 CacheTestFillBuffer(buffer1->data(), kSize1, false);
3064 disk_cache::Entry* entry = NULL;
3065 
3066 ASSERT_EQ(net::OK,
3067 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3068 EXPECT_NE(null, entry);
3069 ScopedEntryPtr entry_closer(entry);
3070 entry->Doom();
3071 
// Writing and reading through a doomed-but-still-open entry must succeed.
3072 EXPECT_EQ(
3073 net::ERR_IO_PENDING,
3074 entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
3075 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3076 
3077 EXPECT_EQ(net::ERR_IO_PENDING,
3078 entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
3079 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3080 
3081 // Check that we are not leaking.
3082 EXPECT_TRUE(
3083 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3084 }
3085
3086 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
3087 // Test sequence:
3088 // Create, Write, Doom, Doom, Read, Doom, Close.
3089 SetSimpleCacheMode();
3090 InitCache();
3091 disk_cache::Entry* null = NULL;
3092 const char key[] = "the first key";
3093 
3094 net::TestCompletionCallback cb;
3095 const int kSize1 = 10;
3096 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
3097 scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
3098 CacheTestFillBuffer(buffer1->data(), kSize1, false);
3099 disk_cache::Entry* entry = NULL;
3100 
3101 ASSERT_EQ(net::OK,
3102 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3103 EXPECT_NE(null, entry);
3104 ScopedEntryPtr entry_closer(entry);
3105 
3106 EXPECT_EQ(
3107 net::ERR_IO_PENDING,
3108 entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
3109 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3110 
// Repeated dooms must be tolerated without losing the entry's data.
3111 entry->Doom();
3112 entry->Doom();
3113 
3114 // This Read must not be optimistic, since we don't support that yet.
3115 EXPECT_EQ(net::ERR_IO_PENDING,
3116 entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
3117 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3118 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
3119 
3120 entry->Doom();
3121 }
3122
3123 // Confirm that IO buffers are not referenced by the Simple Cache after a write
3124 // completes.
3125 TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReleases) {
3126 SetSimpleCacheMode();
3127 InitCache();
3128 
3129 const char key[] = "the first key";
3130 disk_cache::Entry* entry = NULL;
3131 
3132 // First, an optimistic create.
3133 ASSERT_EQ(net::OK,
3134 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3135 ASSERT_TRUE(entry);
3136 ScopedEntryPtr entry_closer(entry);
3137 
3138 const int kWriteSize = 512;
3139 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kWriteSize));
// HasOneRef() on the buffer is the leak detector throughout this test.
3140 EXPECT_TRUE(buffer1->HasOneRef());
3141 CacheTestFillBuffer(buffer1->data(), kWriteSize, false);
3142 
3143 // An optimistic write happens only when there is an empty queue of pending
3144 // operations. To ensure the queue is empty, we issue a write and wait until
3145 // it completes.
3146 EXPECT_EQ(kWriteSize,
3147 WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
3148 EXPECT_TRUE(buffer1->HasOneRef());
3149 
3150 // Finally, we should perform an optimistic write and confirm that all
3151 // references to the IO buffer have been released.
3152 EXPECT_EQ(
3153 kWriteSize,
3154 entry->WriteData(
3155 1, 0, buffer1.get(), kWriteSize, net::CompletionCallback(), false));
3156 EXPECT_TRUE(buffer1->HasOneRef());
3157 }
3158
3159 TEST_F(DiskCacheEntryTest, SimpleCacheCreateDoomRace) {
3160 // Test sequence:
3161 // Create, Doom, Write, Close, Check files are not on disk anymore.
3162 SetSimpleCacheMode();
3163 InitCache();
3164 disk_cache::Entry* null = NULL;
3165 const char key[] = "the first key";
3166 
3167 net::TestCompletionCallback cb;
3168 const int kSize1 = 10;
3169 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
3170 CacheTestFillBuffer(buffer1->data(), kSize1, false);
3171 disk_cache::Entry* entry = NULL;
3172 
3173 ASSERT_EQ(net::OK,
3174 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3175 EXPECT_NE(null, entry);
3176 
3177 EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomEntry(key, cb.callback()));
3178 EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
3179 
// The write completes synchronously even though the entry has been doomed.
3180 EXPECT_EQ(
3181 kSize1,
3182 entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
3183 
3184 entry->Close();
3185 
3186 // Finish running the pending tasks so that we fully complete the close
3187 // operation and destroy the entry object.
3188 base::MessageLoop::current()->RunUntilIdle();
3189 
// After the doomed entry is fully closed, none of its backing files may
// remain on disk.
3190 for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
3191 base::FilePath entry_file_path = cache_path_.AppendASCII(
3192 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
3193 base::File::Info info;
3194 EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
3195 }
3196 }
3197
3198 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateRace) {
3199 // This test runs as APP_CACHE to make operations more synchronous. Test
3200 // sequence:
3201 // Create, Doom, Create.
3202 SetCacheType(net::APP_CACHE);
3203 SetSimpleCacheMode();
3204 InitCache();
3205 disk_cache::Entry* null = NULL;
3206 const char key[] = "the first key";
3207 
3208 net::TestCompletionCallback create_callback;
3209 
3210 disk_cache::Entry* entry1 = NULL;
3211 ASSERT_EQ(net::OK,
3212 create_callback.GetResult(
3213 cache_->CreateEntry(key, &entry1, create_callback.callback())));
3214 ScopedEntryPtr entry1_closer(entry1);
3215 EXPECT_NE(null, entry1);
3216 
3217 net::TestCompletionCallback doom_callback;
3218 EXPECT_EQ(net::ERR_IO_PENDING,
3219 cache_->DoomEntry(key, doom_callback.callback()));
3220 
// Re-creating the key while the doom is still in flight must succeed, and
// the doom must still complete with OK afterwards.
3221 disk_cache::Entry* entry2 = NULL;
3222 ASSERT_EQ(net::OK,
3223 create_callback.GetResult(
3224 cache_->CreateEntry(key, &entry2, create_callback.callback())));
3225 ScopedEntryPtr entry2_closer(entry2);
3226 EXPECT_EQ(net::OK, doom_callback.GetResult(net::ERR_IO_PENDING));
3227 }
3228
3229 TEST_F(DiskCacheEntryTest, SimpleCacheDoomDoom) {
3230 // Test sequence:
3231 // Create, Doom, Create, Doom (1st entry), Open.
3232 SetSimpleCacheMode();
3233 InitCache();
3234 disk_cache::Entry* null = NULL;
3235 
3236 const char key[] = "the first key";
3237 
3238 disk_cache::Entry* entry1 = NULL;
3239 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3240 ScopedEntryPtr entry1_closer(entry1);
3241 EXPECT_NE(null, entry1);
3242 
3243 EXPECT_EQ(net::OK, DoomEntry(key));
3244 
3245 disk_cache::Entry* entry2 = NULL;
3246 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3247 ScopedEntryPtr entry2_closer(entry2);
3248 EXPECT_NE(null, entry2);
3249 
3250 // Redundantly dooming entry1 should not delete entry2.
3251 disk_cache::SimpleEntryImpl* simple_entry1 =
3252 static_cast<disk_cache::SimpleEntryImpl*>(entry1);
3253 net::TestCompletionCallback cb;
3254 EXPECT_EQ(net::OK,
3255 cb.GetResult(simple_entry1->DoomEntry(cb.callback())));
3256 
// entry2 (the replacement under the same key) must still be openable.
3257 disk_cache::Entry* entry3 = NULL;
3258 ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
3259 ScopedEntryPtr entry3_closer(entry3);
3260 EXPECT_NE(null, entry3);
3261 }
3262
3263 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateDoom) {
3264 // Test sequence:
3265 // Create, Doom, Create, Doom.
3266 SetSimpleCacheMode();
3267 InitCache();
3268 
3269 disk_cache::Entry* null = NULL;
3270 
3271 const char key[] = "the first key";
3272 
3273 disk_cache::Entry* entry1 = NULL;
3274 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3275 ScopedEntryPtr entry1_closer(entry1);
3276 EXPECT_NE(null, entry1);
3277 
3278 entry1->Doom();
3279 
// Creating a replacement under the same key and dooming it too must be safe.
3280 disk_cache::Entry* entry2 = NULL;
3281 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3282 ScopedEntryPtr entry2_closer(entry2);
3283 EXPECT_NE(null, entry2);
3284 
3285 entry2->Doom();
3286 
3287 // This test passes if it doesn't crash.
3288 }
3289
3290 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCloseCreateCloseOpen) {
3291 // Test sequence: Create, Doom, Close, Create, Close, Open.
3292 SetSimpleCacheMode();
3293 InitCache();
3294 
3295 disk_cache::Entry* null = NULL;
3296 
3297 const char key[] = "this is a key";
3298 
3299 disk_cache::Entry* entry1 = NULL;
3300 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3301 ScopedEntryPtr entry1_closer(entry1);
3302 EXPECT_NE(null, entry1);
3303 
// Doom and fully close the first entry before re-creating the key.
3304 entry1->Doom();
3305 entry1_closer.reset();
3306 entry1 = NULL;
3307 
3308 disk_cache::Entry* entry2 = NULL;
3309 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3310 ScopedEntryPtr entry2_closer(entry2);
3311 EXPECT_NE(null, entry2);
3312 
3313 entry2_closer.reset();
3314 entry2 = NULL;
3315 
// The re-created (non-doomed) entry must be openable from disk.
3316 disk_cache::Entry* entry3 = NULL;
3317 ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
3318 ScopedEntryPtr entry3_closer(entry3);
3319 EXPECT_NE(null, entry3);
3320 }
3321
3322 // Checks that an optimistic Create would fail later on a racing Open.
3323 TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticCreateFailsOnOpen) {
3324 SetSimpleCacheMode();
3325 InitCache();
3326 
3327 // Create a corrupt file in place of a future entry. Optimistic create should
3328 // initially succeed, but realize later that creation failed.
3329 const std::string key = "the key";
3330 net::TestCompletionCallback cb;
3331 disk_cache::Entry* entry = NULL;
3332 disk_cache::Entry* entry2 = NULL;
3333 
3334 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3335 key, cache_path_));
// Optimistic create reports OK synchronously despite the corrupt file...
3336 EXPECT_EQ(net::OK, cache_->CreateEntry(key, &entry, cb.callback()));
3337 ASSERT_TRUE(entry);
3338 ScopedEntryPtr entry_closer(entry);
// ...but the deferred failure surfaces when the entry is opened again.
3339 ASSERT_NE(net::OK, OpenEntry(key, &entry2));
3340 
3341 // Check that we are not leaking.
3342 EXPECT_TRUE(
3343 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3344 
3345 DisableIntegrityCheck();
3346 }
3347
3348 // Tests that old entries are evicted while new entries remain in the index.
3349 // This test relies on non-mandatory properties of the simple Cache Backend:
3350 // LRU eviction, specific values of high-watermark and low-watermark etc.
3351 // When changing the eviction algorithm, the test will have to be re-engineered.
3352 TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
3353 const int kMaxSize = 200 * 1024;
3354 const int kWriteSize = kMaxSize / 10;
3355 const int kNumExtraEntries = 12;
3356 SetSimpleCacheMode();
3357 SetMaxSize(kMaxSize);
3358 InitCache();
3359 
// Write one "old" entry; the AddDelay() gives it a strictly older timestamp
// than the entries created below.
3360 std::string key1("the first key");
3361 disk_cache::Entry* entry;
3362 ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
3363 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kWriteSize));
3364 CacheTestFillBuffer(buffer->data(), kWriteSize, false);
3365 EXPECT_EQ(kWriteSize,
3366 WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
3367 entry->Close();
3368 AddDelay();
3369 
3370 std::string key2("the key prefix");
3371 for (int i = 0; i < kNumExtraEntries; i++) {
3372 if (i == kNumExtraEntries - 2) {
3373 // Create a distinct timestamp for the last two entries. These entries
3374 // will be checked for outliving the eviction.
3375 AddDelay();
3376 }
3377 ASSERT_EQ(net::OK, CreateEntry(key2 + base::StringPrintf("%d", i), &entry));
3378 ScopedEntryPtr entry_closer(entry);
3379 EXPECT_EQ(kWriteSize,
3380 WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
3381 }
3382 
3383 // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
3384 // the internal knowledge about |SimpleBackendImpl|.
3385 ASSERT_NE(net::OK, OpenEntry(key1, &entry))
3386 << "Should have evicted the old entry";
3387 for (int i = 0; i < 2; i++) {
3388 int entry_no = kNumExtraEntries - i - 1;
3389 // Generally there is no guarantee that at this point the background eviction
3390 // is finished. We are testing the positive case, i.e. when the eviction
3391 // never reaches this entry, should be non-flaky.
3392 ASSERT_EQ(net::OK, OpenEntry(key2 + base::StringPrintf("%d", entry_no),
3393 &entry))
3394 << "Should not have evicted fresh entry " << entry_no;
3395 entry->Close();
3396 }
3397 }
3398
3399 // Tests that if a read and a following in-flight truncate are both in progress
3400 // simultaneously that they both can occur successfully. See
3401 // http://crbug.com/239223
3402 TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate) {
3403 SetSimpleCacheMode();
3404 InitCache();
3405 
3406 const char key[] = "the first key";
3407 
3408 const int kBufferSize = 1024;
3409 scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
3410 CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
3411 
// Seed the entry with 1 KiB of data, then reopen it for the race below.
3412 disk_cache::Entry* entry = NULL;
3413 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3414 
3415 EXPECT_EQ(kBufferSize,
3416 WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
3417 entry->Close();
3418 entry = NULL;
3419 
3420 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3421 ScopedEntryPtr entry_closer(entry);
3422 
3423 MessageLoopHelper helper;
3424 int expected = 0;
3425 
3426 // Make a short read.
3427 const int kReadBufferSize = 512;
3428 scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
3429 CallbackTest read_callback(&helper, false);
3430 EXPECT_EQ(net::ERR_IO_PENDING,
3431 entry->ReadData(1,
3432 0,
3433 read_buffer.get(),
3434 kReadBufferSize,
3435 base::Bind(&CallbackTest::Run,
3436 base::Unretained(&read_callback))));
3437 ++expected;
3438 
3439 // Truncate the entry to the length of that read.
3440 scoped_refptr<net::IOBuffer>
3441 truncate_buffer(new net::IOBuffer(kReadBufferSize));
3442 CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
3443 CallbackTest truncate_callback(&helper, false);
3444 EXPECT_EQ(net::ERR_IO_PENDING,
3445 entry->WriteData(1,
3446 0,
3447 truncate_buffer.get(),
3448 kReadBufferSize,
3449 base::Bind(&CallbackTest::Run,
3450 base::Unretained(&truncate_callback)),
3451 true));
3452 ++expected;
3453 
3454 // Wait for both the read and truncation to finish, and confirm that both
3455 // succeeded.
3456 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
3457 EXPECT_EQ(kReadBufferSize, read_callback.last_result());
3458 EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
// The read must have observed the pre-truncation data.
3459 EXPECT_EQ(0,
3460 memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
3461 }
3462
3463 // Tests that if a write and a read dependent on it are both in flight
3464 // simultaneously that they both can complete successfully without erroneous
3465 // early returns. See http://crbug.com/239223
3466 TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
3467 SetSimpleCacheMode();
3468 InitCache();
3469 
3470 const char key[] = "the first key";
3471 disk_cache::Entry* entry = NULL;
3472 ASSERT_EQ(net::OK,
3473 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3474 ScopedEntryPtr entry_closer(entry);
3475 
3476 const int kBufferSize = 1024;
3477 scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
3478 CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
3479 
3480 MessageLoopHelper helper;
3481 int expected = 0;
3482 
// Queue the write and the dependent read without waiting in between; the
// read must see the write's data once both finish.
3483 CallbackTest write_callback(&helper, false);
3484 EXPECT_EQ(net::ERR_IO_PENDING,
3485 entry->WriteData(1,
3486 0,
3487 write_buffer.get(),
3488 kBufferSize,
3489 base::Bind(&CallbackTest::Run,
3490 base::Unretained(&write_callback)),
3491 true));
3492 ++expected;
3493 
3494 scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
3495 CallbackTest read_callback(&helper, false);
3496 EXPECT_EQ(net::ERR_IO_PENDING,
3497 entry->ReadData(1,
3498 0,
3499 read_buffer.get(),
3500 kBufferSize,
3501 base::Bind(&CallbackTest::Run,
3502 base::Unretained(&read_callback))));
3503 ++expected;
3504 
3505 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
3506 EXPECT_EQ(kBufferSize, write_callback.last_result());
3507 EXPECT_EQ(kBufferSize, read_callback.last_result());
3508 EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
3509 }
3510
3511 TEST_F(DiskCacheEntryTest, SimpleCacheOpenCreateRaceWithNoIndex) {
3512 SetSimpleCacheMode();
3513 DisableSimpleCacheWaitForIndex();
3514 DisableIntegrityCheck();
3515 InitCache();
3516 
3517 // Assume the index is not initialized, which is likely, since we are blocking
3518 // the IO thread from executing the index finalization step.
3519 disk_cache::Entry* entry1;
3520 net::TestCompletionCallback cb1;
3521 disk_cache::Entry* entry2;
3522 net::TestCompletionCallback cb2;
// Race an open of a nonexistent key against a create of the same key.
3523 int rv1 = cache_->OpenEntry("key", &entry1, cb1.callback());
3524 int rv2 = cache_->CreateEntry("key", &entry2, cb2.callback());
3525 
// The open must fail (entry doesn't exist yet) and the create must succeed.
3526 EXPECT_EQ(net::ERR_FAILED, cb1.GetResult(rv1));
3527 ASSERT_EQ(net::OK, cb2.GetResult(rv2));
3528 entry2->Close();
3529 }
3530
3531 // Checks that reading two entries simultaneously does not discard a CRC check.
3532 // TODO(pasko): make it work with Simple Cache.
3533 TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheMultipleReadersCheckCRC) {
3534 SetSimpleCacheMode();
3535 InitCache();
3536 
3537 const char key[] = "key";
3538 
3539 int size;
3540 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
3541 
3542 scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
3543 scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
3544 
3545 // Advance the first reader a little.
3546 disk_cache::Entry* entry = NULL;
3547 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3548 EXPECT_EQ(1, ReadData(entry, 0, 0, read_buffer1.get(), 1));
3549 
3550 // Make the second reader pass the point where the first one is, and close.
3551 disk_cache::Entry* entry2 = NULL;
3552 EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
3553 EXPECT_EQ(1, ReadData(entry2, 0, 0, read_buffer2.get(), 1));
3554 EXPECT_EQ(1, ReadData(entry2, 0, 1, read_buffer2.get(), 1));
3555 entry2->Close();
3556 
3557 // Reading the data till the end should produce an error.
3558 EXPECT_GT(0, ReadData(entry, 0, 1, read_buffer1.get(), size));
3559 entry->Close();
3560 DisableIntegrityCheck();
3561 }
3562
3563 // Checking one more scenario of overlapped reading of a bad entry.
3564 // Differs from the |SimpleCacheMultipleReadersCheckCRC| only by the order of
3565 // last two reads.
3566 TEST_F(DiskCacheEntryTest, SimpleCacheMultipleReadersCheckCRC2) {
3567 SetSimpleCacheMode();
3568 InitCache();
3569 
3570 const char key[] = "key";
3571 int size;
3572 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
3573 
3574 scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
3575 scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
3576 
3577 // Advance the first reader a little.
3578 disk_cache::Entry* entry = NULL;
3579 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3580 ScopedEntryPtr entry_closer(entry);
3581 EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));
3582 
3583 // Advance the 2nd reader by the same amount.
3584 disk_cache::Entry* entry2 = NULL;
3585 EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
3586 ScopedEntryPtr entry2_closer(entry2);
3587 EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));
3588 
// Reading to the end of the corrupted stream must fail for the 1st reader.
3589 // Continue reading 1st.
3590 EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));
3591 
3592 // This read should fail as well because we have previous read failures.
3593 EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
3594 DisableIntegrityCheck();
3595 }
3596
// Test if we can sequentially read each subset of the data until all the data
// is read, then the CRC is calculated correctly and the reads are successful.
TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRC) {
  // Test sequence:
  // Create, Write, Read (first half of data), Read (second half of data),
  // Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  // NOTE(review): |entry| was closed above; this pointer comparison presumably
  // relies on the simple cache keeping the entry object alive in its
  // active-entries table so Open returns the very same object -- confirm
  // against SimpleEntryImpl before relying on it elsewhere.
  EXPECT_EQ(entry, entry2);

  // Read the first half of the data.
  int offset = 0;
  int buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));

  // Read the second half of the data.
  offset = buf_len;
  buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read2(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
  char* buffer1_data = buffer1->data() + offset;
  EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  entry->Close();
  entry = NULL;
}
3646
// Test if we can write the data not in sequence and read correctly. In
// this case the CRC will not be present.
TEST_F(DiskCacheEntryTest, SimpleCacheNonSequentialWrite) {
  // Test sequence:
  // Create, Write (second half of data), Write (first half of data), Read,
  // Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  // |buffer2| holds just the second half of |buffer1|'s data.
  char* buffer1_data = buffer1->data() + kHalfSize;
  memcpy(buffer2->data(), buffer1_data, kHalfSize);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Close();
  // Repeat the scenario for every stream of the entry.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_NE(null, entry);

    // Write the second half first ...
    int offset = kHalfSize;
    int buf_len = kHalfSize;

    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    // ... then the first half, out of sequence.
    offset = 0;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    entry->Close();

    ASSERT_EQ(net::OK, OpenEntry(key, &entry));

    // The full contents must read back as the original sequential data.
    scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));
    // Check that we are not leaking.
    ASSERT_NE(entry, null);
    EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
    entry->Close();
  }
}
3695
// Test that changing stream1 size does not affect stream0 (stream0 and stream1
// are stored in the same file in Simple Cache).
TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry = NULL;
  const char key[] = "the key";
  const int kSize = 100;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer_read(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(entry);

  // Write something into stream0.
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();

  // Extend stream1: a zero-length write at offset |stream1_size| grows the
  // stream to that size.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  int stream1_size = 100;
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();

  // Check that stream0 data has not been modified and that the EOF record for
  // stream 0 contains a crc.
  // The entry needs to be reopened before checking the crc: Open will perform
  // the synchronization with the previous Close. This ensures the EOF records
  // have been written to disk before we attempt to read them independently.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  base::File entry_file0(entry_file0_path,
                         base::File::FLAG_READ | base::File::FLAG_OPEN);
  ASSERT_TRUE(entry_file0.IsValid());

  // Reconstruct where stream 0's EOF record lives inside file 0 so it can be
  // read directly from disk, bypassing the cache.
  int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
  int sparse_data_size = 0;
  disk_cache::SimpleEntryStat entry_stat(
      base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
  int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
  disk_cache::SimpleFileEOF eof_record;
  ASSERT_EQ(static_cast<int>(sizeof(eof_record)),
            entry_file0.Read(eof_offset, reinterpret_cast<char*>(&eof_record),
                             sizeof(eof_record)));
  EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number);
  EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) ==
              disk_cache::SimpleFileEOF::FLAG_HAS_CRC32);

  buffer_read = new net::IOBuffer(kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));

  // Shrink stream1: a truncating zero-length write at the new, smaller size.
  stream1_size = 50;
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();

  // Check that stream0 data has not been modified.
  buffer_read = new net::IOBuffer(kSize);
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();
  entry = NULL;
}
3767
// Test that writing within the range for which the crc has already been
// computed will properly invalidate the computed crc.
TEST_F(DiskCacheEntryTest, SimpleCacheCRCRewrite) {
  // Test sequence:
  // Create, Write (big data), Write (small data in the middle), Close.
  // Open, Read (all), Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kHalfSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kHalfSize, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);
  entry->Close();

  // Repeat the scenario for every stream of the entry.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    // Write the full buffer first ...
    int offset = 0;
    int buf_len = kSize;

    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    // ... then overwrite its second half, which must invalidate any CRC
    // computed for the first write.
    offset = kHalfSize;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    ASSERT_EQ(net::OK, OpenEntry(key, &entry));

    // The read must succeed and reflect both writes.
    scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
    EXPECT_EQ(
        0,
        memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize, kHalfSize));

    entry->Close();
  }
}
3816
3817 bool DiskCacheEntryTest::SimpleCacheThirdStreamFileExists(const char* key) {
3818 int third_stream_file_index =
3819 disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
3820 base::FilePath third_stream_file_path = cache_path_.AppendASCII(
3821 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
3822 key, third_stream_file_index));
3823 return PathExists(third_stream_file_path);
3824 }
3825
3826 void DiskCacheEntryTest::SyncDoomEntry(const char* key) {
3827 net::TestCompletionCallback callback;
3828 cache_->DoomEntry(key, callback.callback());
3829 callback.WaitForResult();
3830 }
3831
3832 // Check that a newly-created entry with no third-stream writes omits the
3833 // third stream file.
3834 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream1) {
3835 SetSimpleCacheMode();
3836 InitCache();
3837
3838 const char key[] = "key";
3839
3840 disk_cache::Entry* entry;
3841
3842 // Create entry and close without writing: third stream file should be
3843 // omitted, since the stream is empty.
3844 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3845 entry->Close();
3846 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3847
3848 SyncDoomEntry(key);
3849 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3850 }
3851
3852 // Check that a newly-created entry with only a single zero-offset, zero-length
3853 // write omits the third stream file.
3854 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream2) {
3855 SetSimpleCacheMode();
3856 InitCache();
3857
3858 const int kHalfSize = 8;
3859 const int kSize = kHalfSize * 2;
3860 const char key[] = "key";
3861 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3862 CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3863
3864 disk_cache::Entry* entry;
3865
3866 // Create entry, write empty buffer to third stream, and close: third stream
3867 // should still be omitted, since the entry ignores writes that don't modify
3868 // data or change the length.
3869 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3870 EXPECT_EQ(0, WriteData(entry, 2, 0, buffer.get(), 0, true));
3871 entry->Close();
3872 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3873
3874 SyncDoomEntry(key);
3875 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3876 }
3877
// Check that we can read back data written to the third stream.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream3) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, write data to third stream, and close: third stream should
  // not be omitted, since it contains data.  Re-open entry and ensure there
  // are that many bytes in the third stream.
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));

  // Only |kHalfSize| bytes were written, so a |kSize|-byte read comes back
  // short with exactly the written data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));

  // Dooming the entry removes its files, including the third stream's.
  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
3909
// Check that we remove the third stream file upon opening an entry and finding
// the third stream empty.  (This is the upgrade path for entries written
// before the third stream was optional.)
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, write data to third stream, truncate third stream back to
  // empty, and close: third stream will not initially be omitted, since entry
  // creates the file when the first significant write comes in, and only
  // removes it on open if it is empty.  Reopen, ensure that the file is
  // deleted, and that there's no data in the third stream.
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
  // Truncating write shrinks the stream back to length 0; the stale file
  // remains on disk until the next Open.
  EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1.get(), 0, true));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));

  // Open notices the empty third stream and deletes the stale file.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
  EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));

  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
3946
3947 // Check that we don't accidentally create the third stream file once the entry
3948 // has been doomed.
3949 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
3950 SetSimpleCacheMode();
3951 InitCache();
3952
3953 const int kHalfSize = 8;
3954 const int kSize = kHalfSize * 2;
3955 const char key[] = "key";
3956 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3957 CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3958
3959 disk_cache::Entry* entry;
3960
3961 // Create entry, doom entry, write data to third stream, and close: third
3962 // stream should not exist. (Note: We don't care if the write fails, just
3963 // that it doesn't cause the file to be created on disk.)
3964 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3965 entry->Doom();
3966 WriteData(entry, 2, 0, buffer.get(), kHalfSize, true);
3967 entry->Close();
3968 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3969 }
3970
// There could be a race between Doom and an optimistic write.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
  // Test sequence:
  // Create, first Write, second Write, Close.
  // Open, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kSize, false);

  // The race only happens on stream 1 and stream 2.
  // NOTE(review): the loop nevertheless starts at 0 and so covers stream 0
  // too -- presumably harmless extra coverage; confirm intent.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_EQ(net::OK, DoomAllEntries());
    disk_cache::Entry* entry = NULL;

    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);
    entry->Close();
    entry = NULL;

    ASSERT_EQ(net::OK, DoomAllEntries());
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);

    int offset = 0;
    int buf_len = kSize;
    // This write should not be optimistic (since create is).
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));

    offset = kSize;
    // This write should be optimistic.
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    // The entry must still be openable after the doom/write interleaving.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_NE(null, entry);

    entry->Close();
    entry = NULL;
  }
}
4020
// Tests for a regression in crbug.com/317138 , in which deleting an already
// doomed entry was removing the active entry from the index.
TEST_F(DiskCacheEntryTest, SimpleCachePreserveActiveEntries) {
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* null = NULL;

  const char key[] = "this is a key";

  // Create a first entry under |key| and doom it immediately; it stays open.
  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);
  entry1->Doom();

  // Create a second, live entry under the same key.
  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);
  entry2_closer.reset();

  // Closing then reopening entry2 insures that entry2 is serialized, and so
  // it can be opened from files without error.
  entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  EXPECT_NE(null, entry2);
  entry2_closer.reset(entry2);

  // Keep an extra reference so the object can be observed after its last
  // Close() below.
  scoped_refptr<disk_cache::SimpleEntryImpl>
      entry1_refptr = static_cast<disk_cache::SimpleEntryImpl*>(entry1);

  // If crbug.com/317138 has regressed, this will remove |entry2| from
  // the backend's |active_entries_| while |entry2| is still alive and its
  // files are still on disk.
  entry1_closer.reset();
  entry1 = NULL;

  // Close does not have a callback. However, we need to be sure the close is
  // finished before we continue the test. We can take advantage of how the ref
  // counting of a SimpleEntryImpl works to fake out a callback: When the
  // last Close() call is made to an entry, an IO operation is sent to the
  // synchronous entry to close the platform files. This IO operation holds a
  // ref pointer to the entry, which expires when the operation is done. So,
  // we take a refpointer, and watch the SimpleEntry object until it has only
  // one ref; this indicates the IO operation is complete.
  while (!entry1_refptr->HasOneRef()) {
    base::PlatformThread::YieldCurrentThread();
    base::MessageLoop::current()->RunUntilIdle();
  }
  entry1_refptr = NULL;

  // In the bug case, this new entry ends up being a duplicate object pointing
  // at the same underlying files.
  disk_cache::Entry* entry3 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry3));
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);

  // The test passes if these two dooms do not crash.
  entry2->Doom();
  entry3->Doom();
}
4084
TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
  // Runs the shared BasicSparseIO() scenario against the simple cache backend.
  SetSimpleCacheMode();
  InitCache();
  BasicSparseIO();
}
4090
TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
  // Runs the shared HugeSparseIO() scenario against the simple cache backend.
  SetSimpleCacheMode();
  InitCache();
  HugeSparseIO();
}
4096
TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
  // Runs the shared GetAvailableRange() scenario against the simple cache
  // backend.
  SetSimpleCacheMode();
  InitCache();
  GetAvailableRange();
}
4102
// Disabled (DISABLED_ prefix): not run by default.
TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheCouldBeSparse) {
  // Runs the shared CouldBeSparse() scenario against the simple cache backend.
  SetSimpleCacheMode();
  InitCache();
  CouldBeSparse();
}
4108
TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
  // Runs the shared UpdateSparseEntry() scenario against the simple cache
  // backend.
  SetSimpleCacheMode();
  InitCache();
  UpdateSparseEntry();
}
4114
TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
  // Runs the shared DoomSparseEntry() scenario against the simple cache
  // backend.
  SetSimpleCacheMode();
  InitCache();
  DoomSparseEntry();
}
4120
TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
  // Runs the shared PartialSparseEntry() scenario against the simple cache
  // backend.
  SetSimpleCacheMode();
  InitCache();
  PartialSparseEntry();
}
4126
// Writing a second sparse range when the sparse-data budget only has room for
// one must evict the first range.
TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
  const int kSize = 1024;

  SetSimpleCacheMode();
  // An entry is allowed sparse data 1/10 the size of the cache, so this size
  // allows for one |kSize|-sized range plus overhead, but not two ranges.
  SetMaxSize(kSize * 15);
  InitCache();

  const char key[] = "key";
  disk_cache::Entry* null = NULL;
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  net::TestCompletionCallback callback;
  int ret;

  // Verify initial conditions: no sparse data is present yet, so both reads
  // return 0 bytes.
  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  // Write a range and make sure it reads back.
  ret = entry->WriteSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  // Write another range and make sure it reads back.
  ret = entry->WriteSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  // Make sure the first range was removed when the second was written.
  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  entry->Close();
}
OLDNEW
« no previous file with comments | « net/disk_cache/disk_cache_test_util.cc ('k') | net/disk_cache/memory/mem_backend_impl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698