OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "net/disk_cache/entry_tests.h" |
| 6 |
| 7 #include <string> |
| 8 |
| #include "base/bind.h" |
| #include "base/memory/ref_counted.h" |
| #include "base/message_loop/message_loop.h" |
| 9 #include "base/strings/string_util.h" |
| 10 #include "base/strings/stringprintf.h" |
| #include "base/threading/platform_thread.h" |
| 11 #include "base/time/time.h" |
| 12 #include "net/base/io_buffer.h" |
| 13 #include "net/base/net_errors.h" |
| 14 #include "net/base/test_completion_callback.h" |
| 15 #include "net/disk_cache/disk_cache.h" |
| 16 #include "net/disk_cache/disk_cache_test.h" |
| 17 #include "net/disk_cache/disk_cache_test_util.h" |
| 18 |
| 19 using base::Time; |
| 20 |
| 21 namespace disk_cache { |
| 22 |
| 23 namespace { |
| 24 |
| 25 TEST_P(DiskCacheEntryTest, InternalAsyncIO) { |
| 26 InitCache(); |
| 27 |
| 28 Entry* entry = NULL; |
| 29 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry)); |
| 30 ASSERT_TRUE(NULL != entry); |
| 31 |
| 32 // Avoid using internal buffers for the test. We have to write something to |
| 33 // the entry and close it so that we flush the internal buffer to disk. After |
| 34 // that, IO operations will be really hitting the disk. We don't care about |
| 35 // the content, so just extending the entry is enough (all extensions zero- |
| 36 // fill any holes). |
| 37 EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, NULL, 0, false)); |
| 38 EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, NULL, 0, false)); |
| 39 entry->Close(); |
| 40 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry)); |
| 41 |
| 42 MessageLoopHelper helper; |
| 43 // Let's verify that each IO goes to the right callback object. |
| 44 CallbackTest callback1(&helper, false); |
| 45 CallbackTest callback2(&helper, false); |
| 46 CallbackTest callback3(&helper, false); |
| 47 CallbackTest callback4(&helper, false); |
| 48 CallbackTest callback5(&helper, false); |
| 49 CallbackTest callback6(&helper, false); |
| 50 CallbackTest callback7(&helper, false); |
| 51 CallbackTest callback8(&helper, false); |
| 52 CallbackTest callback9(&helper, false); |
| 53 CallbackTest callback10(&helper, false); |
| 54 CallbackTest callback11(&helper, false); |
| 55 CallbackTest callback12(&helper, false); |
| 56 CallbackTest callback13(&helper, false); |
| 57 |
| 58 const int kSize1 = 10; |
| 59 const int kSize2 = 5000; |
| 60 const int kSize3 = 10000; |
| 61 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1)); |
| 62 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2)); |
| 63 scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3)); |
| 64 CacheTestFillBuffer(buffer1->data(), kSize1, false); |
| 65 CacheTestFillBuffer(buffer2->data(), kSize2, false); |
| 66 CacheTestFillBuffer(buffer3->data(), kSize3, false); |
| 67 |
| 68 EXPECT_EQ(0, |
| 69 entry->ReadData( |
| 70 0, |
| 71 15 * 1024, |
| 72 buffer1.get(), |
| 73 kSize1, |
| 74 base::Bind(&CallbackTest::Run, base::Unretained(&callback1)))); |
| 75 base::strlcpy(buffer1->data(), "the data", kSize1); |
| 76 int expected = 0; |
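| // Each operation below may complete synchronously (returning a byte count) or |
| // return net::ERR_IO_PENDING; count the pending ones so we know how many |
| // callbacks to wait for. |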
| 77 int ret = entry->WriteData( |
| 78 0, |
| 79 0, |
| 80 buffer1.get(), |
| 81 kSize1, |
| 82 base::Bind(&CallbackTest::Run, base::Unretained(&callback2)), |
| 83 false); |
| 84 EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret); |
| 85 if (net::ERR_IO_PENDING == ret) |
| 86 expected++; |
| 87 |
| 88 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 89 memset(buffer2->data(), 0, kSize2); |
| 90 ret = entry->ReadData( |
| 91 0, |
| 92 0, |
| 93 buffer2.get(), |
| 94 kSize1, |
| 95 base::Bind(&CallbackTest::Run, base::Unretained(&callback3))); |
| 96 EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret); |
| 97 if (net::ERR_IO_PENDING == ret) |
| 98 expected++; |
| 99 |
| 100 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 101 EXPECT_STREQ("the data", buffer2->data()); |
| 102 |
| 103 base::strlcpy(buffer2->data(), "The really big data goes here", kSize2); |
| 104 ret = entry->WriteData( |
| 105 1, |
| 106 1500, |
| 107 buffer2.get(), |
| 108 kSize2, |
| 109 base::Bind(&CallbackTest::Run, base::Unretained(&callback4)), |
| 110 true); |
| 111 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret); |
| 112 if (net::ERR_IO_PENDING == ret) |
| 113 expected++; |
| 114 |
| 115 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 116 memset(buffer3->data(), 0, kSize3); |
| 117 ret = entry->ReadData( |
| 118 1, |
| 119 1511, |
| 120 buffer3.get(), |
| 121 kSize2, |
| 122 base::Bind(&CallbackTest::Run, base::Unretained(&callback5))); |
| 123 EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret); |
| 124 if (net::ERR_IO_PENDING == ret) |
| 125 expected++; |
| 126 |
| 127 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 128 EXPECT_STREQ("big data goes here", buffer3->data()); |
| 129 ret = entry->ReadData( |
| 130 1, |
| 131 0, |
| 132 buffer2.get(), |
| 133 kSize2, |
| 134 base::Bind(&CallbackTest::Run, base::Unretained(&callback6))); |
| 135 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret); |
| 136 if (net::ERR_IO_PENDING == ret) |
| 137 expected++; |
| 138 |
| 139 memset(buffer3->data(), 0, kSize3); |
| 140 |
| 141 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 142 EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500)); |
| 143 ret = entry->ReadData( |
| 144 1, |
| 145 5000, |
| 146 buffer2.get(), |
| 147 kSize2, |
| 148 base::Bind(&CallbackTest::Run, base::Unretained(&callback7))); |
| 149 EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret); |
| 150 if (net::ERR_IO_PENDING == ret) |
| 151 expected++; |
| 152 |
| 153 ret = entry->ReadData( |
| 154 1, |
| 155 0, |
| 156 buffer3.get(), |
| 157 kSize3, |
| 158 base::Bind(&CallbackTest::Run, base::Unretained(&callback9))); |
| 159 EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret); |
| 160 if (net::ERR_IO_PENDING == ret) |
| 161 expected++; |
| 162 |
| 163 ret = entry->WriteData( |
| 164 1, |
| 165 0, |
| 166 buffer3.get(), |
| 167 8192, |
| 168 base::Bind(&CallbackTest::Run, base::Unretained(&callback10)), |
| 169 true); |
| 170 EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret); |
| 171 if (net::ERR_IO_PENDING == ret) |
| 172 expected++; |
| 173 |
| 174 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 175 ret = entry->ReadData( |
| 176 1, |
| 177 0, |
| 178 buffer3.get(), |
| 179 kSize3, |
| 180 base::Bind(&CallbackTest::Run, base::Unretained(&callback11))); |
| 181 EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret); |
| 182 if (net::ERR_IO_PENDING == ret) |
| 183 expected++; |
| 184 |
| 185 EXPECT_EQ(8192, entry->GetDataSize(1)); |
| 186 |
| 187 ret = entry->ReadData( |
| 188 0, |
| 189 0, |
| 190 buffer1.get(), |
| 191 kSize1, |
| 192 base::Bind(&CallbackTest::Run, base::Unretained(&callback12))); |
| 193 EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret); |
| 194 if (net::ERR_IO_PENDING == ret) |
| 195 expected++; |
| 196 |
| 197 ret = entry->ReadData( |
| 198 1, |
| 199 0, |
| 200 buffer2.get(), |
| 201 kSize2, |
| 202 base::Bind(&CallbackTest::Run, base::Unretained(&callback13))); |
| 203 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret); |
| 204 if (net::ERR_IO_PENDING == ret) |
| 205 expected++; |
| 206 |
| 207 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 208 |
| 209 EXPECT_FALSE(helper.callback_reused_error()); |
| 210 |
| 211 entry->Doom(); |
| 212 entry->Close(); |
| 213 FlushQueueForTest(); |
| 214 EXPECT_EQ(0, cache()->GetEntryCount()); |
| 215 } |
| 216 |
| 217 TEST_P(DiskCacheEntryTest, ExternalAsyncIO) { |
| 218 InitCache(); |
| 219 Entry* entry; |
| 220 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry)); |
| 221 |
| 222 int expected = 0; |
| 223 |
| 224 MessageLoopHelper helper; |
| 225 // Let's verify that each IO goes to the right callback object. |
| 226 CallbackTest callback1(&helper, false); |
| 227 CallbackTest callback2(&helper, false); |
| 228 CallbackTest callback3(&helper, false); |
| 229 CallbackTest callback4(&helper, false); |
| 230 CallbackTest callback5(&helper, false); |
| 231 CallbackTest callback6(&helper, false); |
| 232 CallbackTest callback7(&helper, false); |
| 233 CallbackTest callback8(&helper, false); |
| 234 CallbackTest callback9(&helper, false); |
| 235 |
| 236 const int kSize1 = 17000; |
| 237 const int kSize2 = 25000; |
| 238 const int kSize3 = 25000; |
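| // With the blockfile backend, streams this large are stored in external files |
| // rather than block files, which is what this test is meant to exercise. |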
| 239 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1)); |
| 240 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2)); |
| 241 scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3)); |
| 242 CacheTestFillBuffer(buffer1->data(), kSize1, false); |
| 243 CacheTestFillBuffer(buffer2->data(), kSize2, false); |
| 244 CacheTestFillBuffer(buffer3->data(), kSize3, false); |
| 245 base::strlcpy(buffer1->data(), "the data", kSize1); |
| 246 int ret = entry->WriteData( |
| 247 0, |
| 248 0, |
| 249 buffer1.get(), |
| 250 kSize1, |
| 251 base::Bind(&CallbackTest::Run, base::Unretained(&callback1)), |
| 252 false); |
| 253 EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret); |
| 254 if (net::ERR_IO_PENDING == ret) |
| 255 expected++; |
| 256 |
| 257 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 258 |
| 259 memset(buffer2->data(), 0, kSize1); |
| 260 ret = entry->ReadData( |
| 261 0, |
| 262 0, |
| 263 buffer2.get(), |
| 264 kSize1, |
| 265 base::Bind(&CallbackTest::Run, base::Unretained(&callback2))); |
| 266 EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret); |
| 267 if (net::ERR_IO_PENDING == ret) |
| 268 expected++; |
| 269 |
| 270 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 271 EXPECT_STREQ("the data", buffer2->data()); |
| 272 |
| 273 base::strlcpy(buffer2->data(), "The really big data goes here", kSize2); |
| 274 ret = entry->WriteData( |
| 275 1, |
| 276 10000, |
| 277 buffer2.get(), |
| 278 kSize2, |
| 279 base::Bind(&CallbackTest::Run, base::Unretained(&callback3)), |
| 280 false); |
| 281 EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret); |
| 282 if (net::ERR_IO_PENDING == ret) |
| 283 expected++; |
| 284 |
| 285 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 286 |
| 287 memset(buffer3->data(), 0, kSize3); |
| 288 ret = entry->ReadData( |
| 289 1, |
| 290 10011, |
| 291 buffer3.get(), |
| 292 kSize3, |
| 293 base::Bind(&CallbackTest::Run, base::Unretained(&callback4))); |
| 294 EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret); |
| 295 if (net::ERR_IO_PENDING == ret) |
| 296 expected++; |
| 297 |
| 298 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 299 EXPECT_STREQ("big data goes here", buffer3->data()); |
| 300 ret = entry->ReadData( |
| 301 1, |
| 302 0, |
| 303 buffer2.get(), |
| 304 kSize2, |
| 305 base::Bind(&CallbackTest::Run, base::Unretained(&callback5))); |
| 306 EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret); |
| 307 if (net::ERR_IO_PENDING == ret) |
| 308 expected++; |
| 309 |
| 310 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 311 memset(buffer3->data(), 0, kSize3); |
| 312 EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000)); |
| 313 ret = entry->ReadData( |
| 314 1, |
| 315 30000, |
| 316 buffer2.get(), |
| 317 kSize2, |
| 318 base::Bind(&CallbackTest::Run, base::Unretained(&callback6))); |
| 319 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret); |
| 320 if (net::ERR_IO_PENDING == ret) |
| 321 expected++; |
| 322 |
| 323 EXPECT_EQ(0, |
| 324 entry->ReadData( |
| 325 1, |
| 326 35000, |
| 327 buffer2.get(), |
| 328 kSize2, |
| 329 base::Bind(&CallbackTest::Run, base::Unretained(&callback7)))); |
| 330 ret = entry->ReadData( |
| 331 1, |
| 332 0, |
| 333 buffer1.get(), |
| 334 kSize1, |
| 335 base::Bind(&CallbackTest::Run, base::Unretained(&callback8))); |
| 336 EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret); |
| 337 if (net::ERR_IO_PENDING == ret) |
| 338 expected++; |
| 339 ret = entry->WriteData( |
| 340 1, |
| 341 20000, |
| 342 buffer3.get(), |
| 343 kSize1, |
| 344 base::Bind(&CallbackTest::Run, base::Unretained(&callback9)), |
| 345 false); |
| 346 EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret); |
| 347 if (net::ERR_IO_PENDING == ret) |
| 348 expected++; |
| 349 |
| 350 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected)); |
| 351 EXPECT_EQ(37000, entry->GetDataSize(1)); |
| 352 |
| 353 EXPECT_FALSE(helper.callback_reused_error()); |
| 354 |
| 355 entry->Doom(); |
| 356 entry->Close(); |
| 357 FlushQueueForTest(); |
| 358 EXPECT_EQ(0, cache()->GetEntryCount()); |
| 359 } |
| 360 |
| 361 // Tests that IOBuffers are not referenced after IO completes. |
| 362 TEST_P(DiskCacheEntryTest, ReleaseBuffer) { |
| 363 InitCache(); |
| 364 |
| 365 Entry* entry = NULL; |
| 366 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry)); |
| 367 ASSERT_TRUE(NULL != entry); |
| 368 |
| 369 const int kBufferSize = 1024; |
| 370 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize)); |
| 371 CacheTestFillBuffer(buffer->data(), kBufferSize, false); |
| 372 |
| 373 net::ReleaseBufferCompletionCallback cb(buffer.get()); |
| 374 int rv = |
| 375 entry->WriteData(0, 0, buffer.get(), kBufferSize, cb.callback(), false); |
| 376 EXPECT_EQ(kBufferSize, cb.GetResult(rv)); |
| 377 entry->Close(); |
| 378 } |
| 379 |
| 380 TEST_P(DiskCacheEntryTest, StreamAccess) { |
| 381 InitCache(); |
| 382 |
| 383 disk_cache::Entry* entry = NULL; |
| 384 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry)); |
| 385 ASSERT_TRUE(NULL != entry); |
| 386 |
| 387 const int kBufferSize = 1024; |
| 388 const int kNumStreams = 3; |
| 389 scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams]; |
| 390 for (int i = 0; i < kNumStreams; i++) { |
| 391 reference_buffers[i] = new net::IOBuffer(kBufferSize); |
| 392 CacheTestFillBuffer(reference_buffers[i]->data(), kBufferSize, false); |
| 393 } |
| 394 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kBufferSize)); |
| 395 for (int i = 0; i < kNumStreams; i++) { |
| 396 EXPECT_EQ( |
| 397 kBufferSize, |
| 398 WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false)); |
| 399 memset(buffer1->data(), 0, kBufferSize); |
| 400 EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize)); |
| 401 EXPECT_EQ( |
| 402 0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize)); |
| 403 } |
| 404 EXPECT_EQ(net::ERR_INVALID_ARGUMENT, |
| 405 ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize)); |
| 406 entry->Close(); |
| 407 |
| 408 // Open the entry and read it in chunks, including a read past the end. |
| 409 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry)); |
| 410 ASSERT_TRUE(NULL != entry); |
| 411 const int kReadBufferSize = 600; |
| 412 const int kFinalReadSize = kBufferSize - kReadBufferSize; |
| 413 COMPILE_ASSERT(kFinalReadSize < kReadBufferSize, should_be_exactly_two_reads); |
| 414 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize)); |
| 415 for (int i = 0; i < kNumStreams; i++) { |
| 416 memset(buffer2->data(), 0, kReadBufferSize); |
| 417 EXPECT_EQ(kReadBufferSize, |
| 418 ReadData(entry, i, 0, buffer2.get(), kReadBufferSize)); |
| 419 EXPECT_EQ( |
| 420 0, |
| 421 memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize)); |
| 422 |
| 423 memset(buffer2->data(), 0, kReadBufferSize); |
| 424 EXPECT_EQ( |
| 425 kFinalReadSize, |
| 426 ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize)); |
| 427 EXPECT_EQ(0, |
| 428 memcmp(reference_buffers[i]->data() + kReadBufferSize, |
| 429 buffer2->data(), |
| 430 kFinalReadSize)); |
| 431 } |
| 432 |
| 433 entry->Close(); |
| 434 } |
| 435 |
| 436 TEST_P(DiskCacheEntryTest, GetKey) { |
| 437 InitCache(); |
| 438 |
| 439 std::string key("the first key"); |
| 440 Entry* entry; |
| 441 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 442 EXPECT_EQ(key, entry->GetKey()) << "short key"; |
| 443 entry->Close(); |
| 444 |
| 445 int seed = static_cast<int>(Time::Now().ToInternalValue()); |
| 446 srand(seed); |
| 447 char key_buffer[20000]; |
| 448 |
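| // Try keys of increasing length; depending on the backend, longer keys may be |
| // stored outside the main entry record. |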
| 449 CacheTestFillBuffer(key_buffer, 3000, true); |
| 450 key_buffer[1000] = '\0'; |
| 451 |
| 452 key = key_buffer; |
| 453 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 454 EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key"; |
| 455 entry->Close(); |
| 456 |
| 457 key_buffer[1000] = 'p'; |
| 458 key_buffer[3000] = '\0'; |
| 459 key = key_buffer; |
| 460 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 461 EXPECT_TRUE(key == entry->GetKey()) << "medium size key"; |
| 462 entry->Close(); |
| 463 |
| 464 CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true); |
| 465 key_buffer[19999] = '\0'; |
| 466 |
| 467 key = key_buffer; |
| 468 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 469 EXPECT_TRUE(key == entry->GetKey()) << "long key"; |
| 470 entry->Close(); |
| 471 |
| 472 CacheTestFillBuffer(key_buffer, 0x4000, true); |
| 473 key_buffer[0x4000] = '\0'; |
| 474 |
| 475 key = key_buffer; |
| 476 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 477 EXPECT_TRUE(key == entry->GetKey()) << "16KB key"; |
| 478 entry->Close(); |
| 479 } |
| 480 |
| 481 TEST_P(DiskCacheEntryTest, GetTimes) { |
| 482 InitCache(); |
| 483 |
| 484 std::string key("the first key"); |
| 485 disk_cache::Entry* entry; |
| 486 |
| 487 Time t1 = Time::Now(); |
| 488 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 489 EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed()); |
| 490 |
| 491 AddDelay(); |
| 492 Time t2 = Time::Now(); |
| 493 EXPECT_TRUE(t2 > t1); |
| 494 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false)); |
| 495 if (!traits()->WritesUpdateLastUsed()) |
| 496 EXPECT_TRUE(entry->GetLastModified() < t2); |
| 497 else |
| 498 EXPECT_TRUE(entry->GetLastModified() >= t2); |
| 499 EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed()); |
| 500 |
| 501 AddDelay(); |
| 502 Time t3 = Time::Now(); |
| 503 EXPECT_TRUE(t3 > t2); |
| 504 const int kSize = 200; |
| 505 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); |
| 506 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize)); |
| 507 if (!traits()->ReadsUpdateLastUsed()) { |
| 508 if (!traits()->WritesUpdateLastUsed()) { |
| 509 EXPECT_TRUE(entry->GetLastUsed() < t2); |
| 510 EXPECT_TRUE(entry->GetLastModified() < t2); |
| 511 } else { |
| 512 EXPECT_TRUE(entry->GetLastUsed() < t3); |
| 513 EXPECT_TRUE(entry->GetLastModified() < t3); |
| 514 } |
| 515 } else { |
| 516 EXPECT_TRUE(entry->GetLastUsed() >= t3); |
| 517 EXPECT_TRUE(entry->GetLastModified() < t3); |
| 518 } |
| 519 |
| 520 entry->Close(); |
| 521 } |
| 522 |
| 523 TEST_P(DiskCacheEntryTest, GrowData) { |
| 524 InitCache(); |
| 525 |
| 526 std::string key1("the first key"); |
| 527 disk_cache::Entry* entry; |
| 528 ASSERT_EQ(net::OK, CreateEntry(key1, &entry)); |
| 529 |
| 530 const int kSize = 20000; |
| 531 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); |
| 532 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize)); |
| 533 CacheTestFillBuffer(buffer1->data(), kSize, false); |
| 534 memset(buffer2->data(), 0, kSize); |
| 535 |
| 536 base::strlcpy(buffer1->data(), "the data", kSize); |
| 537 EXPECT_EQ(10, WriteData(entry, 0, 0, buffer1.get(), 10, false)); |
| 538 EXPECT_EQ(10, ReadData(entry, 0, 0, buffer2.get(), 10)); |
| 539 EXPECT_STREQ("the data", buffer2->data()); |
| 540 EXPECT_EQ(10, entry->GetDataSize(0)); |
| 541 |
| 542 EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false)); |
| 543 EXPECT_EQ(2000, entry->GetDataSize(0)); |
| 544 EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000)); |
| 545 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000)); |
| 546 |
| 547 EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), kSize, false)); |
| 548 EXPECT_EQ(20000, entry->GetDataSize(0)); |
| 549 EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), kSize)); |
| 550 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize)); |
| 551 entry->Close(); |
| 552 |
| 553 memset(buffer2->data(), 0, kSize); |
| 554 std::string key2("Second key"); |
| 555 ASSERT_EQ(net::OK, CreateEntry(key2, &entry)); |
| 556 EXPECT_EQ(10, WriteData(entry, 0, 0, buffer1.get(), 10, false)); |
| 557 EXPECT_EQ(10, entry->GetDataSize(0)); |
| 558 entry->Close(); |
| 559 |
| 560 // Go from an internal address to a bigger block size. |
| 561 ASSERT_EQ(net::OK, OpenEntry(key2, &entry)); |
| 562 EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false)); |
| 563 EXPECT_EQ(2000, entry->GetDataSize(0)); |
| 564 EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000)); |
| 565 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000)); |
| 566 entry->Close(); |
| 567 memset(buffer2->data(), 0, kSize); |
| 568 |
| 569 // Go from an internal address to an external one. |
| 570 ASSERT_EQ(net::OK, OpenEntry(key2, &entry)); |
| 571 EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), kSize, false)); |
| 572 EXPECT_EQ(20000, entry->GetDataSize(0)); |
| 573 EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), kSize)); |
| 574 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize)); |
| 575 entry->Close(); |
| 576 |
| 577 // Double check the size from disk. |
| 578 ASSERT_EQ(net::OK, OpenEntry(key2, &entry)); |
| 579 EXPECT_EQ(20000, entry->GetDataSize(0)); |
| 580 |
| 581 // Now extend the entry without actual data. |
| 582 EXPECT_EQ(0, WriteData(entry, 0, 45500, buffer1.get(), 0, false)); |
| 583 entry->Close(); |
| 584 |
| 585 // And check again from disk. |
| 586 ASSERT_EQ(net::OK, OpenEntry(key2, &entry)); |
| 587 EXPECT_EQ(45500, entry->GetDataSize(0)); |
| 588 entry->Close(); |
| 589 } |
| 590 |
| 591 TEST_P(DiskCacheEntryTest, TruncateData) { |
| 592 InitCache(); |
| 593 |
| 594 std::string key("the first key"); |
| 595 disk_cache::Entry* entry; |
| 596 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 597 |
| 598 const int kSize1 = 20000; |
| 599 const int kSize2 = 20000; |
| 600 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1)); |
| 601 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2)); |
| 602 |
| 603 CacheTestFillBuffer(buffer1->data(), kSize1, false); |
| 604 memset(buffer2->data(), 0, kSize2); |
| 605 |
| 606 // Simple truncation: |
| 607 EXPECT_EQ(200, WriteData(entry, 0, 0, buffer1.get(), 200, false)); |
| 608 EXPECT_EQ(200, entry->GetDataSize(0)); |
| 609 EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1.get(), 100, false)); |
| 610 EXPECT_EQ(200, entry->GetDataSize(0)); |
| 611 EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1.get(), 100, true)); |
| 612 EXPECT_EQ(100, entry->GetDataSize(0)); |
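| // A zero-length write with truncate == true cuts the stream at the given offset. |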
| 613 EXPECT_EQ(0, WriteData(entry, 0, 50, buffer1.get(), 0, true)); |
| 614 EXPECT_EQ(50, entry->GetDataSize(0)); |
| 615 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1.get(), 0, true)); |
| 616 EXPECT_EQ(0, entry->GetDataSize(0)); |
| 617 entry->Close(); |
| 618 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 619 |
| 620 // Go to an external file. |
| 621 EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), 20000, true)); |
| 622 EXPECT_EQ(20000, entry->GetDataSize(0)); |
| 623 EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), 20000)); |
| 624 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000)); |
| 625 memset(buffer2->data(), 0, kSize2); |
| 626 |
| 627 // External file truncation |
| 628 EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1.get(), 18000, false)); |
| 629 EXPECT_EQ(20000, entry->GetDataSize(0)); |
| 630 EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1.get(), 18000, true)); |
| 631 EXPECT_EQ(18000, entry->GetDataSize(0)); |
| 632 EXPECT_EQ(0, WriteData(entry, 0, 17500, buffer1.get(), 0, true)); |
| 633 EXPECT_EQ(17500, entry->GetDataSize(0)); |
| 634 |
| 635 // And back to an internal block. |
| 636 EXPECT_EQ(600, WriteData(entry, 0, 1000, buffer1.get(), 600, true)); |
| 637 EXPECT_EQ(1600, entry->GetDataSize(0)); |
| 638 EXPECT_EQ(600, ReadData(entry, 0, 1000, buffer2.get(), 600)); |
| 639 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600)); |
| 640 EXPECT_EQ(1000, ReadData(entry, 0, 0, buffer2.get(), 1000)); |
| 641 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000)) |
| 642 << "Preserves previous data"; |
| 643 |
| 644 // Go from external file to zero length. |
| 645 EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), 20000, true)); |
| 646 EXPECT_EQ(20000, entry->GetDataSize(0)); |
| 647 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1.get(), 0, true)); |
| 648 EXPECT_EQ(0, entry->GetDataSize(0)); |
| 649 |
| 650 entry->Close(); |
| 651 } |
| 652 |
| 653 TEST_P(DiskCacheEntryTest, ZeroLengthIO) { |
| 654 InitCache(); |
| 655 |
| 656 std::string key("the first key"); |
| 657 disk_cache::Entry* entry; |
| 658 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 659 |
| 660 EXPECT_EQ(0, ReadData(entry, 0, 0, NULL, 0)); |
| 661 EXPECT_EQ(0, WriteData(entry, 0, 0, NULL, 0, false)); |
| 662 |
| 663 // This write should extend the entry. |
| 664 EXPECT_EQ(0, WriteData(entry, 0, 1000, NULL, 0, false)); |
| 665 EXPECT_EQ(0, ReadData(entry, 0, 500, NULL, 0)); |
| 666 EXPECT_EQ(0, ReadData(entry, 0, 2000, NULL, 0)); |
| 667 EXPECT_EQ(1000, entry->GetDataSize(0)); |
| 668 |
| 669 EXPECT_EQ(0, WriteData(entry, 0, 100000, NULL, 0, true)); |
| 670 EXPECT_EQ(0, ReadData(entry, 0, 50000, NULL, 0)); |
| 671 EXPECT_EQ(100000, entry->GetDataSize(0)); |
| 672 |
| 673 // Let's verify the actual content. |
| 674 const int kSize = 20; |
| 675 const char zeros[kSize] = {}; |
| 676 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); |
| 677 |
| 678 CacheTestFillBuffer(buffer->data(), kSize, false); |
| 679 EXPECT_EQ(kSize, ReadData(entry, 0, 500, buffer.get(), kSize)); |
| 680 EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize)); |
| 681 |
| 682 CacheTestFillBuffer(buffer->data(), kSize, false); |
| 683 EXPECT_EQ(kSize, ReadData(entry, 0, 5000, buffer.get(), kSize)); |
| 684 EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize)); |
| 685 |
| 686 CacheTestFillBuffer(buffer->data(), kSize, false); |
| 687 EXPECT_EQ(kSize, ReadData(entry, 0, 50000, buffer.get(), kSize)); |
| 688 EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize)); |
| 689 |
| 690 entry->Close(); |
| 691 } |
| 692 |
| 693 // Checks that entries are zero length when created. |
| 694 TEST_P(DiskCacheEntryTest, SizeAtCreate) { |
| 695 InitCache(); |
| 696 |
| 697 const char key[] = "the first key"; |
| 698 disk_cache::Entry* entry; |
| 699 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 700 |
| 701 const int kNumStreams = 3; |
| 702 for (int i = 0; i < kNumStreams; ++i) |
| 703 EXPECT_EQ(0, entry->GetDataSize(i)); |
| 704 entry->Close(); |
| 705 } |
| 706 |
| 707 // Some extra tests to make sure that buffering works properly when changing |
| 708 // the entry size. |
| 709 TEST_P(DiskCacheEntryTest, SizeChanges) { |
| 710 InitCache(); |
| 711 |
| 712 std::string key("the first key"); |
| 713 disk_cache::Entry* entry; |
| 714 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 715 |
| 716 const int kSize = 200; |
| 717 const char zeros[kSize] = {}; |
| 718 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); |
| 719 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize)); |
| 720 CacheTestFillBuffer(buffer1->data(), kSize, true); |
| 721 CacheTestFillBuffer(buffer2->data(), kSize, true); |
| 722 |
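| // Write three disjoint chunks, leaving unwritten gaps between them. |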
| 723 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, true)); |
| 724 EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, true)); |
| 725 EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, true)); |
| 726 entry->Close(); |
| 727 |
| 728 // Extend the file and read between the old size and the new write. |
| 729 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 730 EXPECT_EQ(23000 + kSize, entry->GetDataSize(1)); |
| 731 EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, true)); |
| 732 EXPECT_EQ(25000 + kSize, entry->GetDataSize(1)); |
| 733 EXPECT_EQ(kSize, ReadData(entry, 1, 24000, buffer2.get(), kSize)); |
| 734 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize)); |
| 735 |
| 736 // Read at the end of the old file size. |
| 737 EXPECT_EQ(kSize, |
| 738 ReadData(entry, 1, 23000 + kSize - 35, buffer2.get(), kSize)); |
| 739 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35)); |
| 740 |
| 741 // Read slightly before the last write. |
| 742 CacheTestFillBuffer(buffer2->data(), kSize, true); |
| 743 EXPECT_EQ(kSize, ReadData(entry, 1, 24900, buffer2.get(), kSize)); |
| 744 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100)); |
| 745 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100)); |
| 746 |
| 747 // Extend the entry a little more. |
| 748 EXPECT_EQ(kSize, WriteData(entry, 1, 26000, buffer1.get(), kSize, true)); |
| 749 EXPECT_EQ(26000 + kSize, entry->GetDataSize(1)); |
| 750 CacheTestFillBuffer(buffer2->data(), kSize, true); |
| 751 EXPECT_EQ(kSize, ReadData(entry, 1, 25900, buffer2.get(), kSize)); |
| 752 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100)); |
| 753 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100)); |
| 754 |
| 755 // And now reduce the size. |
| 756 EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, true)); |
| 757 EXPECT_EQ(25000 + kSize, entry->GetDataSize(1)); |
| 758 EXPECT_EQ(28, ReadData(entry, 1, 25000 + kSize - 28, buffer2.get(), kSize)); |
| 759 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28)); |
| 760 |
| 761 // Reduce the size with a buffer that is not extending the size. |
| 762 EXPECT_EQ(kSize, WriteData(entry, 1, 24000, buffer1.get(), kSize, false)); |
| 763 EXPECT_EQ(25000 + kSize, entry->GetDataSize(1)); |
| 764 EXPECT_EQ(kSize, WriteData(entry, 1, 24500, buffer1.get(), kSize, true)); |
| 765 EXPECT_EQ(24500 + kSize, entry->GetDataSize(1)); |
| 766 EXPECT_EQ(kSize, ReadData(entry, 1, 23900, buffer2.get(), kSize)); |
| 767 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100)); |
| 768 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100)); |
| 769 |
| 770 // And now reduce the size below the old size. |
| 771 EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, true)); |
| 772 EXPECT_EQ(19000 + kSize, entry->GetDataSize(1)); |
| 773 EXPECT_EQ(kSize, ReadData(entry, 1, 18900, buffer2.get(), kSize)); |
| 774 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100)); |
| 775 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100)); |
| 776 |
| 777 // Verify that the actual file is truncated. |
| 778 entry->Close(); |
| 779 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 780 EXPECT_EQ(19000 + kSize, entry->GetDataSize(1)); |
| 781 |
| 782 // Extend the newly opened file with a zero length write, expect zero fill. |
| 783 EXPECT_EQ(0, WriteData(entry, 1, 20000 + kSize, buffer1.get(), 0, false)); |
| 784 EXPECT_EQ(kSize, ReadData(entry, 1, 19000 + kSize, buffer1.get(), kSize)); |
| 785 EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize)); |
| 786 |
| 787 entry->Close(); |
| 788 } |
| 789 |
| 790 // Write more than the total cache capacity but to a single entry. |max_size| |
| 791 // is the maximum size of the cache. |
| 792 void ReuseEntryTest(const int max_size, |
| 793 DiskCacheTest* test) { |
| 794 const int write_size = max_size / 10; |
| 795 |
| 796 std::string key1("the first key"); |
| 797 disk_cache::Entry* entry; |
| 798 ASSERT_EQ(net::OK, test->CreateEntry(key1, &entry)); |
| 799 |
| 800 entry->Close(); |
| 801 std::string key2("the second key"); |
| 802 ASSERT_EQ(net::OK, test->CreateEntry(key2, &entry)); |
| 803 |
| 804 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(write_size)); |
| 805 CacheTestFillBuffer(buffer->data(), write_size, false); |
| 806 |
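| // Each iteration truncates the stream and rewrites it, so the total amount of |
| // data written exceeds the cache capacity. |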
| 807 for (int j = 0; j < 15; j++) { |
| 808 EXPECT_EQ(0, test->WriteData(entry, 0, 0, buffer.get(), 0, true)); |
| 809 EXPECT_EQ(write_size, test->WriteData(entry, 0, 0, buffer.get(), write_size, false)); |
| 810 entry->Close(); |
| 811 ASSERT_EQ(net::OK, test->OpenEntry(key2, &entry)); |
| 812 } |
| 813 |
| 814 entry->Close(); |
| 815 ASSERT_EQ(net::OK, test->OpenEntry(key1, &entry)) << "have not evicted this entry"; |
| 816 entry->Close(); |
| 817 } |
| 818 |
| 819 // Run the reuse test with writes that are large enough to require external |
| 820 // files. |
| 821 TEST_P(DiskCacheEntryTest, ReuseExternalEntry) { |
| 822 const int max_size = 200 * 1024; |
| 823 SetMaxSize(max_size); |
| 824 InitCache(); |
| 825 ReuseEntryTest(max_size, this); |
| 826 } |
| 827 |
| 828 TEST_P(DiskCacheEntryTest, ReuseInternalEntry) { |
| 829 const int max_size = 100 * 1024; |
| 830 SetMaxSize(max_size); |
| 831 InitCache(); |
| 832 ReuseEntryTest(max_size, this); |
| 833 } |
| 834 |
| 835 // Reading somewhere that was not written should return zeros. |
| 836 TEST_P(DiskCacheEntryTest, InvalidData) { |
| 837 InitCache(); |
| 838 |
| 839 std::string key("the first key"); |
| 840 disk_cache::Entry* entry; |
| 841 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 842 |
| 843 const int kSize1 = 20000; |
| 844 const int kSize2 = 20000; |
| 845 const int kSize3 = 20000; |
| 846 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1)); |
| 847 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2)); |
| 848 scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3)); |
| 849 |
| 850 CacheTestFillBuffer(buffer1->data(), kSize1, false); |
| 851 memset(buffer2->data(), 0, kSize2); |
| 852 |
| 853 // Simple data grow: |
| 854 EXPECT_EQ(200, WriteData(entry, 0, 400, buffer1.get(), 200, false)); |
| 855 EXPECT_EQ(600, entry->GetDataSize(0)); |
| 856 EXPECT_EQ(100, ReadData(entry, 0, 300, buffer3.get(), 100)); |
| 857 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100)); |
| 858 entry->Close(); |
| 859 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 860 |
| 861 // The entry is now on disk. Load it and extend it. |
| 862 EXPECT_EQ(200, WriteData(entry, 0, 800, buffer1.get(), 200, false)); |
| 863 EXPECT_EQ(1000, entry->GetDataSize(0)); |
| 864 EXPECT_EQ(100, ReadData(entry, 0, 700, buffer3.get(), 100)); |
| 865 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100)); |
| 866 entry->Close(); |
| 867 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 868 |
| 869 // This time using truncate. |
| 870 EXPECT_EQ(200, WriteData(entry, 0, 1800, buffer1.get(), 200, true)); |
| 871 EXPECT_EQ(2000, entry->GetDataSize(0)); |
| 872 EXPECT_EQ(100, ReadData(entry, 0, 1500, buffer3.get(), 100)); |
| 873 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100)); |
| 874 |
| 875 // Go to an external file. |
| 876 EXPECT_EQ(200, WriteData(entry, 0, 19800, buffer1.get(), 200, false)); |
| 877 EXPECT_EQ(20000, entry->GetDataSize(0)); |
| 878 EXPECT_EQ(4000, ReadData(entry, 0, 14000, buffer3.get(), 4000)); |
| 879 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000)); |
| 880 |
| 881 // And back to an internal block. |
| 882 EXPECT_EQ(600, WriteData(entry, 0, 1000, buffer1.get(), 600, true)); |
| 883 EXPECT_EQ(1600, entry->GetDataSize(0)); |
| 884 EXPECT_EQ(600, ReadData(entry, 0, 1000, buffer3.get(), 600)); |
| 885 EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600)); |
| 886 |
| 887 // Extend it again. |
| 888 EXPECT_EQ(600, WriteData(entry, 0, 2000, buffer1.get(), 600, false)); |
| 889 EXPECT_EQ(2600, entry->GetDataSize(0)); |
| 890 EXPECT_EQ(200, ReadData(entry, 0, 1800, buffer3.get(), 200)); |
| 891 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200)); |
| 892 |
| 893 // And again (with truncation flag). |
| 894 EXPECT_EQ(600, WriteData(entry, 0, 3000, buffer1.get(), 600, true)); |
| 895 EXPECT_EQ(3600, entry->GetDataSize(0)); |
| 896 EXPECT_EQ(200, ReadData(entry, 0, 2800, buffer3.get(), 200)); |
| 897 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200)); |
| 898 |
| 899 entry->Close(); |
| 900 } |
| 901 |
| 902 // Tests that pending IO keeps the buffer alive even if the caller releases it. |
| 903 TEST_P(DiskCacheEntryTest, ReadWriteDestroyBuffer) { |
| 904 InitCache(); |
| 905 |
| 906 std::string key("the first key"); |
| 907 disk_cache::Entry* entry; |
| 908 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 909 |
| 910 const int kSize = 200; |
| 911 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); |
| 912 CacheTestFillBuffer(buffer->data(), kSize, false); |
| 913 |
| 914 net::TestCompletionCallback cb; |
| 915 int result = entry->WriteData(0, 0, buffer.get(), kSize, |
| 916 cb.callback(), false); |
| 917 // Release our reference to the buffer. |
| 918 buffer = NULL; |
| 919 EXPECT_EQ(kSize, cb.GetResult(result)); |
| 920 |
| 921 // And now test with a Read(). |
| 922 buffer = new net::IOBuffer(kSize); |
| 923 CacheTestFillBuffer(buffer->data(), kSize, false); |
| 924 |
| 925 result = entry->ReadData(0, 0, buffer.get(), kSize, cb.callback()); |
| 926 buffer = NULL; |
| 927 EXPECT_EQ(kSize, cb.GetResult(result)); |
| 928 |
| 929 entry->Close(); |
| 930 } |
| 931 |
| 932 TEST_P(DiskCacheEntryTest, DoomEntry) { |
| 933 InitCache(); |
| 934 |
| 935 std::string key("the first key"); |
| 936 disk_cache::Entry* entry; |
| 937 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 938 entry->Doom(); |
| 939 entry->Close(); |
| 940 |
| 941 const int kSize = 20000; |
| 942 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); |
| 943 CacheTestFillBuffer(buffer->data(), kSize, true); |
| 944 buffer->data()[19999] = '\0'; |
| 945 |
| 946 key = buffer->data(); |
| 947 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 948 EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false)); |
| 949 EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false)); |
| 950 entry->Doom(); |
| 951 entry->Close(); |
| 952 |
| 953 FlushQueueForTest(); |
| 954 EXPECT_EQ(0, cache()->GetEntryCount()); |
| 955 } |
| 956 |
| 957 // Tests dooming an entry that's linked to an open entry. |
| 958 TEST_P(DiskCacheEntryTest, DoomNextToOpenEntry) { |
| 959 InitCache(); |
| 960 |
| 961 disk_cache::Entry* entry1; |
| 962 disk_cache::Entry* entry2; |
| 963 ASSERT_EQ(net::OK, CreateEntry("fixed", &entry1)); |
| 964 entry1->Close(); |
| 965 ASSERT_EQ(net::OK, CreateEntry("foo", &entry1)); |
| 966 entry1->Close(); |
| 967 ASSERT_EQ(net::OK, CreateEntry("bar", &entry1)); |
| 968 entry1->Close(); |
| 969 |
| 970 ASSERT_EQ(net::OK, OpenEntry("foo", &entry1)); |
| 971 ASSERT_EQ(net::OK, OpenEntry("bar", &entry2)); |
| 972 entry2->Doom(); |
| 973 entry2->Close(); |
| 974 |
| 975 ASSERT_EQ(net::OK, OpenEntry("foo", &entry2)); |
| 976 entry2->Doom(); |
| 977 entry2->Close(); |
| 978 entry1->Close(); |
| 979 |
| 980 ASSERT_EQ(net::OK, OpenEntry("fixed", &entry1)); |
| 981 entry1->Close(); |
| 982 } |
| 983 |
| 984 // Verify that basic operations work as expected with doomed entries. |
| 985 TEST_P(DiskCacheEntryTest, DoomedEntry) { |
| 986 InitCache(); |
| 987 |
| 988 std::string key("the first key"); |
| 989 disk_cache::Entry* entry; |
| 990 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 991 entry->Doom(); |
| 992 |
| 993 FlushQueueForTest(); |
| 994 EXPECT_EQ(0, cache()->GetEntryCount()); |
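| // The doomed entry is no longer in the index, but it must remain fully usable |
| // through the open handle. |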
| 995 Time initial = Time::Now(); |
| 996 AddDelay(); |
| 997 |
| 998 const int kSize1 = 2000; |
| 999 const int kSize2 = 2000; |
| 1000 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1)); |
| 1001 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2)); |
| 1002 CacheTestFillBuffer(buffer1->data(), kSize1, false); |
| 1003 memset(buffer2->data(), 0, kSize2); |
| 1004 |
| 1005 EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false)); |
| 1006 EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000)); |
| 1007 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1)); |
| 1008 EXPECT_EQ(key, entry->GetKey()); |
| 1009 EXPECT_TRUE(initial < entry->GetLastModified()); |
| 1010 EXPECT_TRUE(initial < entry->GetLastUsed()); |
| 1011 |
| 1012 entry->Close(); |
| 1013 } |
| 1014 |
| 1015 // Test that child entries in a cache backend are not visible from |
| 1016 // enumerations. |
| 1017 TEST_P(DiskCacheEntryTest, EnumerationWithSparseEntries) { |
| 1018 TEST_DISABLED_IF(traits()->EntryCountIncludesSparseRanges()); |
| 1019 |
| 1020 InitCache(); |
| 1021 |
| 1022 const int kSize = 4096; |
| 1023 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize)); |
| 1024 CacheTestFillBuffer(buf->data(), kSize, false); |
| 1025 |
| 1026 std::string key("the first key"); |
| 1027 disk_cache::Entry* parent_entry; |
| 1028 ASSERT_EQ(net::OK, CreateEntry(key, &parent_entry)); |
| 1029 |
| 1030 // Writes to the parent entry. |
| 1031 EXPECT_EQ(kSize, |
| 1032 WriteSparseData(parent_entry, 0, buf.get(), kSize)); |
| 1033 |
| 1034 // This write creates a child entry and writes to it. |
| 1035 EXPECT_EQ(kSize, |
| 1036 WriteSparseData(parent_entry, 8192, buf.get(), kSize)); |
| 1037 |
| 1038 parent_entry->Close(); |
| 1039 |
| 1040 // Perform the enumerations. |
| 1041 void* iter = NULL; |
| 1042 disk_cache::Entry* entry = NULL; |
| 1043 int count = 0; |
| 1044 while (OpenNextEntry(&iter, &entry) == net::OK) { |
| 1045 ASSERT_TRUE(entry != NULL); |
| 1046 ++count; |
| 1047 entry->Close(); |
| 1048 } |
| 1049 EXPECT_EQ(1, count); |
| 1050 } |
| 1051 |
| 1052 // Writes |buf_1| at |offset| and reads it back into |buf_2|. |
| 1053 void VerifySparseIO(disk_cache::Entry* entry, int64 offset, |
| 1054 net::IOBuffer* buf_1, int size, net::IOBuffer* buf_2) { |
| 1055 net::TestCompletionCallback cb; |
| 1056 |
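| // The caller must pass a range that has not been written yet, so this first |
| // read returns 0 bytes. |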
| 1057 memset(buf_2->data(), 0, size); |
| 1058 int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback()); |
| 1059 EXPECT_EQ(0, cb.GetResult(ret)); |
| 1060 |
| 1061 ret = entry->WriteSparseData(offset, buf_1, size, cb.callback()); |
| 1062 EXPECT_EQ(size, cb.GetResult(ret)); |
| 1063 |
| 1064 ret = entry->ReadSparseData(offset, buf_2, size, cb.callback()); |
| 1065 EXPECT_EQ(size, cb.GetResult(ret)); |
| 1066 |
| 1067 EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size)); |
| 1068 } |
| 1069 |
| 1070 // Reads |size| bytes from |entry| at |offset| and verifies that they are the |
| 1071 // same as the content of the provided |buffer|. |
| 1072 void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer, |
| 1073 int size) { |
| 1074 net::TestCompletionCallback cb; |
| 1075 |
| 1076 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(size)); |
| 1077 memset(buf_1->data(), 0, size); |
| 1078 int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback()); |
| 1079 EXPECT_EQ(size, cb.GetResult(ret)); |
| 1080 EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size)); |
| 1081 } |
| 1082 |
| 1083 TEST_P(DiskCacheEntryTest, BasicSparseIO) { |
| 1084 InitCache(); |
| 1085 |
| 1086 std::string key("the first key"); |
| 1087 disk_cache::Entry* entry; |
| 1088 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1089 |
| 1090 const int kSize = 2048; |
| 1091 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize)); |
| 1092 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize)); |
| 1093 CacheTestFillBuffer(buf_1->data(), kSize, false); |
| 1094 |
| 1095 // Write at offset 0. |
| 1096 VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get()); |
| 1097 |
| 1098 // Write at offset 0x400000 (4 MB). |
| 1099 VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get()); |
| 1100 |
| 1101 // Write at offset 0x800000000 (32 GB). |
| 1102 VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get()); |
| 1103 |
| 1104 entry->Close(); |
| 1105 |
| 1106 // Check everything again. |
| 1107 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1108 VerifyContentSparseIO(entry, 0, buf_1->data(), kSize); |
| 1109 VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize); |
| 1110 VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize); |
| 1111 entry->Close(); |
| 1112 } |
| 1113 |
| 1114 TEST_P(DiskCacheEntryTest, HugeSparseIO) { |
| 1115 InitCache(); |
| 1116 std::string key("the first key"); |
| 1117 disk_cache::Entry* entry; |
| 1118 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1119 |
| 1120 // Write 1.2 MB so that we cover multiple child entries. |
| 1121 const int kSize = 1200 * 1024; |
| 1122 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize)); |
| 1123 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize)); |
| 1124 CacheTestFillBuffer(buf_1->data(), kSize, false); |
| 1125 |
| 1126 // Write at offset 0x20F0000 (33 MB - 64 KB). |
| 1127 VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get()); |
| 1128 entry->Close(); |
| 1129 |
| 1130 // Check it again. |
| 1131 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1132 VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize); |
| 1133 entry->Close(); |
| 1134 } |
| 1135 |
| 1136 TEST_P(DiskCacheEntryTest, GetAvailableRange) { |
| 1137 InitCache(); |
| 1138 |
| 1139 std::string key("the first key"); |
| 1140 disk_cache::Entry* entry; |
| 1141 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1142 |
| 1143 const int kSize = 16 * 1024; |
| 1144 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize)); |
| 1145 CacheTestFillBuffer(buf->data(), kSize, false); |
| 1146 |
| 1147 // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB). |
| 1148 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize)); |
| 1149 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize)); |
| 1150 |
| 1151 // We stop at the first empty block. |
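| // The first range ends at 0x20F4000 and the second starts at 0x20F4400, so |
| // there is a 1 KB hole in between. |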
| 1152 int64 start; |
| 1153 net::TestCompletionCallback cb; |
| 1154 int rv = entry->GetAvailableRange( |
| 1155 0x20F0000, kSize * 2, &start, cb.callback()); |
| 1156 EXPECT_EQ(kSize, cb.GetResult(rv)); |
| 1157 EXPECT_EQ(0x20F0000, start); |
| 1158 |
| 1159 start = 0; |
| 1160 rv = entry->GetAvailableRange(0, kSize, &start, cb.callback()); |
| 1161 EXPECT_EQ(0, cb.GetResult(rv)); |
| 1162 rv = entry->GetAvailableRange( |
| 1163 0x20F0000 - kSize, kSize, &start, cb.callback()); |
| 1164 EXPECT_EQ(0, cb.GetResult(rv)); |
| 1165 rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback()); |
| 1166 EXPECT_EQ(kSize, cb.GetResult(rv)); |
| 1167 EXPECT_EQ(0x20F0000, start); |
| 1168 |
| 1169 // We should be able to Read based on the results of GetAvailableRange. |
| 1170 start = -1; |
| 1171 rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback()); |
| 1172 EXPECT_EQ(0, cb.GetResult(rv)); |
| 1173 rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback()); |
| 1174 EXPECT_EQ(0, cb.GetResult(rv)); |
| 1175 |
| 1176 start = 0; |
| 1177 rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback()); |
| 1178 EXPECT_EQ(0x2000, cb.GetResult(rv)); |
| 1179 EXPECT_EQ(0x20F2000, start); |
| 1180 EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize)); |
| 1181 |
| 1182 // Make sure that we respect the |len| argument. |
| 1183 start = 0; |
| 1184 rv = entry->GetAvailableRange( |
| 1185 0x20F0001 - kSize, kSize, &start, cb.callback()); |
| 1186 EXPECT_EQ(1, cb.GetResult(rv)); |
| 1187 EXPECT_EQ(0x20F0000, start); |
| 1188 |
| 1189 entry->Close(); |
| 1190 } |
| 1191 |
| 1192 TEST_P(DiskCacheEntryTest, CouldBeSparse) { |
| 1193 TEST_DISABLED_IF(!traits()->ImplementsCouldBeSparse()); |
| 1194 |
| 1195 InitCache(); |
| 1196 |
| 1197 std::string key("the first key"); |
| 1198 disk_cache::Entry* entry; |
| 1199 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1200 |
| 1201 const int kSize = 16 * 1024; |
| 1202 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize)); |
| 1203 CacheTestFillBuffer(buf->data(), kSize, false); |
| 1204 |
| 1205 // Write at offset 0x20F0000 (33 MB - 64 KB). |
| 1206 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize)); |
| 1207 |
| 1208 EXPECT_TRUE(entry->CouldBeSparse()); |
| 1209 entry->Close(); |
| 1210 |
| 1211 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1212 EXPECT_TRUE(entry->CouldBeSparse()); |
| 1213 entry->Close(); |
| 1214 |
| 1215 // Now verify a regular entry. |
| 1216 key.assign("another key"); |
| 1217 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1218 EXPECT_FALSE(entry->CouldBeSparse()); |
| 1219 |
| 1220 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false)); |
| 1221 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false)); |
| 1222 EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false)); |
| 1223 |
| 1224 EXPECT_FALSE(entry->CouldBeSparse()); |
| 1225 entry->Close(); |
| 1226 |
| 1227 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1228 EXPECT_FALSE(entry->CouldBeSparse()); |
| 1229 entry->Close(); |
| 1230 } |
| 1231 |
| 1232 TEST_P(DiskCacheEntryTest, UnalignedGetAvailableRange) { |
| 1233 TEST_DISABLED_IF(traits()->SparseRoundingInterval() != 1); |
| 1234 InitCache(); |
| 1235 |
| 1236 const int kSize = 8192; |
| 1237 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize)); |
| 1238 CacheTestFillBuffer(buf->data(), kSize, false); |
| 1239 |
| 1240 disk_cache::Entry* entry; |
| 1241 std::string key("the first key"); |
| 1242 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1243 |
| 1244 // Writes in the middle of an entry. |
| 1245 EXPECT_EQ(1024, WriteSparseData(entry, 0, buf.get(), 1024)); |
| 1246 EXPECT_EQ(1024, WriteSparseData(entry, 5120, buf.get(), 1024)); |
| 1247 EXPECT_EQ(1024, WriteSparseData(entry, 10000, buf.get(), 1024)); |
| 1248 |
| 1249 // Writes in the middle of an entry and spans 2 child entries. |
| 1250 EXPECT_EQ(8192, WriteSparseData(entry, 50000, buf.get(), 8192)); |
| 1251 |
| 1252 int64 start; |
| 1253 net::TestCompletionCallback cb; |
| 1254 // Test that the scan stops at the hole in the second 1 KB block. |
| 1255 int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback()); |
| 1256 EXPECT_EQ(1024, cb.GetResult(rv)); |
| 1257 EXPECT_EQ(0, start); |
| 1258 |
| 1259 // Test that the number of bytes is reported correctly when we start in the |
| 1260 // middle of a filled region. |
| 1261 rv = entry->GetAvailableRange(512, 10000, &start, cb.callback()); |
| 1262 EXPECT_EQ(512, cb.GetResult(rv)); |
| 1263 EXPECT_EQ(512, start); |
| 1264 |
| 1265 // Test that we find the bytes written in the next block of the child. |
| 1266 rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback()); |
| 1267 EXPECT_EQ(1024, cb.GetResult(rv)); |
| 1268 EXPECT_EQ(5120, start); |
| 1269 |
| 1270 // Test that the desired length is respected. It starts within a filled |
| 1271 // region. |
| 1272 rv = entry->GetAvailableRange(5500, 512, &start, cb.callback()); |
| 1273 EXPECT_EQ(512, cb.GetResult(rv)); |
| 1274 EXPECT_EQ(5500, start); |
| 1275 |
| 1276 // Test that the desired length is respected. It starts before a filled |
| 1277 // region. |
| 1278 rv = entry->GetAvailableRange(5000, 620, &start, cb.callback()); |
| 1279 EXPECT_EQ(500, cb.GetResult(rv)); |
| 1280 EXPECT_EQ(5120, start); |
| 1281 |
| 1282 // Test that multiple blocks are scanned. |
| 1283 rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback()); |
| 1284 EXPECT_EQ(8192, cb.GetResult(rv)); |
| 1285 EXPECT_EQ(50000, start); |
| 1286 |
| 1287 entry->Close(); |
| 1288 } |
| 1289 |
| 1290 TEST_P(DiskCacheEntryTest, UnalignedSparseIO) { |
| 1291 TEST_DISABLED_IF(traits()->SparseRoundingInterval() != 1); |
| 1292 InitCache(); |
| 1293 |
| 1294 const int kSize = 8192; |
| 1295 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize)); |
| 1296 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize)); |
| 1297 CacheTestFillBuffer(buf_1->data(), kSize, false); |
| 1298 |
| 1299 std::string key("the first key"); |
| 1300 disk_cache::Entry* entry; |
| 1301 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1302 |
| 1303 // This loop writes 1 KB chunks back to back, starting at offsets 0 and 9000. |
| 1304 for (int i = 0; i < kSize; i += 1024) { |
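| // buf_3 is a non-owning view of buf_1, starting at offset i. |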
| 1305 scoped_refptr<net::WrappedIOBuffer> buf_3( |
| 1306 new net::WrappedIOBuffer(buf_1->data() + i)); |
| 1307 VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get()); |
| 1308 VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get()); |
| 1309 } |
| 1310 |
| 1311 // Make sure we have data written. |
| 1312 VerifyContentSparseIO(entry, 0, buf_1->data(), kSize); |
| 1313 VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize); |
| 1314 |
| 1315 // This tests a large write that spans 3 entries from a misaligned offset. |
| 1316 VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get()); |
| 1317 |
| 1318 entry->Close(); |
| 1319 } |
| 1320 |
| 1321 TEST_P(DiskCacheEntryTest, UpdateSparseEntry) { |
| 1322 InitCache(); |
| 1323 |
| 1324 std::string key("the first key"); |
| 1325 disk_cache::Entry* entry1; |
| 1326 ASSERT_EQ(net::OK, CreateEntry(key, &entry1)); |
| 1327 |
| 1328 const int kSize = 2048; |
| 1329 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize)); |
| 1330 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize)); |
| 1331 CacheTestFillBuffer(buf_1->data(), kSize, false); |
| 1332 |
| 1333 // Write at offset 0. |
| 1334 VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get()); |
| 1335 entry1->Close(); |
| 1336 |
| 1337 // Write at offset 2048. |
| 1338 ASSERT_EQ(net::OK, OpenEntry(key, &entry1)); |
| 1339 VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get()); |
| 1340 |
| 1341 disk_cache::Entry* entry2; |
| 1342 ASSERT_EQ(net::OK, CreateEntry("the second key", &entry2)); |
| 1343 |
| 1344 entry1->Close(); |
| 1345 entry2->Close(); |
| 1346 FlushQueueForTest(); |
| 1347 if (traits()->EntryCountIncludesSparseRanges()) |
| 1348 EXPECT_EQ(3, cache()->GetEntryCount()); |
| 1349 else |
| 1350 EXPECT_EQ(2, cache()->GetEntryCount()); |
| 1351 } |
| 1352 |
| 1353 TEST_P(DiskCacheEntryTest, DoomSparseEntry) { |
| 1354 UseCurrentThread(); |
| 1355 InitCache(); |
| 1356 |
| 1357 std::string key1("the first key"); |
| 1358 std::string key2("the second key"); |
| 1359 disk_cache::Entry *entry1, *entry2; |
| 1360 ASSERT_EQ(net::OK, CreateEntry(key1, &entry1)); |
| 1361 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2)); |
| 1362 |
| 1363 const int kSize = 4 * 1024; |
| 1364 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize)); |
| 1365 CacheTestFillBuffer(buf->data(), kSize, false); |
| 1366 |
| 1367 int64 offset = 1024; |
| 1368 // Write to a bunch of ranges. |
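| // Spread the writes over a wide range of offsets so that the sparse data is |
| // split across several child entries. |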
| 1369 for (int i = 0; i < 12; i++) { |
| 1370 EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize)); |
| 1371 // Keep the second map under the default size. |
| 1372 if (i < 9) |
| 1373 EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize)); |
| 1374 |
| 1375 offset *= 4; |
| 1376 } |
| 1377 |
| 1378 if (traits()->EntryCountIncludesSparseRanges()) |
| 1379 EXPECT_EQ(15, cache()->GetEntryCount()); |
| 1380 else |
| 1381 EXPECT_EQ(2, cache()->GetEntryCount()); |
| 1382 |
| 1383 // Doom the first entry while it's still open. |
| 1384 entry1->Doom(); |
| 1385 entry1->Close(); |
| 1386 entry2->Close(); |
| 1387 |
| 1388 // Doom the second entry after it's fully saved. |
| 1389 EXPECT_EQ(net::OK, DoomEntry(key2)); |
| 1390 |
| 1391 // Make sure we do all needed work. This may fail for entry2 if between Close |
| 1392 // and DoomEntry the system decides to remove all traces of the file from the |
| 1393 // system cache so we don't see that there is pending IO. |
| 1394 base::MessageLoop::current()->RunUntilIdle(); |
| 1395 |
| 1396 if (5 == cache()->GetEntryCount()) { |
| 1397 // Most likely we are waiting for the result of reading the sparse info |
| 1398 // (it's always async on Posix so it is easy to miss). Unfortunately we |
| 1399 // don't have any signal to watch for so we can only wait. |
| 1400 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500)); |
| 1401 base::MessageLoop::current()->RunUntilIdle(); |
| 1402 } |
| 1403 EXPECT_EQ(0, cache()->GetEntryCount()); |
| 1404 } |
| 1405 |
| 1406 // A CompletionCallback that lets test code run from within the callback. The |
| 1407 // way a CompletionCallback works means that all tasks (even new ones) are |
| 1408 // executed by the message loop before returning to the caller, so the only |
| 1409 // way to simulate a race is to execute what we want on the callback. |
| 1410 class ClosureTestCompletionCallback: public net::TestCompletionCallback { |
| 1411 public: |
| 1412 explicit ClosureTestCompletionCallback( |
| 1413 const base::Closure& closure) : closure_(closure) {} |
| 1414 |
| 1415 private: |
| 1416 virtual void SetResult(int result) OVERRIDE { |
| 1417 closure_.Run(); |
| 1418 TestCompletionCallback::SetResult(result); |
| 1419 } |
| 1420 |
| 1421 base::Closure closure_; |
| 1422 DISALLOW_COPY_AND_ASSIGN(ClosureTestCompletionCallback); |
| 1423 }; |
| 1424 |
| 1425 // Tests that we don't crash when the backend is deleted while we are working |
| 1426 // deleting the sub-entries of a sparse entry. |
| 1427 TEST_P(DiskCacheEntryTest, DoomSparseEntry2) { |
| 1428 TEST_DISABLED_IF(!traits()->DoomedSparseEntriesIOWorks()); |
| 1429 |
| 1430 UseCurrentThread(); |
| 1431 InitCache(); |
| 1432 |
| 1433 std::string key("the key"); |
| 1434 disk_cache::Entry* entry; |
| 1435 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1436 |
| 1437 const int kSize = 4 * 1024; |
| 1438 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize)); |
| 1439 CacheTestFillBuffer(buf->data(), kSize, false); |
| 1440 |
| 1441 int64 offset = 1024; |
| 1442 // Write to a bunch of ranges. |
| 1443 for (int i = 0; i < 12; i++) { |
| 1444 EXPECT_EQ(kSize, |
| 1445 entry->WriteSparseData( |
| 1446 offset, buf.get(), kSize, net::CompletionCallback())); |
| 1447 offset *= 4; |
| 1448 } |
| 1449 if (traits()->EntryCountIncludesSparseRanges()) |
| 1450 EXPECT_EQ(9, cache()->GetEntryCount()); |
| 1451 else |
| 1452 EXPECT_EQ(1, cache()->GetEntryCount()); |
| 1453 |
| 1454 entry->Close(); |
| 1455 ClosureTestCompletionCallback cb(base::Bind(&DiskCacheEntryTest_DoomSparseEntry2_Test::DeleteCache, base::Unretained(this))); |
| 1456 int rv = cache()->DoomEntry(key, cb.callback()); |
| 1457 EXPECT_EQ(net::OK, cb.GetResult(rv)); |
| 1458 } |
| 1459 |
| 1460 TEST_P(DiskCacheEntryTest, PartialSparseEntry) { |
| 1461 InitCache(); |
| 1462 |
| 1463 std::string key("the first key"); |
| 1464 disk_cache::Entry* entry; |
| 1465 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1466 |
| 1467 // We should be able to deal with IO that is not aligned to the block size |
| 1468 // of a sparse entry, at least to write a big range without leaving holes. |
| 1469 const int kSize = 4 * 1024; |
| 1470 const int kSmallSize = 128; |
| 1471 scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize)); |
| 1472 CacheTestFillBuffer(buf1->data(), kSize, false); |
| 1473 |
| 1474 // The first write is just to extend the entry. The third write only partially |
| 1475 // occupies a 1 KB block, so it may not be written internally, depending on |
| 1476 // the implementation. |
| 1477 EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize)); |
| 1478 EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize)); |
| 1479 EXPECT_EQ(kSmallSize, |
| 1480 WriteSparseData(entry, 1080321, buf1.get(), kSmallSize)); |
| 1481 entry->Close(); |
| 1482 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1483 |
| 1484 scoped_refptr<net::IOBuffer> buf2(new net::IOBuffer(kSize)); |
| 1485 memset(buf2->data(), 0, kSize); |
| 1486 EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize)); |
| 1487 |
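| // The write at offset 500 covered [500, 500 + kSize), so a read at kSize sees |
| // only the last 500 bytes of that buffer. |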
| 1488 EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize)); |
| 1489 EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500)); |
| 1490 EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize)); |
| 1491 |
| 1492 // This read should not change anything. |
| 1493 EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize)); |
| 1494 EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize)); |
| 1495 EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize)); |
| 1496 |
| 1497 int rv; |
| 1498 int64 start; |
| 1499 net::TestCompletionCallback cb; |
| 1500 if (traits()->SparseRoundingInterval() == 1024) { |
| 1501 rv = entry->GetAvailableRange(0, 2048, &start, cb.callback()); |
| 1502 EXPECT_EQ(1024, cb.GetResult(rv)); |
| 1503 EXPECT_EQ(1024, start); |
| 1504 } else { |
| 1505 rv = entry->GetAvailableRange(0, 600, &start, cb.callback()); |
| 1506 EXPECT_EQ(100, cb.GetResult(rv)); |
| 1507 EXPECT_EQ(500, start); |
| 1508 } |
| 1509 |
| 1510 rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback()); |
| 1511 EXPECT_EQ(500, cb.GetResult(rv)); |
| 1512 EXPECT_EQ(kSize, start); |
| 1513 rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback()); |
| 1514 EXPECT_EQ(3616, cb.GetResult(rv)); |
| 1515 EXPECT_EQ(20 * 1024, start); |
| 1516 |
| 1517 // 1. Query before a filled 1KB block. |
| 1518 // 2. Query within a filled 1KB block. |
| 1519 // 3. Query beyond a filled 1KB block. |
| 1520 |
| 1521 if (traits()->SparseRoundingInterval() == 1024) { |
| 1522 rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback()); |
| 1523 EXPECT_EQ(3016, cb.GetResult(rv)); |
| 1524 EXPECT_EQ(20480, start); |
| 1525 } else { |
| 1526 rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback()); |
| 1527 EXPECT_EQ(3496, cb.GetResult(rv)); |
| 1528 EXPECT_EQ(20000, start); |
| 1529 } |
| 1530 |
| 1531 rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback()); |
| 1532 EXPECT_EQ(1523, cb.GetResult(rv)); |
| 1533 EXPECT_EQ(3073, start); |
| 1534 rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback()); |
| 1535 EXPECT_EQ(0, cb.GetResult(rv)); |
| 1536 EXPECT_EQ(4600, start); |
| 1537 |
| 1538 // Now make another write and verify that there is no hole in between. |
| 1539 EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize)); |
| 1540 rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback()); |
| 1541 EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv)); |
| 1542 EXPECT_EQ(1024, start); |
| 1543 EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize)); |
| 1544 EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500)); |
| 1545 EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500)); |
| 1546 |
| 1547 entry->Close(); |
| 1548 } |
| 1549 |
| 1550 |
| 1551 } // namespace |
| 1552 |
| 1553 } // namespace disk_cache |