OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/disk_cache/blockfile/index_table_v3.h" | 5 #include "net/disk_cache/blockfile/index_table_v3.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <set> | 8 #include <set> |
9 #include <utility> | 9 #include <utility> |
10 | 10 |
(...skipping 55 matching lines...)
66 | 66 |
67 uint32 GetCellSmallTableLocation(const IndexCell& cell) { | 67 uint32 GetCellSmallTableLocation(const IndexCell& cell) { |
68 return cell.first_part & kCellSmallTableLocationMask; | 68 return cell.first_part & kCellSmallTableLocationMask; |
69 } | 69 } |
70 | 70 |
71 uint32 GetCellId(const IndexCell& cell) { | 71 uint32 GetCellId(const IndexCell& cell) { |
72 return (cell.first_part >> kCellIdOffset) & kCellIdMask; | 72 return (cell.first_part >> kCellIdOffset) & kCellIdMask; |
73 } | 73 } |
74 | 74 |
75 uint32 GetCellSmallTableId(const IndexCell& cell) { | 75 uint32 GetCellSmallTableId(const IndexCell& cell) { |
76 return (cell.first_part >> kCellSmallTableIdOffset) & | 76 return (cell.first_part >> kCellSmallTableIdOffset) & kCellSmallTableIdMask; |
77 kCellSmallTableIdMask; | |
78 } | 77 } |
79 | 78 |
80 int GetCellTimestamp(const IndexCell& cell) { | 79 int GetCellTimestamp(const IndexCell& cell) { |
81 return (cell.first_part >> kCellTimestampOffset) & kCellTimestampMask; | 80 return (cell.first_part >> kCellTimestampOffset) & kCellTimestampMask; |
82 } | 81 } |
83 | 82 |
84 int GetCellReuse(const IndexCell& cell) { | 83 int GetCellReuse(const IndexCell& cell) { |
85 return (cell.first_part >> kCellReuseOffset) & kCellReuseMask; | 84 return (cell.first_part >> kCellReuseOffset) & kCellReuseMask; |
86 } | 85 } |
87 | 86 |
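The accessors above all follow the same shift-and-mask pattern over cell.first_part. For readers unfamiliar with that idiom, here is a minimal standalone sketch of packed-field extraction and update; the layout, offsets, and masks below are illustrative only, not the real kCell* constants from index_table_v3.h.

#include <cassert>
#include <cstdint>

// Hypothetical packing of a 64-bit first_part: |...|id:18|location:14|.
// The real widths and positions are defined in index_table_v3.h.
constexpr uint64_t kLocationMask = (1ull << 14) - 1;
constexpr int kIdOffset = 14;
constexpr uint64_t kIdMask = (1ull << 18) - 1;

uint32_t GetLocation(uint64_t first_part) {
  return static_cast<uint32_t>(first_part & kLocationMask);
}

uint32_t GetId(uint64_t first_part) {
  return static_cast<uint32_t>((first_part >> kIdOffset) & kIdMask);
}

void SetId(uint64_t* first_part, uint32_t id) {
  *first_part &= ~(kIdMask << kIdOffset);                // clear the old field
  *first_part |= (uint64_t{id} & kIdMask) << kIdOffset;  // write the new value
}

int main() {
  uint64_t first_part = 0;
  SetId(&first_part, 0x2A);
  assert(GetId(first_part) == 0x2A);
  assert(GetLocation(first_part) == 0);  // neighboring field is untouched
  return 0;
}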
(...skipping 109 matching lines...)
197 } | 196 } |
198 | 197 |
199 bool IsNormalState(const IndexCell& cell) { | 198 bool IsNormalState(const IndexCell& cell) { |
200 disk_cache::EntryState state = | 199 disk_cache::EntryState state = |
201 static_cast<disk_cache::EntryState>(GetCellState(cell)); | 200 static_cast<disk_cache::EntryState>(GetCellState(cell)); |
202 DCHECK_NE(state, disk_cache::ENTRY_FREE); | 201 DCHECK_NE(state, disk_cache::ENTRY_FREE); |
203 return state != disk_cache::ENTRY_DELETED && | 202 return state != disk_cache::ENTRY_DELETED && |
204 state != disk_cache::ENTRY_FIXING; | 203 state != disk_cache::ENTRY_FIXING; |
205 } | 204 } |
206 | 205 |
207 inline int GetNextBucket(int min_bucket_num, int max_bucket_num, | 206 inline int GetNextBucket(int min_bucket_num, |
| 207 int max_bucket_num, |
208 disk_cache::IndexBucket* table, | 208 disk_cache::IndexBucket* table, |
209 disk_cache::IndexBucket** bucket) { | 209 disk_cache::IndexBucket** bucket) { |
210 if (!(*bucket)->next) | 210 if (!(*bucket)->next) |
211 return 0; | 211 return 0; |
212 | 212 |
213 int bucket_num = (*bucket)->next / disk_cache::kCellsPerBucket; | 213 int bucket_num = (*bucket)->next / disk_cache::kCellsPerBucket; |
214 if (bucket_num < min_bucket_num || bucket_num > max_bucket_num) { | 214 if (bucket_num < min_bucket_num || bucket_num > max_bucket_num) { |
215 // The next bucket must fall within the extra table. Note that this is not | 215 // The next bucket must fall within the extra table. Note that this is not |
216 // an uncommon path as growing the table may not clean up the link from the | 216 // an uncommon path as growing the table may not clean up the link from the |
217 // main table to the extra table, and that cleanup is performed here when | 217 // main table to the extra table, and that cleanup is performed here when |
(...skipping 20 matching lines...)
238 if (!iterator->forward && time >= limit_time) | 238 if (!iterator->forward && time >= limit_time) |
239 return; | 239 return; |
240 | 240 |
241 if ((iterator->forward && time < iterator->timestamp) || | 241 if ((iterator->forward && time < iterator->timestamp) || |
242 (!iterator->forward && time > iterator->timestamp)) { | 242 (!iterator->forward && time > iterator->timestamp)) { |
243 // This timestamp is better than the one we had. | 243 // This timestamp is better than the one we had. |
244 iterator->timestamp = time; | 244 iterator->timestamp = time; |
245 iterator->cells.clear(); | 245 iterator->cells.clear(); |
246 } | 246 } |
247 if (time == iterator->timestamp) { | 247 if (time == iterator->timestamp) { |
248 CellInfo cell_info = { cell.hash(), cell.GetAddress() }; | 248 CellInfo cell_info = {cell.hash(), cell.GetAddress()}; |
249 iterator->cells.push_back(cell_info); | 249 iterator->cells.push_back(cell_info); |
250 } | 250 } |
251 } | 251 } |
252 | 252 |
253 void InitIterator(IndexIterator* iterator) { | 253 void InitIterator(IndexIterator* iterator) { |
254 iterator->cells.clear(); | 254 iterator->cells.clear(); |
255 iterator->timestamp = iterator->forward ? kint32max : 0; | 255 iterator->timestamp = iterator->forward ? kint32max : 0; |
256 } | 256 } |
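InitIterator() and the timestamp bookkeeping just above it implement a "keep the best timestamp seen so far, plus every cell that ties it" scan: a forward iterator looks for the smallest time, a backward one for the largest, and the cell list is reset whenever a strictly better time shows up. A self-contained sketch of that selection logic, using plain std types in place of disk_cache::IndexIterator and CellInfo and omitting the limit_time guards:

#include <climits>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Iterator {
  bool forward = true;          // forward scans look for the oldest timestamp
  int timestamp = 0;            // best timestamp seen so far
  std::vector<uint32_t> cells;  // hashes of the cells sharing that timestamp
};

void Init(Iterator* it) {
  it->cells.clear();
  it->timestamp = it->forward ? INT_MAX : 0;
}

void Update(uint32_t hash, int time, Iterator* it) {
  bool better = it->forward ? time < it->timestamp : time > it->timestamp;
  if (better) {
    // This timestamp is better than the one we had.
    it->timestamp = time;
    it->cells.clear();
  }
  if (time == it->timestamp)
    it->cells.push_back(hash);
}

int main() {
  Iterator it;
  Init(&it);
  Update(0x11, 30, &it);
  Update(0x22, 10, &it);
  Update(0x33, 10, &it);
  std::printf("timestamp=%d cells=%zu\n", it.timestamp, it.cells.size());  // 10, 2
  return 0;
}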
257 | 257 |
258 } // namespace | 258 } // namespace |
259 | 259 |
260 namespace disk_cache { | 260 namespace disk_cache { |
261 | 261 |
262 EntryCell::~EntryCell() { | 262 EntryCell::~EntryCell() { |
263 } | 263 } |
264 | 264 |
265 bool EntryCell::IsValid() const { | 265 bool EntryCell::IsValid() const { |
266 return GetCellLocation(cell_) != 0; | 266 return GetCellLocation(cell_) != 0; |
267 } | 267 } |
268 | 268 |
269 // This code has to map the cell address (up to 22 bits) to a general cache Addr | 269 // This code has to map the cell address (up to 22 bits) to a general cache Addr |
270 // (up to 24 bits of general addressing). It also sets the implied file_number | 270 // (up to 24 bits of general addressing). It also sets the implied file_number |
271 // in the case of small tables. See also the comment by the definition of | 271 // in the case of small tables. See also the comment by the definition of |
272 // kEntriesFile. | 272 // kEntriesFile. |
273 Addr EntryCell::GetAddress() const { | 273 Addr EntryCell::GetAddress() const { |
274 uint32 location = GetLocation(); | 274 uint32 location = GetLocation(); |
275 int file_number = FileNumberFromLocation(location); | 275 int file_number = FileNumberFromLocation(location); |
276 if (small_table_) { | 276 if (small_table_) { |
277 DCHECK_EQ(0, file_number); | 277 DCHECK_EQ(0, file_number); |
278 file_number = (GetGroup() == ENTRY_EVICTED) ? kEvictedEntriesFile : | 278 file_number = |
279 kEntriesFile; | 279 (GetGroup() == ENTRY_EVICTED) ? kEvictedEntriesFile : kEntriesFile; |
280 } | 280 } |
281 DCHECK_NE(0, file_number); | 281 DCHECK_NE(0, file_number); |
282 FileType file_type = (GetGroup() == ENTRY_EVICTED) ? BLOCK_EVICTED : | 282 FileType file_type = |
283 BLOCK_ENTRIES; | 283 (GetGroup() == ENTRY_EVICTED) ? BLOCK_EVICTED : BLOCK_ENTRIES; |
284 return Addr(file_type, 1, file_number, StartBlockFromLocation(location)); | 284 return Addr(file_type, 1, file_number, StartBlockFromLocation(location)); |
285 } | 285 } |
286 | 286 |
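GetAddress() above reverses the packing done by the EntryCell constructor further down, which stores location = FileNumber << 16 | start_block for regular tables. A small round-trip sketch of that split; FileNumberFromLocation() and StartBlockFromLocation() are not shown in this diff, so the >> 16 / & 0xffff decomposition below is an assumption based on the constructor, not the actual helpers.

#include <cassert>
#include <cstdint>

// Assumed inverses of the constructor's packing; the real helpers live
// elsewhere in the blockfile code.
int FileNumberFromLocation(uint32_t location) { return location >> 16; }
int StartBlockFromLocation(uint32_t location) { return location & 0xffff; }

int main() {
  int file_number = 3;
  int start_block = 0x1234;
  uint32_t location = file_number << 16 | start_block;  // as in EntryCell's ctor
  assert(FileNumberFromLocation(location) == file_number);
  assert(StartBlockFromLocation(location) == start_block);
  return 0;
}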
287 EntryState EntryCell::GetState() const { | 287 EntryState EntryCell::GetState() const { |
288 return static_cast<EntryState>(GetCellState(cell_)); | 288 return static_cast<EntryState>(GetCellState(cell_)); |
289 } | 289 } |
290 | 290 |
291 EntryGroup EntryCell::GetGroup() const { | 291 EntryGroup EntryCell::GetGroup() const { |
292 return static_cast<EntryGroup>(GetCellGroup(cell_)); | 292 return static_cast<EntryGroup>(GetCellGroup(cell_)); |
293 } | 293 } |
(...skipping 42 matching lines...)
336 } | 336 } |
337 | 337 |
338 EntryCell::EntryCell() : cell_num_(0), hash_(0), small_table_(false) { | 338 EntryCell::EntryCell() : cell_num_(0), hash_(0), small_table_(false) { |
339 cell_.Clear(); | 339 cell_.Clear(); |
340 } | 340 } |
341 | 341 |
342 EntryCell::EntryCell(int32 cell_num, | 342 EntryCell::EntryCell(int32 cell_num, |
343 uint32 hash, | 343 uint32 hash, |
344 Addr address, | 344 Addr address, |
345 bool small_table) | 345 bool small_table) |
346 : cell_num_(cell_num), | 346 : cell_num_(cell_num), hash_(hash), small_table_(small_table) { |
347 hash_(hash), | |
348 small_table_(small_table) { | |
349 DCHECK(IsValidAddress(address) || !address.value()); | 347 DCHECK(IsValidAddress(address) || !address.value()); |
350 | 348 |
351 cell_.Clear(); | 349 cell_.Clear(); |
352 SetCellState(&cell_, ENTRY_NEW); | 350 SetCellState(&cell_, ENTRY_NEW); |
353 SetCellGroup(&cell_, ENTRY_NO_USE); | 351 SetCellGroup(&cell_, ENTRY_NO_USE); |
354 if (small_table) { | 352 if (small_table) { |
355 DCHECK(address.FileNumber() == kEntriesFile || | 353 DCHECK(address.FileNumber() == kEntriesFile || |
356 address.FileNumber() == kEvictedEntriesFile); | 354 address.FileNumber() == kEvictedEntriesFile); |
357 SetCellSmallTableLocation(&cell_, address.start_block()); | 355 SetCellSmallTableLocation(&cell_, address.start_block()); |
358 SetCellSmallTableId(&cell_, hash >> kSmallTableHashShift); | 356 SetCellSmallTableId(&cell_, hash >> kSmallTableHashShift); |
359 } else { | 357 } else { |
360 uint32 location = address.FileNumber() << 16 | address.start_block(); | 358 uint32 location = address.FileNumber() << 16 | address.start_block(); |
361 SetCellLocation(&cell_, location); | 359 SetCellLocation(&cell_, location); |
362 SetCellId(&cell_, hash >> kHashShift); | 360 SetCellId(&cell_, hash >> kHashShift); |
363 } | 361 } |
364 } | 362 } |
365 | 363 |
366 EntryCell::EntryCell(int32 cell_num, | 364 EntryCell::EntryCell(int32 cell_num, |
367 uint32 hash, | 365 uint32 hash, |
368 const IndexCell& cell, | 366 const IndexCell& cell, |
369 bool small_table) | 367 bool small_table) |
370 : cell_num_(cell_num), | 368 : cell_num_(cell_num), hash_(hash), cell_(cell), small_table_(small_table) { |
371 hash_(hash), | |
372 cell_(cell), | |
373 small_table_(small_table) { | |
374 } | 369 } |
375 | 370 |
376 void EntryCell::FixSum() { | 371 void EntryCell::FixSum() { |
377 SetCellSum(&cell_, CalculateCellSum(cell_)); | 372 SetCellSum(&cell_, CalculateCellSum(cell_)); |
378 } | 373 } |
379 | 374 |
380 uint32 EntryCell::GetLocation() const { | 375 uint32 EntryCell::GetLocation() const { |
381 if (small_table_) | 376 if (small_table_) |
382 return GetCellSmallTableLocation(cell_); | 377 return GetCellSmallTableLocation(cell_); |
383 | 378 |
(...skipping 81 matching lines...)
465 DCHECK_EQ(base::bits::Log2Floor(header_->table_len), | 460 DCHECK_EQ(base::bits::Log2Floor(header_->table_len), |
466 base::bits::Log2Floor(backup_header_->table_len) + 1); | 461 base::bits::Log2Floor(backup_header_->table_len) + 1); |
467 int extra_size = (header()->max_bucket - mask_) * kCellsPerBucket; | 462 int extra_size = (header()->max_bucket - mask_) * kCellsPerBucket; |
468 DCHECK_GE(extra_size, 0); | 463 DCHECK_GE(extra_size, 0); |
469 | 464 |
470 // Doubling the size implies deleting the extra table and moving as many | 465 // Doubling the size implies deleting the extra table and moving as many |
471 // cells as we can to the main table, so we first copy the old one. This | 466 // cells as we can to the main table, so we first copy the old one. This |
472 // is not required when just growing the extra table because we don't | 467 // is not required when just growing the extra table because we don't |
473 // move any cell in that case. | 468 // move any cell in that case. |
474 old_extra_table.reset(new IndexBucket[extra_size]); | 469 old_extra_table.reset(new IndexBucket[extra_size]); |
475 memcpy(old_extra_table.get(), extra_table_, | 470 memcpy(old_extra_table.get(), |
| 471 extra_table_, |
476 extra_size * sizeof(IndexBucket)); | 472 extra_size * sizeof(IndexBucket)); |
477 memset(params->extra_table, 0, extra_size * sizeof(IndexBucket)); | 473 memset(params->extra_table, 0, extra_size * sizeof(IndexBucket)); |
478 } | 474 } |
479 main_table_ = params->main_table; | 475 main_table_ = params->main_table; |
480 } | 476 } |
481 DCHECK(main_table_); | 477 DCHECK(main_table_); |
482 extra_table_ = params->extra_table; | 478 extra_table_ = params->extra_table; |
483 | 479 |
484 // extra_bits_ is really measured against table-size specific values. | 480 // extra_bits_ is really measured against table-size specific values. |
485 const int kMaxAbsoluteExtraBits = 12; // From smallest to largest table. | 481 const int kMaxAbsoluteExtraBits = 12; // From smallest to largest table. |
486 const int kMaxExtraBitsSmallTable = 6; // From smallest to 64K table. | 482 const int kMaxExtraBitsSmallTable = 6; // From smallest to 64K table. |
487 | 483 |
488 extra_bits_ = base::bits::Log2Floor(header_->table_len) - | 484 extra_bits_ = base::bits::Log2Floor(header_->table_len) - |
489 base::bits::Log2Floor(kBaseTableLen); | 485 base::bits::Log2Floor(kBaseTableLen); |
490 DCHECK_GE(extra_bits_, 0); | 486 DCHECK_GE(extra_bits_, 0); |
491 DCHECK_LT(extra_bits_, kMaxAbsoluteExtraBits); | 487 DCHECK_LT(extra_bits_, kMaxAbsoluteExtraBits); |
492 | 488 |
493 // Note that following the previous code the constants could be derived as | 489 // Note that following the previous code the constants could be derived as |
494 // kMaxAbsoluteExtraBits = base::bits::Log2Floor(max table len) - | 490 // kMaxAbsoluteExtraBits = base::bits::Log2Floor(max table len) - |
495 // base::bits::Log2Floor(kBaseTableLen); | 491 // base::bits::Log2Floor(kBaseTableLen); |
496 // = 22 - base::bits::Log2Floor(1024) = 22 - 10; | 492 // = 22 - base::bits::Log2Floor(1024) = 22 - 10; |
497 // kMaxExtraBitsSmallTable = base::bits::Log2Floor(max 16 bit table) - 10. | 493 // kMaxExtraBitsSmallTable = base::bits::Log2Floor(max 16 bit table) - 10. |
498 | 494 |
499 mask_ = ((kBaseTableLen / kCellsPerBucket) << extra_bits_) - 1; | 495 mask_ = ((kBaseTableLen / kCellsPerBucket) << extra_bits_) - 1; |
500 small_table_ = extra_bits_ < kMaxExtraBitsSmallTable; | 496 small_table_ = extra_bits_ < kMaxExtraBitsSmallTable; |
501 if (!small_table_) | 497 if (!small_table_) |
502 extra_bits_ -= kMaxExtraBitsSmallTable; | 498 extra_bits_ -= kMaxExtraBitsSmallTable; |
503 | 499 |
504 // table_len keeps the max number of cells stored by the index. We need a | 500 // table_len keeps the max number of cells stored by the index. We need a |
505 // bitmap with 1 bit per cell, and that bitmap has num_words 32-bit words. | 501 // bitmap with 1 bit per cell, and that bitmap has num_words 32-bit words. |
506 int num_words = (header_->table_len + 31) / 32; | 502 int num_words = (header_->table_len + 31) / 32; |
507 | 503 |
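The sizing arithmetic above is easier to follow with concrete numbers. A worked example, assuming kBaseTableLen = 1024 (consistent with the "22 - Log2Floor(1024)" comment) and kCellsPerBucket = 4, which is not shown in this diff; a table_len of 64K cells sits exactly at the boundary where the small-table format is abandoned.

#include <cstdint>
#include <cstdio>

// Assumed constants; the real definitions live in the disk_cache headers.
constexpr int kBaseTableLen = 1024;
constexpr int kCellsPerBucket = 4;
constexpr int kMaxExtraBitsSmallTable = 6;

int Log2Floor(uint32_t n) {
  int log = -1;
  for (; n; n >>= 1)
    ++log;
  return log;
}

int main() {
  int table_len = 64 * 1024;
  int extra_bits = Log2Floor(table_len) - Log2Floor(kBaseTableLen);  // 16 - 10 = 6
  uint32_t mask = ((kBaseTableLen / kCellsPerBucket) << extra_bits) - 1;  // 16383
  bool small_table = extra_bits < kMaxExtraBitsSmallTable;  // false: no longer small
  if (!small_table)
    extra_bits -= kMaxExtraBitsSmallTable;  // 0 extra bits left over
  int num_words = (table_len + 31) / 32;    // one bit per cell -> 2048 words
  std::printf("mask=%u small=%d extra_bits=%d num_words=%d\n", mask, small_table,
              extra_bits, num_words);
  return 0;
}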
508 if (old_extra_table) { | 504 if (old_extra_table) { |
509 // All the cells from the extra table are moving to the new tables, so before | 505 // All the cells from the extra table are moving to the new tables, so before |
510 // creating the bitmaps, clear the part of the bitmap referring to the extra | 506 // creating the bitmaps, clear the part of the bitmap referring to the extra |
511 // table. | 507 // table. |
512 int old_main_table_bit_words = ((mask_ >> 1) + 1) * kCellsPerBucket / 32; | 508 int old_main_table_bit_words = ((mask_ >> 1) + 1) * kCellsPerBucket / 32; |
513 DCHECK_GT(num_words, old_main_table_bit_words); | 509 DCHECK_GT(num_words, old_main_table_bit_words); |
514 memset(params->index_bitmap->bitmap + old_main_table_bit_words, 0, | 510 memset(params->index_bitmap->bitmap + old_main_table_bit_words, |
| 511 0, |
515 (num_words - old_main_table_bit_words) * sizeof(int32)); | 512 (num_words - old_main_table_bit_words) * sizeof(int32)); |
516 | 513 |
517 DCHECK(growing); | 514 DCHECK(growing); |
518 int old_num_words = (backup_header_.get()->table_len + 31) / 32; | 515 int old_num_words = (backup_header_.get()->table_len + 31) / 32; |
519 DCHECK_GT(old_num_words, old_main_table_bit_words); | 516 DCHECK_GT(old_num_words, old_main_table_bit_words); |
520 memset(backup_bitmap_storage_.get() + old_main_table_bit_words, 0, | 517 memset(backup_bitmap_storage_.get() + old_main_table_bit_words, |
| 518 0, |
521 (old_num_words - old_main_table_bit_words) * sizeof(int32)); | 519 (old_num_words - old_main_table_bit_words) * sizeof(int32)); |
522 } | 520 } |
523 bitmap_.reset(new Bitmap(params->index_bitmap->bitmap, header_->table_len, | 521 bitmap_.reset( |
524 num_words)); | 522 new Bitmap(params->index_bitmap->bitmap, header_->table_len, num_words)); |
525 | 523 |
526 if (growing) { | 524 if (growing) { |
527 int old_num_words = (backup_header_.get()->table_len + 31) / 32; | 525 int old_num_words = (backup_header_.get()->table_len + 31) / 32; |
528 DCHECK_GE(num_words, old_num_words); | 526 DCHECK_GE(num_words, old_num_words); |
529 scoped_ptr<uint32[]> storage(new uint32[num_words]); | 527 scoped_ptr<uint32[]> storage(new uint32[num_words]); |
530 memcpy(storage.get(), backup_bitmap_storage_.get(), | 528 memcpy(storage.get(), |
| 529 backup_bitmap_storage_.get(), |
531 old_num_words * sizeof(int32)); | 530 old_num_words * sizeof(int32)); |
532 memset(storage.get() + old_num_words, 0, | 531 memset(storage.get() + old_num_words, |
| 532 0, |
533 (num_words - old_num_words) * sizeof(int32)); | 533 (num_words - old_num_words) * sizeof(int32)); |
534 | 534 |
535 backup_bitmap_storage_.swap(storage); | 535 backup_bitmap_storage_.swap(storage); |
536 backup_header_->table_len = header_->table_len; | 536 backup_header_->table_len = header_->table_len; |
537 } else { | 537 } else { |
538 backup_bitmap_storage_.reset(params->backup_bitmap.release()); | 538 backup_bitmap_storage_.reset(params->backup_bitmap.release()); |
539 backup_header_.reset(params->backup_header.release()); | 539 backup_header_.reset(params->backup_header.release()); |
540 } | 540 } |
541 | 541 |
542 num_words = (backup_header_->table_len + 31) / 32; | 542 num_words = (backup_header_->table_len + 31) / 32; |
543 backup_bitmap_.reset(new Bitmap(backup_bitmap_storage_.get(), | 543 backup_bitmap_.reset(new Bitmap( |
544 backup_header_->table_len, num_words)); | 544 backup_bitmap_storage_.get(), backup_header_->table_len, num_words)); |
545 if (old_extra_table) | 545 if (old_extra_table) |
546 MoveCells(old_extra_table.get()); | 546 MoveCells(old_extra_table.get()); |
547 | 547 |
548 if (small_table_) | 548 if (small_table_) |
549 DCHECK(header_->flags & SMALL_CACHE); | 549 DCHECK(header_->flags & SMALL_CACHE); |
550 | 550 |
551 // All tables and backups are needed for operation. | 551 // All tables and backups are needed for operation. |
552 DCHECK(main_table_); | 552 DCHECK(main_table_); |
553 DCHECK(extra_table_); | 553 DCHECK(extra_table_); |
554 DCHECK(bitmap_.get()); | 554 DCHECK(bitmap_.get()); |
(...skipping 43 matching lines...)
598 } else if (IsHashMatch(*current_cell, hash)) { | 598 } else if (IsHashMatch(*current_cell, hash)) { |
599 EntryCell entry_cell(cell_num, hash, *current_cell, small_table_); | 599 EntryCell entry_cell(cell_num, hash, *current_cell, small_table_); |
600 CheckState(entry_cell); | 600 CheckState(entry_cell); |
601 if (entry_cell.GetState() != ENTRY_DELETED) { | 601 if (entry_cell.GetState() != ENTRY_DELETED) { |
602 entries.cells.push_back(entry_cell); | 602 entries.cells.push_back(entry_cell); |
603 if (entry_cell.GetGroup() == ENTRY_EVICTED) | 603 if (entry_cell.GetGroup() == ENTRY_EVICTED) |
604 entries.evicted_count++; | 604 entries.evicted_count++; |
605 } | 605 } |
606 } | 606 } |
607 } | 607 } |
608 bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, | 608 bucket_num = |
609 &bucket); | 609 GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, &bucket); |
610 } while (bucket_num); | 610 } while (bucket_num); |
611 return entries; | 611 return entries; |
612 } | 612 } |
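The lookup above, FindEntryCellImpl(), WalkTables(), and CheckBucketList() all walk a bucket chain the same way: process the cells of the current bucket, then follow bucket->next into the extra table until it is zero. A stripped-down sketch of that pattern, with a hypothetical Bucket type standing in for disk_cache::IndexBucket and a kCellsPerBucket of 4 assumed. Note that the real GetNextBucket() also range-checks the link and repairs stale ones (its body is truncated in this diff), which the sketch omits.

#include <cstdio>
#include <vector>

constexpr int kCellsPerBucket = 4;  // assumed; the real value is in the header

struct Bucket {
  int cells[kCellsPerBucket] = {};
  int next = 0;  // cell number of the first cell of the next bucket; 0 ends the chain
};

// Follows bucket->next into |extra_table|, which starts at |min_bucket_num|,
// mirroring how extra_table_ follows the main table in IndexTable.
int GetNext(int min_bucket_num, std::vector<Bucket>& extra_table, Bucket** bucket) {
  if (!(*bucket)->next)
    return 0;
  int bucket_num = (*bucket)->next / kCellsPerBucket;
  *bucket = &extra_table[bucket_num - min_bucket_num];
  return bucket_num;
}

int main() {
  std::vector<Bucket> main_table(4), extra_table(2);
  // Chain main bucket 1 -> extra bucket 4 -> extra bucket 5.
  main_table[1].next = 4 * kCellsPerBucket;
  extra_table[0].next = 5 * kCellsPerBucket;

  Bucket* bucket = &main_table[1];
  int bucket_num = 1;
  do {
    std::printf("visiting bucket %d\n", bucket_num);  // prints 1, 4, 5
    bucket_num = GetNext(/*min_bucket_num=*/4, extra_table, &bucket);
  } while (bucket_num);
  return 0;
}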
613 | 613 |
614 EntryCell IndexTable::CreateEntryCell(uint32 hash, Addr address) { | 614 EntryCell IndexTable::CreateEntryCell(uint32 hash, Addr address) { |
615 DCHECK(IsValidAddress(address)); | 615 DCHECK(IsValidAddress(address)); |
616 DCHECK(address.FileNumber() || address.start_block()); | 616 DCHECK(address.FileNumber() || address.start_block()); |
617 | 617 |
618 int bucket_num = static_cast<int>(hash & mask_); | 618 int bucket_num = static_cast<int>(hash & mask_); |
619 int cell_num = 0; | 619 int cell_num = 0; |
620 IndexBucket* bucket = &main_table_[bucket_num]; | 620 IndexBucket* bucket = &main_table_[bucket_num]; |
621 IndexCell* current_cell = NULL; | 621 IndexCell* current_cell = NULL; |
622 bool found = false; | 622 bool found = false; |
623 do { | 623 do { |
624 for (int i = 0; i < kCellsPerBucket && !found; i++) { | 624 for (int i = 0; i < kCellsPerBucket && !found; i++) { |
625 current_cell = &bucket->cells[i]; | 625 current_cell = &bucket->cells[i]; |
626 if (!GetLocation(*current_cell)) { | 626 if (!GetLocation(*current_cell)) { |
627 cell_num = bucket_num * kCellsPerBucket + i; | 627 cell_num = bucket_num * kCellsPerBucket + i; |
628 found = true; | 628 found = true; |
629 } | 629 } |
630 } | 630 } |
631 if (found) | 631 if (found) |
632 break; | 632 break; |
633 bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, | 633 bucket_num = |
634 &bucket); | 634 GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, &bucket); |
635 } while (bucket_num); | 635 } while (bucket_num); |
636 | 636 |
637 if (!found) { | 637 if (!found) { |
638 bucket_num = NewExtraBucket(); | 638 bucket_num = NewExtraBucket(); |
639 if (bucket_num) { | 639 if (bucket_num) { |
640 cell_num = bucket_num * kCellsPerBucket; | 640 cell_num = bucket_num * kCellsPerBucket; |
641 bucket->next = cell_num; | 641 bucket->next = cell_num; |
642 bucket = &extra_table_[bucket_num - (mask_ + 1)]; | 642 bucket = &extra_table_[bucket_num - (mask_ + 1)]; |
643 bucket->hash = hash & mask_; | 643 bucket->hash = hash & mask_; |
644 found = true; | 644 found = true; |
(...skipping 132 matching lines...)
777 } | 777 } |
778 | 778 |
779 void IndexTable::OnBackupTimer() { | 779 void IndexTable::OnBackupTimer() { |
780 if (!modified_) | 780 if (!modified_) |
781 return; | 781 return; |
782 | 782 |
783 int num_words = (header_->table_len + 31) / 32; | 783 int num_words = (header_->table_len + 31) / 32; |
784 int num_bytes = num_words * 4 + static_cast<int>(sizeof(*header_)); | 784 int num_bytes = num_words * 4 + static_cast<int>(sizeof(*header_)); |
785 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(num_bytes)); | 785 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(num_bytes)); |
786 memcpy(buffer->data(), header_, sizeof(*header_)); | 786 memcpy(buffer->data(), header_, sizeof(*header_)); |
787 memcpy(buffer->data() + sizeof(*header_), backup_bitmap_storage_.get(), | 787 memcpy(buffer->data() + sizeof(*header_), |
| 788 backup_bitmap_storage_.get(), |
788 num_words * 4); | 789 num_words * 4); |
789 backend_->SaveIndex(buffer, num_bytes); | 790 backend_->SaveIndex(buffer, num_bytes); |
790 modified_ = false; | 791 modified_ = false; |
791 } | 792 } |
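OnBackupTimer() serializes the backup as the header immediately followed by the backup bitmap words. A sketch of that buffer layout, using std::vector<char> in place of net::IOBuffer and a dummy one-field header standing in for the real index header:

#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for the real index header; only the field used here is modeled.
struct Header {
  int32_t table_len = 0;
};

std::vector<char> BuildBackupBuffer(const Header& header,
                                    const uint32_t* bitmap_storage) {
  int num_words = (header.table_len + 31) / 32;  // one bitmap bit per cell
  int num_bytes = num_words * 4 + static_cast<int>(sizeof(header));
  std::vector<char> buffer(num_bytes);
  std::memcpy(buffer.data(), &header, sizeof(header));           // header first
  std::memcpy(buffer.data() + sizeof(header), bitmap_storage,
              num_words * 4);                                    // bitmap second
  return buffer;
}

int main() {
  Header header;
  header.table_len = 1024;  // 1024 cells -> 32 bitmap words
  std::vector<uint32_t> bitmap(32, 0xffffffffu);
  std::vector<char> buffer = BuildBackupBuffer(header, bitmap.data());
  return buffer.size() == sizeof(Header) + 32 * 4 ? 0 : 1;
}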
792 | 793 |
793 // ----------------------------------------------------------------------- | 794 // ----------------------------------------------------------------------- |
794 | 795 |
795 EntryCell IndexTable::FindEntryCellImpl(uint32 hash, Addr address, | 796 EntryCell IndexTable::FindEntryCellImpl(uint32 hash, |
| 797 Addr address, |
796 bool allow_deleted) { | 798 bool allow_deleted) { |
797 int bucket_num = static_cast<int>(hash & mask_); | 799 int bucket_num = static_cast<int>(hash & mask_); |
798 IndexBucket* bucket = &main_table_[bucket_num]; | 800 IndexBucket* bucket = &main_table_[bucket_num]; |
799 do { | 801 do { |
800 for (int i = 0; i < kCellsPerBucket; i++) { | 802 for (int i = 0; i < kCellsPerBucket; i++) { |
801 IndexCell* current_cell = &bucket->cells[i]; | 803 IndexCell* current_cell = &bucket->cells[i]; |
802 if (!GetLocation(*current_cell)) | 804 if (!GetLocation(*current_cell)) |
803 continue; | 805 continue; |
804 DCHECK(SanityCheck(*current_cell)); | 806 DCHECK(SanityCheck(*current_cell)); |
805 if (IsHashMatch(*current_cell, hash)) { | 807 if (IsHashMatch(*current_cell, hash)) { |
806 // We have a match. | 808 // We have a match. |
807 int cell_num = bucket_num * kCellsPerBucket + i; | 809 int cell_num = bucket_num * kCellsPerBucket + i; |
808 EntryCell entry_cell(cell_num, hash, *current_cell, small_table_); | 810 EntryCell entry_cell(cell_num, hash, *current_cell, small_table_); |
809 if (entry_cell.GetAddress() != address) | 811 if (entry_cell.GetAddress() != address) |
810 continue; | 812 continue; |
811 | 813 |
812 if (!allow_deleted && entry_cell.GetState() == ENTRY_DELETED) | 814 if (!allow_deleted && entry_cell.GetState() == ENTRY_DELETED) |
813 continue; | 815 continue; |
814 | 816 |
815 return entry_cell; | 817 return entry_cell; |
816 } | 818 } |
817 } | 819 } |
818 bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, | 820 bucket_num = |
819 &bucket); | 821 GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, &bucket); |
820 } while (bucket_num); | 822 } while (bucket_num); |
821 return EntryCell(); | 823 return EntryCell(); |
822 } | 824 } |
823 | 825 |
824 void IndexTable::CheckState(const EntryCell& cell) { | 826 void IndexTable::CheckState(const EntryCell& cell) { |
825 int current_state = cell.GetState(); | 827 int current_state = cell.GetState(); |
826 if (current_state != ENTRY_FIXING) { | 828 if (current_state != ENTRY_FIXING) { |
827 bool present = ((current_state & 3) != 0); // Look at the last two bits. | 829 bool present = ((current_state & 3) != 0); // Look at the last two bits. |
828 if (present != bitmap_->Get(cell.cell_num()) || | 830 if (present != bitmap_->Get(cell.cell_num()) || |
829 present != backup_bitmap_->Get(cell.cell_num())) { | 831 present != backup_bitmap_->Get(cell.cell_num())) { |
(...skipping 19 matching lines...)
849 int bucket_num = cell.cell_num() / kCellsPerBucket; | 851 int bucket_num = cell.cell_num() / kCellsPerBucket; |
850 if (bucket_num < static_cast<int32>(mask_ + 1)) { | 852 if (bucket_num < static_cast<int32>(mask_ + 1)) { |
851 bucket = &main_table_[bucket_num]; | 853 bucket = &main_table_[bucket_num]; |
852 } else { | 854 } else { |
853 DCHECK_LE(bucket_num, header()->max_bucket); | 855 DCHECK_LE(bucket_num, header()->max_bucket); |
854 bucket = &extra_table_[bucket_num - (mask_ + 1)]; | 856 bucket = &extra_table_[bucket_num - (mask_ + 1)]; |
855 } | 857 } |
856 | 858 |
857 int cell_number = cell.cell_num() % kCellsPerBucket; | 859 int cell_number = cell.cell_num() % kCellsPerBucket; |
858 if (GetLocation(bucket->cells[cell_number]) && cell.GetLocation()) { | 860 if (GetLocation(bucket->cells[cell_number]) && cell.GetLocation()) { |
859 DCHECK_EQ(cell.GetLocation(), | 861 DCHECK_EQ(cell.GetLocation(), GetLocation(bucket->cells[cell_number])); |
860 GetLocation(bucket->cells[cell_number])); | |
861 } | 862 } |
862 cell.Serialize(&bucket->cells[cell_number]); | 863 cell.Serialize(&bucket->cells[cell_number]); |
863 } | 864 } |
864 | 865 |
865 int IndexTable::NewExtraBucket() { | 866 int IndexTable::NewExtraBucket() { |
866 int safe_window = (header()->table_len < kNumExtraBlocks * 2) ? | 867 int safe_window = (header()->table_len < kNumExtraBlocks * 2) |
867 kNumExtraBlocks / 4 : kNumExtraBlocks; | 868 ? kNumExtraBlocks / 4 |
| 869 : kNumExtraBlocks; |
868 if (header()->table_len - header()->max_bucket * kCellsPerBucket < | 870 if (header()->table_len - header()->max_bucket * kCellsPerBucket < |
869 safe_window) { | 871 safe_window) { |
870 backend_->GrowIndex(); | 872 backend_->GrowIndex(); |
871 } | 873 } |
872 | 874 |
873 if (header()->max_bucket * kCellsPerBucket == | 875 if (header()->max_bucket * kCellsPerBucket == |
874 header()->table_len - kCellsPerBucket) { | 876 header()->table_len - kCellsPerBucket) { |
875 return 0; | 877 return 0; |
876 } | 878 } |
877 | 879 |
878 header()->max_bucket++; | 880 header()->max_bucket++; |
879 return header()->max_bucket; | 881 return header()->max_bucket; |
880 } | 882 } |
881 | 883 |
882 void IndexTable::WalkTables(int limit_time, | 884 void IndexTable::WalkTables(int limit_time, |
883 IndexIterator* no_use, | 885 IndexIterator* no_use, |
884 IndexIterator* low_use, | 886 IndexIterator* low_use, |
885 IndexIterator* high_use) { | 887 IndexIterator* high_use) { |
886 header_->num_no_use_entries = 0; | 888 header_->num_no_use_entries = 0; |
887 header_->num_low_use_entries = 0; | 889 header_->num_low_use_entries = 0; |
888 header_->num_high_use_entries = 0; | 890 header_->num_high_use_entries = 0; |
889 header_->num_evicted_entries = 0; | 891 header_->num_evicted_entries = 0; |
890 | 892 |
891 for (int i = 0; i < static_cast<int32>(mask_ + 1); i++) { | 893 for (int i = 0; i < static_cast<int32>(mask_ + 1); i++) { |
892 int bucket_num = i; | 894 int bucket_num = i; |
893 IndexBucket* bucket = &main_table_[i]; | 895 IndexBucket* bucket = &main_table_[i]; |
894 do { | 896 do { |
895 UpdateFromBucket(bucket, i, limit_time, no_use, low_use, high_use); | 897 UpdateFromBucket(bucket, i, limit_time, no_use, low_use, high_use); |
896 | 898 |
897 bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, | 899 bucket_num = |
898 &bucket); | 900 GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, &bucket); |
899 } while (bucket_num); | 901 } while (bucket_num); |
900 } | 902 } |
901 header_->num_entries = header_->num_no_use_entries + | 903 header_->num_entries = |
902 header_->num_low_use_entries + | 904 header_->num_no_use_entries + header_->num_low_use_entries + |
903 header_->num_high_use_entries + | 905 header_->num_high_use_entries + header_->num_evicted_entries; |
904 header_->num_evicted_entries; | |
905 modified_ = true; | 906 modified_ = true; |
906 } | 907 } |
907 | 908 |
908 void IndexTable::UpdateFromBucket(IndexBucket* bucket, int bucket_hash, | 909 void IndexTable::UpdateFromBucket(IndexBucket* bucket, |
| 910 int bucket_hash, |
909 int limit_time, | 911 int limit_time, |
910 IndexIterator* no_use, | 912 IndexIterator* no_use, |
911 IndexIterator* low_use, | 913 IndexIterator* low_use, |
912 IndexIterator* high_use) { | 914 IndexIterator* high_use) { |
913 for (int i = 0; i < kCellsPerBucket; i++) { | 915 for (int i = 0; i < kCellsPerBucket; i++) { |
914 IndexCell& current_cell = bucket->cells[i]; | 916 IndexCell& current_cell = bucket->cells[i]; |
915 if (!GetLocation(current_cell)) | 917 if (!GetLocation(current_cell)) |
916 continue; | 918 continue; |
917 DCHECK(SanityCheck(current_cell)); | 919 DCHECK(SanityCheck(current_cell)); |
918 if (!IsNormalState(current_cell)) | 920 if (!IsNormalState(current_cell)) |
919 continue; | 921 continue; |
920 | 922 |
921 EntryCell entry_cell(0, GetFullHash(current_cell, bucket_hash), | 923 EntryCell entry_cell( |
922 current_cell, small_table_); | 924 0, GetFullHash(current_cell, bucket_hash), current_cell, small_table_); |
923 switch (GetCellGroup(current_cell)) { | 925 switch (GetCellGroup(current_cell)) { |
924 case ENTRY_NO_USE: | 926 case ENTRY_NO_USE: |
925 UpdateIterator(entry_cell, limit_time, no_use); | 927 UpdateIterator(entry_cell, limit_time, no_use); |
926 header_->num_no_use_entries++; | 928 header_->num_no_use_entries++; |
927 break; | 929 break; |
928 case ENTRY_LOW_USE: | 930 case ENTRY_LOW_USE: |
929 UpdateIterator(entry_cell, limit_time, low_use); | 931 UpdateIterator(entry_cell, limit_time, low_use); |
930 header_->num_low_use_entries++; | 932 header_->num_low_use_entries++; |
931 break; | 933 break; |
932 case ENTRY_HIGH_USE: | 934 case ENTRY_HIGH_USE: |
(...skipping 48 matching lines...)
981 IndexBucket* bucket = &source_table[i]; | 983 IndexBucket* bucket = &source_table[i]; |
982 do { | 984 do { |
983 for (int j = 0; j < kCellsPerBucket; j++) { | 985 for (int j = 0; j < kCellsPerBucket; j++) { |
984 IndexCell& current_cell = bucket->cells[j]; | 986 IndexCell& current_cell = bucket->cells[j]; |
985 if (!GetLocation(current_cell)) | 987 if (!GetLocation(current_cell)) |
986 continue; | 988 continue; |
987 DCHECK(SanityCheck(current_cell)); | 989 DCHECK(SanityCheck(current_cell)); |
988 if (bucket_num == i) { | 990 if (bucket_num == i) { |
989 if (upgrade_format || (GetHashValue(current_cell) & new_bit)) { | 991 if (upgrade_format || (GetHashValue(current_cell) & new_bit)) { |
990 // Move this cell to the upper half of the table. | 992 // Move this cell to the upper half of the table. |
991 MoveSingleCell(¤t_cell, bucket_num * kCellsPerBucket + j, i, | 993 MoveSingleCell( |
992 true); | 994 ¤t_cell, bucket_num * kCellsPerBucket + j, i, true); |
993 } | 995 } |
994 } else { | 996 } else { |
995 // All cells on extra buckets have to move. | 997 // All cells on extra buckets have to move. |
996 MoveSingleCell(¤t_cell, bucket_num * kCellsPerBucket + j, i, | 998 MoveSingleCell( |
997 true); | 999 ¤t_cell, bucket_num * kCellsPerBucket + j, i, true); |
998 } | 1000 } |
999 } | 1001 } |
1000 | 1002 |
1001 // There is no need to clear the old bucket->next value because it falls | 1003 // There is no need to clear the old bucket->next value because it falls |
1002 // within the main table so it will be fixed when attempting to follow | 1004 // within the main table so it will be fixed when attempting to follow |
1003 // the link. | 1005 // the link. |
1004 bucket_num = GetNextBucket(max_hash, max_bucket, old_extra_table, | 1006 bucket_num = |
1005 &bucket); | 1007 GetNextBucket(max_hash, max_bucket, old_extra_table, &bucket); |
1006 } while (bucket_num); | 1008 } while (bucket_num); |
1007 } | 1009 } |
1008 | 1010 |
1009 DCHECK_EQ(header()->used_cells, used_cells); | 1011 DCHECK_EQ(header()->used_cells, used_cells); |
1010 | 1012 |
1011 if (upgrade_format) { | 1013 if (upgrade_format) { |
1012 small_table_ = false; | 1014 small_table_ = false; |
1013 header()->flags &= ~SMALL_CACHE; | 1015 header()->flags &= ~SMALL_CACHE; |
1014 } | 1016 } |
1015 } | 1017 } |
1016 | 1018 |
1017 void IndexTable::MoveSingleCell(IndexCell* current_cell, int cell_num, | 1019 void IndexTable::MoveSingleCell(IndexCell* current_cell, |
1018 int main_table_index, bool growing) { | 1020 int cell_num, |
| 1021 int main_table_index, |
| 1022 bool growing) { |
1019 uint32 hash = GetFullHash(*current_cell, main_table_index); | 1023 uint32 hash = GetFullHash(*current_cell, main_table_index); |
1020 EntryCell old_cell(cell_num, hash, *current_cell, small_table_); | 1024 EntryCell old_cell(cell_num, hash, *current_cell, small_table_); |
1021 | 1025 |
1022 // This method may be called when moving entries from a small table to a | 1026 // This method may be called when moving entries from a small table to a |
1023 // normal table. In that case, the caller (MoveCells) has to read the old | 1027 // normal table. In that case, the caller (MoveCells) has to read the old |
1024 // table, so it needs small_table_ set to true, but this method needs to | 1028 // table, so it needs small_table_ set to true, but this method needs to |
1025 // write to the new table so small_table_ has to be set to false, and the | 1029 // write to the new table so small_table_ has to be set to false, and the |
1026 // value restored to true before returning. | 1030 // value restored to true before returning. |
1027 bool upgrade_format = !extra_bits_ && growing; | 1031 bool upgrade_format = !extra_bits_ && growing; |
1028 if (upgrade_format) | 1032 if (upgrade_format) |
(...skipping 29 matching lines...)
1058 } | 1062 } |
1059 | 1063 |
1060 if (cell_num != new_cell.cell_num()) { | 1064 if (cell_num != new_cell.cell_num()) { |
1061 bitmap_->Set(old_cell.cell_num(), false); | 1065 bitmap_->Set(old_cell.cell_num(), false); |
1062 backup_bitmap_->Set(old_cell.cell_num(), false); | 1066 backup_bitmap_->Set(old_cell.cell_num(), false); |
1063 } | 1067 } |
1064 } | 1068 } |
1065 header()->used_cells--; | 1069 header()->used_cells--; |
1066 } | 1070 } |
1067 | 1071 |
1068 void IndexTable::HandleMisplacedCell(IndexCell* current_cell, int cell_num, | 1072 void IndexTable::HandleMisplacedCell(IndexCell* current_cell, |
| 1073 int cell_num, |
1069 int main_table_index) { | 1074 int main_table_index) { |
1070 NOTREACHED(); // No unit tests yet. | 1075 NOTREACHED(); // No unit tests yet. |
1071 | 1076 |
1072 // The cell may be misplaced, or a duplicate cell exists with this data. | 1077 // The cell may be misplaced, or a duplicate cell exists with this data. |
1073 uint32 hash = GetFullHash(*current_cell, main_table_index); | 1078 uint32 hash = GetFullHash(*current_cell, main_table_index); |
1074 MoveSingleCell(current_cell, cell_num, main_table_index, false); | 1079 MoveSingleCell(current_cell, cell_num, main_table_index, false); |
1075 | 1080 |
1076 // Now look for a duplicate cell. | 1081 // Now look for a duplicate cell. |
1077 CheckBucketList(hash & mask_); | 1082 CheckBucketList(hash & mask_); |
1078 } | 1083 } |
1079 | 1084 |
1080 void IndexTable::CheckBucketList(int bucket_num) { | 1085 void IndexTable::CheckBucketList(int bucket_num) { |
1081 typedef std::pair<int, EntryGroup> AddressAndGroup; | 1086 typedef std::pair<int, EntryGroup> AddressAndGroup; |
1082 std::set<AddressAndGroup> entries; | 1087 std::set<AddressAndGroup> entries; |
1083 IndexBucket* bucket = &main_table_[bucket_num]; | 1088 IndexBucket* bucket = &main_table_[bucket_num]; |
1084 int bucket_hash = bucket_num; | 1089 int bucket_hash = bucket_num; |
1085 do { | 1090 do { |
1086 for (int i = 0; i < kCellsPerBucket; i++) { | 1091 for (int i = 0; i < kCellsPerBucket; i++) { |
1087 IndexCell* current_cell = &bucket->cells[i]; | 1092 IndexCell* current_cell = &bucket->cells[i]; |
1088 if (!GetLocation(*current_cell)) | 1093 if (!GetLocation(*current_cell)) |
1089 continue; | 1094 continue; |
1090 if (!SanityCheck(*current_cell)) { | 1095 if (!SanityCheck(*current_cell)) { |
1091 NOTREACHED(); | 1096 NOTREACHED(); |
1092 current_cell->Clear(); | 1097 current_cell->Clear(); |
1093 continue; | 1098 continue; |
1094 } | 1099 } |
1095 int cell_num = bucket_num * kCellsPerBucket + i; | 1100 int cell_num = bucket_num * kCellsPerBucket + i; |
1096 EntryCell cell(cell_num, GetFullHash(*current_cell, bucket_hash), | 1101 EntryCell cell(cell_num, |
1097 *current_cell, small_table_); | 1102 GetFullHash(*current_cell, bucket_hash), |
| 1103 *current_cell, |
| 1104 small_table_); |
1098 if (!entries.insert(std::make_pair(cell.GetAddress().value(), | 1105 if (!entries.insert(std::make_pair(cell.GetAddress().value(), |
1099 cell.GetGroup())).second) { | 1106 cell.GetGroup())).second) { |
1100 current_cell->Clear(); | 1107 current_cell->Clear(); |
1101 continue; | 1108 continue; |
1102 } | 1109 } |
1103 CheckState(cell); | 1110 CheckState(cell); |
1104 } | 1111 } |
1105 | 1112 |
1106 bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, | 1113 bucket_num = |
1107 &bucket); | 1114 GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_, &bucket); |
1108 } while (bucket_num); | 1115 } while (bucket_num); |
1109 } | 1116 } |
1110 | 1117 |
1111 uint32 IndexTable::GetLocation(const IndexCell& cell) { | 1118 uint32 IndexTable::GetLocation(const IndexCell& cell) { |
1112 if (small_table_) | 1119 if (small_table_) |
1113 return GetCellSmallTableLocation(cell); | 1120 return GetCellSmallTableLocation(cell); |
1114 | 1121 |
1115 return GetCellLocation(cell); | 1122 return GetCellLocation(cell); |
1116 } | 1123 } |
1117 | 1124 |
(...skipping 22 matching lines...)
1140 bool IndexTable::MisplacedHash(const IndexCell& cell, uint32 hash) { | 1147 bool IndexTable::MisplacedHash(const IndexCell& cell, uint32 hash) { |
1141 if (!extra_bits_) | 1148 if (!extra_bits_) |
1142 return false; | 1149 return false; |
1143 | 1150 |
1144 uint32 mask = (1 << extra_bits_) - 1; | 1151 uint32 mask = (1 << extra_bits_) - 1; |
1145 hash = small_table_ ? hash >> kSmallTableHashShift : hash >> kHashShift; | 1152 hash = small_table_ ? hash >> kSmallTableHashShift : hash >> kHashShift; |
1146 return (GetHashValue(cell) & mask) != (hash & mask); | 1153 return (GetHashValue(cell) & mask) != (hash & mask); |
1147 } | 1154 } |
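MisplacedHash() above detects cells whose stored hash bits no longer match the bucket they sit in, which can happen after the table has grown and extra_bits_ has increased. A tiny worked example of that check; the kHashShift value below is illustrative, and the function is simplified to take the cell's stored hash value directly.

#include <cstdint>
#include <cstdio>

// Illustrative constant; the real kHashShift is defined in the header.
constexpr int kHashShift = 14;

// Returns true when the |extra_bits| low bits of the cell's stored hash value
// disagree with the same bits of the full hash, i.e. the cell is misplaced.
bool MisplacedHash(uint32_t cell_hash_value, uint32_t hash, int extra_bits) {
  if (!extra_bits)
    return false;
  uint32_t mask = (1u << extra_bits) - 1;
  return (cell_hash_value & mask) != ((hash >> kHashShift) & mask);
}

int main() {
  uint32_t hash = 0xABCD1234u;
  uint32_t stored = hash >> kHashShift;  // what the cell recorded originally
  std::printf("%d\n", MisplacedHash(stored, hash, 2));       // 0: still in place
  std::printf("%d\n", MisplacedHash(stored ^ 1u, hash, 2));  // 1: misplaced
  return 0;
}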
1148 | 1155 |
1149 } // namespace disk_cache | 1156 } // namespace disk_cache |