| Index: net/disk_cache/v3/index_table.cc
|
| ===================================================================
|
| --- net/disk_cache/v3/index_table.cc (revision 0)
|
| +++ net/disk_cache/v3/index_table.cc (revision 0)
|
| @@ -0,0 +1,1059 @@
|
| +// Copyright (c) 2013 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "net/disk_cache/v3/index_table.h"
|
| +
|
| +#include <algorithm>
|
| +#include <set>
|
| +#include <utility>
|
| +
|
| +#include "base/bits.h"
|
| +#include "net/base/io_buffer.h"
|
| +#include "net/base/net_errors.h"
|
| +#include "net/disk_cache/disk_cache.h"
|
| +
|
| +using base::Time;
|
| +using base::TimeDelta;
|
| +using disk_cache::CellInfo;
|
| +using disk_cache::CellList;
|
| +using disk_cache::IndexCell;
|
| +using disk_cache::IndexIterator;
|
| +
|
| +namespace {
|
| +
|
| +const uint32 kMaxAddress = 1 << 22;
|
| +
|
| +const int kCellHashOffset = 22;
|
| +const int kCellSmallTableHashOffset = 16;
|
| +const int kCellTimestampOffset = 40;
|
| +const int kCellReuseOffset = 60;
|
| +const int kCellGroupOffset = 3;
|
| +const int kCellSumOffset = 6;
|
| +
|
| +const uint64 kCellAddressMask = 0x3FFFFF;
|
| +const uint64 kCellSmallTableAddressMask = 0xFFFF;
|
| +const uint64 kCellHashMask = 0x3FFFF;
|
| +const uint64 kCellSmallTableHashMask = 0xFFFFFF;
|
| +const uint64 kCellTimestampMask = 0xFFFFF;
|
| +const uint64 kCellReuseMask = 0xF;
|
| +const uint8 kCellStateMask = 0x7;
|
| +const uint8 kCellGroupMask = 0x7;
|
| +const uint8 kCellSumMask = 0x3;
|
| +
|
| +const int kHashShift = 14;
|
| +const int kHashSmallTableShift = 8;
|
| +
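|
| +// Layout of an index cell, as implied by the offsets and masks above (a
|
| +// reference sketch, not an independent spec). first_part, regular table:
|
| +//   bits 0-21:  address (22 bits)
|
| +//   bits 22-39: upper 18 bits of the entry hash
|
| +//   bits 40-59: timestamp, in minutes (20 bits)
|
| +//   bits 60-63: reuse count (4 bits)
|
| +// For the small table: bits 0-15 address, bits 16-39 upper 24 bits of the
|
| +// hash. last_part: bits 0-2 state, bits 3-5 group, bits 6-7 checksum. For
|
| +// example, a cell built for hash |h| on a regular table stores h >> 14 in
|
| +// the hash field, while h & mask_ selects the bucket.
|
| +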
|
| +// Unfortunately we have to break the abstraction a little here: the file
|
| +// number where entries are stored is outside of the control of this code, and
|
| +// it is usually part of the stored address. However, for small tables we only
|
| +// store 16 bits of the address, so the file number is never stored in a cell.
|
| +// We have to infer the file number from the type of entry (normal vs.
|
| +// evicted), and from the knowledge that, since the table will not keep more
|
| +// than 64k entries, a single file of each type is enough.
|
| +const int kEntriesFile = disk_cache::BLOCK_ENTRIES - 1;
|
| +const int kEvictedEntriesFile = disk_cache::BLOCK_EVICTED - 1;
|
| +
|
| +uint32 GetCellAddress(const IndexCell& cell) {
|
| + return cell.first_part & kCellAddressMask;
|
| +}
|
| +
|
| +uint32 GetCellSmallTableAddress(const IndexCell& cell) {
|
| + return cell.first_part & kCellSmallTableAddressMask;
|
| +}
|
| +
|
| +uint32 GetCellHash(const IndexCell& cell) {
|
| + return (cell.first_part >> kCellHashOffset) & kCellHashMask;
|
| +}
|
| +
|
| +uint32 GetCellSmallTableHash(const IndexCell& cell) {
|
| + return (cell.first_part >> kCellSmallTableHashOffset) &
|
| + kCellSmallTableHashMask;
|
| +}
|
| +
|
| +int GetCellTimestamp(const IndexCell& cell) {
|
| + return (cell.first_part >> kCellTimestampOffset) & kCellTimestampMask;
|
| +}
|
| +
|
| +int GetCellReuse(const IndexCell& cell) {
|
| + return (cell.first_part >> kCellReuseOffset) & kCellReuseMask;
|
| +}
|
| +
|
| +int GetCellState(const IndexCell& cell) {
|
| + return cell.last_part & kCellStateMask;
|
| +}
|
| +
|
| +int GetCellGroup(const IndexCell& cell) {
|
| + return (cell.last_part >> kCellGroupOffset) & kCellGroupMask;
|
| +}
|
| +
|
| +int GetCellSum(const IndexCell& cell) {
|
| + return (cell.last_part >> kCellSumOffset) & kCellSumMask;
|
| +}
|
| +
|
| +void SetCellAddress(IndexCell* cell, uint32 address) {
|
| + DCHECK_LE(address, static_cast<uint32>(kCellAddressMask));
|
| + cell->first_part &= ~kCellAddressMask;
|
| + cell->first_part |= address;
|
| +}
|
| +
|
| +void SetCellSmallTableAddress(IndexCell* cell, uint32 address) {
|
| + DCHECK_LE(address, static_cast<uint32>(kCellSmallTableAddressMask));
|
| + cell->first_part &= ~kCellSmallTableAddressMask;
|
| + cell->first_part |= address;
|
| +}
|
| +
|
| +void SetCellHash(IndexCell* cell, uint32 hash) {
|
| + DCHECK_LE(hash, static_cast<uint32>(kCellHashMask));
|
| + cell->first_part &= ~(kCellHashMask << kCellHashOffset);
|
| + cell->first_part |= static_cast<int64>(hash) << kCellHashOffset;
|
| +}
|
| +
|
| +void SetCellSmallTableHash(IndexCell* cell, uint32 hash) {
|
| + DCHECK_LE(hash, static_cast<uint32>(kCellSmallTableHashMask));
|
| + cell->first_part &= ~(kCellSmallTableHashMask << kCellSmallTableHashOffset);
|
| + cell->first_part |= static_cast<int64>(hash) << kCellSmallTableHashOffset;
|
| +}
|
| +
|
| +void SetCellTimestamp(IndexCell* cell, int timestamp) {
|
| + DCHECK_LT(timestamp, 1 << 20);
|
| + DCHECK_GE(timestamp, 0);
|
| + cell->first_part &= ~(kCellTimestampMask << kCellTimestampOffset);
|
| + cell->first_part |= static_cast<int64>(timestamp) << kCellTimestampOffset;
|
| +}
|
| +
|
| +void SetCellReuse(IndexCell* cell, int count) {
|
| + DCHECK_LT(count, 16);
|
| + DCHECK_GE(count, 0);
|
| + cell->first_part &= ~(kCellReuseMask << kCellReuseOffset);
|
| + cell->first_part |= static_cast<int64>(count) << kCellReuseOffset;
|
| +}
|
| +
|
| +void SetCellState(IndexCell* cell, disk_cache::EntryState state) {
|
| + cell->last_part &= ~kCellStateMask;
|
| + cell->last_part |= state;
|
| +}
|
| +
|
| +void SetCellGroup(IndexCell* cell, disk_cache::EntryGroup group) {
|
| + cell->last_part &= ~(kCellGroupMask << kCellGroupOffset);
|
| + cell->last_part |= group << kCellGroupOffset;
|
| +}
|
| +
|
| +void SetCellSum(IndexCell* cell, int sum) {
|
| + DCHECK_LT(sum, 4);
|
| + DCHECK_GE(sum, 0);
|
| + cell->last_part &= ~(kCellSumMask << kCellSumOffset);
|
| + cell->last_part |= sum << kCellSumOffset;
|
| +}
|
| +
|
| +// This is a very particular way to calculate the sum, so it will not match if
|
| +// compared against a pure 2-bit, modulo-2 sum.
|
| +int CalculateCellSum(const IndexCell& cell) {
|
| + const uint32* words = bit_cast<const uint32*>(&cell);
|
| + const uint8* bytes = bit_cast<const uint8*>(&cell);
|
| + uint32 result = words[0] + words[1];
|
| + result += result >> 16;
|
| + result += (result >> 8) + (bytes[8] & 0x3f);
|
| + result += result >> 4;
|
| + result += result >> 2;
|
| + return result & 3;
|
| +}
|
| +
|
| +bool SanityCheck(const IndexCell& cell) {
|
| + if (GetCellSum(cell) != CalculateCellSum(cell))
|
| + return false;
|
| +
|
| + if (GetCellState(cell) > disk_cache::ENTRY_USED ||
|
| + GetCellGroup(cell) == disk_cache::ENTRY_RESERVED ||
|
| + GetCellGroup(cell) > disk_cache::ENTRY_EVICTED) {
|
| + return false;
|
| + }
|
| +
|
| + return true;
|
| +}
|
| +
|
| +bool IsValidAddress(disk_cache::Addr address) {
|
| + if (!address.is_initialized() ||
|
| + (address.file_type() != disk_cache::BLOCK_EVICTED &&
|
| + address.file_type() != disk_cache::BLOCK_ENTRIES)) {
|
| + return false;
|
| + }
|
| +
|
| + return address.ToIndexEntryAddress() < kMaxAddress;
|
| +}
|
| +
|
| +bool IsNormalState(const IndexCell& cell) {
|
| + disk_cache::EntryState state =
|
| + static_cast<disk_cache::EntryState>(GetCellState(cell));
|
| + DCHECK_NE(state, disk_cache::ENTRY_FREE);
|
| + return state != disk_cache::ENTRY_DELETED &&
|
| + state != disk_cache::ENTRY_FIXING;
|
| +}
|
| +
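|
| +// Follows the |next| link of |*bucket|, advancing |*bucket| to the next
|
| +// bucket of the chain inside |table|. A link that points outside of
|
| +// [min_bucket_id, max_bucket_id] is considered corrupt and is cleared.
|
| +// Returns the id of the new bucket, or 0 at the end of the chain.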
|
| +inline int GetNextBucket(int min_bucket_id, int max_bucket_id,
|
| + disk_cache::IndexBucket* table,
|
| + disk_cache::IndexBucket** bucket) {
|
| + if (!(*bucket)->next)
|
| + return 0;
|
| +
|
| + int bucket_id = (*bucket)->next / disk_cache::kCellsPerBucket;
|
| + if (bucket_id < min_bucket_id || bucket_id > max_bucket_id) {
|
| + (*bucket)->next = 0;
|
| + return 0;
|
| + }
|
| + *bucket = &table[bucket_id - min_bucket_id];
|
| + return bucket_id;
|
| +}
|
| +
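|
| +// Keeps |list| populated with the cells that share the oldest timestamp seen
|
| +// so far (|list_time|): a strictly older cell resets the list, and a cell
|
| +// with a matching timestamp is appended to it.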
|
| +void UpdateListWithCell(int bucket_hash,
|
| + const disk_cache::EntryCell& cell,
|
| + CellList* list,
|
| + int* list_time) {
|
| + if (!list)
|
| + return;
|
| +
|
| + int time = cell.GetTimestamp();
|
| + if (time < *list_time) {
|
| + *list_time = time;
|
| + list->clear();
|
| + }
|
| + if (time == *list_time) {
|
| + CellInfo cell_info = { cell.hash(), cell.GetAddress() };
|
| + list->push_back(cell_info);
|
| + }
|
| +}
|
| +
|
| +} // namespace
|
| +
|
| +namespace disk_cache {
|
| +
|
| +EntryCell::~EntryCell() {
|
| +}
|
| +
|
| +bool EntryCell::IsValid() const {
|
| + return GetCellAddress(cell_) != 0;
|
| +}
|
| +
|
| +Addr EntryCell::GetAddress() const {
|
| + uint32 address_value = GetAddressValue();
|
| + if (small_table_) {
|
| + if (GetGroup() == ENTRY_EVICTED)
|
| + return Addr(BLOCK_EVICTED, 1, kEvictedEntriesFile, address_value);
|
| +
|
| + return Addr(BLOCK_ENTRIES, 1, kEntriesFile, address_value);
|
| + }
|
| +
|
| + if (GetGroup() == ENTRY_EVICTED)
|
| + return Addr::FromEvictedAddress(address_value);
|
| + else
|
| + return Addr::FromEntryAddress(address_value);
|
| +}
|
| +
|
| +EntryState EntryCell::GetState() const {
|
| + return static_cast<EntryState>(cell_.last_part & kCellStateMask);
|
| +}
|
| +
|
| +EntryGroup EntryCell::GetGroup() const {
|
| + return static_cast<EntryGroup>((cell_.last_part >> kCellGroupOffset) &
|
| + kCellGroupMask);
|
| +}
|
| +
|
| +int EntryCell::GetReuse() const {
|
| + return (cell_.first_part >> kCellReuseOffset) & kCellReuseMask;
|
| +}
|
| +
|
| +int EntryCell::GetTimestamp() const {
|
| + return GetCellTimestamp(cell_);
|
| +}
|
| +
|
| +void EntryCell::SetState(EntryState state) {
|
| + SetCellState(&cell_, state);
|
| +}
|
| +
|
| +void EntryCell::SetGroup(EntryGroup group) {
|
| + SetCellGroup(&cell_, group);
|
| +}
|
| +
|
| +void EntryCell::SetReuse(int count) {
|
| + SetCellReuse(&cell_, count);
|
| +}
|
| +
|
| +void EntryCell::SetTimestamp(int timestamp) {
|
| + SetCellTimestamp(&cell_, timestamp);
|
| +}
|
| +
|
| +// static
|
| +EntryCell EntryCell::GetEntryCellForTest(int32 cell_id,
|
| + uint32 hash,
|
| + Addr address,
|
| + IndexCell* cell,
|
| + bool small_table) {
|
| + if (cell) {
|
| + EntryCell entry_cell(cell_id, hash, *cell, small_table);
|
| + return entry_cell;
|
| + }
|
| +
|
| + return EntryCell(cell_id, hash, address, small_table);
|
| +}
|
| +
|
| +void EntryCell::SerializeForTest(IndexCell* destination) {
|
| + FixSum();
|
| + Serialize(destination);
|
| +}
|
| +
|
| +EntryCell::EntryCell() : cell_id_(0), hash_(0), small_table_(false) {
|
| + cell_.Clear();
|
| +}
|
| +
|
| +EntryCell::EntryCell(int32 cell_id, uint32 hash, Addr address, bool small_table)
|
| + : cell_id_(cell_id),
|
| + hash_(hash),
|
| + small_table_(small_table) {
|
| + DCHECK(IsValidAddress(address) || !address.value());
|
| +
|
| + cell_.Clear();
|
| + SetCellState(&cell_, ENTRY_NEW);
|
| + SetCellGroup(&cell_, ENTRY_NO_USE);
|
| + if (small_table) {
|
| + DCHECK(address.FileNumber() == kEntriesFile ||
|
| + address.FileNumber() == kEvictedEntriesFile);
|
| + SetCellSmallTableAddress(&cell_, address.start_block());
|
| + SetCellSmallTableHash(&cell_, hash >> kHashSmallTableShift);
|
| + } else {
|
| + SetCellAddress(&cell_, address.ToIndexEntryAddress());
|
| + SetCellHash(&cell_, hash >> kHashShift);
|
| + }
|
| +}
|
| +
|
| +EntryCell::EntryCell(int32 cell_id,
|
| + uint32 hash,
|
| + const IndexCell& cell,
|
| + bool small_table)
|
| + : cell_id_(cell_id),
|
| + hash_(hash),
|
| + cell_(cell),
|
| + small_table_(small_table) {
|
| +}
|
| +
|
| +void EntryCell::FixSum() {
|
| + SetCellSum(&cell_, CalculateCellSum(cell_));
|
| +}
|
| +
|
| +uint32 EntryCell::GetAddressValue() const {
|
| + if (small_table_)
|
| + return GetCellSmallTableAddress(cell_);
|
| +
|
| + return GetCellAddress(cell_);
|
| +}
|
| +
|
| +uint32 EntryCell::RecomputeHash() {
|
| + if (small_table_) {
|
| + hash_ &= (1 << kHashSmallTableShift) - 1;
|
| + hash_ |= GetCellSmallTableHash(cell_) << kHashSmallTableShift;
|
| + return hash_;
|
| + }
|
| +
|
| + hash_ &= (1 << kHashShift) - 1;
|
| + hash_ |= GetCellHash(cell_) << kHashShift;
|
| + return hash_;
|
| +}
|
| +
|
| +void EntryCell::Serialize(IndexCell* destination) const {
|
| + memcpy(destination, &cell_, sizeof(cell_));
|
| +}
|
| +
|
| +EntrySet::EntrySet() : evicted_count(0), current(0) {
|
| +}
|
| +
|
| +// -----------------------------------------------------------------------
|
| +
|
| +IndexTable::IndexTable(IndexTableBackend* backend)
|
| + : backend_(backend),
|
| + header_(NULL),
|
| + main_table_(NULL),
|
| + extra_table_(NULL),
|
| + modified_(false),
|
| + small_table_(false) {
|
| +}
|
| +
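|
| +// Sets up the in-memory structures from |params|, either for a brand new
|
| +// index or after the backend grows it. When growing, the old extra table is
|
| +// copied aside and its cells are redistributed into the new tables (see
|
| +// MoveCells).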
|
| +void IndexTable::Init(IndexTableInitData* params) {
|
| + bool growing = header_ != NULL;
|
| + scoped_ptr<IndexBucket[]> old_extra_table;
|
| + header_ = ¶ms->index_bitmap->header;
|
| +
|
| + if (params->main_table) {
|
| + if (main_table_) {
|
| + DCHECK_EQ(base::bits::Log2Floor(header_->table_len),
|
| + base::bits::Log2Floor(backup_header_->table_len) + 1);
|
| + int extra_size = (header()->max_bucket - mask_) * kCellsPerBucket;
|
| + DCHECK_GE(extra_size, 0);
|
| + old_extra_table.reset(new IndexBucket[extra_size]);
|
| + memcpy(old_extra_table.get(), extra_table_,
|
| + extra_size * sizeof(IndexBucket));
|
| + memset(params->extra_table, 0, extra_size * sizeof(IndexBucket));
|
| + }
|
| + main_table_ = params->main_table;
|
| + }
|
| + DCHECK(main_table_);
|
| + extra_table_ = params->extra_table;
|
| +
|
| + extra_bits_ = base::bits::Log2Floor(header_->table_len) -
|
| + base::bits::Log2Floor(kBaseTableLen);
|
| + DCHECK_GE(extra_bits_, 0);
|
| + DCHECK_LE(extra_bits_, 11);
|
| + mask_ = ((kBaseTableLen / kCellsPerBucket) << extra_bits_) - 1;
|
| + small_table_ = extra_bits_ < kHashShift - kHashSmallTableShift;
|
| + if (!small_table_)
|
| + extra_bits_ -= kHashShift - kHashSmallTableShift;
|
| +
|
| + int num_words = (header_->table_len + 31) / 32;
|
| +
|
| + if (old_extra_table) {
|
| + // All the cells from the extra table are moving to the new tables, so
|
| + // before creating the bitmaps, clear the part of the bitmap that refers to
|
| + // the extra table.
|
| + int main_table_bit_words = ((mask_ >> 1) + 1) * kCellsPerBucket / 32;
|
| + DCHECK_GT(num_words, main_table_bit_words);
|
| + memset(params->index_bitmap->bitmap + main_table_bit_words, 0,
|
| + (num_words - main_table_bit_words) * sizeof(int32));
|
| +
|
| + DCHECK(growing);
|
| + int old_num_words = (backup_header_.get()->table_len + 31) / 32;
|
| + DCHECK_GT(old_num_words, main_table_bit_words);
|
| + memset(backup_bitmap_storage_.get() + main_table_bit_words, 0,
|
| + (old_num_words - main_table_bit_words) * sizeof(int32));
|
| + }
|
| + bitmap_.reset(new Bitmap(params->index_bitmap->bitmap, header_->table_len,
|
| + num_words));
|
| +
|
| + if (growing) {
|
| + int old_num_words = (backup_header_.get()->table_len + 31) / 32;
|
| + DCHECK_GE(num_words, old_num_words);
|
| + scoped_ptr<uint32[]> storage(new uint32[num_words]);
|
| + memcpy(storage.get(), backup_bitmap_storage_.get(),
|
| + old_num_words * sizeof(int32));
|
| + memset(storage.get() + old_num_words, 0,
|
| + (num_words - old_num_words) * sizeof(int32));
|
| +
|
| + backup_bitmap_storage_.swap(storage);
|
| + backup_header_->table_len = header_->table_len;
|
| + } else {
|
| + backup_bitmap_storage_.reset(params->backup_bitmap.release());
|
| + backup_header_.reset(params->backup_header.release());
|
| + }
|
| +
|
| + num_words = (backup_header_->table_len + 31) / 32;
|
| + backup_bitmap_.reset(new Bitmap(backup_bitmap_storage_.get(),
|
| + backup_header_->table_len, num_words));
|
| + if (old_extra_table)
|
| + MoveCells(old_extra_table.get());
|
| +
|
| + if (small_table_)
|
| + DCHECK(header_->flags & SMALL_CACHE);
|
| +}
|
| +
|
| +void IndexTable::Reset() {
|
| + header_ = NULL;
|
| + main_table_ = NULL;
|
| + extra_table_ = NULL;
|
| + bitmap_.reset();
|
| + backup_bitmap_.reset();
|
| + backup_header_.reset();
|
| + backup_bitmap_storage_.reset();
|
| + modified_ = false;
|
| +}
|
| +
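|
| +// Returns the set of cells that may store an entry with this |hash|, walking
|
| +// the bucket chain selected by hash & mask_. As a side effect, corrupt cells
|
| +// are cleared and misplaced cells (left behind by a resize) are moved.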
|
| +EntrySet IndexTable::LookupEntry(uint32 hash) {
|
| + EntrySet entries;
|
| + int bucket_id = static_cast<int>(hash & mask_);
|
| + IndexBucket* bucket = &main_table_[bucket_id];
|
| + for (;;) {
|
| + for (int i = 0; i < kCellsPerBucket; i++) {
|
| + IndexCell* current_cell = &bucket->cells[i];
|
| + if (!GetAddressValue(*current_cell))
|
| + continue;
|
| + if (!SanityCheck(*current_cell)) {
|
| + NOTREACHED();
|
| + int cell_id = bucket_id * kCellsPerBucket + i;
|
| + current_cell->Clear();
|
| + bitmap_->Set(cell_id, false);
|
| + backup_bitmap_->Set(cell_id, false);
|
| + modified_ = true;
|
| + continue;
|
| + }
|
| + int cell_id = bucket_id * kCellsPerBucket + i;
|
| + if (MisplacedHash(*current_cell, hash)) {
|
| + HandleMisplacedCell(current_cell, cell_id, hash & mask_);
|
| + } else if (IsHashMatch(*current_cell, hash)) {
|
| + EntryCell entry_cell(cell_id, hash, *current_cell, small_table_);
|
| + CheckState(entry_cell);
|
| + if (entry_cell.GetState() != ENTRY_DELETED) {
|
| + entries.cells.push_back(entry_cell);
|
| + if (entry_cell.GetGroup() == ENTRY_EVICTED)
|
| + entries.evicted_count++;
|
| + }
|
| + }
|
| + }
|
| + bucket_id = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
|
| + &bucket);
|
| + if (!bucket_id)
|
| + break;
|
| + }
|
| + return entries;
|
| +}
|
| +
|
| +EntryCell IndexTable::CreateEntryCell(uint32 hash, Addr address) {
|
| + DCHECK(IsValidAddress(address));
|
| + DCHECK(address.ToIndexEntryAddress());
|
| +
|
| + int bucket_id = static_cast<int>(hash & mask_);
|
| + int cell_id = 0;
|
| + IndexBucket* bucket = &main_table_[bucket_id];
|
| + IndexCell* current_cell = NULL;
|
| + bool found = false;
|
| + while (!found) {
|
| + for (int i = 0; i < kCellsPerBucket && !found; i++) {
|
| + current_cell = &bucket->cells[i];
|
| + if (!GetAddressValue(*current_cell)) {
|
| + cell_id = bucket_id * kCellsPerBucket + i;
|
| + found = true;
|
| + }
|
| + }
|
| + if (found)
|
| + break;
|
| + bucket_id = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
|
| + &bucket);
|
| + if (!bucket_id)
|
| + break;
|
| + }
|
| +
|
| + if (!found) {
|
| + bucket_id = NewExtraBucket();
|
| + if (bucket_id) {
|
| + cell_id = bucket_id * kCellsPerBucket;
|
| + bucket->next = cell_id;
|
| + bucket = &extra_table_[bucket_id - (mask_ + 1)];
|
| + bucket->hash = hash & mask_;
|
| + found = true;
|
| + } else {
|
| + // Address 0 is a reserved value, and the caller interprets it as invalid.
|
| + address.set_value(0);
|
| + }
|
| + }
|
| +
|
| + EntryCell entry_cell(cell_id, hash, address, small_table_);
|
| + if (address.file_type() == BLOCK_EVICTED)
|
| + entry_cell.SetGroup(ENTRY_EVICTED);
|
| + else
|
| + entry_cell.SetGroup(ENTRY_NO_USE);
|
| + Save(&entry_cell);
|
| +
|
| + if (found) {
|
| + bitmap_->Set(cell_id, true);
|
| + backup_bitmap_->Set(cell_id, true);
|
| + header()->used_cells++;
|
| + modified_ = true;
|
| + }
|
| +
|
| + return entry_cell;
|
| +}
|
| +
|
| +EntryCell IndexTable::FindEntryCell(uint32 hash, Addr address) {
|
| + return FindEntryCellImpl(hash, address, false);
|
| +}
|
| +
|
| +int IndexTable::CalculateTimestamp(Time time) {
|
| + TimeDelta delta = time - Time::FromInternalValue(header_->base_time);
|
| + return std::max(delta.InMinutes(), 0);
|
| +}
|
| +
|
| +void IndexTable::SetState(uint32 hash, Addr address, EntryState state) {
|
| + EntryCell cell = FindEntryCellImpl(hash, address, state == ENTRY_FREE);
|
| + if (!cell.IsValid()) {
|
| + NOTREACHED();
|
| + return;
|
| + }
|
| +
|
| + EntryState old_state = cell.GetState();
|
| + if (state == ENTRY_FREE) {
|
| + DCHECK_EQ(old_state, ENTRY_DELETED);
|
| + } else if (state == ENTRY_NEW) {
|
| + DCHECK_EQ(old_state, ENTRY_FREE);
|
| + } else if (state == ENTRY_OPEN) {
|
| + DCHECK_EQ(old_state, ENTRY_USED);
|
| + } else if (state == ENTRY_MODIFIED) {
|
| + DCHECK_EQ(old_state, ENTRY_OPEN);
|
| + } else if (state == ENTRY_DELETED) {
|
| + DCHECK(old_state == ENTRY_NEW || old_state == ENTRY_OPEN ||
|
| + old_state == ENTRY_MODIFIED);
|
| + } else if (state == ENTRY_USED) {
|
| + DCHECK(old_state == ENTRY_NEW || old_state == ENTRY_OPEN ||
|
| + old_state == ENTRY_MODIFIED);
|
| + }
|
| +
|
| + modified_ = true;
|
| + if (state == ENTRY_DELETED) {
|
| + bitmap_->Set(cell.cell_id(), false);
|
| + backup_bitmap_->Set(cell.cell_id(), false);
|
| + } else if (state == ENTRY_FREE) {
|
| + cell.Clear();
|
| + Write(cell);
|
| + header()->used_cells--;
|
| + return;
|
| + }
|
| + cell.SetState(state);
|
| +
|
| + Save(&cell);
|
| +}
|
| +
|
| +void IndexTable::UpdateTime(uint32 hash, Addr address, base::Time current) {
|
| + EntryCell cell = FindEntryCell(hash, address);
|
| + if (!cell.IsValid())
|
| + return;
|
| +
|
| + int minutes = CalculateTimestamp(current);
|
| +
|
| + // Keep about 3 months of headroom.
|
| + const int kMaxTimestamp = (1 << 20) - 60 * 24 * 90;
|
| + if (minutes > kMaxTimestamp) {
|
| + // TODO(rvargas):
|
| + // Update header->old_time and trigger a timer
|
| + // Rebaseline timestamps and don't update sums
|
| + // Start a timer (about 2 backups)
|
| + // Fix all checksums and trigger another timer
|
| + // Update header->old_time because rebaseline is done.
|
| + minutes = std::min(minutes, (1 << 20) - 1);
|
| + }
|
| +
|
| + cell.SetTimestamp(minutes);
|
| + Save(&cell);
|
| +}
|
| +
|
| +void IndexTable::Save(EntryCell* cell) {
|
| + cell->FixSum();
|
| + Write(*cell);
|
| +}
|
| +
|
| +void IndexTable::GetOldest(CellList* no_use, CellList* low_use,
|
| + CellList* high_use) {
|
| + header_->num_no_use_entries = 0;
|
| + header_->num_low_use_entries = 0;
|
| + header_->num_high_use_entries = 0;
|
| + header_->num_evicted_entries = 0;
|
| +
|
| + int no_use_time = kint32max;
|
| + int low_use_time = kint32max;
|
| + int high_use_time = kint32max;
|
| + for (int i = 0; i < static_cast<int32>(mask_ + 1); i++) {
|
| + int bucket_id = i;
|
| + IndexBucket* bucket = &main_table_[i];
|
| + for (;;) {
|
| + GetOldestFromBucket(bucket, i, no_use, &no_use_time, low_use,
|
| + &low_use_time, high_use, &high_use_time);
|
| +
|
| + bucket_id = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
|
| + &bucket);
|
| + if (!bucket_id)
|
| + break;
|
| + }
|
| + }
|
| + header_->num_entries = header_->num_no_use_entries +
|
| + header_->num_low_use_entries +
|
| + header_->num_high_use_entries +
|
| + header_->num_evicted_entries;
|
| + modified_ = true;
|
| +}
|
| +
|
| +bool IndexTable::GetNextCells(IndexIterator* iterator) {
|
| + int current_time = iterator->timestamp;
|
| + iterator->cells.clear();
|
| + iterator->timestamp = iterator->forward ? kint32max : 0;
|
| +
|
| + for (int i = 0; i < static_cast<int32>(mask_ + 1); i++) {
|
| + int bucket_id = i;
|
| + IndexBucket* bucket = &main_table_[i];
|
| + for (;;) {
|
| + GetNewestFromBucket(bucket, i, current_time, iterator);
|
| +
|
| + bucket_id = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
|
| + &bucket);
|
| + if (!bucket_id)
|
| + break;
|
| + }
|
| + }
|
| + return !iterator->cells.empty();
|
| +}
|
| +
|
| +void IndexTable::OnBackupTimer() {
|
| + if (!modified_)
|
| + return;
|
| +
|
| + int num_words = (header_->table_len + 31) / 32;
|
| + int num_bytes = num_words * 4 + static_cast<int>(sizeof(*header_));
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(num_bytes));
|
| + memcpy(buffer->data(), header_, sizeof(*header_));
|
| + memcpy(buffer->data() + sizeof(*header_), backup_bitmap_storage_.get(),
|
| + num_words * 4);
|
| + backend_->SaveIndex(buffer, num_bytes);
|
| + modified_ = false;
|
| +}
|
| +
|
| +// -----------------------------------------------------------------------
|
| +
|
| +EntryCell IndexTable::FindEntryCellImpl(uint32 hash, Addr address,
|
| + bool allow_deleted) {
|
| + int bucket_id = static_cast<int>(hash & mask_);
|
| + IndexBucket* bucket = &main_table_[bucket_id];
|
| + for (;;) {
|
| + for (int i = 0; i < kCellsPerBucket; i++) {
|
| + IndexCell* current_cell = &bucket->cells[i];
|
| + if (!GetAddressValue(*current_cell))
|
| + continue;
|
| + DCHECK(SanityCheck(*current_cell));
|
| + if (IsHashMatch(*current_cell, hash)) {
|
| + // We have a match.
|
| + int cell_id = bucket_id * kCellsPerBucket + i;
|
| + EntryCell entry_cell(cell_id, hash, *current_cell, small_table_);
|
| + if (entry_cell.GetAddress() != address)
|
| + continue;
|
| +
|
| + if (!allow_deleted && entry_cell.GetState() == ENTRY_DELETED)
|
| + continue;
|
| +
|
| + return entry_cell;
|
| + }
|
| + }
|
| + bucket_id = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
|
| + &bucket);
|
| + if (!bucket_id)
|
| + break;
|
| + }
|
| + return EntryCell();
|
| +}
|
| +
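|
| +// Verifies that the state of |cell| is consistent with the allocation
|
| +// bitmaps. On a mismatch, an interrupted deletion is completed here, and any
|
| +// other inconsistency is marked as ENTRY_FIXING for the backend to repair.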
|
| +void IndexTable::CheckState(const EntryCell& cell) {
|
| + int current_state = cell.GetState();
|
| + if (current_state != ENTRY_FIXING) {
|
| + bool present = ((current_state & 3) != 0); // Look at the low two bits.
|
| + if (present != bitmap_->Get(cell.cell_id()) ||
|
| + present != backup_bitmap_->Get(cell.cell_id())) {
|
| + // There's a mismatch.
|
| + if (current_state == ENTRY_DELETED) {
|
| + // We were in the process of deleting this entry. Finish now.
|
| + backend_->DeleteCell(cell);
|
| + } else {
|
| + current_state = ENTRY_FIXING;
|
| + EntryCell bad_cell(cell);
|
| + bad_cell.SetState(ENTRY_FIXING);
|
| + Save(&bad_cell);
|
| + }
|
| + }
|
| + }
|
| +
|
| + if (current_state == ENTRY_FIXING)
|
| + backend_->FixCell(cell);
|
| +}
|
| +
|
| +void IndexTable::Write(const EntryCell& cell) {
|
| + IndexBucket* bucket = NULL;
|
| + int bucket_id = cell.cell_id() / kCellsPerBucket;
|
| + if (bucket_id < static_cast<int32>(mask_ + 1)) {
|
| + bucket = &main_table_[bucket_id];
|
| + } else {
|
| + DCHECK_LE(bucket_id, header()->max_bucket);
|
| + bucket = &extra_table_[bucket_id - (mask_ + 1)];
|
| + }
|
| +
|
| + int cell_number = cell.cell_id() % kCellsPerBucket;
|
| + if (GetAddressValue(bucket->cells[cell_number]) && cell.GetAddressValue()) {
|
| + DCHECK_EQ(cell.GetAddressValue(),
|
| + GetAddressValue(bucket->cells[cell_number]));
|
| + }
|
| + cell.Serialize(&bucket->cells[cell_number]);
|
| +}
|
| +
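|
| +// Allocates the next bucket from the extra table, asking the backend to grow
|
| +// the index when the remaining room falls below a safety window. Returns the
|
| +// new bucket id, or 0 if the extra table is exhausted.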
|
| +int IndexTable::NewExtraBucket() {
|
| + int safe_window = (header()->table_len < kNumExtraBlocks * 2) ?
|
| + kNumExtraBlocks / 4 : kNumExtraBlocks;
|
| + if (header()->table_len - header()->max_bucket * kCellsPerBucket <
|
| + safe_window) {
|
| + backend_->GrowIndex();
|
| + }
|
| +
|
| + if (header()->max_bucket * kCellsPerBucket ==
|
| + header()->table_len - kCellsPerBucket) {
|
| + return 0;
|
| + }
|
| +
|
| + header()->max_bucket++;
|
| + return header()->max_bucket;
|
| +}
|
| +
|
| +void IndexTable::GetOldestFromBucket(IndexBucket* bucket, int bucket_hash,
|
| + CellList* no_use, int* no_use_time,
|
| + CellList* low_use, int* low_use_time,
|
| + CellList* high_use, int* high_use_time) {
|
| + for (int i = 0; i < kCellsPerBucket; i++) {
|
| + IndexCell& current_cell = bucket->cells[i];
|
| + if (!GetAddressValue(current_cell))
|
| + continue;
|
| + DCHECK(SanityCheck(current_cell));
|
| + if (!IsNormalState(current_cell))
|
| + continue;
|
| +
|
| + EntryCell entry_cell(0, GetFullHash(current_cell, bucket_hash),
|
| + current_cell, small_table_);
|
| + switch (GetCellGroup(current_cell)) {
|
| + case ENTRY_NO_USE:
|
| + UpdateListWithCell(bucket_hash, entry_cell, no_use, no_use_time);
|
| + header_->num_no_use_entries++;
|
| + break;
|
| + case ENTRY_LOW_USE:
|
| + UpdateListWithCell(bucket_hash, entry_cell, low_use, low_use_time);
|
| + header_->num_low_use_entries++;
|
| + break;
|
| + case ENTRY_HIGH_USE:
|
| + UpdateListWithCell(bucket_hash, entry_cell, high_use, high_use_time);
|
| + header_->num_high_use_entries++;
|
| + break;
|
| + case ENTRY_EVICTED:
|
| + header_->num_evicted_entries++;
|
| + break;
|
| + default:
|
| + NOTREACHED();
|
| + }
|
| + }
|
| +}
|
| +
|
| +void IndexTable::GetNewestFromBucket(IndexBucket* bucket,
|
| + int bucket_hash,
|
| + int limit_time,
|
| + IndexIterator* iterator) {
|
| + for (int i = 0; i < kCellsPerBucket; i++) {
|
| + IndexCell& current_cell = bucket->cells[i];
|
| + if (!GetAddressValue(current_cell))
|
| + continue;
|
| + DCHECK(SanityCheck(current_cell));
|
| + if (!IsNormalState(current_cell))
|
| + continue;
|
| +
|
| + int time = GetCellTimestamp(current_cell);
|
| + switch (GetCellGroup(current_cell)) {
|
| + case disk_cache::ENTRY_NO_USE:
|
| + case disk_cache::ENTRY_LOW_USE:
|
| + case disk_cache::ENTRY_HIGH_USE:
|
| + if (iterator->forward && time <= limit_time)
|
| + continue;
|
| + if (!iterator->forward && time >= limit_time)
|
| + continue;
|
| +
|
| + if ((iterator->forward && time < iterator->timestamp) ||
|
| + (!iterator->forward && time > iterator->timestamp)) {
|
| + iterator->timestamp = time;
|
| + iterator->cells.clear();
|
| + }
|
| + if (time == iterator->timestamp) {
|
| + EntryCell entry_cell(0, GetFullHash(current_cell, bucket_hash),
|
| + current_cell, small_table_);
|
| + CellInfo cell_info = {
|
| + entry_cell.hash(),
|
| + entry_cell.GetAddress()
|
| + };
|
| + iterator->cells.push_back(cell_info);
|
| + }
|
| + }
|
| + }
|
| +}
|
| +
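|
| +// Redistributes cells after the index doubles in size: cells whose hash now
|
| +// maps to the upper half of the main table, and every cell parked on the old
|
| +// extra table, are reinserted at their new location. This also handles the
|
| +// upgrade from the small-cell format to the regular one.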
|
| +void IndexTable::MoveCells(IndexBucket* old_extra_table) {
|
| + int max_hash = (mask_ + 1) / 2;
|
| + int max_bucket = header()->max_bucket;
|
| + header()->max_bucket = mask_;
|
| + int used_cells = header()->used_cells;
|
| +
|
| + // Consider a large cache: a cell stores the upper 18 bits of the hash
|
| + // (h >> 14). If the table is, say, 8 times the original size (growing from
|
| + // 4x), the bit that we are interested in is the 3rd bit of the stored
|
| + // value; in other words, (1 << extra_bits_) >> 1.
|
| + uint32 new_bit = (1 << extra_bits_) >> 1;
|
| +
|
| + scoped_ptr<IndexBucket[]> old_main_table;
|
| + IndexBucket* source_table = main_table_;
|
| + bool upgrade_format = !extra_bits_;
|
| + if (upgrade_format) {
|
| + // This method should deal with migrating a small table to a big one. Given
|
| + // that the first thing to do is read the old table, set small_table_ to
|
| + // match the format of the old table. Now, when moving a cell, the result
|
| + // cannot be placed back in the old table or we would end up reading it
|
| + // again and attempting to move it, so we have to copy the whole table at
|
| + // once.
|
| + DCHECK(!small_table_);
|
| + small_table_ = true;
|
| + old_main_table.reset(new IndexBucket[max_hash]);
|
| + memcpy(old_main_table.get(), main_table_, max_hash * sizeof(IndexBucket));
|
| + memset(main_table_, 0, max_hash * sizeof(IndexBucket));
|
| + source_table = old_main_table.get();
|
| + }
|
| +
|
| + for (int i = 0; i < max_hash; i++) {
|
| + int bucket_id = i;
|
| + IndexBucket* bucket = &source_table[i];
|
| + for (;;) {
|
| + for (int j = 0; j < kCellsPerBucket; j++) {
|
| + IndexCell& current_cell = bucket->cells[j];
|
| + if (!GetAddressValue(current_cell))
|
| + continue;
|
| + DCHECK(SanityCheck(current_cell));
|
| + if (bucket_id == i) {
|
| + if (upgrade_format || (GetHashValue(current_cell) & new_bit)) {
|
| + // Move this cell to the upper half of the table.
|
| + MoveSingleCell(¤t_cell, bucket_id * kCellsPerBucket + j, i,
|
| + true);
|
| + }
|
| + } else {
|
| + // All cells on extra buckets have to move.
|
| + MoveSingleCell(¤t_cell, bucket_id * kCellsPerBucket + j, i,
|
| + true);
|
| + }
|
| + }
|
| +
|
| + bucket_id = GetNextBucket(max_hash, max_bucket, old_extra_table, &bucket);
|
| + if (!bucket_id)
|
| + break;
|
| + }
|
| + }
|
| +
|
| + DCHECK_EQ(header()->used_cells, used_cells);
|
| +
|
| + if (upgrade_format) {
|
| + small_table_ = false;
|
| + header()->flags &= ~SMALL_CACHE;
|
| + }
|
| +}
|
| +
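|
| +// Reinserts the entry stored in |current_cell| at the location derived from
|
| +// its full hash, carrying over state, group, reuse count and timestamp. If
|
| +// no free cell can be found, the entry is left in place to be dealt with
|
| +// later.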
|
| +void IndexTable::MoveSingleCell(IndexCell* current_cell, int cell_id,
|
| + int main_table_index, bool growing) {
|
| + uint32 hash = GetFullHash(*current_cell, main_table_index);
|
| + EntryCell old_cell(cell_id, hash, *current_cell, small_table_);
|
| +
|
| + bool upgrade_format = !extra_bits_ && growing;
|
| + if (upgrade_format)
|
| + small_table_ = false;
|
| + EntryCell new_cell = CreateEntryCell(hash, old_cell.GetAddress());
|
| +
|
| + if (!new_cell.IsValid()) {
|
| + // We'll deal with this entry later.
|
| + if (upgrade_format)
|
| + small_table_ = true;
|
| + return;
|
| + }
|
| +
|
| + new_cell.SetState(old_cell.GetState());
|
| + new_cell.SetGroup(old_cell.GetGroup());
|
| + new_cell.SetReuse(old_cell.GetReuse());
|
| + new_cell.SetTimestamp(old_cell.GetTimestamp());
|
| + Save(&new_cell);
|
| + modified_ = true;
|
| + if (upgrade_format)
|
| + small_table_ = true;
|
| +
|
| + if (old_cell.GetState() == ENTRY_DELETED) {
|
| + bitmap_->Set(new_cell.cell_id(), false);
|
| + backup_bitmap_->Set(new_cell.cell_id(), false);
|
| + }
|
| +
|
| + if (!growing || cell_id / kCellsPerBucket == main_table_index) {
|
| + // When growing, only delete the entries that live on the main table.
|
| + if (!upgrade_format) {
|
| + old_cell.Clear();
|
| + Write(old_cell);
|
| + }
|
| +
|
| + if (cell_id != new_cell.cell_id()) {
|
| + bitmap_->Set(old_cell.cell_id(), false);
|
| + backup_bitmap_->Set(old_cell.cell_id(), false);
|
| + }
|
| + }
|
| + header()->used_cells--;
|
| +}
|
| +
|
| +void IndexTable::HandleMisplacedCell(IndexCell* current_cell, int cell_id,
|
| + int main_table_index) {
|
| + // The cell may be misplaced, or a duplicate cell exists with this data.
|
| + uint32 hash = GetFullHash(*current_cell, main_table_index);
|
| + MoveSingleCell(current_cell, cell_id, main_table_index, false);
|
| +
|
| + // Now look for a duplicate cell.
|
| + CheckBucketList(hash & mask_);
|
| +}
|
| +
|
| +void IndexTable::CheckBucketList(int bucket_id) {
|
| + typedef std::pair<int, EntryGroup> AddressAndGroup;
|
| + std::set<AddressAndGroup> entries;
|
| + IndexBucket* bucket = &main_table_[bucket_id];
|
| + int bucket_hash = bucket_id;
|
| + for (;;) {
|
| + for (int i = 0; i < kCellsPerBucket; i++) {
|
| + IndexCell* current_cell = &bucket->cells[i];
|
| + if (!GetAddressValue(*current_cell))
|
| + continue;
|
| + if (!SanityCheck(*current_cell)) {
|
| + NOTREACHED();
|
| + current_cell->Clear();
|
| + continue;
|
| + }
|
| + int cell_id = bucket_id * kCellsPerBucket + i;
|
| + EntryCell cell(cell_id, GetFullHash(*current_cell, bucket_hash),
|
| + *current_cell, small_table_);
|
| + if (!entries.insert(std::make_pair(cell.GetAddress().value(),
|
| + cell.GetGroup())).second) {
|
| + current_cell->Clear();
|
| + continue;
|
| + }
|
| + CheckState(cell);
|
| + }
|
| +
|
| + bucket_id = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
|
| + &bucket);
|
| + if (!bucket_id)
|
| + break;
|
| + }
|
| +}
|
| +
|
| +uint32 IndexTable::GetAddressValue(const IndexCell& cell) {
|
| + if (small_table_)
|
| + return GetCellSmallTableAddress(cell);
|
| +
|
| + return GetCellAddress(cell);
|
| +}
|
| +
|
| +uint32 IndexTable::GetHashValue(const IndexCell& cell) {
|
| + if (small_table_)
|
| + return GetCellSmallTableHash(cell);
|
| +
|
| + return GetCellHash(cell);
|
| +}
|
| +
|
| +uint32 IndexTable::GetFullHash(const IndexCell& cell, uint32 lower_part) {
|
| + // It is OK for the high order bits of lower_part to overlap with the stored
|
| + // part of the hash.
|
| + if (small_table_)
|
| + return (GetCellSmallTableHash(cell) << kHashSmallTableShift) | lower_part;
|
| +
|
| + return (GetCellHash(cell) << kHashShift) | lower_part;
|
| +}
|
| +
|
| +// All the bits stored in the cell should match the provided hash.
|
| +bool IndexTable::IsHashMatch(const IndexCell& cell, uint32 hash) {
|
| + hash = small_table_ ? hash >> kHashSmallTableShift : hash >> kHashShift;
|
| + return GetHashValue(cell) == hash;
|
| +}
|
| +
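|
| +// Returns true if the low |extra_bits_| bits of the stored hash (the bits
|
| +// that overlap with the bucket index) don't match |hash|, meaning that the
|
| +// cell sits on the wrong bucket chain after a table resize.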
|
| +bool IndexTable::MisplacedHash(const IndexCell& cell, uint32 hash) {
|
| + if (!extra_bits_)
|
| + return false;
|
| +
|
| + uint32 mask = (1 << extra_bits_) - 1;
|
| + hash = small_table_ ? hash >> kHashSmallTableShift : hash >> kHashShift;
|
| + return (GetHashValue(cell) & mask) != (hash & mask);
|
| +}
|
| +
|
| +} // namespace disk_cache
|
|
|
| Property changes on: net\disk_cache\v3\index_table.cc
|
| ___________________________________________________________________
|
| Added: svn:eol-style
|
| + LF
|
|
|
|
|