Chromium Code Reviews| Index: content/browser/dom_storage/dom_storage_area.cc |
| diff --git a/content/browser/dom_storage/dom_storage_area.cc b/content/browser/dom_storage/dom_storage_area.cc |
| index 90a55a0ed8bddbfc261ee5386c2ba89e698f8da5..ae47f71aefff8594ce42b9b4b9d7c7ce0ef1c41c 100644 |
| --- a/content/browser/dom_storage/dom_storage_area.cc |
| +++ b/content/browser/dom_storage/dom_storage_area.cc |
| @@ -4,6 +4,8 @@ |
| #include "content/browser/dom_storage/dom_storage_area.h" |
| +#include <algorithm> |
| + |
| #include "base/bind.h" |
| #include "base/location.h" |
| #include "base/logging.h" |
| @@ -25,13 +27,27 @@ using storage::DatabaseUtil; |
| namespace content { |
| -static const int kCommitTimerSeconds = 1; |
| +// Delay for a moment after a value is set in anticipation |
| +// of other values being set, so changes are batched. |
| +static const int kCommitTimerDefaultDelay = 5; |
| + |
| +// Avoid committing too frequently regardless of the amount of data |
| +// being written. |
| +static const int kMaxCommitsPerHour = 6; |
| + |
| +// A data rate limit applies to the size of the key/value pairs being written. |
| +// A rate of 500k per hour is enough to fully populate an origin's area two |
| +// times over. |
| +static const int kMaxDataPerHour = 500 * 1024; |
|
cmumford
2015/02/03 00:45:24
double semicolon
michaeln
2015/02/03 19:59:14
Done. Also made this a function of the constant per-storage-area quota.
|
| DOMStorageArea::CommitBatch::CommitBatch() |
| : clear_all_first(false) { |
| } |
| DOMStorageArea::CommitBatch::~CommitBatch() {} |
| +size_t DOMStorageArea::CommitBatch::GetDataSize() { |
| + return DOMStorageMap::CountBytes(changed_values); |
| +} |
| // static |
| const base::FilePath::CharType DOMStorageArea::kDatabaseFileExtension[] = |
| @@ -65,7 +81,10 @@ DOMStorageArea::DOMStorageArea( |
| kPerStorageAreaOverQuotaAllowance)), |
| is_initial_import_done_(true), |
| is_shutdown_(false), |
| - commit_batches_in_flight_(0) { |
| + commit_batches_in_flight_(0), |
| + start_time_(base::Time::Now()), |
| + data_rate_limiter_(kMaxDataPerHour, base::TimeDelta::FromHours(1)), |
| + commit_rate_limiter_(kMaxCommitsPerHour, base::TimeDelta::FromHours(1)) { |
| if (!directory.empty()) { |
| base::FilePath path = directory.Append(DatabaseFileNameFromOrigin(origin_)); |
| backing_.reset(new LocalStorageDatabaseAdapter(path)); |
| @@ -88,7 +107,10 @@ DOMStorageArea::DOMStorageArea( |
| session_storage_backing_(session_storage_backing), |
| is_initial_import_done_(true), |
| is_shutdown_(false), |
| - commit_batches_in_flight_(0) { |
| + commit_batches_in_flight_(0), |
| + start_time_(base::Time::Now()), |
| + data_rate_limiter_(kMaxDataPerHour, base::TimeDelta::FromHours(1)), |
| + commit_rate_limiter_(kMaxCommitsPerHour, base::TimeDelta::FromHours(1)) { |
| DCHECK(namespace_id != kLocalStorageNamespaceId); |
| if (session_storage_backing) { |
| backing_.reset(new SessionStorageDatabaseAdapter( |
| @@ -137,7 +159,8 @@ bool DOMStorageArea::SetItem(const base::string16& key, |
| if (!map_->HasOneRef()) |
| map_ = map_->DeepCopy(); |
| bool success = map_->SetItem(key, value, old_value); |
| - if (success && backing_) { |
| + if (success && backing_ && |
| + (old_value->is_null() || old_value->string() != value)) { |
| CommitBatch* commit_batch = CreateCommitBatchIfNeeded(); |
| commit_batch->changed_values[key] = base::NullableString16(value, false); |
| } |
| @@ -329,12 +352,20 @@ DOMStorageArea::CommitBatch* DOMStorageArea::CreateCommitBatchIfNeeded() { |
| task_runner_->PostDelayedTask( |
| FROM_HERE, |
| base::Bind(&DOMStorageArea::OnCommitTimer, this), |
| - base::TimeDelta::FromSeconds(kCommitTimerSeconds)); |
| + ComputeCommitDelay()); |
| } |
| } |
| return commit_batch_.get(); |
| } |
| +base::TimeDelta DOMStorageArea::ComputeCommitDelay() { |
| + base::TimeDelta elapsed_time = base::Time::Now() - start_time_; |
| + return std::max( |
| + base::TimeDelta::FromSeconds(kCommitTimerDefaultDelay), |
| + std::max(commit_rate_limiter_.ComputeDelayNeeded(elapsed_time), |
| + data_rate_limiter_.ComputeDelayNeeded(elapsed_time))); |
| +} |
| + |
| void DOMStorageArea::OnCommitTimer() { |
| if (is_shutdown_) |
| return; |
| @@ -346,6 +377,9 @@ void DOMStorageArea::OnCommitTimer() { |
| if (!commit_batch_) |
| return; |
| + commit_rate_limiter_.AddSamples(1); |
| + data_rate_limiter_.AddSamples(commit_batch_->GetDataSize()); |
| + |
| // This method executes on the primary sequence, we schedule |
| // a task for immediate execution on the commit sequence. |
| DCHECK(task_runner_->IsRunningOnPrimarySequence()); |
| @@ -381,7 +415,7 @@ void DOMStorageArea::OnCommitComplete() { |
| task_runner_->PostDelayedTask( |
| FROM_HERE, |
| base::Bind(&DOMStorageArea::OnCommitTimer, this), |
| - base::TimeDelta::FromSeconds(kCommitTimerSeconds)); |
| + ComputeCommitDelay()); |
| } |
| } |