Index: content/browser/dom_storage/dom_storage_area.cc
diff --git a/content/browser/dom_storage/dom_storage_area.cc b/content/browser/dom_storage/dom_storage_area.cc
index 67c7b98ac1ad2daa431256da55079eadbd02a822..a141da37b21456c6761b953a9e14abfe6bfd38af 100644
--- a/content/browser/dom_storage/dom_storage_area.cc
+++ b/content/browser/dom_storage/dom_storage_area.cc
@@ -33,12 +33,11 @@ namespace {
// Delay for a moment after a value is set in anticipation
// of other values being set, so changes are batched.
-const int kCommitDefaultDelaySecs = 5;
+const int kCommitDefaultDelaySecs = 10;
michaeln 2015/05/14 01:33:48:
Please don't hastily remove the commit rate limiter ...

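For readers who don't have dom_storage_area.h open: both limiters touched by this patch expose a small "N samples per time quantum" interface (add_samples() plus ComputeDelayNeeded()). Below is a minimal, self-contained sketch of that idea, using std::chrono and size_t in place of Chromium's base types; the class name RateLimiterSketch and its method bodies are illustrative assumptions, not the actual implementation.

// Minimal sketch, not Chromium's code: a limiter that is given a budget of
// `rate` samples per `time_quantum` and reports how long the caller should
// wait so the observed rate stays under that budget.
#include <chrono>
#include <cstddef>

class RateLimiterSketch {
 public:
  RateLimiterSketch(size_t rate, std::chrono::seconds time_quantum)
      : rate_(rate), time_quantum_(time_quantum) {}

  // Record work done: bytes written for a data limiter, or 1 per commit for
  // a commit-count limiter (how the removed commit_rate_limiter_ was fed).
  void add_samples(size_t samples) { total_samples_ += samples; }

  // How much wall-clock time the accumulated samples "deserve" at the
  // configured rate.
  std::chrono::duration<double> ComputeElapsedTimeNeeded() const {
    return time_quantum_ *
           (static_cast<double>(total_samples_) / static_cast<double>(rate_));
  }

  // Additional delay required, given how much time has actually elapsed.
  std::chrono::duration<double> ComputeDelayNeeded(
      std::chrono::duration<double> elapsed_time) const {
    const auto needed = ComputeElapsedTimeNeeded();
    return needed > elapsed_time ? needed - elapsed_time
                                 : std::chrono::duration<double>::zero();
  }

 private:
  size_t rate_;
  size_t total_samples_ = 0;
  std::chrono::seconds time_quantum_;
};
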
// To avoid excessive IO we apply limits to the amount of data being written
// and the frequency of writes. The specific values used are somewhat arbitrary.
-const int kMaxBytesPerDay = kPerStorageAreaQuota * 2;
-const int kMaxCommitsPerHour = 6;
+const int kMaxBytesPerMinute = kPerStorageAreaQuota;
michaeln 2015/05/14 01:33:48:
Adjusting the values is ok, but this adjustment goes ...

} // namespace
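To put the constant change in perspective: the old budget allowed 2*Q bytes per 24 hours (with Q = kPerStorageAreaQuota, whose actual value lives in dom_storage_types.h and is not visible in this diff), while the new one allows Q bytes per minute, a 720x larger sustained write rate. The separate cap of 6 commits per hour, which on average spaced commits roughly ten minutes apart, disappears entirely. The arithmetic:

// Arithmetic only; Q stands in for kPerStorageAreaQuota, whose real value is
// defined elsewhere (dom_storage_types.h) and not shown in this diff.
constexpr double Q = 1.0;

constexpr double kOldBytesPerSec = (2.0 * Q) / (24 * 60 * 60);  // kMaxBytesPerDay
constexpr double kNewBytesPerSec = Q / 60.0;                    // kMaxBytesPerMinute
constexpr double kBudgetRatio = kNewBytesPerSec / kOldBytesPerSec;

// (Q / 60) versus (2*Q / 86400): the per-minute budget is 720x the per-day one.
static_assert(kBudgetRatio > 719.9 && kBudgetRatio < 720.1,
              "new sustained write budget is 720x the old one");
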
@@ -109,8 +108,7 @@ DOMStorageArea::DOMStorageArea(const GURL& origin,
is_shutdown_(false),
commit_batches_in_flight_(0),
start_time_(base::TimeTicks::Now()),
- data_rate_limiter_(kMaxBytesPerDay, base::TimeDelta::FromHours(24)),
- commit_rate_limiter_(kMaxCommitsPerHour, base::TimeDelta::FromHours(1)) {
+ data_rate_limiter_(kMaxBytesPerMinute, base::TimeDelta::FromMinutes(1)) {
if (!directory.empty()) {
base::FilePath path = directory.Append(DatabaseFileNameFromOrigin(origin_));
backing_.reset(new LocalStorageDatabaseAdapter(path));
@@ -134,8 +132,7 @@ DOMStorageArea::DOMStorageArea(int64 namespace_id,
is_shutdown_(false),
commit_batches_in_flight_(0),
start_time_(base::TimeTicks::Now()),
- data_rate_limiter_(kMaxBytesPerDay, base::TimeDelta::FromHours(24)),
- commit_rate_limiter_(kMaxCommitsPerHour, base::TimeDelta::FromHours(1)) {
+ data_rate_limiter_(kMaxBytesPerMinute, base::TimeDelta::FromMinutes(1)) {
DCHECK(namespace_id != kLocalStorageNamespaceId);
if (session_storage_backing) {
backing_.reset(new SessionStorageDatabaseAdapter(
@@ -400,8 +397,7 @@ base::TimeDelta DOMStorageArea::ComputeCommitDelay() const {
base::TimeDelta elapsed_time = base::TimeTicks::Now() - start_time_;
base::TimeDelta delay = std::max(
base::TimeDelta::FromSeconds(kCommitDefaultDelaySecs),
- std::max(commit_rate_limiter_.ComputeDelayNeeded(elapsed_time),
- data_rate_limiter_.ComputeDelayNeeded(elapsed_time)));
+ data_rate_limiter_.ComputeDelayNeeded(elapsed_time));
UMA_HISTOGRAM_LONG_TIMES("LocalStorage.CommitDelay", delay);
return delay;
}
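A concrete, hypothetical run of the patched ComputeCommitDelay(), using the RateLimiterSketch from the note above; the 10 MiB quota here is an assumed round number, not necessarily the real kPerStorageAreaQuota.

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <cstdio>

// Assumes RateLimiterSketch from the earlier sketch is visible here.
int main() {
  constexpr size_t kAssumedQuota = 10 * 1024 * 1024;  // hypothetical 10 MiB
  RateLimiterSketch data_rate_limiter(kAssumedQuota, std::chrono::seconds(60));

  // Pretend 5 MiB of commit batches have already been written...
  data_rate_limiter.add_samples(5 * 1024 * 1024);
  // ...but only 12 seconds have elapsed since the area was created.
  const std::chrono::duration<double> elapsed(12.0);

  // Mirrors the patched computation: max(default delay, data-rate delay).
  const auto delay =
      std::max(std::chrono::duration<double>(10.0),  // kCommitDefaultDelaySecs
               data_rate_limiter.ComputeDelayNeeded(elapsed));

  // 5 MiB at a budget of 10 MiB/min "deserves" 30 s; 12 s have passed,
  // so the next commit is delayed by 18 s.
  std::printf("commit delay: %.1f s\n", delay.count());
  return 0;
}
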
@@ -424,7 +420,6 @@ void DOMStorageArea::PostCommitTask() {
DCHECK(backing_.get());
- commit_rate_limiter_.add_samples(1);
data_rate_limiter_.add_samples(commit_batch_->GetDataSize());
// This method executes on the primary sequence, we schedule