Index: content/browser/dom_storage/dom_storage_area.cc
diff --git a/content/browser/dom_storage/dom_storage_area.cc b/content/browser/dom_storage/dom_storage_area.cc
index 90a55a0ed8bddbfc261ee5386c2ba89e698f8da5..8dfb4c76200ad74a94b6347bdd4380a75f10905c 100644
--- a/content/browser/dom_storage/dom_storage_area.cc
+++ b/content/browser/dom_storage/dom_storage_area.cc
@@ -4,11 +4,14 @@
#include "content/browser/dom_storage/dom_storage_area.h"
+#include <algorithm>
+
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/sys_info.h"
#include "base/time/time.h"
#include "content/browser/dom_storage/dom_storage_namespace.h"
#include "content/browser/dom_storage/dom_storage_task_runner.h"
@@ -25,13 +28,54 @@ using storage::DatabaseUtil;
namespace content {
-static const int kCommitTimerSeconds = 1;
+// Delay the commit for a moment after a value is set, in anticipation
+// of other values being set as well, so that changes are batched.
+static const int kCommitDefaultDelay = 5;
cmumford
2015/02/11 22:52:27
What is best practice regarding units? Like kCommi
michaeln
2015/02/11 23:30:27
yup, done
+
+// The delay used if a commit is scheduled during startup. The same value,
+// read as a process uptime in seconds, serves as the proxy for 'during
+// startup'.
+static const int kCommitDuringStartupDelay = 60;
+
+// To avoid excessive IO we apply limits to the amount of data being written
+// and the frequency of writes. The specific values used are somewhat arbitrary.
+static const int kMaxDataPerDay = kPerStorageAreaQuota * 2;
cmumford
2015/02/11 22:52:27
kMaxBytesPerDay?
michaeln
2015/02/11 23:30:28
Done.
+static const int kMaxCommitsPerHour = 6;
+
+DOMStorageArea::RateLimiter::RateLimiter(size_t desired_rate,
+                                         base::TimeDelta time_quantum)
+    : rate_(desired_rate), samples_(0), time_quantum_(time_quantum) {
+  DCHECK_GT(desired_rate, 0ul);
+}
+
+void DOMStorageArea::RateLimiter::add_samples(size_t samples) {
+  samples_ += samples;
+}
+
+// Computes the total time needed to process the total samples seen
+// at the desired rate.
+base::TimeDelta DOMStorageArea::RateLimiter::ComputeTimeNeeded() const {
+  return base::TimeDelta::FromInternalValue((samples_ / rate_) *
+                                            time_quantum_.ToInternalValue());
+}
+
+// Given the elapsed time since the start of the rate limiting session,
+// computes the delay needed to mimic having processed the total samples
+// seen at the desired rate.
+base::TimeDelta DOMStorageArea::RateLimiter::ComputeDelayNeeded(
+    const base::TimeDelta elapsed_time) const {
+  base::TimeDelta time_needed = ComputeTimeNeeded();
+  if (time_needed > elapsed_time)
+    return time_needed - elapsed_time;
+  return base::TimeDelta();
+}
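
To make the limiter arithmetic concrete, here is a standalone sketch of the same computation with plain integers standing in for base::TimeDelta; the function name and values are illustrative only, with the 6-per-hour figures mirroring kMaxCommitsPerHour above. Because samples_ / rate_ is integer division, ComputeTimeNeeded() only grows once a whole multiple of the rate has been consumed:

#include <algorithm>
#include <cstdint>
#include <iostream>

// Mirrors RateLimiter::ComputeTimeNeeded()/ComputeDelayNeeded(), in seconds.
int64_t ComputeDelayNeededSeconds(int64_t samples, int64_t rate,
                                  int64_t quantum_seconds,
                                  int64_t elapsed_seconds) {
  int64_t time_needed = (samples / rate) * quantum_seconds;  // integer divide
  return std::max<int64_t>(0, time_needed - elapsed_seconds);
}

int main() {
  // Like commit_rate_limiter_: 6 samples per one-hour (3600 s) quantum.
  // Six commits in the first ten minutes: (6 / 6) * 3600 - 600 = 3000 s.
  std::cout << ComputeDelayNeededSeconds(6, 6, 3600, 600) << "\n";  // 3000
  // Five commits in the same window: 5 / 6 == 0, so no delay yet.
  std::cout << ComputeDelayNeededSeconds(5, 6, 3600, 600) << "\n";  // 0
  return 0;
}

data_rate_limiter_ applies the identical math with bytes as the samples and a 24-hour quantum.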
-DOMStorageArea::CommitBatch::CommitBatch()
-    : clear_all_first(false) {
+DOMStorageArea::CommitBatch::CommitBatch() : clear_all_first(false) {
}
DOMStorageArea::CommitBatch::~CommitBatch() {}
+size_t DOMStorageArea::CommitBatch::GetDataSize() const {
+  return DOMStorageMap::CountBytes(changed_values);
+}
// static
const base::FilePath::CharType DOMStorageArea::kDatabaseFileExtension[] =
@@ -55,17 +99,21 @@ GURL DOMStorageArea::OriginFromDatabaseFileName(const base::FilePath& name) {
  return storage::GetOriginFromIdentifier(origin_id);
}
-DOMStorageArea::DOMStorageArea(
-    const GURL& origin, const base::FilePath& directory,
-    DOMStorageTaskRunner* task_runner)
-    : namespace_id_(kLocalStorageNamespaceId), origin_(origin),
+DOMStorageArea::DOMStorageArea(const GURL& origin,
+                               const base::FilePath& directory,
+                               DOMStorageTaskRunner* task_runner)
+    : namespace_id_(kLocalStorageNamespaceId),
+      origin_(origin),
      directory_(directory),
      task_runner_(task_runner),
      map_(new DOMStorageMap(kPerStorageAreaQuota +
                             kPerStorageAreaOverQuotaAllowance)),
      is_initial_import_done_(true),
      is_shutdown_(false),
-      commit_batches_in_flight_(0) {
+      commit_batches_in_flight_(0),
+      start_time_(base::TimeTicks::Now()),
+      data_rate_limiter_(kMaxDataPerDay, base::TimeDelta::FromHours(24)),
+      commit_rate_limiter_(kMaxCommitsPerHour, base::TimeDelta::FromHours(1)) {
  if (!directory.empty()) {
    base::FilePath path = directory.Append(DatabaseFileNameFromOrigin(origin_));
    backing_.reset(new LocalStorageDatabaseAdapter(path));
@@ -73,12 +121,11 @@ DOMStorageArea::DOMStorageArea(
  }
}
-DOMStorageArea::DOMStorageArea(
-    int64 namespace_id,
-    const std::string& persistent_namespace_id,
-    const GURL& origin,
-    SessionStorageDatabase* session_storage_backing,
-    DOMStorageTaskRunner* task_runner)
+DOMStorageArea::DOMStorageArea(int64 namespace_id,
+                               const std::string& persistent_namespace_id,
+                               const GURL& origin,
+                               SessionStorageDatabase* session_storage_backing,
+                               DOMStorageTaskRunner* task_runner)
    : namespace_id_(namespace_id),
      persistent_namespace_id_(persistent_namespace_id),
      origin_(origin),
@@ -88,7 +135,10 @@ DOMStorageArea::DOMStorageArea(
      session_storage_backing_(session_storage_backing),
      is_initial_import_done_(true),
      is_shutdown_(false),
-      commit_batches_in_flight_(0) {
+      commit_batches_in_flight_(0),
+      start_time_(base::TimeTicks::Now()),
+      data_rate_limiter_(kMaxDataPerDay, base::TimeDelta::FromHours(24)),
+      commit_rate_limiter_(kMaxCommitsPerHour, base::TimeDelta::FromHours(1)) {
  DCHECK(namespace_id != kLocalStorageNamespaceId);
  if (session_storage_backing) {
    backing_.reset(new SessionStorageDatabaseAdapter(
@@ -137,7 +187,8 @@ bool DOMStorageArea::SetItem(const base::string16& key,
  if (!map_->HasOneRef())
    map_ = map_->DeepCopy();
  bool success = map_->SetItem(key, value, old_value);
-  if (success && backing_) {
+  if (success && backing_ &&
+      (old_value->is_null() || old_value->string() != value)) {
    CommitBatch* commit_batch = CreateCommitBatchIfNeeded();
    commit_batch->changed_values[key] = base::NullableString16(value, false);
  }
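
For illustration, this is the kind of redundant write the new condition filters out; area and old_value are hypothetical names for a DOMStorageArea pointer and a scratch out-parameter, not part of the patch:

// Hypothetical caller. First write: succeeds and records "key" in the batch.
base::NullableString16 old_value;
area->SetItem(base::ASCIIToUTF16("key"), base::ASCIIToUTF16("value"),
              &old_value);
// Identical rewrite: still succeeds against the in-memory map, but the prior
// value equals the new one, so no commit batch entry is created this time.
area->SetItem(base::ASCIIToUTF16("key"), base::ASCIIToUTF16("value"),
              &old_value);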
@@ -212,19 +263,21 @@ DOMStorageArea* DOMStorageArea::ShallowCopy(
  copy->is_initial_import_done_ = true;
  // All the uncommitted changes to this area need to happen before the actual
-  // shallow copy is made (scheduled by the upper layer). Another OnCommitTimer
-  // call might be in the event queue at this point, but it's handled gracefully
-  // when it fires.
+  // shallow copy is made (scheduled by the upper layer sometime after return).
  if (commit_batch_)
-    OnCommitTimer();
+    ScheduleImmediateCommit();
  return copy;
}
bool DOMStorageArea::HasUncommittedChanges() const {
-  DCHECK(!is_shutdown_);
  return commit_batch_.get() || commit_batches_in_flight_;
}
+void DOMStorageArea::ScheduleImmediateCommit() {
+  DCHECK(HasUncommittedChanges());
+  OnCommitTimer();
+}
+
void DOMStorageArea::DeleteOrigin() {
  DCHECK(!is_shutdown_);
  // This function shouldn't be called for sessionStorage.
@@ -327,25 +380,42 @@ DOMStorageArea::CommitBatch* DOMStorageArea::CreateCommitBatchIfNeeded() {
    // started after the commits have happened.
    if (!commit_batches_in_flight_) {
      task_runner_->PostDelayedTask(
-          FROM_HERE,
-          base::Bind(&DOMStorageArea::OnCommitTimer, this),
-          base::TimeDelta::FromSeconds(kCommitTimerSeconds));
+          FROM_HERE, base::Bind(&DOMStorageArea::OnCommitTimer, this),
+          ComputeCommitDelay());
    }
  }
  return commit_batch_.get();
}
+base::TimeDelta DOMStorageArea::ComputeCommitDelay() const {
+  base::TimeDelta elapsed_time = base::TimeTicks::Now() - start_time_;
+  base::TimeDelta delay;
+  if (base::SysInfo::Uptime() < kCommitDuringStartupDelay * 1000)
+    base::TimeDelta::FromSeconds(kCommitDuringStartupDelay);
cmumford
2015/02/11 22:52:27
Did you want to assign these values to delay?
michaeln
2015/02/11 23:30:28
that would be a huge improvement!
+  else
+    base::TimeDelta::FromSeconds(kCommitDefaultDelay);
+  delay = std::max(
+      delay,
+      std::max(commit_rate_limiter_.ComputeDelayNeeded(elapsed_time),
+               data_rate_limiter_.ComputeDelayNeeded(elapsed_time)));
+  UMA_HISTOGRAM_LONG_TIMES("LocalStorage.CommitDelay", delay);
+  return delay;
+}
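
As the comment thread above notes, the two FromSeconds() values are computed but never assigned, so delay starts at zero and only the rate limiters ever lengthen it. Per michaeln's reply the assignment was added in a later patch set; a corrected body would presumably read:

base::TimeDelta DOMStorageArea::ComputeCommitDelay() const {
  base::TimeDelta elapsed_time = base::TimeTicks::Now() - start_time_;
  base::TimeDelta delay;
  if (base::SysInfo::Uptime() < kCommitDuringStartupDelay * 1000)
    delay = base::TimeDelta::FromSeconds(kCommitDuringStartupDelay);
  else
    delay = base::TimeDelta::FromSeconds(kCommitDefaultDelay);
  // The startup/default floor is then raised as needed by the rate limiters.
  delay = std::max(
      delay, std::max(commit_rate_limiter_.ComputeDelayNeeded(elapsed_time),
                      data_rate_limiter_.ComputeDelayNeeded(elapsed_time)));
  UMA_HISTOGRAM_LONG_TIMES("LocalStorage.CommitDelay", delay);
  return delay;
}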
+
void DOMStorageArea::OnCommitTimer() {
  if (is_shutdown_)
    return;
  DCHECK(backing_.get());
-  // It's possible that there is nothing to commit, since a shallow copy occured
-  // before the timer fired.
+  // It's possible that there is nothing to commit if an immediate
+  // commit occurred after the timer was scheduled but before it fired.
  if (!commit_batch_)
    return;
+  commit_rate_limiter_.add_samples(1);
+  data_rate_limiter_.add_samples(commit_batch_->GetDataSize());
+
  // This method executes on the primary sequence, we schedule
  // a task for immediate execution on the commit sequence.
  DCHECK(task_runner_->IsRunningOnPrimarySequence());
@@ -379,9 +449,8 @@ void DOMStorageArea::OnCommitComplete() {
  if (commit_batch_.get() && !commit_batches_in_flight_) {
    // More changes have accrued, restart the timer.
    task_runner_->PostDelayedTask(
-        FROM_HERE,
-        base::Bind(&DOMStorageArea::OnCommitTimer, this),
-        base::TimeDelta::FromSeconds(kCommitTimerSeconds));
+        FROM_HERE, base::Bind(&DOMStorageArea::OnCommitTimer, this),
+        ComputeCommitDelay());
  }
}