Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(123)

Side by Side Diff: base/prefs/json_pref_store.cc

Issue 1136983004: Revert of Implement lossy pref behavior for JsonPrefStore. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@prefs-fix-flags
Patch Set: Created 5 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/prefs/json_pref_store.h ('k') | base/prefs/json_pref_store_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLD | NEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/prefs/json_pref_store.h" 5 #include "base/prefs/json_pref_store.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/callback.h" 10 #include "base/callback.h"
(...skipping 140 matching lines...) Expand 10 before | Expand all | Expand 10 after
151 const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner, 151 const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
152 scoped_ptr<PrefFilter> pref_filter) 152 scoped_ptr<PrefFilter> pref_filter)
153 : path_(filename), 153 : path_(filename),
154 sequenced_task_runner_(sequenced_task_runner), 154 sequenced_task_runner_(sequenced_task_runner),
155 prefs_(new base::DictionaryValue()), 155 prefs_(new base::DictionaryValue()),
156 read_only_(false), 156 read_only_(false),
157 writer_(filename, sequenced_task_runner), 157 writer_(filename, sequenced_task_runner),
158 pref_filter_(pref_filter.Pass()), 158 pref_filter_(pref_filter.Pass()),
159 initialized_(false), 159 initialized_(false),
160 filtering_in_progress_(false), 160 filtering_in_progress_(false),
161 pending_lossy_write_(false),
162 read_error_(PREF_READ_ERROR_NONE), 161 read_error_(PREF_READ_ERROR_NONE),
163 write_count_histogram_(writer_.commit_interval(), path_) { 162 write_count_histogram_(writer_.commit_interval(), path_) {
164 DCHECK(!path_.empty()); 163 DCHECK(!path_.empty());
165 } 164 }
166 165
167 JsonPrefStore::JsonPrefStore( 166 JsonPrefStore::JsonPrefStore(
168 const base::FilePath& filename, 167 const base::FilePath& filename,
169 const base::FilePath& alternate_filename, 168 const base::FilePath& alternate_filename,
170 const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner, 169 const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
171 scoped_ptr<PrefFilter> pref_filter) 170 scoped_ptr<PrefFilter> pref_filter)
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after
246 base::Value* value, 245 base::Value* value,
247 uint32 flags) { 246 uint32 flags) {
248 DCHECK(CalledOnValidThread()); 247 DCHECK(CalledOnValidThread());
249 248
250 DCHECK(value); 249 DCHECK(value);
251 scoped_ptr<base::Value> new_value(value); 250 scoped_ptr<base::Value> new_value(value);
252 base::Value* old_value = NULL; 251 base::Value* old_value = NULL;
253 prefs_->Get(key, &old_value); 252 prefs_->Get(key, &old_value);
254 if (!old_value || !value->Equals(old_value)) { 253 if (!old_value || !value->Equals(old_value)) {
255 prefs_->Set(key, new_value.release()); 254 prefs_->Set(key, new_value.release());
256 ScheduleWrite(flags); 255 if (!read_only_)
256 writer_.ScheduleWrite(this);
257 } 257 }
258 } 258 }
259 259
260 void JsonPrefStore::RemoveValue(const std::string& key, uint32 flags) { 260 void JsonPrefStore::RemoveValue(const std::string& key, uint32 flags) {
261 DCHECK(CalledOnValidThread()); 261 DCHECK(CalledOnValidThread());
262 262
263 if (prefs_->RemovePath(key, NULL)) 263 if (prefs_->RemovePath(key, NULL))
264 ReportValueChanged(key, flags); 264 ReportValueChanged(key, flags);
265 } 265 }
266 266
267 void JsonPrefStore::RemoveValueSilently(const std::string& key, uint32 flags) { 267 void JsonPrefStore::RemoveValueSilently(const std::string& key, uint32 flags) {
268 DCHECK(CalledOnValidThread()); 268 DCHECK(CalledOnValidThread());
269 269
270 prefs_->RemovePath(key, NULL); 270 prefs_->RemovePath(key, NULL);
271 ScheduleWrite(flags); 271 if (!read_only_)
272 writer_.ScheduleWrite(this);
272 } 273 }
273 274
274 bool JsonPrefStore::ReadOnly() const { 275 bool JsonPrefStore::ReadOnly() const {
275 DCHECK(CalledOnValidThread()); 276 DCHECK(CalledOnValidThread());
276 277
277 return read_only_; 278 return read_only_;
278 } 279 }
279 280
280 PersistentPrefStore::PrefReadError JsonPrefStore::GetReadError() const { 281 PersistentPrefStore::PrefReadError JsonPrefStore::GetReadError() const {
281 DCHECK(CalledOnValidThread()); 282 DCHECK(CalledOnValidThread());
(...skipping 19 matching lines...) Expand all
301 base::PostTaskAndReplyWithResult( 302 base::PostTaskAndReplyWithResult(
302 sequenced_task_runner_.get(), 303 sequenced_task_runner_.get(),
303 FROM_HERE, 304 FROM_HERE,
304 base::Bind(&ReadPrefsFromDisk, path_, alternate_path_), 305 base::Bind(&ReadPrefsFromDisk, path_, alternate_path_),
305 base::Bind(&JsonPrefStore::OnFileRead, AsWeakPtr())); 306 base::Bind(&JsonPrefStore::OnFileRead, AsWeakPtr()));
306 } 307 }
307 308
308 void JsonPrefStore::CommitPendingWrite() { 309 void JsonPrefStore::CommitPendingWrite() {
309 DCHECK(CalledOnValidThread()); 310 DCHECK(CalledOnValidThread());
310 311
311 // Schedule a write for any lossy writes that are outstanding to ensure that
312 // they get flushed when this function is called.
313 if (pending_lossy_write_)
314 writer_.ScheduleWrite(this);
315
316 if (writer_.HasPendingWrite() && !read_only_) 312 if (writer_.HasPendingWrite() && !read_only_)
317 writer_.DoScheduledWrite(); 313 writer_.DoScheduledWrite();
318 } 314 }
319 315
320 void JsonPrefStore::ReportValueChanged(const std::string& key, uint32 flags) { 316 void JsonPrefStore::ReportValueChanged(const std::string& key, uint32 flags) {
321 DCHECK(CalledOnValidThread()); 317 DCHECK(CalledOnValidThread());
322 318
323 if (pref_filter_) 319 if (pref_filter_)
324 pref_filter_->FilterUpdate(key); 320 pref_filter_->FilterUpdate(key);
325 321
326 FOR_EACH_OBSERVER(PrefStore::Observer, observers_, OnPrefValueChanged(key)); 322 FOR_EACH_OBSERVER(PrefStore::Observer, observers_, OnPrefValueChanged(key));
327 323
328 ScheduleWrite(flags); 324 if (!read_only_)
325 writer_.ScheduleWrite(this);
329 } 326 }
330 327
331 void JsonPrefStore::RegisterOnNextSuccessfulWriteCallback( 328 void JsonPrefStore::RegisterOnNextSuccessfulWriteCallback(
332 const base::Closure& on_next_successful_write) { 329 const base::Closure& on_next_successful_write) {
333 DCHECK(CalledOnValidThread()); 330 DCHECK(CalledOnValidThread());
334 331
335 writer_.RegisterOnNextSuccessfulWriteCallback(on_next_successful_write); 332 writer_.RegisterOnNextSuccessfulWriteCallback(on_next_successful_write);
336 } 333 }
337 334
338 void JsonPrefStore::OnFileRead(scoped_ptr<ReadResult> read_result) { 335 void JsonPrefStore::OnFileRead(scoped_ptr<ReadResult> read_result) {
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
397 } 394 }
398 } 395 }
399 396
400 JsonPrefStore::~JsonPrefStore() { 397 JsonPrefStore::~JsonPrefStore() {
401 CommitPendingWrite(); 398 CommitPendingWrite();
402 } 399 }
403 400
404 bool JsonPrefStore::SerializeData(std::string* output) { 401 bool JsonPrefStore::SerializeData(std::string* output) {
405 DCHECK(CalledOnValidThread()); 402 DCHECK(CalledOnValidThread());
406 403
407 pending_lossy_write_ = false;
408
409 write_count_histogram_.RecordWriteOccured(); 404 write_count_histogram_.RecordWriteOccured();
410 405
411 if (pref_filter_) 406 if (pref_filter_)
412 pref_filter_->FilterSerializeData(prefs_.get()); 407 pref_filter_->FilterSerializeData(prefs_.get());
413 408
414 JSONStringValueSerializer serializer(output); 409 JSONStringValueSerializer serializer(output);
415 // Not pretty-printing prefs shrinks pref file size by ~30%. To obtain 410 // Not pretty-printing prefs shrinks pref file size by ~30%. To obtain
416 // readable prefs for debugging purposes, you can dump your prefs into any 411 // readable prefs for debugging purposes, you can dump your prefs into any
417 // command-line or online JSON pretty printing tool. 412 // command-line or online JSON pretty printing tool.
418 serializer.set_pretty_print(false); 413 serializer.set_pretty_print(false);
(...skipping 11 matching lines...) Expand all
430 FOR_EACH_OBSERVER(PrefStore::Observer, 425 FOR_EACH_OBSERVER(PrefStore::Observer,
431 observers_, 426 observers_,
432 OnInitializationCompleted(false)); 427 OnInitializationCompleted(false));
433 return; 428 return;
434 } 429 }
435 430
436 prefs_ = prefs.Pass(); 431 prefs_ = prefs.Pass();
437 432
438 initialized_ = true; 433 initialized_ = true;
439 434
440 if (schedule_write) 435 if (schedule_write && !read_only_)
441 ScheduleWrite(DEFAULT_PREF_WRITE_FLAGS); 436 writer_.ScheduleWrite(this);
442 437
443 if (error_delegate_ && read_error_ != PREF_READ_ERROR_NONE) 438 if (error_delegate_ && read_error_ != PREF_READ_ERROR_NONE)
444 error_delegate_->OnError(read_error_); 439 error_delegate_->OnError(read_error_);
445 440
446 FOR_EACH_OBSERVER(PrefStore::Observer, 441 FOR_EACH_OBSERVER(PrefStore::Observer,
447 observers_, 442 observers_,
448 OnInitializationCompleted(true)); 443 OnInitializationCompleted(true));
449 444
450 return; 445 return;
451 } 446 }
452 447
453 void JsonPrefStore::ScheduleWrite(uint32 flags) {
454 if (read_only_)
455 return;
456
457 if (flags & LOSSY_PREF_WRITE_FLAG)
458 pending_lossy_write_ = true;
459 else
460 writer_.ScheduleWrite(this);
461 }
462
463 // NOTE: This value should NOT be changed without renaming the histogram 448 // NOTE: This value should NOT be changed without renaming the histogram
464 // otherwise it will create incompatible buckets. 449 // otherwise it will create incompatible buckets.
465 const int32_t 450 const int32_t
466 JsonPrefStore::WriteCountHistogram::kHistogramWriteReportIntervalMins = 5; 451 JsonPrefStore::WriteCountHistogram::kHistogramWriteReportIntervalMins = 5;
467 452
468 JsonPrefStore::WriteCountHistogram::WriteCountHistogram( 453 JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
469 const base::TimeDelta& commit_interval, 454 const base::TimeDelta& commit_interval,
470 const base::FilePath& path) 455 const base::FilePath& path)
471 : WriteCountHistogram(commit_interval, 456 : WriteCountHistogram(commit_interval,
472 path, 457 path,
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after
540 DCHECK_EQ(31, num_buckets); 525 DCHECK_EQ(31, num_buckets);
541 526
542 // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS 527 // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
543 // macro adapted to allow for a dynamically suffixed histogram name. 528 // macro adapted to allow for a dynamically suffixed histogram name.
544 // Note: The factory creates and owns the histogram. 529 // Note: The factory creates and owns the histogram.
545 base::HistogramBase* histogram = base::Histogram::FactoryGet( 530 base::HistogramBase* histogram = base::Histogram::FactoryGet(
546 histogram_name, min_value, max_value, num_buckets, 531 histogram_name, min_value, max_value, num_buckets,
547 base::HistogramBase::kUmaTargetedHistogramFlag); 532 base::HistogramBase::kUmaTargetedHistogramFlag);
548 return histogram; 533 return histogram;
549 } 534 }
OLD | NEW
« no previous file with comments | « base/prefs/json_pref_store.h ('k') | base/prefs/json_pref_store_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698