Chromium Code Reviews

Side by Side Diff: components/prefs/json_pref_store.cc

Issue 1645073005: Revert of Move base/prefs to components/prefs (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 10 months ago
(The new side of this diff is empty: the revert removes components/prefs/json_pref_store.cc. The pre-revert contents of the file follow.)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/prefs/json_pref_store.h"
6
7 #include <stddef.h>
8
9 #include <algorithm>
10 #include <utility>
11
12 #include "base/bind.h"
13 #include "base/callback.h"
14 #include "base/files/file_path.h"
15 #include "base/files/file_util.h"
16 #include "base/json/json_file_value_serializer.h"
17 #include "base/json/json_string_value_serializer.h"
18 #include "base/macros.h"
19 #include "base/memory/ref_counted.h"
20 #include "base/metrics/histogram.h"
21 #include "base/prefs/pref_filter.h"
22 #include "base/sequenced_task_runner.h"
23 #include "base/strings/string_number_conversions.h"
24 #include "base/strings/string_util.h"
25 #include "base/task_runner_util.h"
26 #include "base/threading/sequenced_worker_pool.h"
27 #include "base/time/default_clock.h"
28 #include "base/values.h"
29
30 // Result returned from internal read tasks.
31 struct JsonPrefStore::ReadResult {
32 public:
33 ReadResult();
34 ~ReadResult();
35
36 scoped_ptr<base::Value> value;
37 PrefReadError error;
38 bool no_dir;
39
40 private:
41 DISALLOW_COPY_AND_ASSIGN(ReadResult);
42 };
43
44 JsonPrefStore::ReadResult::ReadResult()
45 : error(PersistentPrefStore::PREF_READ_ERROR_NONE), no_dir(false) {
46 }
47
48 JsonPrefStore::ReadResult::~ReadResult() {
49 }
50
51 namespace {
52
53 // Extension given to a corrupt copy of the Preferences file when it is moved aside.
54 const base::FilePath::CharType kBadExtension[] = FILE_PATH_LITERAL("bad");
55
56 PersistentPrefStore::PrefReadError HandleReadErrors(
57 const base::Value* value,
58 const base::FilePath& path,
59 int error_code,
60 const std::string& error_msg) {
61 if (!value) {
62 DVLOG(1) << "Error while loading JSON file: " << error_msg
63 << ", file: " << path.value();
64 switch (error_code) {
65 case JSONFileValueDeserializer::JSON_ACCESS_DENIED:
66 return PersistentPrefStore::PREF_READ_ERROR_ACCESS_DENIED;
67 case JSONFileValueDeserializer::JSON_CANNOT_READ_FILE:
68 return PersistentPrefStore::PREF_READ_ERROR_FILE_OTHER;
69 case JSONFileValueDeserializer::JSON_FILE_LOCKED:
70 return PersistentPrefStore::PREF_READ_ERROR_FILE_LOCKED;
71 case JSONFileValueDeserializer::JSON_NO_SUCH_FILE:
72 return PersistentPrefStore::PREF_READ_ERROR_NO_FILE;
73 default:
74 // JSON errors indicate file corruption of some sort.
75 // Since the file is corrupt, move it to the side and continue with
76 // empty preferences. This means the user loses their settings.
77 // We keep the old file for support and debugging assistance, and to
78 // detect whether the user hits these errors repeatedly.
79 // TODO(erikkay) Instead, use the last known good file.
80 base::FilePath bad = path.ReplaceExtension(kBadExtension);
81
82 // If they've ever had a parse error before, put them in another bucket.
83 // TODO(erikkay) if we keep this error checking for very long, we may
84 // want to differentiate between recent and long ago errors.
85 bool bad_existed = base::PathExists(bad);
86 base::Move(path, bad);
87 return bad_existed ? PersistentPrefStore::PREF_READ_ERROR_JSON_REPEAT
88 : PersistentPrefStore::PREF_READ_ERROR_JSON_PARSE;
89 }
90 }
91 if (!value->IsType(base::Value::TYPE_DICTIONARY))
92 return PersistentPrefStore::PREF_READ_ERROR_JSON_TYPE;
93 return PersistentPrefStore::PREF_READ_ERROR_NONE;
94 }
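For example, a corrupt file named "Preferences" is moved aside as "Preferences.bad" by the default branch above; if that ".bad" copy already exists from an earlier failure, the error is reported as PREF_READ_ERROR_JSON_REPEAT rather than PREF_READ_ERROR_JSON_PARSE.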
95
96 // Records a sample for |size| in the Settings.JsonDataReadSizeKilobytes
97 // histogram suffixed with the base name of the JSON file under |path|.
98 void RecordJsonDataSizeHistogram(const base::FilePath& path, size_t size) {
99 std::string spaceless_basename;
100 base::ReplaceChars(path.BaseName().MaybeAsASCII(), " ", "_",
101 &spaceless_basename);
102
103 // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
104 // macro adapted to allow for a dynamically suffixed histogram name.
105 // Note: The factory creates and owns the histogram.
106 base::HistogramBase* histogram = base::Histogram::FactoryGet(
107 "Settings.JsonDataReadSizeKilobytes." + spaceless_basename, 1, 10000, 50,
108 base::HistogramBase::kUmaTargetedHistogramFlag);
109 histogram->Add(static_cast<int>(size) / 1024);
110 }
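For comparison, a sketch of the fixed-name macro form that the comment above refers to (assuming base/metrics/histogram_macros.h is included); the FactoryGet expansion is needed only because the histogram name carries a runtime suffix, which the macro cannot accept:

  UMA_HISTOGRAM_CUSTOM_COUNTS("Settings.JsonDataReadSizeKilobytes",
                              static_cast<int>(size) / 1024, 1, 10000, 50);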
111
112 scoped_ptr<JsonPrefStore::ReadResult> ReadPrefsFromDisk(
113 const base::FilePath& path,
114 const base::FilePath& alternate_path) {
115 if (!base::PathExists(path) && !alternate_path.empty() &&
116 base::PathExists(alternate_path)) {
117 base::Move(alternate_path, path);
118 }
119
120 int error_code;
121 std::string error_msg;
122 scoped_ptr<JsonPrefStore::ReadResult> read_result(
123 new JsonPrefStore::ReadResult);
124 JSONFileValueDeserializer deserializer(path);
125 read_result->value = deserializer.Deserialize(&error_code, &error_msg);
126 read_result->error =
127 HandleReadErrors(read_result->value.get(), path, error_code, error_msg);
128 read_result->no_dir = !base::PathExists(path.DirName());
129
130 if (read_result->error == PersistentPrefStore::PREF_READ_ERROR_NONE)
131 RecordJsonDataSizeHistogram(path, deserializer.get_last_read_size());
132
133 return read_result;
134 }
135
136 } // namespace
137
138 // static
139 scoped_refptr<base::SequencedTaskRunner> JsonPrefStore::GetTaskRunnerForFile(
140 const base::FilePath& filename,
141 base::SequencedWorkerPool* worker_pool) {
142 std::string token("json_pref_store-");
143 token.append(filename.AsUTF8Unsafe());
144 return worker_pool->GetSequencedTaskRunnerWithShutdownBehavior(
145 worker_pool->GetNamedSequenceToken(token),
146 base::SequencedWorkerPool::BLOCK_SHUTDOWN);
147 }
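A minimal usage sketch (|pref_path| and |worker_pool| are placeholders supplied by the embedder; an empty scoped_ptr<PrefFilter> means no filtering is applied):

  scoped_refptr<base::SequencedTaskRunner> task_runner =
      JsonPrefStore::GetTaskRunnerForFile(pref_path, worker_pool);
  scoped_refptr<JsonPrefStore> pref_store =
      new JsonPrefStore(pref_path, task_runner, scoped_ptr<PrefFilter>());
  pref_store->ReadPrefsAsync(nullptr);  // Or ReadPrefs() for a synchronous load.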
148
149 JsonPrefStore::JsonPrefStore(
150 const base::FilePath& pref_filename,
151 const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
152 scoped_ptr<PrefFilter> pref_filter)
153 : JsonPrefStore(pref_filename,
154 base::FilePath(),
155 sequenced_task_runner,
156 std::move(pref_filter)) {}
157
158 JsonPrefStore::JsonPrefStore(
159 const base::FilePath& pref_filename,
160 const base::FilePath& pref_alternate_filename,
161 const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
162 scoped_ptr<PrefFilter> pref_filter)
163 : path_(pref_filename),
164 alternate_path_(pref_alternate_filename),
165 sequenced_task_runner_(sequenced_task_runner),
166 prefs_(new base::DictionaryValue()),
167 read_only_(false),
168 writer_(pref_filename, sequenced_task_runner),
169 pref_filter_(std::move(pref_filter)),
170 initialized_(false),
171 filtering_in_progress_(false),
172 pending_lossy_write_(false),
173 read_error_(PREF_READ_ERROR_NONE),
174 write_count_histogram_(writer_.commit_interval(), path_) {
175 DCHECK(!path_.empty());
176 }
177
178 bool JsonPrefStore::GetValue(const std::string& key,
179 const base::Value** result) const {
180 DCHECK(CalledOnValidThread());
181
182 base::Value* tmp = nullptr;
183 if (!prefs_->Get(key, &tmp))
184 return false;
185
186 if (result)
187 *result = tmp;
188 return true;
189 }
190
191 void JsonPrefStore::AddObserver(PrefStore::Observer* observer) {
192 DCHECK(CalledOnValidThread());
193
194 observers_.AddObserver(observer);
195 }
196
197 void JsonPrefStore::RemoveObserver(PrefStore::Observer* observer) {
198 DCHECK(CalledOnValidThread());
199
200 observers_.RemoveObserver(observer);
201 }
202
203 bool JsonPrefStore::HasObservers() const {
204 DCHECK(CalledOnValidThread());
205
206 return observers_.might_have_observers();
207 }
208
209 bool JsonPrefStore::IsInitializationComplete() const {
210 DCHECK(CalledOnValidThread());
211
212 return initialized_;
213 }
214
215 bool JsonPrefStore::GetMutableValue(const std::string& key,
216 base::Value** result) {
217 DCHECK(CalledOnValidThread());
218
219 return prefs_->Get(key, result);
220 }
221
222 void JsonPrefStore::SetValue(const std::string& key,
223 scoped_ptr<base::Value> value,
224 uint32_t flags) {
225 DCHECK(CalledOnValidThread());
226
227 DCHECK(value);
228 base::Value* old_value = nullptr;
229 prefs_->Get(key, &old_value);
230 if (!old_value || !value->Equals(old_value)) {
231 prefs_->Set(key, std::move(value));
232 ReportValueChanged(key, flags);
233 }
234 }
235
236 void JsonPrefStore::SetValueSilently(const std::string& key,
237 scoped_ptr<base::Value> value,
238 uint32_t flags) {
239 DCHECK(CalledOnValidThread());
240
241 DCHECK(value);
242 base::Value* old_value = nullptr;
243 prefs_->Get(key, &old_value);
244 if (!old_value || !value->Equals(old_value)) {
245 prefs_->Set(key, std::move(value));
246 ScheduleWrite(flags);
247 }
248 }
249
250 void JsonPrefStore::RemoveValue(const std::string& key, uint32_t flags) {
251 DCHECK(CalledOnValidThread());
252
253 if (prefs_->RemovePath(key, nullptr))
254 ReportValueChanged(key, flags);
255 }
256
257 void JsonPrefStore::RemoveValueSilently(const std::string& key,
258 uint32_t flags) {
259 DCHECK(CalledOnValidThread());
260
261 prefs_->RemovePath(key, nullptr);
262 ScheduleWrite(flags);
263 }
264
265 bool JsonPrefStore::ReadOnly() const {
266 DCHECK(CalledOnValidThread());
267
268 return read_only_;
269 }
270
271 PersistentPrefStore::PrefReadError JsonPrefStore::GetReadError() const {
272 DCHECK(CalledOnValidThread());
273
274 return read_error_;
275 }
276
277 PersistentPrefStore::PrefReadError JsonPrefStore::ReadPrefs() {
278 DCHECK(CalledOnValidThread());
279
280 OnFileRead(ReadPrefsFromDisk(path_, alternate_path_));
281 return filtering_in_progress_ ? PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE
282 : read_error_;
283 }
284
285 void JsonPrefStore::ReadPrefsAsync(ReadErrorDelegate* error_delegate) {
286 DCHECK(CalledOnValidThread());
287
288 initialized_ = false;
289 error_delegate_.reset(error_delegate);
290
292 // Weakly bind the reply (OnFileRead) so it is dropped if this store is destroyed during shutdown before the read completes.
292 base::PostTaskAndReplyWithResult(
293 sequenced_task_runner_.get(),
294 FROM_HERE,
295 base::Bind(&ReadPrefsFromDisk, path_, alternate_path_),
296 base::Bind(&JsonPrefStore::OnFileRead, AsWeakPtr()));
297 }
298
299 void JsonPrefStore::CommitPendingWrite() {
300 DCHECK(CalledOnValidThread());
301
302 // Schedule a write for any outstanding lossy writes so that they are
303 // flushed as part of this commit.
304 SchedulePendingLossyWrites();
305
306 if (writer_.HasPendingWrite() && !read_only_)
307 writer_.DoScheduledWrite();
308 }
309
310 void JsonPrefStore::SchedulePendingLossyWrites() {
311 if (pending_lossy_write_)
312 writer_.ScheduleWrite(this);
313 }
314
315 void JsonPrefStore::ReportValueChanged(const std::string& key, uint32_t flags) {
316 DCHECK(CalledOnValidThread());
317
318 if (pref_filter_)
319 pref_filter_->FilterUpdate(key);
320
321 FOR_EACH_OBSERVER(PrefStore::Observer, observers_, OnPrefValueChanged(key));
322
323 ScheduleWrite(flags);
324 }
325
326 void JsonPrefStore::RegisterOnNextSuccessfulWriteCallback(
327 const base::Closure& on_next_successful_write) {
328 DCHECK(CalledOnValidThread());
329
330 writer_.RegisterOnNextSuccessfulWriteCallback(on_next_successful_write);
331 }
332
333 void JsonPrefStore::OnFileRead(scoped_ptr<ReadResult> read_result) {
334 DCHECK(CalledOnValidThread());
335
336 DCHECK(read_result);
337
338 scoped_ptr<base::DictionaryValue> unfiltered_prefs(new base::DictionaryValue);
339
340 read_error_ = read_result->error;
341
342 bool initialization_successful = !read_result->no_dir;
343
344 if (initialization_successful) {
345 switch (read_error_) {
346 case PREF_READ_ERROR_ACCESS_DENIED:
347 case PREF_READ_ERROR_FILE_OTHER:
348 case PREF_READ_ERROR_FILE_LOCKED:
349 case PREF_READ_ERROR_JSON_TYPE:
350 case PREF_READ_ERROR_FILE_NOT_SPECIFIED:
351 read_only_ = true;
352 break;
353 case PREF_READ_ERROR_NONE:
354 DCHECK(read_result->value.get());
355 unfiltered_prefs.reset(
356 static_cast<base::DictionaryValue*>(read_result->value.release()));
357 break;
358 case PREF_READ_ERROR_NO_FILE:
359 // If the file simply doesn't exist, this may be the first run. Either way,
360 // there is no harm in writing out default prefs.
361 case PREF_READ_ERROR_JSON_PARSE:
362 case PREF_READ_ERROR_JSON_REPEAT:
363 break;
364 case PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE:
365 // This is a special error code returned by ReadPrefs when it cannot
366 // complete synchronously; it should never be returned by the read
367 // operation itself.
368 case PREF_READ_ERROR_MAX_ENUM:
369 NOTREACHED();
370 break;
371 }
372 }
373
374 if (pref_filter_) {
375 filtering_in_progress_ = true;
376 const PrefFilter::PostFilterOnLoadCallback post_filter_on_load_callback(
377 base::Bind(
378 &JsonPrefStore::FinalizeFileRead, AsWeakPtr(),
379 initialization_successful));
380 pref_filter_->FilterOnLoad(post_filter_on_load_callback,
381 std::move(unfiltered_prefs));
382 } else {
383 FinalizeFileRead(initialization_successful, std::move(unfiltered_prefs),
384 false);
385 }
386 }
387
388 JsonPrefStore::~JsonPrefStore() {
389 CommitPendingWrite();
390 }
391
392 bool JsonPrefStore::SerializeData(std::string* output) {
393 DCHECK(CalledOnValidThread());
394
395 pending_lossy_write_ = false;
396
397 write_count_histogram_.RecordWriteOccured();
398
399 if (pref_filter_)
400 pref_filter_->FilterSerializeData(prefs_.get());
401
402 JSONStringValueSerializer serializer(output);
403 // Not pretty-printing prefs shrinks pref file size by ~30%. To obtain
404 // readable prefs for debugging purposes, you can dump your prefs into any
405 // command-line or online JSON pretty printing tool.
406 serializer.set_pretty_print(false);
407 return serializer.Serialize(*prefs_);
408 }
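If human-readable output is needed while debugging, the compact file can be pretty-printed externally, e.g. with Python's standard json.tool module: python -m json.tool <path-to-Preferences-file> (the path is a placeholder).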
409
410 void JsonPrefStore::FinalizeFileRead(bool initialization_successful,
411 scoped_ptr<base::DictionaryValue> prefs,
412 bool schedule_write) {
413 DCHECK(CalledOnValidThread());
414
415 filtering_in_progress_ = false;
416
417 if (!initialization_successful) {
418 FOR_EACH_OBSERVER(PrefStore::Observer,
419 observers_,
420 OnInitializationCompleted(false));
421 return;
422 }
423
424 prefs_ = std::move(prefs);
425
426 initialized_ = true;
427
428 if (schedule_write)
429 ScheduleWrite(DEFAULT_PREF_WRITE_FLAGS);
430
431 if (error_delegate_ && read_error_ != PREF_READ_ERROR_NONE)
432 error_delegate_->OnError(read_error_);
433
434 FOR_EACH_OBSERVER(PrefStore::Observer,
435 observers_,
436 OnInitializationCompleted(true));
437
438 return;
439 }
440
441 void JsonPrefStore::ScheduleWrite(uint32_t flags) {
442 if (read_only_)
443 return;
444
445 if (flags & LOSSY_PREF_WRITE_FLAG)
446 pending_lossy_write_ = true;
447 else
448 writer_.ScheduleWrite(this);
449 }
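An illustrative sketch of the flag's effect (the key and value are hypothetical): a write requested with LOSSY_PREF_WRITE_FLAG only sets pending_lossy_write_, and the change reaches disk later, when CommitPendingWrite() flushes it or a non-lossy write schedules the writer:

  pref_store->SetValue("browser.last_session_exit_type",
                       make_scoped_ptr(new base::StringValue("Normal")),
                       WriteablePrefStore::LOSSY_PREF_WRITE_FLAG);
  pref_store->CommitPendingWrite();  // Flushes the pending lossy write.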
450
451 // NOTE: This value should NOT be changed without renaming the histogram;
452 // otherwise it will create incompatible buckets.
453 const int32_t
454 JsonPrefStore::WriteCountHistogram::kHistogramWriteReportIntervalMins = 5;
455
456 JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
457 const base::TimeDelta& commit_interval,
458 const base::FilePath& path)
459 : WriteCountHistogram(commit_interval,
460 path,
461 scoped_ptr<base::Clock>(new base::DefaultClock)) {
462 }
463
464 JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
465 const base::TimeDelta& commit_interval,
466 const base::FilePath& path,
467 scoped_ptr<base::Clock> clock)
468 : commit_interval_(commit_interval),
469 path_(path),
470 clock_(clock.release()),
471 report_interval_(
472 base::TimeDelta::FromMinutes(kHistogramWriteReportIntervalMins)),
473 last_report_time_(clock_->Now()),
474 writes_since_last_report_(0) {
475 }
476
477 JsonPrefStore::WriteCountHistogram::~WriteCountHistogram() {
478 ReportOutstandingWrites();
479 }
480
481 void JsonPrefStore::WriteCountHistogram::RecordWriteOccured() {
482 ReportOutstandingWrites();
483
484 ++writes_since_last_report_;
485 }
486
487 void JsonPrefStore::WriteCountHistogram::ReportOutstandingWrites() {
488 base::Time current_time = clock_->Now();
489 base::TimeDelta time_since_last_report = current_time - last_report_time_;
490
491 if (time_since_last_report <= report_interval_)
492 return;
493
494 // If the time since the last report exceeds the report interval, report all
495 // the writes since the last report. They must have all occurred in the same
496 // report interval.
497 base::HistogramBase* histogram = GetHistogram();
498 histogram->Add(writes_since_last_report_);
499
500 // Several report intervals may have elapsed without any writes in them;
501 // report those as zero samples too.
502 int64_t total_num_intervals_elapsed =
503 (time_since_last_report / report_interval_);
504 for (int64_t i = 0; i < total_num_intervals_elapsed - 1; ++i)
505 histogram->Add(0);
506
507 writes_since_last_report_ = 0;
508 last_report_time_ += total_num_intervals_elapsed * report_interval_;
509 }
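Worked example (numbers illustrative): with the 5-minute report interval, suppose 3 writes were recorded and 12 minutes have passed since the last report. total_num_intervals_elapsed = 12 / 5 = 2, so a sample of 3 is recorded for the first elapsed interval and a single sample of 0 for the second; last_report_time_ then advances by 2 * 5 = 10 minutes, leaving the remaining 2 minutes attributed to the interval still in progress.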
510
511 base::HistogramBase* JsonPrefStore::WriteCountHistogram::GetHistogram() {
512 std::string spaceless_basename;
513 base::ReplaceChars(path_.BaseName().MaybeAsASCII(), " ", "_",
514 &spaceless_basename);
515 std::string histogram_name =
516 "Settings.JsonDataWriteCount." + spaceless_basename;
517
518 // The min value for a histogram is 1. The max value is the maximum number of
519 // writes that can occur in the window being recorded. The number of buckets
520 // used is the max value (plus the underflow/overflow buckets).
521 int32_t min_value = 1;
522 int32_t max_value = report_interval_ / commit_interval_;
523 int32_t num_buckets = max_value + 1;
524
525 // NOTE: These values should NOT be changed without renaming the histogram;
526 // otherwise it will create incompatible buckets.
527 DCHECK_EQ(30, max_value);
528 DCHECK_EQ(31, num_buckets);
529
530 // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
531 // macro adapted to allow for a dynamically suffixed histogram name.
532 // Note: The factory creates and owns the histogram.
533 base::HistogramBase* histogram = base::Histogram::FactoryGet(
534 histogram_name, min_value, max_value, num_buckets,
535 base::HistogramBase::kUmaTargetedHistogramFlag);
536 return histogram;
537 }
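Worked arithmetic for the DCHECKs above: the report interval is 5 minutes and the writer's commit interval is 10 seconds (the default for base::ImportantFileWriter, as the DCHECKs imply), so max_value = 300 s / 10 s = 30 and num_buckets = 30 + 1 = 31.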