Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(18)

Side by Side Diff: components/sync/syncable/directory.cc

Issue 2427803002: [Sync] Replacing NULL with nullptr in code and null in comments for components/sync/ (Closed)
Patch Set: Fixing start of sentence capitalization. Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "components/sync/syncable/directory.h" 5 #include "components/sync/syncable/directory.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <iterator> 8 #include <iterator>
9 #include <utility> 9 #include <utility>
10 10
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
94 } 94 }
95 95
96 Directory::Kernel::~Kernel() {} 96 Directory::Kernel::~Kernel() {}
97 97
98 Directory::Directory( 98 Directory::Directory(
99 DirectoryBackingStore* store, 99 DirectoryBackingStore* store,
100 const WeakHandle<UnrecoverableErrorHandler>& unrecoverable_error_handler, 100 const WeakHandle<UnrecoverableErrorHandler>& unrecoverable_error_handler,
101 const base::Closure& report_unrecoverable_error_function, 101 const base::Closure& report_unrecoverable_error_function,
102 NigoriHandler* nigori_handler, 102 NigoriHandler* nigori_handler,
103 Cryptographer* cryptographer) 103 Cryptographer* cryptographer)
104 : kernel_(NULL), 104 : kernel_(nullptr),
105 store_(store), 105 store_(store),
106 unrecoverable_error_handler_(unrecoverable_error_handler), 106 unrecoverable_error_handler_(unrecoverable_error_handler),
107 report_unrecoverable_error_function_(report_unrecoverable_error_function), 107 report_unrecoverable_error_function_(report_unrecoverable_error_function),
108 unrecoverable_error_set_(false), 108 unrecoverable_error_set_(false),
109 nigori_handler_(nigori_handler), 109 nigori_handler_(nigori_handler),
110 cryptographer_(cryptographer), 110 cryptographer_(cryptographer),
111 invariant_check_level_(VERIFY_CHANGES), 111 invariant_check_level_(VERIFY_CHANGES),
112 weak_ptr_factory_(this) {} 112 weak_ptr_factory_(this) {}
113 113
114 Directory::~Directory() { 114 Directory::~Directory() {
(...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after
203 203
204 DeleteJournal* Directory::delete_journal() { 204 DeleteJournal* Directory::delete_journal() {
205 DCHECK(delete_journal_.get()); 205 DCHECK(delete_journal_.get());
206 return delete_journal_.get(); 206 return delete_journal_.get();
207 } 207 }
208 208
209 void Directory::Close() { 209 void Directory::Close() {
210 store_.reset(); 210 store_.reset();
211 if (kernel_) { 211 if (kernel_) {
212 delete kernel_; 212 delete kernel_;
213 kernel_ = NULL; 213 kernel_ = nullptr;
214 } 214 }
215 } 215 }
216 216
217 void Directory::OnUnrecoverableError(const BaseTransaction* trans, 217 void Directory::OnUnrecoverableError(const BaseTransaction* trans,
218 const tracked_objects::Location& location, 218 const tracked_objects::Location& location,
219 const std::string& message) { 219 const std::string& message) {
220 DCHECK(trans != NULL); 220 DCHECK(trans != nullptr);
221 unrecoverable_error_set_ = true; 221 unrecoverable_error_set_ = true;
222 unrecoverable_error_handler_.Call( 222 unrecoverable_error_handler_.Call(
223 FROM_HERE, &UnrecoverableErrorHandler::OnUnrecoverableError, location, 223 FROM_HERE, &UnrecoverableErrorHandler::OnUnrecoverableError, location,
224 message); 224 message);
225 } 225 }
226 226
227 EntryKernel* Directory::GetEntryById(const Id& id) { 227 EntryKernel* Directory::GetEntryById(const Id& id) {
228 ScopedKernelLock lock(this); 228 ScopedKernelLock lock(this);
229 return GetEntryById(lock, id); 229 return GetEntryById(lock, id);
230 } 230 }
231 231
232 EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock, 232 EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
233 const Id& id) { 233 const Id& id) {
234 DCHECK(kernel_); 234 DCHECK(kernel_);
235 // Find it in the in memory ID index. 235 // Find it in the in memory ID index.
236 IdsMap::iterator id_found = kernel_->ids_map.find(id.value()); 236 IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
237 if (id_found != kernel_->ids_map.end()) { 237 if (id_found != kernel_->ids_map.end()) {
238 return id_found->second; 238 return id_found->second;
239 } 239 }
240 return NULL; 240 return nullptr;
241 } 241 }
242 242
243 EntryKernel* Directory::GetEntryByClientTag(const string& tag) { 243 EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
244 ScopedKernelLock lock(this); 244 ScopedKernelLock lock(this);
245 DCHECK(kernel_); 245 DCHECK(kernel_);
246 246
247 TagsMap::iterator it = kernel_->client_tags_map.find(tag); 247 TagsMap::iterator it = kernel_->client_tags_map.find(tag);
248 if (it != kernel_->client_tags_map.end()) { 248 if (it != kernel_->client_tags_map.end()) {
249 return it->second; 249 return it->second;
250 } 250 }
251 return NULL; 251 return nullptr;
252 } 252 }
253 253
254 EntryKernel* Directory::GetEntryByServerTag(const string& tag) { 254 EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
255 ScopedKernelLock lock(this); 255 ScopedKernelLock lock(this);
256 DCHECK(kernel_); 256 DCHECK(kernel_);
257 TagsMap::iterator it = kernel_->server_tags_map.find(tag); 257 TagsMap::iterator it = kernel_->server_tags_map.find(tag);
258 if (it != kernel_->server_tags_map.end()) { 258 if (it != kernel_->server_tags_map.end()) {
259 return it->second; 259 return it->second;
260 } 260 }
261 return NULL; 261 return nullptr;
262 } 262 }
263 263
264 EntryKernel* Directory::GetEntryByHandle(int64_t metahandle) { 264 EntryKernel* Directory::GetEntryByHandle(int64_t metahandle) {
265 ScopedKernelLock lock(this); 265 ScopedKernelLock lock(this);
266 return GetEntryByHandle(lock, metahandle); 266 return GetEntryByHandle(lock, metahandle);
267 } 267 }
268 268
269 EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock, 269 EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
270 int64_t metahandle) { 270 int64_t metahandle) {
271 // Look up in memory 271 // Look up in memory
272 MetahandlesMap::iterator found = kernel_->metahandles_map.find(metahandle); 272 MetahandlesMap::iterator found = kernel_->metahandles_map.find(metahandle);
273 if (found != kernel_->metahandles_map.end()) { 273 if (found != kernel_->metahandles_map.end()) {
274 // Found it in memory. Easy. 274 // Found it in memory. Easy.
275 return found->second.get(); 275 return found->second.get();
276 } 276 }
277 return NULL; 277 return nullptr;
278 } 278 }
279 279
280 bool Directory::GetChildHandlesById(BaseTransaction* trans, 280 bool Directory::GetChildHandlesById(BaseTransaction* trans,
281 const Id& parent_id, 281 const Id& parent_id,
282 Directory::Metahandles* result) { 282 Directory::Metahandles* result) {
283 if (!SyncAssert(this == trans->directory(), FROM_HERE, 283 if (!SyncAssert(this == trans->directory(), FROM_HERE,
284 "Directories don't match", trans)) 284 "Directories don't match", trans))
285 return false; 285 return false;
286 result->clear(); 286 result->clear();
287 287
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
339 } 339 }
340 340
341 bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) { 341 bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
342 ScopedKernelLock lock(this); 342 ScopedKernelLock lock(this);
343 return InsertEntry(lock, trans, entry); 343 return InsertEntry(lock, trans, entry);
344 } 344 }
345 345
346 bool Directory::InsertEntry(const ScopedKernelLock& lock, 346 bool Directory::InsertEntry(const ScopedKernelLock& lock,
347 BaseWriteTransaction* trans, 347 BaseWriteTransaction* trans,
348 EntryKernel* entry) { 348 EntryKernel* entry) {
349 if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans)) 349 if (!SyncAssert(nullptr != entry, FROM_HERE, "Entry is null", trans))
350 return false; 350 return false;
351 351
352 static const char error[] = "Entry already in memory index."; 352 static const char error[] = "Entry already in memory index.";
353 353
354 if (!SyncAssert(kernel_->metahandles_map 354 if (!SyncAssert(kernel_->metahandles_map
355 .insert(std::make_pair(entry->ref(META_HANDLE), 355 .insert(std::make_pair(entry->ref(META_HANDLE),
356 base::WrapUnique(entry))) 356 base::WrapUnique(entry)))
357 .second, 357 .second,
358 FROM_HERE, error, trans)) { 358 FROM_HERE, error, trans)) {
359 return false; 359 return false;
(...skipping 22 matching lines...) Expand all
382 "Client tag should be empty", trans)) 382 "Client tag should be empty", trans))
383 return false; 383 return false;
384 384
385 return true; 385 return true;
386 } 386 }
387 387
388 bool Directory::ReindexId(BaseWriteTransaction* trans, 388 bool Directory::ReindexId(BaseWriteTransaction* trans,
389 EntryKernel* const entry, 389 EntryKernel* const entry,
390 const Id& new_id) { 390 const Id& new_id) {
391 ScopedKernelLock lock(this); 391 ScopedKernelLock lock(this);
392 if (NULL != GetEntryById(lock, new_id)) 392 if (nullptr != GetEntryById(lock, new_id))
393 return false; 393 return false;
394 394
395 { 395 {
396 // Update the indices that depend on the ID field. 396 // Update the indices that depend on the ID field.
397 ScopedParentChildIndexUpdater updater_b(lock, entry, 397 ScopedParentChildIndexUpdater updater_b(lock, entry,
398 &kernel_->parent_child_index); 398 &kernel_->parent_child_index);
399 size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); 399 size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
400 DCHECK_EQ(1U, num_erased); 400 DCHECK_EQ(1U, num_erased);
401 entry->put(ID, new_id); 401 entry->put(ID, new_id);
402 kernel_->ids_map[entry->ref(ID).value()] = entry; 402 kernel_->ids_map[entry->ref(ID).value()] = entry;
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
473 IndexByAttachmentId::const_iterator index_iter = 473 IndexByAttachmentId::const_iterator index_iter =
474 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id()); 474 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
475 if (index_iter == kernel_->index_by_attachment_id.end()) 475 if (index_iter == kernel_->index_by_attachment_id.end())
476 return; 476 return;
477 const MetahandleSet& metahandle_set = index_iter->second; 477 const MetahandleSet& metahandle_set = index_iter->second;
478 std::copy(metahandle_set.begin(), metahandle_set.end(), 478 std::copy(metahandle_set.begin(), metahandle_set.end(),
479 back_inserter(*result)); 479 back_inserter(*result));
480 } 480 }
481 481
482 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const { 482 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
483 DCHECK(trans != NULL); 483 DCHECK(trans != nullptr);
484 return unrecoverable_error_set_; 484 return unrecoverable_error_set_;
485 } 485 }
486 486
487 void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) { 487 void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
488 kernel_->transaction_mutex.AssertAcquired(); 488 kernel_->transaction_mutex.AssertAcquired();
489 kernel_->dirty_metahandles.clear(); 489 kernel_->dirty_metahandles.clear();
490 } 490 }
491 491
492 bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans, 492 bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
493 const EntryKernel* const entry) const { 493 const EntryKernel* const entry) const {
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
529 if (!entry) 529 if (!entry)
530 continue; 530 continue;
531 // Skip over false positives; it happens relatively infrequently. 531 // Skip over false positives; it happens relatively infrequently.
532 if (!entry->is_dirty()) 532 if (!entry->is_dirty())
533 continue; 533 continue;
534 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), 534 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
535 base::MakeUnique<EntryKernel>(*entry)); 535 base::MakeUnique<EntryKernel>(*entry));
536 DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i)); 536 DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
537 // We don't bother removing from the index here as we blow the entire thing 537 // We don't bother removing from the index here as we blow the entire thing
538 // in a moment, and it unnecessarily complicates iteration. 538 // in a moment, and it unnecessarily complicates iteration.
539 entry->clear_dirty(NULL); 539 entry->clear_dirty(nullptr);
540 } 540 }
541 ClearDirtyMetahandles(lock); 541 ClearDirtyMetahandles(lock);
542 542
543 // Set purged handles. 543 // Set purged handles.
544 DCHECK(snapshot->metahandles_to_purge.empty()); 544 DCHECK(snapshot->metahandles_to_purge.empty());
545 snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge); 545 snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);
546 546
547 // Fill kernel_info_status and kernel_info. 547 // Fill kernel_info_status and kernel_info.
548 snapshot->kernel_info = kernel_->persisted_info; 548 snapshot->kernel_info = kernel_->persisted_info;
549 snapshot->kernel_info_status = kernel_->info_status; 549 snapshot->kernel_info_status = kernel_->info_status;
(...skipping 28 matching lines...) Expand all
578 578
579 // Need a write transaction as we are about to permanently purge entries. 579 // Need a write transaction as we are about to permanently purge entries.
580 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this); 580 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
581 ScopedKernelLock lock(this); 581 ScopedKernelLock lock(this);
582 // Now drop everything we can out of memory. 582 // Now drop everything we can out of memory.
583 for (auto i = snapshot.dirty_metas.begin(); i != snapshot.dirty_metas.end(); 583 for (auto i = snapshot.dirty_metas.begin(); i != snapshot.dirty_metas.end();
584 ++i) { 584 ++i) {
585 MetahandlesMap::iterator found = 585 MetahandlesMap::iterator found =
586 kernel_->metahandles_map.find((*i)->ref(META_HANDLE)); 586 kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
587 EntryKernel* entry = 587 EntryKernel* entry =
588 (found == kernel_->metahandles_map.end() ? NULL : found->second.get()); 588 (found == kernel_->metahandles_map.end() ? nullptr
589 : found->second.get());
589 if (entry && SafeToPurgeFromMemory(&trans, entry)) { 590 if (entry && SafeToPurgeFromMemory(&trans, entry)) {
590 // We now drop deleted metahandles that are up to date on both the client 591 // We now drop deleted metahandles that are up to date on both the client
591 // and the server. 592 // and the server.
592 std::unique_ptr<EntryKernel> unique_entry = std::move(found->second); 593 std::unique_ptr<EntryKernel> unique_entry = std::move(found->second);
593 594
594 size_t num_erased = 0; 595 size_t num_erased = 0;
595 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE)); 596 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
596 DCHECK_EQ(1u, num_erased); 597 DCHECK_EQ(1u, num_erased);
597 num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); 598 num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
598 DCHECK_EQ(1u, num_erased); 599 DCHECK_EQ(1u, num_erased);
(...skipping 705 matching lines...) Expand 10 before | Expand all | Expand 10 after
1304 return metahandle; 1305 return metahandle;
1305 } 1306 }
1306 1307
1307 // Generates next client ID based on a randomly generated GUID. 1308 // Generates next client ID based on a randomly generated GUID.
1308 Id Directory::NextId() { 1309 Id Directory::NextId() {
1309 return Id::CreateFromClientString(base::GenerateGUID()); 1310 return Id::CreateFromClientString(base::GenerateGUID());
1310 } 1311 }
1311 1312
1312 bool Directory::HasChildren(BaseTransaction* trans, const Id& id) { 1313 bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
1313 ScopedKernelLock lock(this); 1314 ScopedKernelLock lock(this);
1314 return kernel_->parent_child_index.GetChildren(id) != NULL; 1315 return kernel_->parent_child_index.GetChildren(id) != nullptr;
1315 } 1316 }
1316 1317
1317 Id Directory::GetFirstChildId(BaseTransaction* trans, 1318 Id Directory::GetFirstChildId(BaseTransaction* trans,
1318 const EntryKernel* parent) { 1319 const EntryKernel* parent) {
1319 DCHECK(parent); 1320 DCHECK(parent);
1320 DCHECK(parent->ref(IS_DIR)); 1321 DCHECK(parent->ref(IS_DIR));
1321 1322
1322 ScopedKernelLock lock(this); 1323 ScopedKernelLock lock(this);
1323 const OrderedChildSet* children = 1324 const OrderedChildSet* children =
1324 kernel_->parent_child_index.GetChildren(parent->ref(ID)); 1325 kernel_->parent_child_index.GetChildren(parent->ref(ID));
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after
1378 ScopedKernelLock lock(this); 1379 ScopedKernelLock lock(this);
1379 ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index); 1380 ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);
1380 1381
1381 // Note: The ScopedParentChildIndexUpdater will update this set for us as we 1382 // Note: The ScopedParentChildIndexUpdater will update this set for us as we
1382 // leave this function. 1383 // leave this function.
1383 const OrderedChildSet* siblings = 1384 const OrderedChildSet* siblings =
1384 kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID)); 1385 kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
1385 1386
1386 if (!siblings) { 1387 if (!siblings) {
1387 // This parent currently has no other children. 1388 // This parent currently has no other children.
1388 DCHECK(predecessor == NULL); 1389 DCHECK(predecessor == nullptr);
1389 UniquePosition pos = UniquePosition::InitialPosition(suffix); 1390 UniquePosition pos = UniquePosition::InitialPosition(suffix);
1390 e->put(UNIQUE_POSITION, pos); 1391 e->put(UNIQUE_POSITION, pos);
1391 return; 1392 return;
1392 } 1393 }
1393 1394
1394 if (predecessor == NULL) { 1395 if (predecessor == nullptr) {
1395 // We have at least one sibling, and we're inserting to the left of them. 1396 // We have at least one sibling, and we're inserting to the left of them.
1396 UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION); 1397 UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);
1397 1398
1398 UniquePosition pos; 1399 UniquePosition pos;
1399 if (!successor_pos.IsValid()) { 1400 if (!successor_pos.IsValid()) {
1400 // If all our successors are of non-positionable types, just create an 1401 // If all our successors are of non-positionable types, just create an
1401 // initial position. We arbitrarily choose to sort invalid positions to 1402 // initial position. We arbitrarily choose to sort invalid positions to
1402 // the right of the valid positions. 1403 // the right of the valid positions.
1403 // 1404 //
1404 // We really shouldn't need to support this. See TODO above. 1405 // We really shouldn't need to support this. See TODO above.
(...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after
1528 Directory::Kernel* Directory::kernel() { 1529 Directory::Kernel* Directory::kernel() {
1529 return kernel_; 1530 return kernel_;
1530 } 1531 }
1531 1532
1532 const Directory::Kernel* Directory::kernel() const { 1533 const Directory::Kernel* Directory::kernel() const {
1533 return kernel_; 1534 return kernel_;
1534 } 1535 }
1535 1536
1536 } // namespace syncable 1537 } // namespace syncable
1537 } // namespace syncer 1538 } // namespace syncer
OLDNEW
« no previous file with comments | « components/sync/syncable/directory.h ('k') | components/sync/syncable/directory_backing_store.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698