Chromium Code Reviews

Unified diff: chrome/browser/sync/syncable/syncable.cc

Issue 500113: EntryKernel: change from assignable refs to puts. (Closed)
Patch Set: Resolved conflicts, updated. Created 10 years, 11 months ago
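
For orientation, this is the shape of the API change under review: EntryKernel previously handed out assignable references, so writes looked like kernel->ref(FIELD) = value. After this patch, reads keep the const ref() accessor, writes go through an explicit put(FIELD, value), and in-place mutation uses mutable_ref(). The snippet below is a minimal, hypothetical stand-in written only to illustrate that shape; the real field enums, value types, and storage are declared in chrome/browser/sync/syncable/syncable.h.

  // Illustrative sketch only -- a simplified stand-in for the real EntryKernel.
  #include <cassert>
  #include <string>

  enum StringField { NON_UNIQUE_NAME };  // hypothetical single-field enum

  class EntryKernel {
   public:
    // Reads stay on the const accessor; callers can no longer assign through it.
    const std::string& ref(StringField) const { return name_; }
    // Explicit setter replaces the old assignable "ref(FIELD) = value" pattern.
    void put(StringField, const std::string& value) { name_ = value; }
    // For callers that genuinely need to mutate in place (e.g. Clear()).
    std::string& mutable_ref(StringField) { return name_; }

   private:
    std::string name_;
  };

  int main() {
    EntryKernel kernel;
    kernel.put(NON_UNIQUE_NAME, "Bookmarks");     // was: kernel.ref(NON_UNIQUE_NAME) = "Bookmarks";
    assert(kernel.ref(NON_UNIQUE_NAME) == "Bookmarks");
    kernel.mutable_ref(NON_UNIQUE_NAME).clear();  // in-place mutation is still possible, but explicit
    return 0;
  }
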
     1  // Copyright (c) 2009 The Chromium Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style license that can be
     3  // found in the LICENSE file.
     4
     5  #include "chrome/browser/sync/syncable/syncable.h"
     6
     7  #include "build/build_config.h"
     8
     9  #include <sys/stat.h>
    10  #if defined(OS_POSIX)
(...skipping 259 matching lines...)
   270
   271  EntryKernel* Directory::GetEntryById(const Id& id) {
   272    ScopedKernelLock lock(this);
   273    return GetEntryById(id, &lock);
   274  }
   275
   276  EntryKernel* Directory::GetEntryById(const Id& id,
   277                                       ScopedKernelLock* const lock) {
   278    DCHECK(kernel_);
   279    // First look up in memory
-  280    kernel_->needle.ref(ID) = id;
+  280    kernel_->needle.put(ID, id);
   281    IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle);
   282    if (id_found != kernel_->ids_index->end()) {
   283      // Found it in memory. Easy.
   284      return *id_found;
   285    }
   286    return NULL;
   287  }
   288
   289  EntryKernel* Directory::GetEntryByTag(const string& tag) {
   290    ScopedKernelLock lock(this);
(...skipping 13 matching lines...)
   304  }
   305
   306  EntryKernel* Directory::GetEntryByHandle(const int64 metahandle) {
   307    ScopedKernelLock lock(this);
   308    return GetEntryByHandle(metahandle, &lock);
   309  }
   310
   311  EntryKernel* Directory::GetEntryByHandle(const int64 metahandle,
   312                                           ScopedKernelLock* lock) {
   313    // Look up in memory
-  314    kernel_->needle.ref(META_HANDLE) = metahandle;
+  314    kernel_->needle.put(META_HANDLE, metahandle);
   315    MetahandlesIndex::iterator found =
   316        kernel_->metahandles_index->find(&kernel_->needle);
   317    if (found != kernel_->metahandles_index->end()) {
   318      // Found it in memory. Easy.
   319      return *found;
   320    }
   321    return NULL;
   322  }
   323
   324  // An interface to specify the details of which children
(...skipping 10 matching lines...)
   335    EntryKernel needle_;
   336  };
   337
   338  // Matches all children.
   339  // TODO(chron): Unit test this by itself
   340  struct AllPathsMatcher : public PathMatcher {
   341    explicit AllPathsMatcher(const Id& parent_id) : PathMatcher(parent_id) {
   342    }
   343
   344    virtual Index::iterator lower_bound(Index* index) {
-  345      needle_.ref(PARENT_ID) = parent_id_;
+  345      needle_.put(PARENT_ID, parent_id_);
-  346      needle_.ref(META_HANDLE) = std::numeric_limits<int64>::min();
+  346      needle_.put(META_HANDLE, std::numeric_limits<int64>::min());
   347      return index->lower_bound(&needle_);
   348    }
   349
   350    virtual Index::iterator upper_bound(Index* index) {
-  351      needle_.ref(PARENT_ID) = parent_id_;
+  351      needle_.put(PARENT_ID, parent_id_);
-  352      needle_.ref(META_HANDLE) = std::numeric_limits<int64>::max();
+  352      needle_.put(META_HANDLE, std::numeric_limits<int64>::max());
   353      return index->upper_bound(&needle_);
   354    }
   355  };
   356
   357  void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id,
   358                                  Directory::ChildHandles* result) {
   359    AllPathsMatcher matcher(parent_id);
   360    return GetChildHandlesImpl(trans, parent_id, &matcher, result);
   361  }
   362
(...skipping 21 matching lines...)
   384
   385  EntryKernel* Directory::GetRootEntry() {
   386    return GetEntryById(Id());
   387  }
   388
   389  void ZeroFields(EntryKernel* entry, int first_field) {
   390    int i = first_field;
   391    // Note that bitset<> constructor sets all bits to zero, and strings
   392    // initialize to empty.
   393    for ( ; i < INT64_FIELDS_END; ++i)
-  394      entry->ref(static_cast<Int64Field>(i)) = 0;
+  394      entry->put(static_cast<Int64Field>(i), 0);
   395    for ( ; i < ID_FIELDS_END; ++i)
-  396      entry->ref(static_cast<IdField>(i)).Clear();
+  396      entry->mutable_ref(static_cast<IdField>(i)).Clear();
   397    for ( ; i < BIT_FIELDS_END; ++i)
-  398      entry->ref(static_cast<BitField>(i)) = false;
+  398      entry->put(static_cast<BitField>(i), false);
   399    if (i < BLOB_FIELDS_END)
   400      i = BLOB_FIELDS_END;
   401  }
   402
   403  void Directory::InsertEntry(EntryKernel* entry) {
   404    ScopedKernelLock lock(this);
   405    InsertEntry(entry, &lock);
   406  }
   407
   408  void Directory::InsertEntry(EntryKernel* entry, ScopedKernelLock* lock) {
   409    DCHECK(NULL != lock);
   410    CHECK(NULL != entry);
   411    static const char error[] = "Entry already in memory index.";
   412    CHECK(kernel_->metahandles_index->insert(entry).second) << error;
   413
   414    if (!entry->ref(IS_DEL)) {
   415      CHECK(kernel_->parent_id_child_index->insert(entry).second) << error;
   416    }
   417    CHECK(kernel_->ids_index->insert(entry).second) << error;
   418  }
   419
   420  void Directory::Undelete(EntryKernel* const entry) {
   421    DCHECK(entry->ref(IS_DEL));
   422    ScopedKernelLock lock(this);
-  423    entry->ref(IS_DEL) = false;
+  423    entry->put(IS_DEL, false);
   424    entry->mark_dirty();
   425    CHECK(kernel_->parent_id_child_index->insert(entry).second);
   426  }
   427
   428  void Directory::Delete(EntryKernel* const entry) {
   429    DCHECK(!entry->ref(IS_DEL));
-  430    entry->ref(IS_DEL) = true;
+  430    entry->put(IS_DEL, true);
   431    entry->mark_dirty();
   432    ScopedKernelLock lock(this);
   433    CHECK(1 == kernel_->parent_id_child_index->erase(entry));
   434  }
   435
   436  bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) {
   437    ScopedKernelLock lock(this);
   438    if (NULL != GetEntryById(new_id, &lock))
   439      return false;
   440    CHECK(1 == kernel_->ids_index->erase(entry));
-  441    entry->ref(ID) = new_id;
+  441    entry->put(ID, new_id);
   442    CHECK(kernel_->ids_index->insert(entry).second);
   443    return true;
   444  }
   445
   446  void Directory::ReindexParentId(EntryKernel* const entry,
   447                                  const Id& new_parent_id) {
   448
   449    ScopedKernelLock lock(this);
   450    if (entry->ref(IS_DEL)) {
-  451      entry->ref(PARENT_ID) = new_parent_id;
+  451      entry->put(PARENT_ID, new_parent_id);
   452      return;
   453    }
   454
   455    if (entry->ref(PARENT_ID) == new_parent_id) {
   456      return;
   457    }
   458
   459    CHECK(1 == kernel_->parent_id_child_index->erase(entry));
-  460    entry->ref(PARENT_ID) = new_parent_id;
+  460    entry->put(PARENT_ID, new_parent_id);
   461    CHECK(kernel_->parent_id_child_index->insert(entry).second);
   462  }
   463
   464  // static
   465  bool Directory::SafeToPurgeFromMemory(const EntryKernel* const entry) {
   466    return entry->ref(IS_DEL) && !entry->is_dirty() && !entry->ref(SYNCING) &&
   467        !entry->ref(IS_UNAPPLIED_UPDATE) && !entry->ref(IS_UNSYNCED);
   468  }
   469
   470  void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
(...skipping 54 matching lines...)
   525  }
   526
   527  void Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
   528    // Need a write transaction as we are about to permanently purge entries.
   529    WriteTransaction trans(this, VACUUM_AFTER_SAVE, __FILE__, __LINE__);
   530    ScopedKernelLock lock(this);
   531    kernel_->flushed_metahandles_.Push(0);  // Begin flush marker
   532    // Now drop everything we can out of memory.
   533    for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
   534         i != snapshot.dirty_metas.end(); ++i) {
-  535      kernel_->needle.ref(META_HANDLE) = i->ref(META_HANDLE);
+  535      kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
   536      MetahandlesIndex::iterator found =
   537          kernel_->metahandles_index->find(&kernel_->needle);
   538      EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
   539                            NULL : *found);
   540      if (entry && SafeToPurgeFromMemory(entry)) {
   541        // We now drop deleted metahandles that are up to date on both the client
   542        // and the server.
   543        size_t num_erased = 0;
   544        kernel_->flushed_metahandles_.Push(entry->ref(META_HANDLE));
   545        num_erased = kernel_->ids_index->erase(entry);
(...skipping 22 matching lines...)
   568    ScopedKernelLock lock(this);
   569    kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
   570
   571    // Because we optimistically cleared the dirty bit on the real entries when
   572    // taking the snapshot, we must restore it on failure. Not doing this could
   573    // cause lost data, if no other changes are made to the in-memory entries
   574    // that would cause the dirty bit to get set again. Setting the bit ensures
   575    // that SaveChanges will at least try again later.
   576    for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
   577         i != snapshot.dirty_metas.end(); ++i) {
-  578      kernel_->needle.ref(META_HANDLE) = i->ref(META_HANDLE);
+  578      kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
   579      MetahandlesIndex::iterator found =
   580          kernel_->metahandles_index->find(&kernel_->needle);
   581      if (found != kernel_->metahandles_index->end()) {
   582        (*found)->mark_dirty();
   583      }
   584    }
   585
   586    for (ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
   587         i != snapshot.dirty_xattrs.end(); ++i) {
   588      ExtendedAttributeKey key(i->first.metahandle, i->first.key);
(...skipping 418 matching lines...)
  1007        write_transaction_(trans) {
  1008    Init(trans, parent_id, name);
  1009  }
  1010
  1011
  1012  void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
  1013                          const string& name) {
  1014    kernel_ = new EntryKernel;
  1015    ZeroFields(kernel_, BEGIN_FIELDS);
  1016    kernel_->mark_dirty();
- 1017    kernel_->ref(ID) = trans->directory_->NextId();
+ 1017    kernel_->put(ID, trans->directory_->NextId());
- 1018    kernel_->ref(META_HANDLE) = trans->directory_->NextMetahandle();
+ 1018    kernel_->put(META_HANDLE, trans->directory_->NextMetahandle());
- 1019    kernel_->ref(PARENT_ID) = parent_id;
+ 1019    kernel_->put(PARENT_ID, parent_id);
- 1020    kernel_->ref(NON_UNIQUE_NAME) = name;
+ 1020    kernel_->put(NON_UNIQUE_NAME, name);
  1021    const int64 now = Now();
- 1022    kernel_->ref(CTIME) = now;
+ 1022    kernel_->put(CTIME, now);
- 1023    kernel_->ref(MTIME) = now;
+ 1023    kernel_->put(MTIME, now);
  1024    // We match the database defaults here
- 1025    kernel_->ref(BASE_VERSION) = CHANGES_VERSION;
+ 1025    kernel_->put(BASE_VERSION, CHANGES_VERSION);
  1026    trans->directory()->InsertEntry(kernel_);
  1027    // Because this entry is new, it was originally deleted.
- 1028    kernel_->ref(IS_DEL) = true;
+ 1028    kernel_->put(IS_DEL, true);
  1029    trans->SaveOriginal(kernel_);
- 1030    kernel_->ref(IS_DEL) = false;
+ 1030    kernel_->put(IS_DEL, false);
  1031  }
  1032
  1033  MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
  1034                             const Id& id)
  1035      : Entry(trans), write_transaction_(trans) {
  1036    Entry same_id(trans, GET_BY_ID, id);
  1037    if (same_id.good()) {
  1038      kernel_ = NULL;  // already have an item with this ID.
  1039      return;
  1040    }
  1041    kernel_ = new EntryKernel;
  1042    ZeroFields(kernel_, BEGIN_FIELDS);
- 1043    kernel_->ref(ID) = id;
+ 1043    kernel_->put(ID, id);
  1044    kernel_->mark_dirty();
- 1045    kernel_->ref(META_HANDLE) = trans->directory_->NextMetahandle();
+ 1045    kernel_->put(META_HANDLE, trans->directory_->NextMetahandle());
- 1046    kernel_->ref(IS_DEL) = true;
+ 1046    kernel_->put(IS_DEL, true);
  1047    // We match the database defaults here
- 1048    kernel_->ref(BASE_VERSION) = CHANGES_VERSION;
+ 1048    kernel_->put(BASE_VERSION, CHANGES_VERSION);
  1049    trans->directory()->InsertEntry(kernel_);
  1050    trans->SaveOriginal(kernel_);
  1051  }
  1052
  1053  MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
  1054      : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
  1055    trans->SaveOriginal(kernel_);
  1056  }
  1057
  1058  MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
(...skipping 14 matching lines...)
  1073    } else {
  1074      dir()->Undelete(kernel_);
  1075      PutPredecessor(Id());  // Restores position to the 0th index.
  1076      return true;
  1077    }
  1078  }
  1079
  1080  bool MutableEntry::Put(Int64Field field, const int64& value) {
  1081    DCHECK(kernel_);
  1082    if (kernel_->ref(field) != value) {
- 1083      kernel_->ref(field) = value;
+ 1083      kernel_->put(field, value);
  1084      kernel_->mark_dirty();
  1085    }
  1086    return true;
  1087  }
  1088
  1089  bool MutableEntry::Put(IdField field, const Id& value) {
  1090    DCHECK(kernel_);
  1091    if (kernel_->ref(field) != value) {
  1092      if (ID == field) {
  1093        if (!dir()->ReindexId(kernel_, value))
  1094          return false;
  1095      } else if (PARENT_ID == field) {
  1096        dir()->ReindexParentId(kernel_, value);
  1097        PutPredecessor(Id());
  1098      } else {
- 1099        kernel_->ref(field) = value;
+ 1099        kernel_->put(field, value);
  1100      }
  1101      kernel_->mark_dirty();
  1102    }
  1103    return true;
  1104  }
  1105
  1106  bool MutableEntry::Put(BaseVersion field, int64 value) {
  1107    DCHECK(kernel_);
  1108    if (kernel_->ref(field) != value) {
- 1109      kernel_->ref(field) = value;
+ 1109      kernel_->put(field, value);
  1110      kernel_->mark_dirty();
  1111    }
  1112    return true;
  1113  }
  1114
  1115  bool MutableEntry::Put(StringField field, const string& value) {
  1116    return PutImpl(field, value);
  1117  }
  1118
  1119  bool MutableEntry::PutImpl(StringField field, const string& value) {
  1120    DCHECK(kernel_);
  1121    if (kernel_->ref(field) != value) {
- 1122      kernel_->ref(field) = value;
+ 1122      kernel_->put(field, value);
  1123      kernel_->mark_dirty();
  1124    }
  1125    return true;
  1126  }
  1127
  1128  bool MutableEntry::Put(IndexedBitField field, bool value) {
  1129    DCHECK(kernel_);
  1130    if (kernel_->ref(field) != value) {
  1131      MetahandleSet* index;
  1132      if (IS_UNSYNCED == field)
  1133        index = dir()->kernel_->unsynced_metahandles;
  1134      else
  1135        index = dir()->kernel_->unapplied_update_metahandles;
  1136
  1137      ScopedKernelLock lock(dir());
  1138      if (value)
  1139        CHECK(index->insert(kernel_->ref(META_HANDLE)).second);
  1140      else
  1141        CHECK(1 == index->erase(kernel_->ref(META_HANDLE)));
- 1142      kernel_->ref(field) = value;
+ 1142      kernel_->put(field, value);
  1143      kernel_->mark_dirty();
  1144    }
  1145    return true;
  1146  }
  1147
  1148  void MutableEntry::UnlinkFromOrder() {
  1149    Id old_previous = Get(PREV_ID);
  1150    Id old_next = Get(NEXT_ID);
  1151
  1152    // Self-looping signifies that this item is not in the order. If we were to
(...skipping 259 matching lines...)
  1412    return s << std::dec;
  1413  }
  1414
  1415  FastDump& operator<<(FastDump& dump, const syncable::Blob& blob) {
  1416    if (blob.empty())
  1417      return dump;
  1418    string buffer(HexEncode(&blob[0], blob.size()));
  1419    dump.out_->sputn(buffer.c_str(), buffer.size());
  1420    return dump;
  1421  }
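
One pattern worth calling out from the hunks above: Directory keeps its in-memory indices as sets of EntryKernel pointers ordered by a single key field, and lookups such as GetEntryById and GetEntryByHandle fill that key into a shared scratch kernel (kernel_->needle) before calling find(). Here is a minimal, self-contained analogue; the names and types are simplified assumptions for illustration, not the real syncable classes.

  #include <cstddef>
  #include <cstdint>
  #include <iostream>
  #include <set>

  struct EntryKernel {
    int64_t metahandle = 0;
    void put_metahandle(int64_t value) { metahandle = value; }  // stand-in for put(META_HANDLE, ...)
  };

  // Order the index by the key field so a scratch "needle" can be used with find().
  struct LessByMetahandle {
    bool operator()(const EntryKernel* a, const EntryKernel* b) const {
      return a->metahandle < b->metahandle;
    }
  };
  typedef std::set<EntryKernel*, LessByMetahandle> MetahandlesIndex;

  EntryKernel* GetEntryByHandle(MetahandlesIndex* index, EntryKernel* needle,
                                int64_t metahandle) {
    needle->put_metahandle(metahandle);            // fill in only the key field
    MetahandlesIndex::iterator found = index->find(needle);
    return found != index->end() ? *found : NULL;  // NULL on a miss, as in the CL
  }

  int main() {
    EntryKernel entry;
    entry.put_metahandle(7);
    EntryKernel needle;                            // scratch key, like kernel_->needle
    MetahandlesIndex index;
    index.insert(&entry);
    std::cout << (GetEntryByHandle(&index, &needle, 7) == &entry) << std::endl;  // prints 1
    return 0;
  }

Because the comparator only looks at the key field, the needle never needs to be a fully populated entry; the real code guards the shared needle with a ScopedKernelLock around these lookups.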
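
Similarly, the AllPathsMatcher bounds in the patch rely on the child index apparently being ordered by (PARENT_ID, META_HANDLE): pairing the parent id with the minimum and maximum int64 values brackets exactly the children of that parent. A toy analogue with a plain std::set of pairs (simplified, assumed types; not the real parent_id_child_index):

  #include <cstdint>
  #include <iostream>
  #include <limits>
  #include <set>
  #include <utility>

  typedef std::pair<int, int64_t> Key;  // (parent_id, metahandle), ordered lexicographically

  int main() {
    std::set<Key> index;
    index.insert(Key(1, 10));
    index.insert(Key(1, 42));
    index.insert(Key(2, 5));

    const int parent_id = 1;
    // Min/max metahandle values bracket every entry whose parent_id matches.
    std::set<Key>::iterator it =
        index.lower_bound(Key(parent_id, std::numeric_limits<int64_t>::min()));
    std::set<Key>::iterator end =
        index.upper_bound(Key(parent_id, std::numeric_limits<int64_t>::max()));
    for (; it != end; ++it)
      std::cout << it->second << std::endl;  // prints 10 then 42
    return 0;
  }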
