Index: components/sync/syncable/directory.cc
diff --git a/sync/syncable/directory.cc b/components/sync/syncable/directory.cc
similarity index 89%
rename from sync/syncable/directory.cc
rename to components/sync/syncable/directory.cc
index ca1efc1c95bda86f708e288e667e15b106d39b25..c17e0257e90db34124cb7c7aeffb0585a5e3dfee 100644
--- a/sync/syncable/directory.cc
+++ b/components/sync/syncable/directory.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "sync/syncable/directory.h"
+#include "components/sync/syncable/directory.h"
#include <stddef.h>
#include <stdint.h>
@@ -17,22 +17,22 @@
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
-#include "sync/internal_api/public/base/attachment_id_proto.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/in_memory_directory_backing_store.h"
-#include "sync/syncable/model_neutral_mutable_entry.h"
-#include "sync/syncable/on_disk_directory_backing_store.h"
-#include "sync/syncable/scoped_kernel_lock.h"
-#include "sync/syncable/scoped_parent_child_index_updater.h"
-#include "sync/syncable/syncable-inl.h"
-#include "sync/syncable/syncable_base_transaction.h"
-#include "sync/syncable/syncable_changes_version.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
+#include "components/sync/base/attachment_id_proto.h"
+#include "components/sync/base/unique_position.h"
+#include "components/sync/base/unrecoverable_error_handler.h"
+#include "components/sync/syncable/entry.h"
+#include "components/sync/syncable/entry_kernel.h"
+#include "components/sync/syncable/in_memory_directory_backing_store.h"
+#include "components/sync/syncable/model_neutral_mutable_entry.h"
+#include "components/sync/syncable/on_disk_directory_backing_store.h"
+#include "components/sync/syncable/scoped_kernel_lock.h"
+#include "components/sync/syncable/scoped_parent_child_index_updater.h"
+#include "components/sync/syncable/syncable-inl.h"
+#include "components/sync/syncable/syncable_base_transaction.h"
+#include "components/sync/syncable/syncable_changes_version.h"
+#include "components/sync/syncable/syncable_read_transaction.h"
+#include "components/sync/syncable/syncable_util.h"
+#include "components/sync/syncable/syncable_write_transaction.h"
using std::string;
@@ -73,8 +73,7 @@ bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
}
Directory::SaveChangesSnapshot::SaveChangesSnapshot()
- : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
-}
+ : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {}
Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
STLDeleteElements(&dirty_metas);
@@ -134,8 +133,7 @@ DirOpenResult Directory::Open(
const WeakHandle<TransactionObserver>& transaction_observer) {
TRACE_EVENT0("sync", "SyncDatabaseOpen");
- const DirOpenResult result =
- OpenImpl(name, delegate, transaction_observer);
+ const DirOpenResult result = OpenImpl(name, delegate, transaction_observer);
if (OPENED != result)
Close();
@@ -170,7 +168,8 @@ void Directory::InitializeIndices(MetahandlesMap* handles_map) {
kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
}
DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
- kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
+ kernel_->ids_map.end())
+ << "Unexpected duplicate use of ID";
kernel_->ids_map[entry->ref(ID).value()] = entry;
DCHECK(!entry->is_dirty());
AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
@@ -180,8 +179,7 @@ void Directory::InitializeIndices(MetahandlesMap* handles_map) {
DirOpenResult Directory::OpenImpl(
const string& name,
DirectoryChangeDelegate* delegate,
- const WeakHandle<TransactionObserver>&
- transaction_observer) {
+ const WeakHandle<TransactionObserver>& transaction_observer) {
KernelLoadInfo info;
// Temporary indices before kernel_ initialized in case Load fails. We O(1)
// swap these later.
@@ -233,7 +231,7 @@ void Directory::Close() {
void Directory::OnUnrecoverableError(const BaseTransaction* trans,
const tracked_objects::Location& location,
- const std::string & message) {
+ const std::string& message) {
DCHECK(trans != NULL);
unrecoverable_error_set_ = true;
unrecoverable_error_handler_.Call(
@@ -286,8 +284,7 @@ EntryKernel* Directory::GetEntryByHandle(int64_t metahandle) {
EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
int64_t metahandle) {
// Look up in memory
- MetahandlesMap::iterator found =
- kernel_->metahandles_map.find(metahandle);
+ MetahandlesMap::iterator found = kernel_->metahandles_map.find(metahandle);
if (found != kernel_->metahandles_map.end()) {
// Found it in memory. Easy.
return found->second;
@@ -295,9 +292,9 @@ EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
return NULL;
}
-bool Directory::GetChildHandlesById(
- BaseTransaction* trans, const Id& parent_id,
- Directory::Metahandles* result) {
+bool Directory::GetChildHandlesById(BaseTransaction* trans,
+ const Id& parent_id,
+ Directory::Metahandles* result) {
if (!SyncAssert(this == trans->directory(), FROM_HERE,
"Directories don't match", trans))
return false;
@@ -308,9 +305,8 @@ bool Directory::GetChildHandlesById(
return true;
}
-int Directory::GetTotalNodeCount(
- BaseTransaction* trans,
- EntryKernel* kernel) const {
+int Directory::GetTotalNodeCount(BaseTransaction* trans,
+ EntryKernel* kernel) const {
if (!SyncAssert(this == trans->directory(), FROM_HERE,
"Directories don't match", trans))
return false;
@@ -322,8 +318,8 @@ int Directory::GetTotalNodeCount(
while (!child_sets.empty()) {
const OrderedChildSet* set = child_sets.front();
child_sets.pop_front();
- for (OrderedChildSet::const_iterator it = set->begin();
- it != set->end(); ++it) {
+ for (OrderedChildSet::const_iterator it = set->begin(); it != set->end();
+ ++it) {
count++;
GetChildSetForKernel(trans, *it, &child_sets);
}
@@ -348,9 +344,8 @@ void Directory::GetChildSetForKernel(
child_sets->push_back(descendants);
}
-int Directory::GetPositionIndex(
- BaseTransaction* trans,
- EntryKernel* kernel) const {
+int Directory::GetPositionIndex(BaseTransaction* trans,
+ EntryKernel* kernel) const {
const OrderedChildSet* siblings =
kernel_->parent_child_index.GetSiblings(kernel);
@@ -371,32 +366,26 @@ bool Directory::InsertEntry(const ScopedKernelLock& lock,
static const char error[] = "Entry already in memory index.";
- if (!SyncAssert(
- kernel_->metahandles_map.insert(
- std::make_pair(entry->ref(META_HANDLE), entry)).second,
- FROM_HERE,
- error,
- trans)) {
+ if (!SyncAssert(kernel_->metahandles_map
+ .insert(std::make_pair(entry->ref(META_HANDLE), entry))
+ .second,
+ FROM_HERE, error, trans)) {
return false;
}
if (!SyncAssert(
- kernel_->ids_map.insert(
- std::make_pair(entry->ref(ID).value(), entry)).second,
- FROM_HERE,
- error,
- trans)) {
+ kernel_->ids_map.insert(std::make_pair(entry->ref(ID).value(), entry))
+ .second,
+ FROM_HERE, error, trans)) {
return false;
}
if (ParentChildIndex::ShouldInclude(entry)) {
- if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
- FROM_HERE,
- error,
+ if (!SyncAssert(kernel_->parent_child_index.Insert(entry), FROM_HERE, error,
trans)) {
return false;
}
}
- AddToAttachmentIndex(
- lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));
+ AddToAttachmentIndex(lock, entry->ref(META_HANDLE),
+ entry->ref(ATTACHMENT_METADATA));
// Should NEVER be created with a client tag or server tag.
if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
@@ -420,7 +409,7 @@ bool Directory::ReindexId(BaseWriteTransaction* trans,
{
// Update the indices that depend on the ID field.
ScopedParentChildIndexUpdater updater_b(lock, entry,
- &kernel_->parent_child_index);
+ &kernel_->parent_child_index);
size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
DCHECK_EQ(1U, num_erased);
entry->put(ID, new_id);
@@ -437,7 +426,7 @@ bool Directory::ReindexParentId(BaseWriteTransaction* trans,
{
// Update the indices that depend on the PARENT_ID field.
ScopedParentChildIndexUpdater index_updater(lock, entry,
- &kernel_->parent_child_index);
+ &kernel_->parent_child_index);
entry->put(PARENT_ID, new_parent_id);
}
return true;
@@ -471,9 +460,9 @@ void Directory::AddToAttachmentIndex(
IndexByAttachmentId::iterator iter =
kernel_->index_by_attachment_id.find(unique_id);
if (iter == kernel_->index_by_attachment_id.end()) {
- iter = kernel_->index_by_attachment_id.insert(std::make_pair(
- unique_id,
- MetahandleSet())).first;
+ iter = kernel_->index_by_attachment_id
+ .insert(std::make_pair(unique_id, MetahandleSet()))
+ .first;
}
iter->second.insert(metahandle);
}
@@ -500,8 +489,8 @@ void Directory::GetMetahandlesByAttachmentId(
if (index_iter == kernel_->index_by_attachment_id.end())
return;
const MetahandleSet& metahandle_set = index_iter->second;
- std::copy(
- metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
+ std::copy(metahandle_set.begin(), metahandle_set.end(),
+ back_inserter(*result));
}
bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
@@ -517,26 +506,21 @@ void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
const EntryKernel* const entry) const {
bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
- !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
- !entry->ref(IS_UNSYNCED);
+ !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
+ !entry->ref(IS_UNSYNCED);
if (safe) {
int64_t handle = entry->ref(META_HANDLE);
const ModelType type = entry->GetServerModelType();
- if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
- FROM_HERE,
+ if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U, FROM_HERE,
"Dirty metahandles should be empty", trans))
return false;
// TODO(tim): Bug 49278.
- if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
- FROM_HERE,
- "Unsynced handles should be empty",
- trans))
+ if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle), FROM_HERE,
+ "Unsynced handles should be empty", trans))
return false;
if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
- FROM_HERE,
- "Unapplied metahandles should be empty",
- trans))
+ FROM_HERE, "Unapplied metahandles should be empty", trans))
return false;
}
@@ -580,8 +564,8 @@ void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
// This one we reset on failure.
kernel_->info_status = KERNEL_SHARE_INFO_VALID;
- delete_journal_->TakeSnapshotAndClear(
- &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
+ delete_journal_->TakeSnapshotAndClear(&trans, &snapshot->delete_journals,
+ &snapshot->delete_journals_to_purge);
}
bool Directory::SaveChanges() {
@@ -614,8 +598,8 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
i != snapshot.dirty_metas.end(); ++i) {
MetahandlesMap::iterator found =
kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
- EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
- NULL : found->second);
+ EntryKernel* entry =
+ (found == kernel_->metahandles_map.end() ? NULL : found->second);
if (entry && SafeToPurgeFromMemory(&trans, entry)) {
// We now drop deleted metahandles that are up to date on both the client
// and the server.
@@ -634,13 +618,11 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
DCHECK_EQ(1u, num_erased);
}
- if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
- FROM_HERE,
- "Deleted entry still present",
- (&trans)))
+ if (!SyncAssert(!kernel_->parent_child_index.Contains(entry), FROM_HERE,
+ "Deleted entry still present", (&trans)))
return false;
- RemoveFromAttachmentIndex(
- lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));
+ RemoveFromAttachmentIndex(lock, entry->ref(META_HANDLE),
+ entry->ref(ATTACHMENT_METADATA));
delete entry;
}
@@ -652,8 +634,8 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
void Directory::UnapplyEntry(EntryKernel* entry) {
int64_t handle = entry->ref(META_HANDLE);
- ModelType server_type = GetModelTypeFromSpecifics(
- entry->ref(SERVER_SPECIFICS));
+ ModelType server_type =
+ GetModelTypeFromSpecifics(entry->ref(SERVER_SPECIFICS));
// Clear enough so that on the next sync cycle all local data will
// be overwritten.
@@ -711,8 +693,8 @@ void Directory::DeleteEntry(const ScopedKernelLock& lock,
EntryKernel* entry,
EntryKernelSet* entries_to_journal) {
int64_t handle = entry->ref(META_HANDLE);
- ModelType server_type = GetModelTypeFromSpecifics(
- entry->ref(SERVER_SPECIFICS));
+ ModelType server_type =
+ GetModelTypeFromSpecifics(entry->ref(SERVER_SPECIFICS));
kernel_->metahandles_to_purge.insert(handle);
@@ -723,20 +705,17 @@ void Directory::DeleteEntry(const ScopedKernelLock& lock,
DCHECK_EQ(1u, num_erased);
num_erased = kernel_->unsynced_metahandles.erase(handle);
DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
- num_erased =
- kernel_->unapplied_update_metahandles[server_type].erase(handle);
+ num_erased = kernel_->unapplied_update_metahandles[server_type].erase(handle);
DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
if (kernel_->parent_child_index.Contains(entry))
kernel_->parent_child_index.Remove(entry);
if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
- num_erased =
- kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
+ num_erased = kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
DCHECK_EQ(1u, num_erased);
}
if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
- num_erased =
- kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
+ num_erased = kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
DCHECK_EQ(1u, num_erased);
}
RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));
@@ -809,8 +788,8 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
delete_journal_->AddJournalBatch(&trans, entries_to_journal);
// Ensure meta tracking for these data types reflects the purged state.
- for (ModelTypeSet::Iterator it = disabled_types.First();
- it.Good(); it.Inc()) {
+ for (ModelTypeSet::Iterator it = disabled_types.First(); it.Good();
+ it.Inc()) {
kernel_->persisted_info.transaction_version[it.Get()] = 0;
// Don't discard progress markers or context for unapplied types.
@@ -907,9 +886,8 @@ void Directory::GetDownloadProgress(
kernel_->persisted_info.download_progress[model_type]);
}
-void Directory::GetDownloadProgressAsString(
- ModelType model_type,
- std::string* value_out) const {
+void Directory::GetDownloadProgressAsString(ModelType model_type,
+ std::string* value_out) const {
ScopedKernelLock lock(this);
kernel_->persisted_info.download_progress[model_type].SerializeToString(
value_out);
@@ -951,10 +929,9 @@ void Directory::GetDataTypeContext(BaseTransaction* trans,
context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}
-void Directory::SetDataTypeContext(
- BaseWriteTransaction* trans,
- ModelType type,
- const sync_pb::DataTypeContext& context) {
+void Directory::SetDataTypeContext(BaseWriteTransaction* trans,
+ ModelType type,
+ const sync_pb::DataTypeContext& context) {
ScopedKernelLock lock(this);
kernel_->persisted_info.datatype_context[type].CopyFrom(context);
kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
@@ -978,8 +955,8 @@ bool Directory::InitialSyncEndedForType(ModelType type) {
return InitialSyncEndedForType(&trans, type);
}
-bool Directory::InitialSyncEndedForType(
- BaseTransaction* trans, ModelType type) {
+bool Directory::InitialSyncEndedForType(BaseTransaction* trans,
+ ModelType type) {
// True iff the type's root node has been created and changes
// for the type have been applied at least once.
Entry root(trans, GET_TYPE_ROOT, type);
@@ -1033,7 +1010,6 @@ void Directory::set_bag_of_chips(const string& bag_of_chips) {
kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
-
string Directory::cache_guid() const {
// No need to lock since nothing ever writes to it after load.
return kernel_->cache_guid;
@@ -1195,7 +1171,7 @@ bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
const MetahandleSet& handles) {
MetahandleSet::const_iterator i;
- for (i = handles.begin() ; i != handles.end() ; ++i) {
+ for (i = handles.begin(); i != handles.end(); ++i) {
int64_t metahandle = *i;
Entry e(trans, GET_BY_HANDLE, metahandle);
if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
@@ -1204,28 +1180,24 @@ bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
syncable::Id parentid = e.GetParentId();
if (id.IsRoot()) {
- if (!SyncAssert(e.GetIsDir(), FROM_HERE,
- "Entry should be a directory",
+ if (!SyncAssert(e.GetIsDir(), FROM_HERE, "Entry should be a directory",
trans))
return false;
- if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
- "Entry should be root",
+ if (!SyncAssert(parentid.IsRoot(), FROM_HERE, "Entry should be root",
trans))
- return false;
+ return false;
if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
trans))
- return false;
+ return false;
continue;
}
if (!e.GetIsDel()) {
if (!SyncAssert(id != parentid, FROM_HERE,
- "Id should be different from parent id.",
- trans))
- return false;
+ "Id should be different from parent id.", trans))
+ return false;
if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
- "Non unique name should not be empty.",
- trans))
+ "Non unique name should not be empty.", trans))
return false;
if (!parentid.IsNull()) {
@@ -1260,8 +1232,7 @@ bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
if (CHANGES_VERSION == base_version || 0 == base_version) {
ModelType model_type = e.GetModelType();
bool is_client_creatable_type_root_folder =
- parentid.IsRoot() &&
- IsTypeWithClientGeneratedRoot(model_type) &&
+ parentid.IsRoot() && IsTypeWithClientGeneratedRoot(model_type) &&
e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
if (e.GetIsUnappliedUpdate()) {
// Must be a new item, or a de-duplicated unique client tag
@@ -1275,16 +1246,14 @@ bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
}
// It came from the server, so it must have a server ID.
if (!SyncAssert(id.ServerKnows(), FROM_HERE,
- "The id should be from a server.",
- trans))
+ "The id should be from a server.", trans))
return false;
} else {
if (e.GetIsDir()) {
// TODO(chron): Implement this mode if clients ever need it.
// For now, you can't combine a client tag and a directory.
if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
- "Directory cannot have a client tag.",
- trans))
+ "Directory cannot have a client tag.", trans))
return false;
}
if (is_client_creatable_type_root_folder) {
@@ -1313,8 +1282,7 @@ bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
// is an unsynced create or a successful delete in the local copy.
// Either way, that's a mismatch.
if (!SyncAssert(0 == server_version, FROM_HERE,
- "Server version should be zero.",
- trans))
+ "Server version should be zero.", trans))
return false;
// Items that aren't using the unique client tag should have a zero
// base version only if they have a local ID. Items with unique client
@@ -1322,15 +1290,12 @@ bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
// de-duplication; the unique client tag trumps the server ID.
if (!using_unique_client_tag) {
if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
- "Should be a client only id.",
- trans))
+ "Should be a client only id.", trans))
return false;
}
}
} else {
- if (!SyncAssert(id.ServerKnows(),
- FROM_HERE,
- "Should be a server id.",
+ if (!SyncAssert(id.ServerKnows(), FROM_HERE, "Should be a server id.",
trans))
return false;
}
@@ -1452,7 +1417,7 @@ void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
//
// We really shouldn't need to support this. See TODO above.
pos = UniquePosition::InitialPosition(suffix);
- } else {
+ } else {
DCHECK(!siblings->empty());
pos = UniquePosition::Before(successor_pos, suffix);
}
@@ -1472,9 +1437,8 @@ void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
++neighbour;
if (neighbour == siblings->end()) {
// Inserting at the end of the list.
- UniquePosition pos = UniquePosition::After(
- predecessor->ref(UNIQUE_POSITION),
- suffix);
+ UniquePosition pos =
+ UniquePosition::After(predecessor->ref(UNIQUE_POSITION), suffix);
e->put(UNIQUE_POSITION, pos);
return;
}
@@ -1489,10 +1453,9 @@ void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
CHECK(successor->ref(UNIQUE_POSITION).IsValid()) << *successor;
// Finally, the normal case: inserting between two elements.
- UniquePosition pos = UniquePosition::Between(
- predecessor->ref(UNIQUE_POSITION),
- successor->ref(UNIQUE_POSITION),
- suffix);
+ UniquePosition pos =
+ UniquePosition::Between(predecessor->ref(UNIQUE_POSITION),
+ successor->ref(UNIQUE_POSITION), suffix);
e->put(UNIQUE_POSITION, pos);
return;
}