Chromium Code Reviews

Side by Side Diff: chrome/browser/sync/syncable/syncable.cc

Issue 194065: Initial commit of sync engine code to browser/sync.... (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: Fixes to gtest include path, reverted syncapi. Created 11 years, 3 months ago
Property Changes:
Added: svn:eol-style
+ LF
1 // Copyright (c) 2009 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "chrome/browser/sync/syncable/syncable.h"
6
7 #include <sys/stat.h>
8 #include <sys/types.h>
9 #include <time.h>
10 #ifdef OS_MACOSX
11 #include <CoreFoundation/CoreFoundation.h>
12 #elif defined(OS_LINUX)
13 #include <glib.h>
14 #elif defined(OS_WINDOWS)
15 #include <shlwapi.h> // for PathMatchSpec
16 #endif
17
18 #include <algorithm>
19 #include <functional>
20 #include <iomanip>
21 #include <iterator>
22 #include <set>
23 #include <string>
24
25 #include "base/hash_tables.h"
26 #include "base/logging.h"
27 #include "base/perftimer.h"
28 #include "base/scoped_ptr.h"
29 #include "base/time.h"
30 #include "chrome/browser/sync/engine/syncer.h"
31 #include "chrome/browser/sync/engine/syncer_util.h"
32 #include "chrome/browser/sync/protocol/service_constants.h"
33 #include "chrome/browser/sync/syncable/directory_backing_store.h"
34 #include "chrome/browser/sync/syncable/directory_manager.h"
35 #include "chrome/browser/sync/syncable/syncable-inl.h"
36 #include "chrome/browser/sync/syncable/syncable_changes_version.h"
37 #include "chrome/browser/sync/syncable/syncable_columns.h"
38 #include "chrome/browser/sync/util/character_set_converters.h"
39 #include "chrome/browser/sync/util/compat-file.h"
40 #include "chrome/browser/sync/util/crypto_helpers.h"
41 #include "chrome/browser/sync/util/event_sys-inl.h"
42 #include "chrome/browser/sync/util/fast_dump.h"
43 #include "chrome/browser/sync/util/path_helpers.h"
44
45 namespace {
46 enum InvariantCheckLevel {
47 OFF = 0,
48 VERIFY_IN_MEMORY = 1,
49 FULL_DB_VERIFICATION = 2
50 };
51
52 static const InvariantCheckLevel kInvariantCheckLevel = VERIFY_IN_MEMORY;
53
54 // Max number of milliseconds to spend checking syncable entry invariants
55 static const int kInvariantCheckMaxMs = 50;
56 } // namespace
57
58 // If sizeof(time_t) != sizeof(int32), we need to alter or expand the sqlite
59 // datatype.
60 COMPILE_ASSERT(sizeof(time_t) == sizeof(int32), time_t_is_not_int32);
61
62 using browser_sync::FastDump;
63 using browser_sync::SyncerUtil;
64 using std::string;
65
66
67 namespace syncable {
68
69 int64 Now() {
70 #ifdef OS_WINDOWS
71 FILETIME filetime;
72 SYSTEMTIME systime;
73 GetSystemTime(&systime);
74 SystemTimeToFileTime(&systime, &filetime);
75 // MSDN recommends converting via memcpy like this.
76 LARGE_INTEGER n;
77 memcpy(&n, &filetime, sizeof(filetime));
78 return n.QuadPart;
79 #elif (defined(OS_LINUX) || defined(OS_MACOSX))
80 struct timeval tv;
81 gettimeofday(&tv, NULL);
82 return static_cast<int64>(tv.tv_sec);
83 #else
84 #error NEED OS SPECIFIC Now() implementation
85 #endif
86 }
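
Note that the two branches above do not return the same units: the Windows
path yields FILETIME ticks (100-nanosecond intervals since 1601), while the
POSIX path yields whole seconds since the Unix epoch. A minimal sketch of
the standard conversion between the two representations, for illustration
only (the constant is the number of 100-ns intervals between the 1601 and
1970 epochs):

#include <stdint.h>

int64_t FileTimeTicksToUnixSeconds(int64_t filetime_ticks) {
  const int64_t kEpochDeltaTicks = 116444736000000000LL;  // 1601 -> 1970.
  const int64_t kTicksPerSecond = 10000000LL;  // 100-ns units per second.
  return (filetime_ticks - kEpochDeltaTicks) / kTicksPerSecond;
}
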
87
88 ///////////////////////////////////////////////////////////////////////////
89 // Compare functions and hashes for the indices.
90
91 // Callback for sqlite3
92 int ComparePathNames16(void*, int a_bytes, const void* a, int b_bytes,
93 const void* b) {
94 #ifdef OS_WINDOWS
95 DCHECK_EQ(0, a_bytes % 2);
96 DCHECK_EQ(0, b_bytes % 2);
97 int result = CompareString(LOCALE_INVARIANT, NORM_IGNORECASE,
98 static_cast<const PathChar*>(a), a_bytes / 2,
99 static_cast<const PathChar*>(b), b_bytes / 2);
100 CHECK(0 != result) << "Error comparing strings: " << GetLastError();
101 return result - 2; // Convert to -1, 0, 1
102 #elif defined(OS_LINUX)
103 // The name is a misnomer on Linux: these are already UTF-8 byte strings.
104 gchar *case_folded_a;
105 gchar *case_folded_b;
106 GError *err = NULL;
107 case_folded_a = g_utf8_casefold(reinterpret_cast<const gchar*>(a), a_bytes);
108 CHECK(case_folded_a != NULL) << "g_utf8_casefold failed";
109 case_folded_b = g_utf8_casefold(reinterpret_cast<const gchar*>(b), b_bytes);
110 CHECK(case_folded_b != NULL) << "g_utf8_casefold failed";
111 gint result = g_utf8_collate(case_folded_a, case_folded_b);
112 g_free(case_folded_a);
113 g_free(case_folded_b);
114 if (result < 0) return -1;
115 if (result > 0) return 1;
116 return 0;
117 #elif defined(OS_MACOSX)
118 CFStringRef a_str;
119 CFStringRef b_str;
120 a_str = CFStringCreateWithBytes(NULL, reinterpret_cast<const UInt8*>(a),
121 a_bytes, kCFStringEncodingUTF8, FALSE);
122 b_str = CFStringCreateWithBytes(NULL, reinterpret_cast<const UInt8*>(b),
123 b_bytes, kCFStringEncodingUTF8, FALSE);
124 CFComparisonResult res;
125 res = CFStringCompare(a_str, b_str, kCFCompareCaseInsensitive);
126 CFRelease(a_str);
127 CFRelease(b_str);
128 return res;
129 #else
130 #error no ComparePathNames16() for your OS
131 #endif
132 }
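
The signature of ComparePathNames16 matches sqlite's custom-collation
callback, int (*xCompare)(void*, int, const void*, int, const void*). A
hedged sketch of how such a callback is typically registered; the collation
name "PATHNAME" is hypothetical here, and the actual registration lives in
the backing store code:

#include <sqlite3.h>

// Register the comparator as a collation so that ORDER BY and index
// comparisons on name columns agree with the in-memory indices.
// SQLITE_UTF8 assumes the Linux/Mac build; a Windows build comparing
// 2-byte units would register with SQLITE_UTF16 instead.
int RegisterPathNameCollation(sqlite3* db) {
  return sqlite3_create_collation(db, "PATHNAME", SQLITE_UTF8,
                                  NULL,  // No user data is needed.
                                  &ComparePathNames16);
}
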
133
134 template <Int64Field field_index>
135 class SameField {
136 public:
137 inline bool operator()(const syncable::EntryKernel* a,
138 const syncable::EntryKernel* b) const {
139 return a->ref(field_index) == b->ref(field_index);
140 }
141 };
142
143 template <Int64Field field_index>
144 class HashField {
145 public:
146 inline size_t operator()(const syncable::EntryKernel* a) const {
147 return hasher_(a->ref(field_index));
148 }
149   base::hash_set<int64>::hasher hasher_;  // A hash functor, not a container.
150 };
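
SameField and HashField are the equality and hash functors for hash-based
indices over entry pointers, keyed by a single int64 field. A sketch of how
such an index can be declared; the typedef name is illustrative, and
std::unordered_set stands in for base::hash_set:

#include <unordered_set>

// A hash index of entries keyed by their META_HANDLE field: HashField
// computes the hash and SameField tests equality, so two kernels
// collide iff their metahandles are equal.
typedef std::unordered_set<syncable::EntryKernel*,
                           HashField<META_HANDLE>,
                           SameField<META_HANDLE> > IllustrativeHandleIndex;
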
151
152 // TODO(ncarter): Rename!
153 int ComparePathNames(const PathString& a, const PathString& b) {
154 const size_t val_size = sizeof(PathString::value_type);
155 return ComparePathNames16(NULL, a.size() * val_size, a.data(),
156 b.size() * val_size, b.data());
157 }
158
159 class LessParentIdAndNames {
160 public:
161 bool operator() (const syncable::EntryKernel* a,
162 const syncable::EntryKernel* b) const {
163 if (a->ref(PARENT_ID) != b->ref(PARENT_ID))
164 return a->ref(PARENT_ID) < b->ref(PARENT_ID);
165 return ComparePathNames(a->ref(NAME), b->ref(NAME)) < 0;
166 }
167 };
168
169 bool LessPathNames::operator() (const PathString& a,
170 const PathString& b) const {
171 return ComparePathNames(a, b) < 0;
172 }
173
174 // static
175 Name Name::FromEntryKernel(EntryKernel* kernel) {
176 PathString& sync_name_ref = kernel->ref(UNSANITIZED_NAME).empty() ?
177 kernel->ref(NAME) : kernel->ref(UNSANITIZED_NAME);
178 return Name(kernel->ref(NAME), sync_name_ref, kernel->ref(NON_UNIQUE_NAME));
179 }
180
181 ///////////////////////////////////////////////////////////////////////////
182 // Directory
183
184 static const DirectoryChangeEvent kShutdownChangesEvent =
185 { DirectoryChangeEvent::SHUTDOWN, 0, 0 };
186
187 void DestroyThreadNodeKey(void* vnode) {
188 ThreadNode* const node = reinterpret_cast<ThreadNode*>(vnode);
189 CHECK(!node->in_list)
190 << "\nThread exited while holding the transaction mutex!\n" << *node;
191 delete node;
192 }
193
194 Directory::Kernel::Kernel(const PathString& db_path,
195 const PathString& name,
196 const KernelLoadInfo& info)
197 : db_path(db_path),
198 refcount(1),
199 name_(name),
200 metahandles_index(new Directory::MetahandlesIndex),
201 ids_index(new Directory::IdsIndex),
202 parent_id_and_names_index(new Directory::ParentIdAndNamesIndex),
203 extended_attributes(new ExtendedAttributes),
204 unapplied_update_metahandles(new MetahandleSet),
205 unsynced_metahandles(new MetahandleSet),
206 channel(new Directory::Channel(syncable::DIRECTORY_DESTROYED)),
207 changes_channel(new Directory::ChangesChannel(kShutdownChangesEvent)),
208 last_sync_timestamp_(info.kernel_info.last_sync_timestamp),
209 initial_sync_ended_(info.kernel_info.initial_sync_ended),
210 store_birthday_(info.kernel_info.store_birthday),
211 next_id(info.kernel_info.next_id),
212 cache_guid_(info.cache_guid),
213 next_metahandle(info.max_metahandle + 1) {
214 info_status_ = Directory::KERNEL_SHARE_INFO_VALID;
215 CHECK(0 == pthread_mutex_init(&mutex, NULL));
216 CHECK(0 == pthread_key_create(&thread_node_key, &DestroyThreadNodeKey));
217 }
218
219 inline void DeleteEntry(EntryKernel* kernel) {
220 delete kernel;
221 }
222
223 void Directory::Kernel::AddRef() {
224 base::subtle::NoBarrier_AtomicIncrement(&refcount, 1);
225 }
226
227 void Directory::Kernel::Release() {
228 if (!base::subtle::NoBarrier_AtomicIncrement(&refcount, -1))
229 delete this;
230 }
231
232 Directory::Kernel::~Kernel() {
233 CHECK(0 == refcount);
234 delete channel;
235 delete changes_channel;
236 CHECK(0 == pthread_mutex_destroy(&mutex));
237 pthread_key_delete(thread_node_key);
238 delete unsynced_metahandles;
239 delete unapplied_update_metahandles;
240 delete extended_attributes;
241 delete parent_id_and_names_index;
242 delete ids_index;
243 for_each(metahandles_index->begin(), metahandles_index->end(), DeleteEntry);
244 delete metahandles_index;
245 }
246
247 Directory::Directory() : kernel_(NULL), store_(NULL) {
248 }
249
250 Directory::~Directory() {
251 Close();
252 }
253
254 BOOL PathNameMatch(const PathString& pathname, const PathString& pathspec) {
255 #ifdef OS_WINDOWS
256 // NB: If we go Vista-only, this is easier:
257 // http://msdn2.microsoft.com/en-us/library/ms628611.aspx
258
259 // PathMatchSpec strips spaces from the start of pathspec, so we compare those
260 // ourselves.
261 const PathChar* pathname_ptr = pathname.c_str();
262 const PathChar* pathspec_ptr = pathspec.c_str();
263
264 while (*pathname_ptr == ' ' && *pathspec_ptr == ' ')
265 ++pathname_ptr, ++pathspec_ptr;
266
267 // If the pathspec has more initial spaces than the pathname, then the
268 // result from PathMatchSpec will be erroneous.
269 if (*pathspec_ptr == ' ')
270 return FALSE;
271
272 // PathMatchSpec also gets "confused" when there are ';' characters in the
273 // name or in the spec. For example, matching ";" against ";" makes
274 // PathMatchSpec return FALSE (which is wrong). Luckily, we can easily fix
275 // this by substituting ';' with ':', which is an illegal character in a file
276 // name, so we will never see it there. With ':' in the path name and spec,
277 // PathMatchSpec works fine.
278 if ((NULL == wcschr(pathname_ptr, L';')) &&
279 (NULL == wcschr(pathspec_ptr, L';'))) {
280 // No ';' in file name and in spec. Just pass it as it is.
281 return ::PathMatchSpec(pathname_ptr, pathspec_ptr);
282 }
283
284 // We need to substitute ';' with ':' in both the name and the spec.
285 PathString name_subst(pathname_ptr);
286 PathString spec_subst(pathspec_ptr);
287
288 PathString::size_type index = name_subst.find(L';');
289 while (PathString::npos != index) {
290 name_subst[index] = L':';
291 index = name_subst.find(L';', index + 1);
292 }
293
294 index = spec_subst.find(L';');
295 while (PathString::npos != index) {
296 spec_subst[index] = L':';
297 index = spec_subst.find(L';', index + 1);
298 }
299
300 return ::PathMatchSpec(name_subst.c_str(), spec_subst.c_str());
301 #else
302 return 0 == ComparePathNames(pathname, pathspec);
303 #endif
304 }
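
The ';' workaround above distills to a simple substitution pass. A
standalone version of just that step, assuming wide strings as on Windows:

#include <string>

// Replace every ';' with ':' (a character that cannot appear in a
// Windows file name) so that PathMatchSpec compares correctly.
std::wstring SubstituteSemicolons(std::wstring s) {
  for (std::wstring::size_type i = s.find(L';');
       i != std::wstring::npos; i = s.find(L';', i + 1)) {
    s[i] = L':';
  }
  return s;
}
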
305
306 DirOpenResult Directory::Open(const PathString& file_path,
307 const PathString& name) {
308 const DirOpenResult result = OpenImpl(file_path, name);
309 if (OPENED != result)
310 Close();
311 return result;
312 }
313
314 void Directory::InitializeIndices() {
315 MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
316 for (; it != kernel_->metahandles_index->end(); ++it) {
317 EntryKernel* entry = *it;
318 if (!entry->ref(IS_DEL))
319 kernel_->parent_id_and_names_index->insert(entry);
320 kernel_->ids_index->insert(entry);
321 if (entry->ref(IS_UNSYNCED))
322 kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE));
323 if (entry->ref(IS_UNAPPLIED_UPDATE))
324 kernel_->unapplied_update_metahandles->insert(entry->ref(META_HANDLE));
325 }
326 }
327
328 DirectoryBackingStore* Directory::CreateBackingStore(
329 const PathString& dir_name, const PathString& backing_filepath) {
330 return new DirectoryBackingStore(dir_name, backing_filepath);
331 }
332
333 DirOpenResult Directory::OpenImpl(const PathString& file_path,
334 const PathString& name) {
335 DCHECK_EQ(static_cast<DirectoryBackingStore*>(NULL), store_);
336 const PathString db_path = ::GetFullPath(file_path);
337 store_ = CreateBackingStore(name, db_path);
338
339 KernelLoadInfo info;
340   // Temporary indices, used before kernel_ is initialized in case Load fails.
341   // We swap these into the kernel in O(1) later.
342 MetahandlesIndex metas_bucket;
343 ExtendedAttributes xattrs_bucket;
344 DirOpenResult result = store_->Load(&metas_bucket, &xattrs_bucket, &info);
345 if (OPENED != result)
346 return result;
347
348 kernel_ = new Kernel(db_path, name, info);
349 kernel_->metahandles_index->swap(metas_bucket);
350 kernel_->extended_attributes->swap(xattrs_bucket);
351 InitializeIndices();
352 return OPENED;
353 }
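
The load-into-temporaries-then-swap pattern above means kernel_ is never
allocated if Load fails, and the swap itself is cheap because container
swap only exchanges internal pointers. A minimal illustration of that cost
model:

#include <set>

void SwapIsConstantTime() {
  std::set<int> loaded;        // Filled during Load; abandoned on failure.
  loaded.insert(42);
  std::set<int> kernel_index;  // Stand-in for the kernel's real index.
  kernel_index.swap(loaded);   // O(1): swaps tree pointers, copies nothing.
}
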
354
355 void Directory::Close() {
356 if (store_)
357 delete store_;
358 store_ = NULL;
359 if (kernel_) {
360 bool del = !base::subtle::NoBarrier_AtomicIncrement(&kernel_->refcount, -1);
361 DCHECK(del) << "Kernel should only have a single ref";
362 if (del)
363 delete kernel_;
364 kernel_ = NULL;
365 }
366 }
367
368 EntryKernel* Directory::GetEntryById(const Id& id) {
369 ScopedKernelLock lock(this);
370 return GetEntryById(id, &lock);
371 }
372
373 EntryKernel* Directory::GetEntryById(const Id& id,
374 ScopedKernelLock* const lock) {
375 DCHECK(kernel_);
376 // First look up in memory
377 kernel_->needle.ref(ID) = id;
378 IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle);
379 if (id_found != kernel_->ids_index->end()) {
380 // Found it in memory. Easy.
381 return *id_found;
382 }
383 return NULL;
384 }
385
386 EntryKernel* Directory::GetEntryByTag(const PathString& tag) {
387 ScopedKernelLock lock(this);
388 DCHECK(kernel_);
389 // We don't currently keep a separate index for the tags. Since tags
390 // only exist for server created items that are the first items
391 // to be created in a store, they should have small metahandles.
392 // So, we just iterate over the items in sorted metahandle order,
393 // looking for a match.
394 MetahandlesIndex& set = *kernel_->metahandles_index;
395 for (MetahandlesIndex::iterator i = set.begin(); i != set.end(); ++i) {
396 if ((*i)->ref(SINGLETON_TAG) == tag) {
397 return *i;
398 }
399 }
400 return NULL;
401 }
402
403 EntryKernel* Directory::GetEntryByHandle(const int64 metahandle) {
404 ScopedKernelLock lock(this);
405 return GetEntryByHandle(metahandle, &lock);
406 }
407
408 EntryKernel* Directory::GetEntryByHandle(const int64 metahandle,
409 ScopedKernelLock* lock) {
410 // Look up in memory
411 kernel_->needle.ref(META_HANDLE) = metahandle;
412 MetahandlesIndex::iterator found =
413 kernel_->metahandles_index->find(&kernel_->needle);
414 if (found != kernel_->metahandles_index->end()) {
415 // Found it in memory. Easy.
416 return *found;
417 }
418 return NULL;
419 }
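
Lookups here reuse a single scratch "needle" entry owned by the kernel: the
key field is written into it and its address is passed to find(), so no
temporary EntryKernel is allocated per query. The idiom in isolation, with
illustrative stand-in types:

#include <cstddef>
#include <set>

struct Record { long handle; };
struct LessByHandle {
  bool operator()(const Record* a, const Record* b) const {
    return a->handle < b->handle;
  }
};
typedef std::set<Record*, LessByHandle> HandleIndex;

Record* FindByHandle(HandleIndex* index, Record* needle, long handle) {
  needle->handle = handle;  // Reuse one scratch record as the search key.
  HandleIndex::iterator it = index->find(needle);
  return it == index->end() ? NULL : *it;
}
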
420
421 EntryKernel* Directory::GetChildWithName(const Id& parent_id,
422 const PathString& name) {
423 ScopedKernelLock lock(this);
424 return GetChildWithName(parent_id, name, &lock);
425 }
426
427 // Returns the child entry if the folder is opened;
428 // otherwise returns NULL.
429 EntryKernel* Directory::GetChildWithName(const Id& parent_id,
430 const PathString& name,
431 ScopedKernelLock* const lock) {
432 PathString dbname = name;
433 EntryKernel* parent = GetEntryById(parent_id, lock);
434 if (parent == NULL)
435 return NULL;
436 return GetChildWithNameImpl(parent_id, dbname, lock);
437 }
438
439 // Returns the child entry even when the folder is not opened. The syncer
440 // uses this to apply updates while the folder is closed.
441 EntryKernel* Directory::GetChildWithDBName(const Id& parent_id,
442 const PathString& name) {
443 ScopedKernelLock lock(this);
444 return GetChildWithNameImpl(parent_id, name, &lock);
445 }
446
447 EntryKernel* Directory::GetChildWithNameImpl(const Id& parent_id,
448 const PathString& name,
449 ScopedKernelLock* const lock) {
450 // First look up in memory:
451 kernel_->needle.ref(NAME) = name;
452 kernel_->needle.ref(PARENT_ID) = parent_id;
453 ParentIdAndNamesIndex::iterator found =
454 kernel_->parent_id_and_names_index->find(&kernel_->needle);
455 if (found != kernel_->parent_id_and_names_index->end()) {
456 // Found it in memory. Easy.
457 return *found;
458 }
459 return NULL;
460 }
461
462 // An interface to specify the details of which children
463 // GetChildHandles() is looking for.
464 struct PathMatcher {
465 explicit PathMatcher(const Id& parent_id) : parent_id_(parent_id) { }
466 virtual ~PathMatcher() { }
467 enum MatchType {
468 NO_MATCH,
469 MATCH,
470 // Means we found the only entry we're looking for in
471 // memory so we don't need to check the DB.
472 EXACT_MATCH
473 };
474 virtual MatchType PathMatches(const PathString& path) = 0;
475 typedef Directory::ParentIdAndNamesIndex Index;
476 virtual Index::iterator lower_bound(Index* index) = 0;
477 virtual Index::iterator upper_bound(Index* index) = 0;
478 const Id parent_id_;
479 EntryKernel needle_;
480 };
481
482 // Matches all children.
483 struct AllPathsMatcher : public PathMatcher {
484 explicit AllPathsMatcher(const Id& parent_id) : PathMatcher(parent_id) {
485 }
486 virtual MatchType PathMatches(const PathString& path) {
487 return MATCH;
488 }
489 virtual Index::iterator lower_bound(Index* index) {
490 needle_.ref(PARENT_ID) = parent_id_;
491 needle_.ref(NAME).clear();
492 return index->lower_bound(&needle_);
493 }
494
495 virtual Index::iterator upper_bound(Index* index) {
496 needle_.ref(PARENT_ID) = parent_id_;
497 needle_.ref(NAME).clear();
498 Index::iterator i = index->upper_bound(&needle_),
499 end = index->end();
500 while (i != end && (*i)->ref(PARENT_ID) == parent_id_)
501 ++i;
502 return i;
503 }
504 };
505
506 // Matches an exact filename only; no wildcards.
507 struct ExactPathMatcher : public PathMatcher {
508 ExactPathMatcher(const PathString& pathspec, const Id& parent_id)
509 : PathMatcher(parent_id), pathspec_(pathspec) {
510 }
511 virtual MatchType PathMatches(const PathString& path) {
512 return 0 == ComparePathNames(path, pathspec_) ? EXACT_MATCH : NO_MATCH;
513 }
514 virtual Index::iterator lower_bound(Index* index) {
515 needle_.ref(PARENT_ID) = parent_id_;
516 needle_.ref(NAME) = pathspec_;
517 return index->lower_bound(&needle_);
518 }
519 virtual Index::iterator upper_bound(Index* index) {
520 needle_.ref(PARENT_ID) = parent_id_;
521 needle_.ref(NAME) = pathspec_;
522 return index->upper_bound(&needle_);
523 }
524 const PathString pathspec_;
525 };
526
527 // Matches a pathspec with wildcards.
528 struct PartialPathMatcher : public PathMatcher {
529 PartialPathMatcher(const PathString& pathspec,
530 PathString::size_type wildcard, const Id& parent_id)
531 : PathMatcher(parent_id), pathspec_(pathspec) {
532 if (0 == wildcard)
533 return;
534 lesser_.assign(pathspec_.data(), wildcard);
535 greater_.assign(pathspec_.data(), wildcard);
536     // Increment the last character of greater_ so we can use it as an
537     // exclusive upper bound in a less-than comparison.
538 PathString::size_type i = greater_.size() - 1;
539 do {
540 if (greater_[i] == std::numeric_limits<PathString::value_type>::max()) {
541 greater_.resize(i); // Try the preceding character.
542 if (0 == i--)
543 break;
544 } else {
545 greater_[i] += 1;
546 }
547 // Yes, there are cases where incrementing a character
548 // actually decreases its position in the sort. Example: 9 -> :
549 } while (ComparePathNames(lesser_, greater_) >= 0);
550 }
551
552 virtual MatchType PathMatches(const PathString& path) {
553 return PathNameMatch(path, pathspec_) ? MATCH : NO_MATCH;
554 }
555
556 virtual Index::iterator lower_bound(Index* index) {
557 needle_.ref(PARENT_ID) = parent_id_;
558 needle_.ref(NAME) = lesser_;
559 return index->lower_bound(&needle_);
560 }
561 virtual Index::iterator upper_bound(Index* index) {
562 if (greater_.empty()) {
563 needle_.ref(PARENT_ID) = parent_id_;
564 needle_.ref(NAME).clear();
565 Index::iterator i = index->upper_bound(&needle_),
566 end = index->end();
567 while (i != end && (*i)->ref(PARENT_ID) == parent_id_)
568 ++i;
569 return i;
570 } else {
571 needle_.ref(PARENT_ID) = parent_id_;
572 needle_.ref(NAME) = greater_;
573 return index->lower_bound(&needle_);
574 }
575 }
576
577 const PathString pathspec_;
578 PathString lesser_;
579 PathString greater_;
580 };
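
The constructor above derives a half-open key range [lesser_, greater_)
covering every name that begins with the prefix before the wildcard, by
incrementing the prefix's last character and backing up whenever that
character is already maximal. The same trick over plain byte strings, as a
simplified sketch:

#include <string>

// Returns the smallest string strictly greater than every string that
// starts with |prefix|, or "" when no bound exists (all-0xFF prefix),
// in which case the caller must scan to the end of the index.
std::string PrefixUpperBound(std::string prefix) {
  for (std::string::size_type i = prefix.size(); i-- > 0; ) {
    if (static_cast<unsigned char>(prefix[i]) != 0xFF) {
      // E.g. "ab" -> "ac". Note '9' + 1 == ':' can reorder under
      // case-folding collations, hence the loop in the code above.
      prefix[i] = static_cast<char>(static_cast<unsigned char>(prefix[i]) + 1);
      prefix.resize(i + 1);
      return prefix;
    }
    // This byte is maximal; drop it and try the preceding one.
  }
  return std::string();
}
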
581
582
583 void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id,
584 Directory::ChildHandles* result) {
585 AllPathsMatcher matcher(parent_id);
586 return GetChildHandlesImpl(trans, parent_id, &matcher, result);
587 }
588
589 void Directory::GetChildHandlesImpl(BaseTransaction* trans, const Id& parent_id,
590 PathMatcher* matcher,
591 Directory::ChildHandles* result) {
592 CHECK(this == trans->directory());
593 result->clear();
594 {
595 ScopedKernelLock lock(this);
596 ParentIdAndNamesIndex* const index =
597 kernel_->parent_id_and_names_index;
598 typedef ParentIdAndNamesIndex::iterator iterator;
599 for (iterator i = matcher->lower_bound(index),
600 end = matcher->upper_bound(index); i != end; ++i) {
601       // The root's parent_id is NULL in the db but 0 in memory, so we
602       // have to avoid listing the root as its own child.
603 if ((*i)->ref(ID) == (*i)->ref(PARENT_ID))
604 continue;
605 PathMatcher::MatchType match = matcher->PathMatches((*i)->ref(NAME));
606 if (PathMatcher::NO_MATCH == match)
607 continue;
608 result->push_back((*i)->ref(META_HANDLE));
609 if (PathMatcher::EXACT_MATCH == match)
610 return;
611 }
612 }
613 }
614
615 EntryKernel* Directory::GetRootEntry() {
616 return GetEntryById(Id());
617 }
618
619 EntryKernel* Directory::GetEntryByPath(const PathString& path) {
620 CHECK(kernel_);
621 EntryKernel* result = GetRootEntry();
622 CHECK(result) << "There should always be a root node.";
623 for (PathSegmentIterator<PathString> i(path), end;
624 i != end && NULL != result; ++i) {
625 result = GetChildWithName(result->ref(ID), *i);
626 }
627 return result;
628 }
629
630 void ZeroFields(EntryKernel* entry, int first_field) {
631 int i = first_field;
632 // Note that bitset<> constructor sets all bits to zero, and strings
633 // initialize to empty.
634 for ( ; i < INT64_FIELDS_END; ++i)
635 entry->ref(static_cast<Int64Field>(i)) = 0;
636 for ( ; i < ID_FIELDS_END; ++i)
637 entry->ref(static_cast<IdField>(i)).Clear();
638 for ( ; i < BIT_FIELDS_END; ++i)
639 entry->ref(static_cast<BitField>(i)) = false;
640 if (i < BLOB_FIELDS_END)
641 i = BLOB_FIELDS_END;
642 }
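
ZeroFields depends on the field enums being laid out as contiguous ranges
whose *_END markers double as the start of the next range, so one running
index can sweep every field type in turn. A toy version of that layout
convention (names are invented for illustration):

#include <cassert>

enum {
  BEGIN_FAKE_FIELDS = 0,
  FAKE_INT64_1 = BEGIN_FAKE_FIELDS, FAKE_INT64_2,
  FAKE_INT64_END,
  FAKE_BIT_1 = FAKE_INT64_END, FAKE_BIT_2,
  FAKE_BIT_END
};

void SweepAllFields() {
  int i = BEGIN_FAKE_FIELDS;
  for (; i < FAKE_INT64_END; ++i) { /* zero an int64 field */ }
  for (; i < FAKE_BIT_END; ++i) { /* clear a bit field */ }
  assert(i == FAKE_BIT_END);  // Every field visited exactly once.
}
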
643
644 void Directory::InsertEntry(EntryKernel* entry) {
645 ScopedKernelLock lock(this);
646 InsertEntry(entry, &lock);
647 }
648
649 void Directory::InsertEntry(EntryKernel* entry, ScopedKernelLock* lock) {
650 DCHECK(NULL != lock);
651 CHECK(NULL != entry);
652 static const char error[] = "Entry already in memory index.";
653 CHECK(kernel_->metahandles_index->insert(entry).second) << error;
654 if (!entry->ref(IS_DEL))
655 CHECK(kernel_->parent_id_and_names_index->insert(entry).second) << error;
656 CHECK(kernel_->ids_index->insert(entry).second) << error;
657 }
658
659 bool Directory::Undelete(EntryKernel* const entry) {
660 DCHECK(entry->ref(IS_DEL));
661 ScopedKernelLock lock(this);
662 if (NULL != GetChildWithName(entry->ref(PARENT_ID), entry->ref(NAME), &lock))
663 return false; // Would have duplicated existing entry.
664 entry->ref(IS_DEL) = false;
665 entry->dirty[IS_DEL] = true;
666 CHECK(kernel_->parent_id_and_names_index->insert(entry).second);
667 return true;
668 }
669
670 bool Directory::Delete(EntryKernel* const entry) {
671 DCHECK(!entry->ref(IS_DEL));
672 entry->ref(IS_DEL) = true;
673 entry->dirty[IS_DEL] = true;
674 ScopedKernelLock lock(this);
675 CHECK(1 == kernel_->parent_id_and_names_index->erase(entry));
676 return true;
677 }
678
679 bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) {
680 ScopedKernelLock lock(this);
681 if (NULL != GetEntryById(new_id, &lock))
682 return false;
683 CHECK(1 == kernel_->ids_index->erase(entry));
684 entry->ref(ID) = new_id;
685 CHECK(kernel_->ids_index->insert(entry).second);
686 return true;
687 }
688
689 bool Directory::ReindexParentIdAndName(EntryKernel* const entry,
690 const Id& new_parent_id,
691 const PathString& new_name) {
692 ScopedKernelLock lock(this);
693 PathString new_indexed_name = new_name;
694 if (entry->ref(IS_DEL)) {
695 entry->ref(PARENT_ID) = new_parent_id;
696 entry->ref(NAME) = new_indexed_name;
697 return true;
698 }
699
700 // check for a case changing rename
701 if (entry->ref(PARENT_ID) == new_parent_id &&
702 0 == ComparePathNames(entry->ref(NAME), new_indexed_name)) {
703 entry->ref(NAME) = new_indexed_name;
704 } else {
705 if (NULL != GetChildWithName(new_parent_id, new_indexed_name, &lock))
706 return false;
707 CHECK(1 == kernel_->parent_id_and_names_index->erase(entry));
708 entry->ref(PARENT_ID) = new_parent_id;
709 entry->ref(NAME) = new_indexed_name;
710 CHECK(kernel_->parent_id_and_names_index->insert(entry).second);
711 }
712 return true;
713 }
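
Reindexing follows the mandatory erase-mutate-reinsert dance: an element's
key fields must never change while it sits inside an ordered container, or
the container's ordering invariant silently breaks. The pattern distilled,
with stand-in types:

#include <set>
#include <string>

struct Node { std::string name; };
struct LessByName {
  bool operator()(const Node* a, const Node* b) const {
    return a->name < b->name;
  }
};

void Rename(std::set<Node*, LessByName>* index, Node* node,
            const std::string& new_name) {
  index->erase(node);     // Remove while the old key is still intact.
  node->name = new_name;  // Only now is it safe to mutate the key.
  index->insert(node);    // Reinsert under the new key.
}
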
714
715 // static
716 bool Directory::SafeToPurgeFromMemory(const EntryKernel* const entry) {
717 return entry->ref(IS_DEL) && !entry->dirty.any() && !entry->ref(SYNCING) &&
718 !entry->ref(IS_UNAPPLIED_UPDATE) && !entry->ref(IS_UNSYNCED);
719 }
720
721 void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
722 ReadTransaction trans(this, __FILE__, __LINE__);
723 ScopedKernelLock lock(this);
724 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
725 // clear dirty flags.
726 for (MetahandlesIndex::iterator i = kernel_->metahandles_index->begin();
727 i != kernel_->metahandles_index->end(); ++i) {
728 EntryKernel* entry = *i;
729 if (!entry->dirty.any())
730 continue;
731 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry);
732 entry->dirty.reset();
733 // TODO(timsteele): The previous *windows only* SaveChanges code path seems
734 // to have a bug in that the IS_NEW bit is not rolled back if the entire DB
735 // transaction is rolled back, due to the "recent" windows optimization of
736 // using a ReadTransaction rather than a WriteTransaction in SaveChanges.
737 // This bit is only used to decide whether we should sqlite INSERT or
738 // UPDATE, and if we are INSERTing we make sure to dirty all the fields so
739 // as to overwrite the database default values. For now, this is rectified
740 // by flipping the bit to false here (note that the snapshot will contain
741 // the "original" value), and then resetting it on failure in
742     // HandleSaveChangesFailure, where "failure" is defined as "the DB
743     // transaction was rolled back". This is safe because the only user of this
744 // bit is in fact SaveChanges, which enforces mutually exclusive access by
745 // way of save_changes_mutex_. The TODO is to consider abolishing this bit
746 // in favor of using a sqlite INSERT OR REPLACE, which could(would?) imply
747 // that all bits need to be written rather than just the dirty ones in
748 // the BindArg helper function.
749 entry->ref(IS_NEW) = false;
750 }
751
752 // Do the same for extended attributes.
753 for (ExtendedAttributes::iterator i = kernel_->extended_attributes->begin();
754 i != kernel_->extended_attributes->end(); ++i) {
755 if (!i->second.dirty)
756 continue;
757 snapshot->dirty_xattrs[i->first] = i->second;
758 i->second.dirty = false;
759 }
760
761 // Fill kernel_info_status and kernel_info.
762 PersistedKernelInfo& info = snapshot->kernel_info;
763 info.initial_sync_ended = kernel_->initial_sync_ended_;
764 info.last_sync_timestamp = kernel_->last_sync_timestamp_;
765   // To avoid duplicates when the process crashes, we record a next_id of
766   // greater magnitude than could possibly be reached before the next
767   // SaveChanges. In other words, it's effectively impossible for the user to
768   // generate 65536 new bookmarks in 3 seconds.
769 info.next_id = kernel_->next_id - 65536;
770 info.store_birthday = kernel_->store_birthday_;
771 snapshot->kernel_info_status = kernel_->info_status_;
772 // This one we reset on failure.
773 kernel_->info_status_ = KERNEL_SHARE_INFO_VALID;
774 }
775
776 bool Directory::SaveChanges() {
777 bool success = false;
778 DCHECK(store_);
779 PThreadScopedLock<PThreadMutex> lock(&kernel_->save_changes_mutex);
780 // Snapshot and save.
781 SaveChangesSnapshot snapshot;
782 TakeSnapshotForSaveChanges(&snapshot);
783 success = store_->SaveChanges(snapshot);
784
785 // Handle success or failure.
786 if (success)
787 VacuumAfterSaveChanges(snapshot);
788 else
789 HandleSaveChangesFailure(snapshot);
790 return success;
791 }
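
SaveChanges is a three-phase optimistic protocol: snapshot the dirty state
under the lock while clearing dirty bits, persist the snapshot without
blocking writers, then either vacuum on success or restore the dirty bits
on failure so a later save retries. The control flow in miniature, with
stand-in types:

#include <vector>

struct Item { int value; bool dirty; };

// Stand-in for store_->SaveChanges; flip the return value to exercise
// the failure path.
static bool WriteToDisk(const std::vector<Item>& snapshot) { return true; }

bool OptimisticSave(std::vector<Item>* items) {
  std::vector<Item> snapshot;
  std::vector<size_t> copied;  // Which slots were snapshotted.
  for (size_t i = 0; i < items->size(); ++i) {
    if (!(*items)[i].dirty)
      continue;
    snapshot.push_back((*items)[i]);  // Deep-copy the dirty entries and
    (*items)[i].dirty = false;        // optimistically clear their bits.
    copied.push_back(i);
  }
  if (WriteToDisk(snapshot))
    return true;  // Success: safe to purge dead entries afterwards.
  for (size_t i = 0; i < copied.size(); ++i)
    (*items)[copied[i]].dirty = true;  // Failure: mark dirty again.
  return false;
}
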
792
793 void Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
794 // Need a write transaction as we are about to permanently purge entries.
795 WriteTransaction trans(this, VACUUM_AFTER_SAVE, __FILE__, __LINE__);
796 ScopedKernelLock lock(this);
797 kernel_->flushed_metahandles_.Push(0); // Begin flush marker
798 // Now drop everything we can out of memory.
799 for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
800 i != snapshot.dirty_metas.end(); ++i) {
801 kernel_->needle.ref(META_HANDLE) = i->ref(META_HANDLE);
802 MetahandlesIndex::iterator found =
803 kernel_->metahandles_index->find(&kernel_->needle);
804 EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
805 NULL : *found);
806 if (entry && SafeToPurgeFromMemory(entry)) {
807 // We now drop deleted metahandles that are up to date on both the client
808 // and the server.
809 size_t num_erased = 0;
810 kernel_->flushed_metahandles_.Push(entry->ref(META_HANDLE));
811 num_erased = kernel_->ids_index->erase(entry);
812 DCHECK_EQ(1, num_erased);
813 num_erased = kernel_->metahandles_index->erase(entry);
814 DCHECK_EQ(1, num_erased);
815 delete entry;
816 }
817 }
818
819 ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
820 while (i != snapshot.dirty_xattrs.end()) {
821 ExtendedAttributeKey key(i->first.metahandle, i->first.key);
822 ExtendedAttributes::iterator found =
823 kernel_->extended_attributes->find(key);
824 if (found == kernel_->extended_attributes->end() ||
825 found->second.dirty || !i->second.is_deleted) {
826 ++i;
827 } else {
828 kernel_->extended_attributes->erase(found);
829 }
830 }
831 }
832
833 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
834 ScopedKernelLock lock(this);
835 kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
836
837 // Because we cleared dirty bits on the real entries when taking the snapshot,
838 // we should make sure the fact that the snapshot was not persisted gets
839 // reflected in the entries. Not doing this would mean if no other changes
840 // occur to the same fields of the entries in dirty_metas some changes could
841 // end up being lost, if they also failed to be committed to the server.
842 // Setting the bits ensures that SaveChanges will at least try again later.
843 for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
844 i != snapshot.dirty_metas.end(); ++i) {
845 kernel_->needle.ref(META_HANDLE) = i->ref(META_HANDLE);
846 MetahandlesIndex::iterator found =
847 kernel_->metahandles_index->find(&kernel_->needle);
848 if (found != kernel_->metahandles_index->end()) {
849 (*found)->dirty |= i->dirty;
850 (*found)->ref(IS_NEW) = i->ref(IS_NEW);
851 }
852 }
853
854 for (ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
855 i != snapshot.dirty_xattrs.end(); ++i) {
856 ExtendedAttributeKey key(i->first.metahandle, i->first.key);
857 ExtendedAttributes::iterator found =
858 kernel_->extended_attributes->find(key);
859 if (found != kernel_->extended_attributes->end())
860 found->second.dirty = true;
861 }
862 }
863
864 int64 Directory::last_sync_timestamp() const {
865 ScopedKernelLock lock(this);
866 return kernel_->last_sync_timestamp_;
867 }
868
869 void Directory::set_last_sync_timestamp(int64 timestamp) {
870 ScopedKernelLock lock(this);
871 if (kernel_->last_sync_timestamp_ == timestamp)
872 return;
873 kernel_->last_sync_timestamp_ = timestamp;
874 kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
875 }
876
877 bool Directory::initial_sync_ended() const {
878 ScopedKernelLock lock(this);
879 return kernel_->initial_sync_ended_;
880 }
881
882 void Directory::set_initial_sync_ended(bool x) {
883 ScopedKernelLock lock(this);
884 if (kernel_->initial_sync_ended_ == x)
885 return;
886 kernel_->initial_sync_ended_ = x;
887 kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
888 }
889
890 string Directory::store_birthday() const {
891 ScopedKernelLock lock(this);
892 return kernel_->store_birthday_;
893 }
894
895 void Directory::set_store_birthday(string store_birthday) {
896 ScopedKernelLock lock(this);
897 if (kernel_->store_birthday_ == store_birthday)
898 return;
899 kernel_->store_birthday_ = store_birthday;
900 kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
901 }
902
903 string Directory::cache_guid() const {
904 // No need to lock since nothing ever writes to it after load.
905 return kernel_->cache_guid_;
906 }
907
908 void Directory::GetAllMetaHandles(BaseTransaction* trans,
909 MetahandleSet* result) {
910 result->clear();
911 ScopedKernelLock lock(this);
912 MetahandlesIndex::iterator i;
913 for (i = kernel_->metahandles_index->begin();
914 i != kernel_->metahandles_index->end();
915 ++i) {
916 result->insert((*i)->ref(META_HANDLE));
917 }
918 }
919
920 void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
921 UnsyncedMetaHandles* result) {
922 result->clear();
923 ScopedKernelLock lock(this);
924 copy(kernel_->unsynced_metahandles->begin(),
925 kernel_->unsynced_metahandles->end(), back_inserter(*result));
926 }
927
928 void Directory::GetAllExtendedAttributes(BaseTransaction* trans,
929 int64 metahandle,
930 std::set<ExtendedAttribute>* result) {
931 AttributeKeySet keys;
932 GetExtendedAttributesList(trans, metahandle, &keys);
933 AttributeKeySet::iterator iter;
934 for (iter = keys.begin(); iter != keys.end(); ++iter) {
935 ExtendedAttributeKey key(metahandle, *iter);
936 ExtendedAttribute extended_attribute(trans, GET_BY_HANDLE, key);
937 CHECK(extended_attribute.good());
938 result->insert(extended_attribute);
939 }
940 }
941
942 void Directory::GetExtendedAttributesList(BaseTransaction* trans,
943 int64 metahandle, AttributeKeySet* result) {
944 ExtendedAttributes::iterator iter;
945 for (iter = kernel_->extended_attributes->begin();
946 iter != kernel_->extended_attributes->end(); ++iter) {
947 if (iter->first.metahandle == metahandle) {
948 if (!iter->second.is_deleted)
949 result->insert(iter->first.key);
950 }
951 }
952 }
953
954 void Directory::DeleteAllExtendedAttributes(WriteTransaction* trans,
955 int64 metahandle) {
956 AttributeKeySet keys;
957 GetExtendedAttributesList(trans, metahandle, &keys);
958 AttributeKeySet::iterator iter;
959 for (iter = keys.begin(); iter != keys.end(); ++iter) {
960 ExtendedAttributeKey key(metahandle, *iter);
961 MutableExtendedAttribute attribute(trans, GET_BY_HANDLE, key);
962 // This flags the attribute for deletion during SaveChanges. At that time
963 // any deleted attributes are purged from disk and memory.
964 attribute.delete_attribute();
965 }
966 }
967
968 int64 Directory::unsynced_entity_count() const {
969 ScopedKernelLock lock(this);
970 return kernel_->unsynced_metahandles->size();
971 }
972
973 void Directory::GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
974 UnappliedUpdateMetaHandles* result) {
975 result->clear();
976 ScopedKernelLock lock(this);
977 copy(kernel_->unapplied_update_metahandles->begin(),
978 kernel_->unapplied_update_metahandles->end(),
979 back_inserter(*result));
980 }
981
982
983 class IdFilter {
984 public:
985 virtual ~IdFilter() { }
986 virtual bool ShouldConsider(const Id& id) const = 0;
987 };
988
989
990 class FullScanFilter : public IdFilter {
991 public:
992 virtual bool ShouldConsider(const Id& id) const {
993 return true;
994 }
995 };
996
997 class SomeIdsFilter : public IdFilter {
998 public:
999 virtual bool ShouldConsider(const Id& id) const {
1000 return binary_search(ids_.begin(), ids_.end(), id);
1001 }
1002 std::vector<Id> ids_;
1003 };
1004
1005 void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
1006 const OriginalEntries* originals) {
1007 MetahandleSet handles;
1008 SomeIdsFilter filter;
1009 filter.ids_.reserve(originals->size());
1010 for (OriginalEntries::const_iterator i = originals->begin(),
1011 end = originals->end(); i != end; ++i) {
1012 Entry e(trans, GET_BY_HANDLE, i->ref(META_HANDLE));
1013 CHECK(e.good());
1014 filter.ids_.push_back(e.Get(ID));
1015 handles.insert(i->ref(META_HANDLE));
1016 }
1017 std::sort(filter.ids_.begin(), filter.ids_.end());
1018 CheckTreeInvariants(trans, handles, filter);
1019 }
1020
1021 void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
1022 bool full_scan) {
1023 // TODO(timsteele): This is called every time a WriteTransaction finishes.
1024 // The performance hit is substantial given that we now examine every single
1025 // syncable entry. Need to redesign this.
1026 MetahandleSet handles;
1027 GetAllMetaHandles(trans, &handles);
1028 if (full_scan) {
1029 FullScanFilter fullfilter;
1030 CheckTreeInvariants(trans, handles, fullfilter);
1031 } else {
1032 SomeIdsFilter filter;
1033 MetahandleSet::iterator i;
1034 for (i = handles.begin() ; i != handles.end() ; ++i) {
1035 Entry e(trans, GET_BY_HANDLE, *i);
1036 CHECK(e.good());
1037 filter.ids_.push_back(e.Get(ID));
1038 }
1039 sort(filter.ids_.begin(), filter.ids_.end());
1040 CheckTreeInvariants(trans, handles, filter);
1041 }
1042 }
1043
1044 void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
1045 const MetahandleSet& handles,
1046 const IdFilter& idfilter) {
1047 int64 max_ms = kInvariantCheckMaxMs;
1048 if (max_ms < 0)
1049 max_ms = std::numeric_limits<int64>::max();
1050 PerfTimer check_timer;
1051 MetahandleSet::const_iterator i;
1052 int entries_done = 0;
1053 for (i = handles.begin() ; i != handles.end() ; ++i) {
1054 int64 metahandle = *i;
1055 Entry e(trans, GET_BY_HANDLE, metahandle);
1056 CHECK(e.good());
1057 syncable::Id id = e.Get(ID);
1058 syncable::Id parentid = e.Get(PARENT_ID);
1059
1060 if (id.IsRoot()) {
1061 CHECK(e.Get(IS_DIR)) << e;
1062 CHECK(parentid.IsRoot()) << e;
1063 CHECK(!e.Get(IS_UNSYNCED)) << e;
1064 ++entries_done;
1065 continue;
1066 }
1067 if (!e.Get(IS_DEL)) {
1068 CHECK(id != parentid) << e;
1069 CHECK(!e.Get(NAME).empty()) << e;
1070 int safety_count = handles.size() + 1;
1071 while (!parentid.IsRoot()) {
1072 if (!idfilter.ShouldConsider(parentid))
1073 break;
1074 Entry parent(trans, GET_BY_ID, parentid);
1075 CHECK(parent.good()) << e;
1076 CHECK(parent.Get(IS_DIR)) << parent << e;
1077 CHECK(!parent.Get(IS_DEL)) << parent << e;
1078 CHECK(handles.end() != handles.find(parent.Get(META_HANDLE)))
1079 << e << parent;
1080 parentid = parent.Get(PARENT_ID);
1081 CHECK(--safety_count >= 0) << e << parent;
1082 }
1083 }
1084 int64 base_version = e.Get(BASE_VERSION);
1085 int64 server_version = e.Get(SERVER_VERSION);
1086 if (CHANGES_VERSION == base_version || 0 == base_version) {
1087 if (e.Get(IS_UNAPPLIED_UPDATE)) {
1088 // Unapplied new item.
1089 CHECK(e.Get(IS_DEL)) << e;
1090 CHECK(id.ServerKnows()) << e;
1091 } else {
1092 // Uncommitted item.
1093 if (!e.Get(IS_DEL)) {
1094 CHECK(e.Get(IS_UNSYNCED)) << e;
1095 }
1096 CHECK(0 == server_version) << e;
1097 CHECK(!id.ServerKnows()) << e;
1098 }
1099 } else {
1100 CHECK(id.ServerKnows());
1101 }
1102 ++entries_done;
1103 int64 elapsed_ms = check_timer.Elapsed().InMilliseconds();
1104 if (elapsed_ms > max_ms) {
1105 LOG(INFO) << "Cutting Invariant check short after " << elapsed_ms << "ms."
1106 " Processed " << entries_done << "/" << handles.size() << " entries";
1107 return;
1108 }
1109 }
1110 // I did intend to add a check here to ensure no entries had been pulled into
1111 // memory by this function, but we can't guard against another ReadTransaction
1112 // pulling entries into RAM.
1113 }
1114
1115 ///////////////////////////////////////////////////////////////////////////////
1116 // ScopedKernelLocks
1117
1118 ScopedKernelLock::ScopedKernelLock(const Directory* dir)
1119 : dir_(const_cast<Directory*>(dir)) {
1120 // Swap out the dbhandle to enforce the "No IO while holding kernel
1121 // lock" rule.
1122 // HA!! Yeah right. What about your pre-cached queries :P
1123 pthread_mutex_lock(&dir->kernel_->mutex);
1124 }
1125 ScopedKernelLock::~ScopedKernelLock() {
1126 pthread_mutex_unlock(&dir_->kernel_->mutex);
1127 }
1128
1129 ScopedKernelUnlock::ScopedKernelUnlock(ScopedKernelLock* lock)
1130 : lock_(lock) {
1131 pthread_mutex_unlock(&lock->dir_->kernel_->mutex);
1132 }
1133 ScopedKernelUnlock::~ScopedKernelUnlock() {
1134 pthread_mutex_lock(&lock_->dir_->kernel_->mutex);
1135 }
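
ScopedKernelUnlock is the inverse RAII guard: it releases an already-held
lock for the duration of a scope and re-acquires it on exit. The same pair
expressed against std::mutex, as a modern stand-in for the pthread mutex
used here:

#include <mutex>

class ScopedLock {
 public:
  explicit ScopedLock(std::mutex* m) : m_(m) { m_->lock(); }
  ~ScopedLock() { m_->unlock(); }
  std::mutex* m_;  // Exposed so ScopedUnlock can reach it.
};

// Temporarily drops a lock held by an enclosing ScopedLock, e.g. to
// perform I/O without blocking other threads, then re-acquires it.
class ScopedUnlock {
 public:
  explicit ScopedUnlock(ScopedLock* lock) : lock_(lock) {
    lock_->m_->unlock();
  }
  ~ScopedUnlock() { lock_->m_->lock(); }
 private:
  ScopedLock* lock_;
};
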
1136
1137 ///////////////////////////////////////////////////////////////////////////
1138 // Transactions
1139 #if defined LOG_ALL || !defined NDEBUG
1140 static const bool kLoggingInfo = true;
1141 #else
1142 static const bool kLoggingInfo = false;
1143 #endif
1144
1145 ThreadNode* BaseTransaction::MakeThreadNode() {
1146 ThreadNode* node = reinterpret_cast<ThreadNode*>
1147 (pthread_getspecific(dirkernel_->thread_node_key));
1148 if (NULL == node) {
1149 node = new ThreadNode;
1150 node->id = GetCurrentThreadId();
1151 pthread_setspecific(dirkernel_->thread_node_key, node);
1152 } else if (node->in_list) {
1153 logging::LogMessage(source_file_, line_, logging::LOG_FATAL).stream()
1154 << " Recursive Lock attempt by thread id " << node->id << "." << std::endl
1155 << "Already entered transaction at " << node->file << ":" << node->line;
1156 }
1157 node->file = source_file_;
1158 node->line = line_;
1159 node->wait_started = base::TimeTicks::Now();
1160 return node;
1161 }
1162
1163 void BaseTransaction::Lock(ThreadCounts* const thread_counts,
1164 ThreadNode* node, TransactionClass tclass) {
1165 ScopedTransactionLock lock(&dirkernel_->transaction_mutex);
1166 // Increment the waiters count.
1167 node->tclass = tclass;
1168 thread_counts->waiting += 1;
1169 node->Insert(&thread_counts->waiting_headtail);
1170
1171 // Block until we can own the reader/writer lock
1172 bool ready = 1 == thread_counts->waiting;
1173 while (true) {
1174 if (ready) {
1175 if (0 == thread_counts->active) {
1176 // We can take the lock because there is no contention.
1177 break;
1178 } else if (READ == tclass
1179 && READ == thread_counts->active_headtail.next->tclass) {
1180 // We can take the lock because reads can run simultaneously.
1181 break;
1182 }
1183 }
1184 // Wait to be woken up and check again.
1185 node->wake_up = false;
1186 do {
1187 CHECK(0 == pthread_cond_wait(&node->condvar.condvar_,
1188 &dirkernel_->transaction_mutex.mutex_));
1189 } while (!node->wake_up);
1190 ready = true;
1191 }
1192
1193 // Move from the list of waiters to the list of active.
1194 thread_counts->waiting -= 1;
1195 thread_counts->active += 1;
1196 CHECK(WRITE != tclass || 1 == thread_counts->active);
1197 node->Remove();
1198 node->Insert(&thread_counts->active_headtail);
1199 if (WRITE == tclass)
1200 node->current_write_trans = static_cast<WriteTransaction*>(this);
1201 }
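
Lock implements a FIFO reader/writer lock by hand: waiters queue on an
intrusive list, readers may share the lock, writers get it exclusively, and
the releasing thread wakes the head of the queue plus any consecutive
readers. A much-reduced shared/exclusive lock showing the same
check-then-wait loop shape, without the FIFO fairness, written against the
modern standard library:

#include <condition_variable>
#include <mutex>

class SharedExclusiveLock {
 public:
  SharedExclusiveLock() : active_readers_(0), writer_active_(false) {}

  void LockShared() {
    std::unique_lock<std::mutex> l(mu_);
    // Re-check the predicate after every wakeup, as the loop above does.
    cv_.wait(l, [this] { return !writer_active_; });
    ++active_readers_;
  }
  void UnlockShared() {
    std::unique_lock<std::mutex> l(mu_);
    if (--active_readers_ == 0)
      cv_.notify_all();
  }
  void LockExclusive() {
    std::unique_lock<std::mutex> l(mu_);
    cv_.wait(l, [this] { return !writer_active_ && active_readers_ == 0; });
    writer_active_ = true;
  }
  void UnlockExclusive() {
    std::unique_lock<std::mutex> l(mu_);
    writer_active_ = false;
    cv_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int active_readers_;
  bool writer_active_;  // Note: readers can starve a waiting writer here.
};
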
1202
1203 void BaseTransaction::AfterLock(ThreadNode* node) {
1204 time_acquired_ = base::TimeTicks::Now();
1205
1206 const base::TimeDelta elapsed = time_acquired_ - node->wait_started;
1207 if (kLoggingInfo && elapsed.InMilliseconds() > 200) {
1208 logging::LogMessage(source_file_, line_, logging::LOG_INFO).stream()
1209 << name_ << " transaction waited "
1210 << elapsed.InSecondsF() << " seconds.";
1211 }
1212 }
1213
1214 void BaseTransaction::Init(ThreadCounts* const thread_counts,
1215 TransactionClass tclass) {
1216 ThreadNode* const node = MakeThreadNode();
1217 Lock(thread_counts, node, tclass);
1218 AfterLock(node);
1219 }
1220
1221 BaseTransaction::BaseTransaction(Directory* directory, const char* name,
1222 const char* source_file, int line)
1223 : directory_(directory), dirkernel_(directory->kernel_), name_(name),
1224 source_file_(source_file), line_(line) {
1225 }
1226
1227 void BaseTransaction::UnlockAndLog(ThreadCounts* const thread_counts,
1228 OriginalEntries* originals_arg) {
1229 scoped_ptr<OriginalEntries> originals(originals_arg);
1230 const base::TimeDelta elapsed = base::TimeTicks::Now() - time_acquired_;
1231 if (kLoggingInfo && elapsed.InMilliseconds() > 50) {
1232 logging::LogMessage(source_file_, line_, logging::LOG_INFO).stream()
1233 << name_ << " transaction completed in " << elapsed.InSecondsF()
1234 << " seconds.";
1235 }
1236
1237 {
1238 ScopedTransactionLock lock(&dirkernel_->transaction_mutex);
1239 // Let go of the reader/writer lock
1240 thread_counts->active -= 1;
1241 ThreadNode* const node = reinterpret_cast<ThreadNode*>
1242 (pthread_getspecific(dirkernel_->thread_node_key));
1243 CHECK(node != NULL);
1244 node->Remove();
1245 node->current_write_trans = NULL;
1246 if (0 == thread_counts->active) {
1247 // Wake up a waiting thread, FIFO
1248 if (dirkernel_->thread_counts.waiting > 0) {
1249 ThreadNode* const headtail =
1250 &dirkernel_->thread_counts.waiting_headtail;
1251 ThreadNode* node = headtail->next;
1252 node->wake_up = true;
1253 CHECK(0 == pthread_cond_signal(&node->condvar.condvar_));
1254 if (READ == node->tclass) do {
1255 // Wake up all consecutive readers.
1256 node = node->next;
1257 if (node == headtail)
1258 break;
1259 if (READ != node->tclass)
1260 break;
1261 node->wake_up = true;
1262 CHECK(0 == pthread_cond_signal(&node->condvar.condvar_));
1263 } while (true);
1264 }
1265 }
1266 if (NULL == originals.get() || originals->empty())
1267 return;
1268 dirkernel_->changes_channel_mutex.Lock();
1269 // Tell listeners to calculate changes while we still have the mutex.
1270 DirectoryChangeEvent event = { DirectoryChangeEvent::CALCULATE_CHANGES,
1271 originals.get(), this, writer_ };
1272 dirkernel_->changes_channel->NotifyListeners(event);
1273 }
1274 DirectoryChangeEvent event = { DirectoryChangeEvent::TRANSACTION_COMPLETE,
1275 NULL, NULL, INVALID };
1276 dirkernel_->changes_channel->NotifyListeners(event);
1277 dirkernel_->changes_channel_mutex.Unlock();
1278 }
1279
1280 ReadTransaction::ReadTransaction(Directory* directory, const char* file,
1281 int line)
1282 : BaseTransaction(directory, "Read", file, line) {
1283 Init(&dirkernel_->thread_counts, READ);
1284 writer_ = INVALID;
1285 }
1286
1287 ReadTransaction::ReadTransaction(const ScopedDirLookup& scoped_dir,
1288 const char* file, int line)
1289 : BaseTransaction(scoped_dir.operator -> (), "Read", file, line) {
1290 Init(&dirkernel_->thread_counts, READ);
1291 writer_ = INVALID;
1292 }
1293
1294 ReadTransaction::~ReadTransaction() {
1295 UnlockAndLog(&dirkernel_->thread_counts, NULL);
1296 }
1297
1298 WriteTransaction::WriteTransaction(Directory* directory, WriterTag writer,
1299 const char* file, int line)
1300 : BaseTransaction(directory, "Write", file, line), skip_destructor_(false),
1301 originals_(new OriginalEntries) {
1302 Init(&dirkernel_->thread_counts, WRITE);
1303 writer_ = writer;
1304 }
1305
1306 WriteTransaction::WriteTransaction(const ScopedDirLookup& scoped_dir,
1307 WriterTag writer, const char* file, int line)
1308 : BaseTransaction(scoped_dir.operator -> (), "Write", file, line),
1309 skip_destructor_(false), originals_(new OriginalEntries) {
1310 Init(&dirkernel_->thread_counts, WRITE);
1311 writer_ = writer;
1312 }
1313
1314 WriteTransaction::WriteTransaction(Directory* directory, const char* name,
1315 WriterTag writer,
1316 const char* file, int line,
1317 bool skip_destructor,
1318 OriginalEntries* originals)
1319 : BaseTransaction(directory, name, file, line),
1320 skip_destructor_(skip_destructor), originals_(originals) {
1321 writer_ = writer;
1322 }
1323
1324 void WriteTransaction::SaveOriginal(EntryKernel* entry) {
1325 if (NULL == entry)
1326 return;
1327 OriginalEntries::iterator i = originals_->lower_bound(*entry);
1328 if (i == originals_->end() ||
1329 i->ref(META_HANDLE) != entry->ref(META_HANDLE)) {
1330 originals_->insert(i, *entry);
1331 }
1332 }
1333
1334 WriteTransaction::~WriteTransaction() {
1335 if (skip_destructor_)
1336 return;
1337 if (OFF != kInvariantCheckLevel) {
1338 const bool full_scan = (FULL_DB_VERIFICATION == kInvariantCheckLevel);
1339 if (full_scan)
1340 directory()->CheckTreeInvariants(this, full_scan);
1341 else
1342 directory()->CheckTreeInvariants(this, originals_);
1343 }
1344 UnlockAndLog(&dirkernel_->thread_counts, originals_);
1345 }
1346
1347 ///////////////////////////////////////////////////////////////////////////
1348 // Entry
1349
1350 Entry::Entry(BaseTransaction* trans, GetById, const Id& id)
1351 : basetrans_(trans) {
1352 kernel_ = trans->directory()->GetEntryById(id);
1353 }
1354
1355 Entry::Entry(BaseTransaction* trans, GetByTag, const PathString& tag)
1356 : basetrans_(trans) {
1357 kernel_ = trans->directory()->GetEntryByTag(tag);
1358 }
1359
1360 Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
1361 : basetrans_(trans) {
1362 kernel_ = trans->directory()->GetEntryByHandle(metahandle);
1363 }
1364
1365 Entry::Entry(BaseTransaction* trans, GetByPath, const PathString& path)
1366 : basetrans_(trans) {
1367 kernel_ = trans->directory()->GetEntryByPath(path);
1368 }
1369
1370 Entry::Entry(BaseTransaction* trans, GetByParentIdAndName, const Id& parentid,
1371 const PathString& name)
1372 : basetrans_(trans) {
1373 kernel_ = trans->directory()->GetChildWithName(parentid, name);
1374 }
1375
1376 Entry::Entry(BaseTransaction* trans, GetByParentIdAndDBName, const Id& parentid,
1377 const PathString& name)
1378 : basetrans_(trans) {
1379 kernel_ = trans->directory()->GetChildWithDBName(parentid, name);
1380 }
1381
1382
1383 Directory* Entry::dir() const {
1384 return basetrans_->directory();
1385 }
1386
1387 PathString Entry::Get(StringField field) const {
1388 DCHECK(kernel_);
1389 return kernel_->ref(field);
1390 }
1391
1392 void Entry::GetAllExtendedAttributes(BaseTransaction* trans,
1393 std::set<ExtendedAttribute> *result) {
1394 dir()->GetAllExtendedAttributes(trans, kernel_->ref(META_HANDLE), result);
1395 }
1396
1397 void Entry::GetExtendedAttributesList(BaseTransaction* trans,
1398 AttributeKeySet* result) {
1399 dir()->GetExtendedAttributesList(trans, kernel_->ref(META_HANDLE), result);
1400 }
1401
1402 void Entry::DeleteAllExtendedAttributes(WriteTransaction *trans) {
1403 dir()->DeleteAllExtendedAttributes(trans, kernel_->ref(META_HANDLE));
1404 }
1405
1406 ///////////////////////////////////////////////////////////////////////////
1407 // MutableEntry
1408
1409 MutableEntry::MutableEntry(WriteTransaction* trans, Create,
1410 const Id& parent_id, const PathString& name)
1411 : Entry(trans) {
1412 if (NULL != trans->directory()->GetChildWithName(parent_id, name)) {
1413 kernel_ = NULL; // would have duplicated an existing entry.
1414 return;
1415 }
1416 Init(trans, parent_id, name);
1417 }
1418
1419
1420 void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
1421 const PathString& name) {
1422 kernel_ = new EntryKernel;
1423 ZeroFields(kernel_, BEGIN_FIELDS);
1424 kernel_->ref(ID) = trans->directory_->NextId();
1425 kernel_->dirty[ID] = true;
1426 kernel_->ref(META_HANDLE) = trans->directory_->NextMetahandle();
1427 kernel_->dirty[META_HANDLE] = true;
1428 kernel_->ref(PARENT_ID) = parent_id;
1429 kernel_->dirty[PARENT_ID] = true;
1430 kernel_->ref(NAME) = name;
1431 kernel_->dirty[NAME] = true;
1432 kernel_->ref(NON_UNIQUE_NAME) = name;
1433 kernel_->dirty[NON_UNIQUE_NAME] = true;
1434 kernel_->ref(IS_NEW) = true;
1435 const int64 now = Now();
1436 kernel_->ref(CTIME) = now;
1437 kernel_->dirty[CTIME] = true;
1438 kernel_->ref(MTIME) = now;
1439 kernel_->dirty[MTIME] = true;
1440 // We match the database defaults here
1441 kernel_->ref(BASE_VERSION) = CHANGES_VERSION;
1442 trans->directory()->InsertEntry(kernel_);
1443 // Because this entry is new, it was originally deleted.
1444 kernel_->ref(IS_DEL) = true;
1445 trans->SaveOriginal(kernel_);
1446 kernel_->ref(IS_DEL) = false;
1447 }
1448
1449 MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
1450 const Id& id)
1451 : Entry(trans) {
1452 Entry same_id(trans, GET_BY_ID, id);
1453 if (same_id.good()) {
1454 kernel_ = NULL; // already have an item with this ID.
1455 return;
1456 }
1457 kernel_ = new EntryKernel;
1458 ZeroFields(kernel_, BEGIN_FIELDS);
1459 kernel_->ref(ID) = id;
1460 kernel_->dirty[ID] = true;
1461 kernel_->ref(META_HANDLE) = trans->directory_->NextMetahandle();
1462 kernel_->dirty[META_HANDLE] = true;
1463 kernel_->ref(IS_DEL) = true;
1464 kernel_->dirty[IS_DEL] = true;
1465 kernel_->ref(IS_NEW) = true;
1466 // We match the database defaults here
1467 kernel_->ref(BASE_VERSION) = CHANGES_VERSION;
1468 trans->directory()->InsertEntry(kernel_);
1469 trans->SaveOriginal(kernel_);
1470 }
1471
1472 MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
1473 : Entry(trans, GET_BY_ID, id) {
1474 trans->SaveOriginal(kernel_);
1475 }
1476
1477 MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
1478 int64 metahandle)
1479 : Entry(trans, GET_BY_HANDLE, metahandle) {
1480 trans->SaveOriginal(kernel_);
1481 }
1482
1483 MutableEntry::MutableEntry(WriteTransaction* trans, GetByPath,
1484 const PathString& path)
1485 : Entry(trans, GET_BY_PATH, path) {
1486 trans->SaveOriginal(kernel_);
1487 }
1488
1489 MutableEntry::MutableEntry(WriteTransaction* trans, GetByParentIdAndName,
1490 const Id& parentid, const PathString& name)
1491 : Entry(trans, GET_BY_PARENTID_AND_NAME, parentid, name) {
1492 trans->SaveOriginal(kernel_);
1493 }
1494
1495 MutableEntry::MutableEntry(WriteTransaction* trans, GetByParentIdAndDBName,
1496 const Id& parentid, const PathString& name)
1497 : Entry(trans, GET_BY_PARENTID_AND_DBNAME, parentid, name) {
1498 trans->SaveOriginal(kernel_);
1499 }
1500
1501 bool MutableEntry::PutIsDel(bool is_del) {
1502 DCHECK(kernel_);
1503 if (is_del == kernel_->ref(IS_DEL))
1504 return true;
1505 if (is_del) {
1506 UnlinkFromOrder();
1507 if (!dir()->Delete(kernel_))
1508 return false;
1509 return true;
1510 } else {
1511 if (!dir()->Undelete(kernel_))
1512 return false;
1513 PutPredecessor(Id()); // Restores position to the 0th index.
1514 return true;
1515 }
1516 }
1517
1518 bool MutableEntry::Put(Int64Field field, const int64& value) {
1519 DCHECK(kernel_);
1520 if (kernel_->ref(field) != value) {
1521 kernel_->ref(field) = value;
1522 kernel_->dirty[static_cast<int>(field)] = true;
1523 }
1524 return true;
1525 }
1526
1527 bool MutableEntry::Put(IdField field, const Id& value) {
1528 DCHECK(kernel_);
1529 if (kernel_->ref(field) != value) {
1530 if (ID == field) {
1531 if (!dir()->ReindexId(kernel_, value))
1532 return false;
1533 } else if (PARENT_ID == field) {
1534 if (!dir()->ReindexParentIdAndName(kernel_, value, kernel_->ref(NAME)))
1535 return false;
1536 } else {
1537 kernel_->ref(field) = value;
1538 }
1539 kernel_->dirty[static_cast<int>(field)] = true;
1540 }
1541 return true;
1542 }
1543
1544 WriteTransaction* MutableEntry::trans() const {
1545 // We are in a mutable entry, so we must be in a write transaction.
1546 // Maybe we could keep a pointer to the transaction in MutableEntry.
1547 ThreadNode* node = reinterpret_cast<ThreadNode*>
1548 (pthread_getspecific(dir()->kernel_->thread_node_key));
1549 return node->current_write_trans;
1550 }
1551
1552 bool MutableEntry::Put(BaseVersion field, int64 value) {
1553 DCHECK(kernel_);
1554 if (kernel_->ref(field) != value) {
1555 kernel_->ref(field) = value;
1556 kernel_->dirty[static_cast<int>(field)] = true;
1557 }
1558 return true;
1559 }
1560
1561 bool MutableEntry::Put(StringField field, const PathString& value) {
1562 return PutImpl(field, value);
1563 }
1564
1565 bool MutableEntry::PutImpl(StringField field, const PathString& value) {
1566 DCHECK(kernel_);
1567 if (kernel_->ref(field) != value) {
1568 if (NAME == field) {
1569 if (!dir()->ReindexParentIdAndName(kernel_, kernel_->ref(PARENT_ID),
1570 value))
1571 return false;
1572 } else {
1573 kernel_->ref(field) = value;
1574 }
1575 kernel_->dirty[static_cast<int>(field)] = true;
1576 }
1577 return true;
1578 }
1579
1580 bool MutableEntry::Put(IndexedBitField field, bool value) {
1581 DCHECK(kernel_);
1582 if (kernel_->ref(field) != value) {
1583 MetahandleSet* index;
1584 if (IS_UNSYNCED == field)
1585 index = dir()->kernel_->unsynced_metahandles;
1586 else
1587 index = dir()->kernel_->unapplied_update_metahandles;
1588
1589 ScopedKernelLock lock(dir());
1590 if (value)
1591 CHECK(index->insert(kernel_->ref(META_HANDLE)).second);
1592 else
1593 CHECK(1 == index->erase(kernel_->ref(META_HANDLE)));
1594 kernel_->ref(field) = value;
1595 kernel_->dirty[static_cast<int>(field)] = true;
1596 }
1597 return true;
1598 }
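// The CHECKs above enforce that the bit and its index never disagree: setting
// the bit must insert a handle that was absent, and clearing it must erase a
// handle that was present. In miniature, with a plain std::set (illustrative
// types only):
//
//   std::set<int64> index;
//   bool inserted = index.insert(handle).second;  // True only if absent.
//   size_t erased = index.erase(handle);          // 1 only if present.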
1599
1600 // Sets the parent ID and name together, avoiding a temporary collision in
1601 // the parent-id/name index (e.g. when moving a bookmark to another folder).
1602 bool MutableEntry::PutParentIdAndName(const Id& parent_id,
1603 const Name& name) {
1604 DCHECK(kernel_);
1605 const bool parent_id_changes = parent_id != kernel_->ref(PARENT_ID);
1606 bool db_name_changes = name.db_value() != kernel_->ref(NAME);
1607 if (parent_id_changes || db_name_changes) {
1608 if (!dir()->ReindexParentIdAndName(kernel_, parent_id,
1609 name.db_value()))
1610 return false;
1611 }
1612 Put(UNSANITIZED_NAME, name.GetUnsanitizedName());
1613 Put(NON_UNIQUE_NAME, name.non_unique_value());
1614 if (db_name_changes)
1615 kernel_->dirty[NAME] = true;
1616 if (parent_id_changes) {
1617 kernel_->dirty[PARENT_ID] = true;
1618 PutPredecessor(Id()); // Put in the 0th position.
1619 }
1620 return true;
1621 }
1622
1623 void MutableEntry::UnlinkFromOrder() {
1624 Id old_previous = Get(PREV_ID);
1625 Id old_next = Get(NEXT_ID);
1626
1627 // Self-looping signifies that this item is not in the order. If we set
1628 // these to the null (root) ID instead, we could get into trouble because
1629 // this node might look like the first node in the ordering.
1630 Put(NEXT_ID, Get(ID));
1631 Put(PREV_ID, Get(ID));
1632
1633 if (!old_previous.IsRoot()) {
1634 if (old_previous == old_next) {
1635 // Note that previous == next does not imply previous == next == Get(ID):
1636 // we could have prev == next == "c-XX" while Get(ID) == "sX..." if an item
1637 // was added and deleted before the commit response returned its server ID.
1638 CHECK((old_next == Get(ID)) || !old_next.ServerKnows());
1639 return; // Done if we were already self-looped (hence unlinked).
1640 }
1641 MutableEntry previous_entry(trans(), GET_BY_ID, old_previous);
1642 CHECK(previous_entry.good());
1643 previous_entry.Put(NEXT_ID, old_next);
1644 }
1645
1646 if (!old_next.IsRoot()) {
1647 MutableEntry next_entry(trans(), GET_BY_ID, old_next);
1648 CHECK(next_entry.good());
1649 next_entry.Put(PREV_ID, old_previous);
1650 }
1651 }
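// The same removal, sketched with plain pointers (illustrative only; NULL
// plays the role of the root ID at either end of the sibling order):
//
//   struct Node { Node* prev; Node* next; };
//   void Unlink(Node* n) {
//     Node *old_prev = n->prev, *old_next = n->next;
//     n->prev = n->next = n;          // Self-loop == "not in the order".
//     if (old_prev == n) return;      // Was already unlinked.
//     if (old_prev) old_prev->next = old_next;
//     if (old_next) old_next->prev = old_prev;
//   }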
1652
1653 bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
1654 // TODO(ncarter): Maybe there should be an independent HAS_POSITION bit?
1655 if (!Get(IS_BOOKMARK_OBJECT))
1656 return true;
1657 UnlinkFromOrder();
1658
1659 if (Get(IS_DEL)) {
1660 DCHECK(predecessor_id.IsNull());
1661 return true;
1662 }
1663
1664 // This is classic insert-into-doubly-linked-list from CS 101 and your last
1665 // job interview. An "IsRoot" Id signifies the head or tail.
1666 Id successor_id;
1667 if (!predecessor_id.IsRoot()) {
1668 MutableEntry predecessor(trans(), GET_BY_ID, predecessor_id);
1669 CHECK(predecessor.good());
1670 if (predecessor.Get(PARENT_ID) != Get(PARENT_ID))
1671 return false;
1672 successor_id = predecessor.Get(NEXT_ID);
1673 predecessor.Put(NEXT_ID, Get(ID));
1674 } else {
1675 syncable::Directory* dir = trans()->directory();
1676 successor_id = dir->GetFirstChildId(trans(), Get(PARENT_ID));
1677 }
1678 if (!successor_id.IsRoot()) {
1679 MutableEntry successor(trans(), GET_BY_ID, successor_id);
1680 CHECK(successor.good());
1681 if (successor.Get(PARENT_ID) != Get(PARENT_ID))
1682 return false;
1683 successor.Put(PREV_ID, Get(ID));
1684 }
1685 DCHECK(predecessor_id != Get(ID));
1686 DCHECK(successor_id != Get(ID));
1687 Put(PREV_ID, predecessor_id);
1688 Put(NEXT_ID, successor_id);
1689 return true;
1690 }
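// And the matching insertion, continuing the pointer sketch above (a NULL
// predecessor means "insert at the head of the parent's child list"):
//
//   void InsertAfter(Node* pred, Node* first_child, Node* n) {
//     Node* succ = pred ? pred->next : first_child;
//     if (pred) pred->next = n;
//     if (succ) succ->prev = n;
//     n->prev = pred;
//     n->next = succ;
//   }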
1691
1692 ///////////////////////////////////////////////////////////////////////////
1693 // High-level functions
1694
1695 int64 Directory::NextMetahandle() {
1696 ScopedKernelLock lock(this);
1697 int64 metahandle = (kernel_->next_metahandle)++;
1698 return metahandle;
1699 }
1700
1701 // Always returns a client ID that is the string representation of a negative
1702 // number.
1703 Id Directory::NextId() {
1704 int64 result;
1705 {
1706 ScopedKernelLock lock(this);
1707 result = (kernel_->next_id)--;
1708 kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
1709 }
1710 DCHECK_LT(result, 0);
1711 return Id::CreateFromClientString(Int64ToString(result));
1712 }
1713
1714 Id Directory::GetChildWithNullIdField(IdField field,
1715 BaseTransaction* trans,
1716 const Id& parent_id) {
1717 // This query is O(number of children), which should be acceptable
1718 // when this method is used as the first step in enumerating the children of
1719 // a node. But careless use otherwise could potentially result in
1720 // O((number of children)^2) performance.
1721 ChildHandles child_handles;
1722 GetChildHandles(trans, parent_id, &child_handles);
1723 ChildHandles::const_iterator it;
1724 for (it = child_handles.begin(); it != child_handles.end(); ++it) {
1725 Entry e(trans, GET_BY_HANDLE, *it);
1726 CHECK(e.good());
1727 if (e.Get(field).IsRoot())
1728 return e.Get(ID);
1729 }
1730
1731 return Id();
1732 }
1733
1734 Id Directory::GetFirstChildId(BaseTransaction* trans,
1735 const Id& parent_id) {
1736 return GetChildWithNullIdField(PREV_ID, trans, parent_id);
1737 }
1738
1739 Id Directory::GetLastChildId(BaseTransaction* trans,
1740 const Id& parent_id) {
1741 return GetChildWithNullIdField(NEXT_ID, trans, parent_id);
1742 }
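// Taken together these support ordered traversal; an illustrative walk over a
// folder's children (error handling elided):
//
//   Id child = dir->GetFirstChildId(trans, parent_id);
//   while (!child.IsRoot()) {
//     Entry e(trans, GET_BY_ID, child);
//     CHECK(e.good());
//     // ... visit e ...
//     child = e.Get(NEXT_ID);
//   }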
1743
1744 ExtendedAttribute::ExtendedAttribute(BaseTransaction* trans, GetByHandle,
1745 const ExtendedAttributeKey& key) {
1746 Directory::Kernel* const kernel = trans->directory()->kernel_;
1747 ScopedKernelLock lock(trans->directory());
1748 Init(trans, kernel, &lock, key);
1749 }
1750
1751 bool ExtendedAttribute::Init(BaseTransaction* trans,
1752 Directory::Kernel* const kernel,
1753 ScopedKernelLock* lock,
1754 const ExtendedAttributeKey& key) {
1755 i_ = kernel->extended_attributes->find(key);
1756 good_ = kernel->extended_attributes->end() != i_;
1757 return good_;
1758 }
1759
1760 MutableExtendedAttribute::MutableExtendedAttribute(
1761 WriteTransaction* trans, GetByHandle,
1762 const ExtendedAttributeKey& key) :
1763 ExtendedAttribute(trans, GET_BY_HANDLE, key) {
1764 }
1765
1766 MutableExtendedAttribute::MutableExtendedAttribute(
1767 WriteTransaction* trans, Create, const ExtendedAttributeKey& key) {
1768 Directory::Kernel* const kernel = trans->directory()->kernel_;
1769 ScopedKernelLock lock(trans->directory());
1770 if (!Init(trans, kernel, &lock, key)) {
1771 ExtendedAttributeValue val;
1772 val.dirty = true;
1773 i_ = kernel->extended_attributes->insert(std::make_pair(key, val)).first;
1774 good_ = true;
1775 }
1776 }
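// This is the usual map get-or-create idiom; in miniature, with illustrative
// types:
//
//   std::map<Key, Value>::iterator it = m.find(key);
//   if (it == m.end())
//     it = m.insert(std::make_pair(key, Value())).first;  // Iterator is valid.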
1777
1778 bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
1779 const Id& new_parent_id) {
1780 if (entry_id.IsRoot())
1781 return false;
1782 // We have to ensure that the entry is not an ancestor of the new parent.
1783 Id ancestor_id = new_parent_id;
1784 while (!ancestor_id.IsRoot()) {
1785 if (entry_id == ancestor_id)
1786 return false;
1787 Entry new_parent(trans, GET_BY_ID, ancestor_id);
1788 CHECK(new_parent.good());
1789 ancestor_id = new_parent.Get(PARENT_ID);
1790 }
1791 return true;
1792 }
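// Example: reparenting folder A beneath its own descendant A/B/C is rejected,
// because walking C's ancestor chain (C -> B -> A) encounters A itself.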
1793
1794 // Returns -1 if s contains any non-[0-9] character ("" parses as 0).
1795 static int PathStringToInteger(const PathString& s) {
1796 PathString::const_iterator i = s.begin();
1797 for (; i != s.end(); ++i) {
1798 if (PathString::npos == PathString(PSTR("0123456789")).find(*i))
1799 return -1;
1800 }
1801 return
1802 #if !PATHSTRING_IS_STD_STRING
1803 _wtoi
1804 #else
1805 atoi
1806 #endif
1807 (s.c_str());
1808 }
1809
1810 static PathString IntegerToPathString(int i) {
1811 const size_t kBufSize = 25;
1812 PathChar buf[kBufSize];
1813 #if !PATHSTRING_IS_STD_STRING
1814 const int radix = 10;
1815 _itow(i, buf, radix);
1816 #else
1817 snprintf(buf, kBufSize, "%d", i);
1818 #endif
1819 return buf;
1820 }
1821
1822 // Appends ~1 to the end of 's' unless there is already ~#, in which case
1823 // the number is simply incremented.
1824 static PathString FixBasenameInCollision(const PathString s) {
1825 PathString::size_type last_tilde = s.find_last_of(PSTR('~'));
1826 if (PathString::npos == last_tilde) return s + PSTR("~1");
1827 if (s.size() == (last_tilde + 1)) return s + PSTR("1");
1828 // We have a ~, but not necessarily ~# (for some number >= 0); check for that.
1829 int n = PathStringToInteger(s.substr(last_tilde + 1));
1830 if (n != -1) {
1831 n++;
1832 PathString pre_number = s.substr(0, last_tilde + 1);
1833 return pre_number + IntegerToPathString(n);
1834 } else {
1835 // We have a ~, but no number following it, so append another ~,
1836 // this time followed by a number.
1837 return s + PSTR("~1");
1838 }
1839 }
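// Examples of the rules above:
//   "a"    -> "a~1"     (no tilde yet)
//   "a~"   -> "a~1"     (tilde with nothing after it)
//   "a~1"  -> "a~2"     (trailing number is incremented)
//   "a~x"  -> "a~x~1"   (tilde not followed by a number)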
1840
1841 void DBName::MakeNoncollidingForEntry(BaseTransaction* trans,
1842 const Id& parent_id,
1843 Entry* e) {
1844 const PathString& desired_name = *this;
1845 CHECK(!desired_name.empty());
1846 PathString::size_type first_dot = desired_name.find_first_of(PSTR('.'));
1847 if (PathString::npos == first_dot)
1848 first_dot = desired_name.size();
1849 PathString basename = desired_name.substr(0, first_dot);
1850 PathString dotextension = desired_name.substr(first_dot);
1851 CHECK(basename + dotextension == desired_name);
1852 for (;;) {
1853 // Check for a collision.
1854 PathString testname = basename + dotextension;
1855 Entry same_path_entry(trans, GET_BY_PARENTID_AND_DBNAME,
1856 parent_id, testname);
1857 if (!same_path_entry.good() || (e && same_path_entry.Get(ID) == e->Get(ID)))
1858 break;
1859 // There was a collision, so fix the name.
1860 basename = FixBasenameInCollision(basename);
1861 }
1862 // Set our value to the new value. This invalidates desired_name.
1863 PathString new_value = basename + dotextension;
1864 swap(new_value);
1865 }
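// Example: if "foo.txt" and "foo~1.txt" both exist under parent_id, a desired
// name of "foo.txt" becomes "foo~2.txt"; everything from the first '.' onward
// is treated as the extension and preserved across renames.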
1866
1867 PathString GetFullPath(BaseTransaction* trans, const Entry& e) {
1868 PathString result;
1869 #ifdef STL_MSVC
1870 result.reserve(MAX_PATH);
1871 #endif
1872 ReverseAppend(e.Get(NAME), &result);
1873 Id id = e.Get(PARENT_ID);
1874 while (!id.IsRoot()) {
1875 result.push_back(kPathSeparator[0]);
1876 Entry ancestor(trans, GET_BY_ID, id);
1877 if (!ancestor.good()) {
1878 // This can happen if the parent folder got deleted before the entry.
1879 LOG(WARNING) << "Cannot get full path of " << e
1880 << "\nbecause an ancestor folder has been deleted.";
1881 result.clear();
1882 return result;
1883 }
1884 ReverseAppend(ancestor.Get(NAME), &result);
1885 id = ancestor.Get(PARENT_ID);
1886 }
1887 result.push_back(kPathSeparator[0]);
1888 reverse(result.begin(), result.end());
1889 return result;
1890 }
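// Example (assuming '/' as kPathSeparator): an entry named "c" under folders
// root/"a"/"b" yields "/a/b/c". Each component is appended reversed so the
// path can be built back-to-front without repeated prepends, then the whole
// string is reversed once at the end.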
1891
1892 const Blob* GetExtendedAttributeValue(const Entry& e,
1893 const PathString& attribute_name) {
1894 ExtendedAttributeKey key(e.Get(META_HANDLE), attribute_name);
1895 ExtendedAttribute extended_attribute(e.trans(), GET_BY_HANDLE, key);
1896 if (extended_attribute.good() && !extended_attribute.is_deleted())
1897 return &extended_attribute.value();
1898 return NULL;
1899 }
1900
1901 // This function sets only the flags needed to get this entry to sync.
1902 void MarkForSyncing(syncable::MutableEntry* e) {
1903 DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
1904 DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing.";
1905 e->Put(IS_UNSYNCED, true);
1906 e->Put(SYNCING, false);
1907 }
1908
1909 } // namespace syncable
1910
1911 namespace {
1912 class DumpSeparator {
1913 } separator;
1914 class DumpColon {
1915 } colon;
1916 } // namespace
1917
1918 inline FastDump& operator<<(FastDump& dump, const DumpSeparator&) {
1919 dump.out_->sputn(", ", 2);
1920 return dump;
1921 }
1922
1923 inline FastDump& operator<<(FastDump& dump, const DumpColon&) {
1924 dump.out_->sputn(": ", 2);
1925 return dump;
1926 }
1927
1928 std::ostream& operator<<(std::ostream& stream, const syncable::Entry& entry) {
1929 // Using ostreams directly here is dreadfully slow, because a mutex is
1930 // acquired for every <<. Users noticed it spiking CPU.
1931 using browser_sync::ToUTF8;
1932 using syncable::BitField;
1933 using syncable::BitTemp;
1934 using syncable::BlobField;
1935 using syncable::EntryKernel;
1936 using syncable::g_metas_columns;
1937 using syncable::IdField;
1938 using syncable::Int64Field;
1939 using syncable::StringField;
1940 using syncable::BEGIN_FIELDS;
1941 using syncable::BIT_FIELDS_END;
1942 using syncable::BIT_TEMPS_BEGIN;
1943 using syncable::BIT_TEMPS_END;
1944 using syncable::BLOB_FIELDS_END;
1945 using syncable::INT64_FIELDS_END;
1946 using syncable::ID_FIELDS_END;
1947 using syncable::STRING_FIELDS_END;
1948
1949 int i;
1950 FastDump s(&stream);
1951 EntryKernel* const kernel = entry.kernel_;
1952 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
1953 s << g_metas_columns[i].name << colon
1954 << kernel->ref(static_cast<Int64Field>(i)) << separator;
1955 }
1956 for ( ; i < ID_FIELDS_END; ++i) {
1957 s << g_metas_columns[i].name << colon
1958 << kernel->ref(static_cast<IdField>(i)) << separator;
1959 }
1960 s << "Flags: ";
1961 for ( ; i < BIT_FIELDS_END; ++i) {
1962 if (kernel->ref(static_cast<BitField>(i)))
1963 s << g_metas_columns[i].name << separator;
1964 }
1965 for ( ; i < STRING_FIELDS_END; ++i) {
1966 ToUTF8 field(kernel->ref(static_cast<StringField>(i)));
1967 s << g_metas_columns[i].name << colon << field.get_string() << separator;
1968 }
1969 for ( ; i < BLOB_FIELDS_END; ++i) {
1970 s << g_metas_columns[i].name << colon
1971 << kernel->ref(static_cast<BlobField>(i)) << separator;
1972 }
1973 s << "TempFlags: ";
1974 for ( ; i < BIT_TEMPS_END; ++i) {
1975 if (kernel->ref(static_cast<BitTemp>(i)))
1976 s << "#" << i - BIT_TEMPS_BEGIN << separator;
1977 }
1978 return stream;
1979 }
1980
1981 std::ostream& operator<<(std::ostream& s, const syncable::Blob& blob) {
1982 for (syncable::Blob::const_iterator i = blob.begin(); i != blob.end(); ++i)
1983 s << std::hex << std::setw(2)
1984 << std::setfill('0') << static_cast<unsigned int>(*i);
1985 return s << std::dec;
1986 }
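// Example: a blob holding bytes {0x0a, 0xff} prints as "0aff" (two lowercase
// hex digits per byte).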
1987
1988 FastDump& operator<<(FastDump& dump, const syncable::Blob& blob) {
1989 if (blob.empty())
1990 return dump;
1991 string buffer(HexEncode(&blob[0], blob.size()));
1992 dump.out_->sputn(buffer.c_str(), buffer.size());
1993 return dump;
1994 }
1995
1996 std::ostream& operator<<(std::ostream& s, const syncable::ThreadNode& node) {
1997 s << "thread id: " << std::hex << node.id << "\n"
1998 << "file: " << node.file << "\n"
1999 << "line: " << std::dec << node.line << "\n"
2000 << "wait_started: " << node.wait_started.ToInternalValue();
2001 return s;
2002 }