OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sync/syncable/directory.h" | 5 #include "sync/syncable/directory.h" |
6 | 6 |
7 #include <inttypes.h> | |
7 #include <stddef.h> | 8 #include <stddef.h> |
8 #include <stdint.h> | 9 #include <stdint.h> |
9 | 10 |
10 #include <algorithm> | 11 #include <algorithm> |
11 #include <iterator> | 12 #include <iterator> |
12 #include <utility> | 13 #include <utility> |
13 | 14 |
14 #include "base/base64.h" | 15 #include "base/base64.h" |
15 #include "base/guid.h" | 16 #include "base/guid.h" |
16 #include "base/metrics/histogram.h" | 17 #include "base/metrics/histogram.h" |
17 #include "base/stl_util.h" | 18 #include "base/stl_util.h" |
18 #include "base/strings/string_number_conversions.h" | 19 #include "base/strings/string_number_conversions.h" |
20 #include "base/strings/stringprintf.h" | |
21 #include "base/trace_event/memory_dump_manager.h" | |
22 #include "base/trace_event/process_memory_dump.h" | |
19 #include "base/trace_event/trace_event.h" | 23 #include "base/trace_event/trace_event.h" |
20 #include "sync/internal_api/public/base/attachment_id_proto.h" | 24 #include "sync/internal_api/public/base/attachment_id_proto.h" |
21 #include "sync/internal_api/public/base/unique_position.h" | 25 #include "sync/internal_api/public/base/unique_position.h" |
22 #include "sync/internal_api/public/util/unrecoverable_error_handler.h" | 26 #include "sync/internal_api/public/util/unrecoverable_error_handler.h" |
27 #include "sync/protocol/proto_value_conversions.h" | |
23 #include "sync/syncable/entry.h" | 28 #include "sync/syncable/entry.h" |
24 #include "sync/syncable/entry_kernel.h" | 29 #include "sync/syncable/entry_kernel.h" |
25 #include "sync/syncable/in_memory_directory_backing_store.h" | 30 #include "sync/syncable/in_memory_directory_backing_store.h" |
26 #include "sync/syncable/model_neutral_mutable_entry.h" | 31 #include "sync/syncable/model_neutral_mutable_entry.h" |
27 #include "sync/syncable/on_disk_directory_backing_store.h" | 32 #include "sync/syncable/on_disk_directory_backing_store.h" |
28 #include "sync/syncable/scoped_kernel_lock.h" | 33 #include "sync/syncable/scoped_kernel_lock.h" |
29 #include "sync/syncable/scoped_parent_child_index_updater.h" | 34 #include "sync/syncable/scoped_parent_child_index_updater.h" |
30 #include "sync/syncable/syncable-inl.h" | 35 #include "sync/syncable/syncable-inl.h" |
31 #include "sync/syncable/syncable_base_transaction.h" | 36 #include "sync/syncable/syncable_base_transaction.h" |
32 #include "sync/syncable/syncable_changes_version.h" | 37 #include "sync/syncable/syncable_changes_version.h" |
33 #include "sync/syncable/syncable_read_transaction.h" | 38 #include "sync/syncable/syncable_read_transaction.h" |
34 #include "sync/syncable/syncable_util.h" | 39 #include "sync/syncable/syncable_util.h" |
35 #include "sync/syncable/syncable_write_transaction.h" | 40 #include "sync/syncable/syncable_write_transaction.h" |
36 | 41 |
37 using std::string; | 42 using std::string; |
38 | 43 |
39 namespace syncer { | 44 namespace syncer { |
40 namespace syncable { | 45 namespace syncable { |
41 | 46 |
47 namespace { | |
48 | |
49 template <typename Key, typename Value> | |
50 size_t GetUnorderedMapMemoryUsage(const std::unordered_map<Key, Value> map) { | |
DmitrySkiba
2016/08/09 17:44:10
I think you missed & here: const std::unordered_map<Key, Value>& map
| |
51 // Considers the hashtable to be an array of buckets which are linked list of | |
52 // nodes. Each node contains a next pointer and the cached hash value. | |
53 size_t node_size = sizeof(std::pair<Key, Value>) + 2 * sizeof(void*); | |
54 size_t bucket_overhead = sizeof(void*); | |
55 return map.bucket_count() * bucket_overhead + node_size * map.size(); | |
56 } | |
57 } | |
58 | |
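The review note about the missing `&` refers to the map being taken by value, which copies the whole container just to estimate its size. A minimal sketch of the signature the comment appears to suggest, keeping the same estimation logic as the patch (only the parameter becomes a const reference):

```cpp
#include <stddef.h>

#include <unordered_map>
#include <utility>

// Treat the hash table as an array of buckets, each a linked list of nodes;
// every node holds the stored pair plus a next pointer and a cached hash.
template <typename Key, typename Value>
size_t GetUnorderedMapMemoryUsage(const std::unordered_map<Key, Value>& map) {
  size_t node_size = sizeof(std::pair<Key, Value>) + 2 * sizeof(void*);
  size_t bucket_overhead = sizeof(void*);
  return map.bucket_count() * bucket_overhead + node_size * map.size();
}
```

Passing by const reference matters here because the maps being measured (for example |metahandles_map|) can hold many thousands of entries; copying one inside a measurement helper would itself allocate.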
42 // static | 59 // static |
43 const base::FilePath::CharType Directory::kSyncDatabaseFilename[] = | 60 const base::FilePath::CharType Directory::kSyncDatabaseFilename[] = |
44 FILE_PATH_LITERAL("SyncData.sqlite3"); | 61 FILE_PATH_LITERAL("SyncData.sqlite3"); |
45 | 62 |
46 Directory::PersistedKernelInfo::PersistedKernelInfo() { | 63 Directory::PersistedKernelInfo::PersistedKernelInfo() { |
47 ModelTypeSet protocol_types = ProtocolTypes(); | 64 ModelTypeSet protocol_types = ProtocolTypes(); |
48 for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good(); | 65 for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good(); |
49 iter.Inc()) { | 66 iter.Inc()) { |
50 ResetDownloadProgress(iter.Get()); | 67 ResetDownloadProgress(iter.Get()); |
51 transaction_version[iter.Get()] = 0; | 68 transaction_version[iter.Get()] = 0; |
(...skipping 156 matching lines...) | |
208 // Save changes back in case there are any metahandles to purge. | 225 // Save changes back in case there are any metahandles to purge. |
209 if (!SaveChanges()) | 226 if (!SaveChanges()) |
210 return FAILED_INITIAL_WRITE; | 227 return FAILED_INITIAL_WRITE; |
211 | 228 |
212 // Now that we've successfully opened the store, install an error handler to | 229 // Now that we've successfully opened the store, install an error handler to |
213 // deal with catastrophic errors that may occur later on. Use a weak pointer | 230 // deal with catastrophic errors that may occur later on. Use a weak pointer |
214 // because we cannot guarantee that this Directory will outlive the Closure. | 231 // because we cannot guarantee that this Directory will outlive the Closure. |
215 store_->SetCatastrophicErrorHandler(base::Bind( | 232 store_->SetCatastrophicErrorHandler(base::Bind( |
216 &Directory::OnCatastrophicError, weak_ptr_factory_.GetWeakPtr())); | 233 &Directory::OnCatastrophicError, weak_ptr_factory_.GetWeakPtr())); |
217 | 234 |
235 UMA_HISTOGRAM_COUNTS("Sync.DirectoryMemoryUsageKB", | |
236 GetApproximateMemoryUsage()); | |
237 | |
218 return OPENED; | 238 return OPENED; |
219 } | 239 } |
220 | 240 |
221 DeleteJournal* Directory::delete_journal() { | 241 DeleteJournal* Directory::delete_journal() { |
222 DCHECK(delete_journal_.get()); | 242 DCHECK(delete_journal_.get()); |
223 return delete_journal_.get(); | 243 return delete_journal_.get(); |
224 } | 244 } |
225 | 245 |
226 void Directory::Close() { | 246 void Directory::Close() { |
227 store_.reset(); | 247 store_.reset(); |
(...skipping 685 matching lines...) | |
913 ScopedKernelLock lock(this); | 933 ScopedKernelLock lock(this); |
914 kernel_->persisted_info.download_progress[model_type].SerializeToString( | 934 kernel_->persisted_info.download_progress[model_type].SerializeToString( |
915 value_out); | 935 value_out); |
916 } | 936 } |
917 | 937 |
918 size_t Directory::GetEntriesCount() const { | 938 size_t Directory::GetEntriesCount() const { |
919 ScopedKernelLock lock(this); | 939 ScopedKernelLock lock(this); |
920 return kernel_->metahandles_map.size(); | 940 return kernel_->metahandles_map.size(); |
921 } | 941 } |
922 | 942 |
943 size_t Directory::GetApproximateMemoryUsage() { | |
944 size_t total = 0; | |
945 ScopedKernelLock lock(this); | |
946 total += sizeof(this) + GetUnorderedMapMemoryUsage(kernel_->metahandles_map) + | |
947 GetUnorderedMapMemoryUsage(kernel_->ids_map) + | |
948 GetUnorderedMapMemoryUsage(kernel_->server_tags_map) + | |
949 GetUnorderedMapMemoryUsage(kernel_->client_tags_map) + | |
950 GetUnorderedMapMemoryUsage(kernel_->index_by_attachment_id); | |
951 total += kernel_->parent_child_index.MemoryUsage(); | |
952 | |
953 for (size_t i = 0; i < MODEL_TYPE_COUNT; ++i) | |
954 total += | |
955 kernel_->persisted_info.datatype_context[i].context().capacity() + 1; | |
956 | |
957 for (auto entry : kernel_->index_by_attachment_id) | |
DmitrySkiba
2016/08/09 17:44:10
const auto&, all other places too.
| |
958 total += entry.first.capacity() + 1 + entry.second.size() * sizeof(int64_t); | |
959 | |
960 for (auto entry : kernel_->ids_map) | |
961 total += entry.first.capacity() + 1; | |
962 | |
963 for (auto handle : kernel_->metahandles_map) { | |
964 // If entry was not changed after the last estimation, use the cached size. | |
965 if (handle.second->cached_size()) { | |
966 total += handle.second->cached_size(); | |
967 continue; | |
968 } | |
969 | |
970 size_t entry_size = 0; | |
DmitrySkiba
2016/08/09 17:44:10
I think this whole thing should be moved into EntryKernel.
| |
971 for (unsigned i = ID_FIELDS_BEGIN; i < ID_FIELDS_END; ++i) { | |
972 entry_size += | |
973 handle.second->ref(static_cast<IdField>(i)).value().capacity() + 1; | |
974 } | |
975 for (unsigned i = STRING_FIELDS_BEGIN; i < STRING_FIELDS_END; ++i) { | |
976 if (i == UNIQUE_SERVER_TAG || i == UNIQUE_CLIENT_TAG) { | |
977 // These are stored again in either |client_tags_map| or | |
978 // |server_tags_map|. | |
979 entry_size += | |
980 2 * | |
981 (handle.second->ref(static_cast<StringField>(i)).capacity() + 1); | |
982 } else { | |
983 entry_size += | |
984 handle.second->ref(static_cast<StringField>(i)).capacity() + 1; | |
985 } | |
986 } | |
987 const sync_pb::EntitySpecifics* last_entity = nullptr; | |
988 for (unsigned i = PROTO_FIELDS_BEGIN; i < PROTO_FIELDS_END; ++i) { | |
989 const auto& field = handle.second->ref(static_cast<ProtoField>(i)); | |
990 if (&sync_pb::EntitySpecifics::default_instance() != &field) { | |
991 // Any two fields referencing to same entity are consecutive. Continue | |
992 // if already accounted. | |
993 if (last_entity == &field) | |
994 continue; | |
995 last_entity = &field; | |
996 entry_size += GetEntitySpecificsSize(field); | |
997 } else { | |
998 last_entity = nullptr; | |
999 } | |
1000 } | |
1001 const sync_pb::AttachmentMetadata* last_field = nullptr; | |
1002 for (unsigned i = ATTACHMENT_METADATA_FIELDS_BEGIN; | |
1003 i < ATTACHMENT_METADATA_FIELDS_END; ++i) { | |
1004 const auto& field = | |
1005 handle.second->ref(static_cast<AttachmentMetadataField>(i)); | |
1006 if (&sync_pb::AttachmentMetadata::default_instance() != &field) { | |
1007 if (last_field == &field) | |
1008 continue; | |
1009 last_field = &field; | |
1010 entry_size += sizeof(sync_pb::AttachmentMetadata); | |
1011 entry_size += | |
1012 field.record_size() * (sizeof(sync_pb::AttachmentMetadataRecord) + | |
1013 sizeof(sync_pb::AttachmentIdProto)); | |
1014 for (int i = 0; i < field.record_size(); ++i) | |
1015 entry_size += field.record(i).id().unique_id().capacity() + 1; | |
1016 } else { | |
1017 last_field = nullptr; | |
1018 } | |
1019 } | |
1020 for (unsigned i = UNIQUE_POSITION_FIELDS_BEGIN; | |
1021 i < UNIQUE_POSITION_FIELDS_END; ++i) { | |
1022 entry_size += handle.second->ref(static_cast<UniquePositionField>(i)) | |
1023 .compressed_byte_size(); | |
1024 } | |
1025 | |
1026 handle.second->set_cached_size(entry_size); | |
1027 total += entry_size; | |
1028 } | |
1029 return total; | |
1030 } | |
1031 | |
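The two comments inside GetApproximateMemoryUsage() suggest iterating by `const auto&` (avoiding a copy of each key/value pair on every iteration) and hoisting the per-entry arithmetic into EntryKernel. A rough sketch of what that could look like, assuming a hypothetical `EntryKernel::EstimateMemoryUsage()` that holds the ID/string/proto/attachment/position field accounting shown in the patch; that method name is an assumption, not part of this CL:

```cpp
size_t Directory::GetApproximateMemoryUsage() {
  size_t total = 0;
  ScopedKernelLock lock(this);
  total += sizeof(*this) +  // The Directory object itself.
           GetUnorderedMapMemoryUsage(kernel_->metahandles_map) +
           GetUnorderedMapMemoryUsage(kernel_->ids_map) +
           GetUnorderedMapMemoryUsage(kernel_->server_tags_map) +
           GetUnorderedMapMemoryUsage(kernel_->client_tags_map) +
           GetUnorderedMapMemoryUsage(kernel_->index_by_attachment_id);
  total += kernel_->parent_child_index.MemoryUsage();

  // Per-type sync context blobs (capacity + NUL terminator).
  for (size_t i = 0; i < MODEL_TYPE_COUNT; ++i)
    total +=
        kernel_->persisted_info.datatype_context[i].context().capacity() + 1;

  for (const auto& entry : kernel_->index_by_attachment_id)
    total += entry.first.capacity() + 1 + entry.second.size() * sizeof(int64_t);

  for (const auto& entry : kernel_->ids_map)
    total += entry.first.capacity() + 1;

  for (const auto& handle : kernel_->metahandles_map) {
    // Reuse the cached estimate when the entry has not changed since the
    // last estimation; otherwise ask the entry to re-measure itself.
    if (!handle.second->cached_size())
      handle.second->set_cached_size(handle.second->EstimateMemoryUsage());
    total += handle.second->cached_size();
  }
  return total;
}
```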
1032 void Directory::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd) { | |
1033 auto dump = pmd->CreateAllocatorDump(base::StringPrintf( | |
1034 "sync/0x%" PRIXPTR, reinterpret_cast<uintptr_t>(this))); | |
1035 dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, | |
1036 base::trace_event::MemoryAllocatorDump::kUnitsBytes, | |
1037 GetApproximateMemoryUsage()); | |
1038 const char* system_allocator_name = | |
1039 base::trace_event::MemoryDumpManager::GetInstance() | |
1040 ->system_allocator_pool_name(); | |
1041 if (system_allocator_name) | |
1042 pmd->AddSuballocation(dump->guid(), system_allocator_name); | |
1043 } | |
1044 | |
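OnMemoryDump() creates a `sync/0x<address>` allocator dump for the tracing infrastructure, but the CL does not show where it gets called from. One plausible, purely illustrative wiring is a `base::trace_event::MemoryDumpProvider` that forwards to it; the wrapper class name and the registration shown in the trailing comment are assumptions, not code from this change:

```cpp
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"

// Hypothetical forwarding provider; not part of this CL.
class SyncMemoryDumpProvider : public base::trace_event::MemoryDumpProvider {
 public:
  explicit SyncMemoryDumpProvider(syncer::syncable::Directory* directory)
      : directory_(directory) {}

  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override {
    directory_->OnMemoryDump(pmd);
    return true;
  }

 private:
  syncer::syncable::Directory* directory_;  // Not owned.
};

// Registration would typically happen once, on the sequence that owns the
// directory, e.g.:
//   base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//       provider, "SyncDirectory", task_runner);
```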
923 void Directory::SetDownloadProgress( | 1045 void Directory::SetDownloadProgress( |
924 ModelType model_type, | 1046 ModelType model_type, |
925 const sync_pb::DataTypeProgressMarker& new_progress) { | 1047 const sync_pb::DataTypeProgressMarker& new_progress) { |
926 ScopedKernelLock lock(this); | 1048 ScopedKernelLock lock(this); |
927 kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress); | 1049 kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress); |
928 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | 1050 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; |
929 } | 1051 } |
930 | 1052 |
931 bool Directory::HasEmptyDownloadProgress(ModelType type) const { | 1053 bool Directory::HasEmptyDownloadProgress(ModelType type) const { |
932 ScopedKernelLock lock(this); | 1054 ScopedKernelLock lock(this); |
(...skipping 646 matching lines...) | |
1579 Directory::Kernel* Directory::kernel() { | 1701 Directory::Kernel* Directory::kernel() { |
1580 return kernel_; | 1702 return kernel_; |
1581 } | 1703 } |
1582 | 1704 |
1583 const Directory::Kernel* Directory::kernel() const { | 1705 const Directory::Kernel* Directory::kernel() const { |
1584 return kernel_; | 1706 return kernel_; |
1585 } | 1707 } |
1586 | 1708 |
1587 } // namespace syncable | 1709 } // namespace syncable |
1588 } // namespace syncer | 1710 } // namespace syncer |