OLD | NEW |
---|---|
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "extensions/browser/computed_hashes.h" | 5 #include "extensions/browser/computed_hashes.h" |
6 | 6 |
7 #include "base/base64.h" | 7 #include "base/base64.h" |
8 #include "base/file_util.h" | 8 #include "base/file_util.h" |
9 #include "base/files/file_path.h" | 9 #include "base/files/file_path.h" |
10 #include "base/json/json_reader.h" | 10 #include "base/json/json_reader.h" |
11 #include "base/json/json_writer.h" | 11 #include "base/json/json_writer.h" |
12 #include "base/stl_util.h" | |
13 #include "base/values.h" | |
14 #include "crypto/secure_hash.h" | |
15 #include "crypto/sha2.h" | |
12 | 16 |
13 namespace { | 17 namespace { |
18 const char kBlockHashesKey[] = "block_hashes"; | |
19 const char kBlockSizeKey[] = "block_size"; | |
20 const char kFileHashesKey[] = "file_hashes"; | |
14 const char kPathKey[] = "path"; | 21 const char kPathKey[] = "path"; |
15 const char kBlockSizeKey[] = "block_size"; | 22 const char kVersionKey[] = "version"; |
16 const char kBlockHashesKey[] = "block_hashes"; | 23 const int kVersion = 2; |
17 } | 24 } |
Ken Rockot(use gerrit already)
2014/08/05 23:10:32
nit: It's still a pretty small block, but perhaps
asargent_no_longer_on_chrome
2014/08/06 04:47:32
Done.
| |
18 | 25 |
19 namespace extensions { | 26 namespace extensions { |
20 | 27 |
21 ComputedHashes::Reader::Reader() { | 28 ComputedHashes::Reader::Reader() { |
22 } | 29 } |
Ken Rockot(use gerrit already)
2014/08/05 23:10:32
nit on existing code: Missing blank line
asargent_no_longer_on_chrome
2014/08/06 04:47:33
Done.
| |
23 ComputedHashes::Reader::~Reader() { | 30 ComputedHashes::Reader::~Reader() { |
24 } | 31 } |
25 | 32 |
26 bool ComputedHashes::Reader::InitFromFile(const base::FilePath& path) { | 33 bool ComputedHashes::Reader::InitFromFile(const base::FilePath& path) { |
27 std::string contents; | 34 std::string contents; |
28 if (!base::ReadFileToString(path, &contents)) | 35 if (!base::ReadFileToString(path, &contents)) |
29 return false; | 36 return false; |
30 | 37 |
38 base::DictionaryValue* top_dictionary = NULL; | |
39 scoped_ptr<base::Value> value(base::JSONReader::Read(contents)); | |
40 if (!value.get() || !value->GetAsDictionary(&top_dictionary)) | |
41 return false; | |
42 | |
43 // For now we don't support forwards or backwards compatibility in the | |
44 // format, so we return false on version mismatch. | |
45 int version = 0; | |
46 if (!top_dictionary->GetInteger(kVersionKey, &version) || version != kVersion) | |
47 return false; | |
48 | |
31 base::ListValue* all_hashes = NULL; | 49 base::ListValue* all_hashes = NULL; |
32 scoped_ptr<base::Value> value(base::JSONReader::Read(contents)); | 50 if (!top_dictionary->GetList(kFileHashesKey, &all_hashes)) |
33 if (!value.get() || !value->GetAsList(&all_hashes)) | |
34 return false; | 51 return false; |
35 | 52 |
36 for (size_t i = 0; i < all_hashes->GetSize(); i++) { | 53 for (size_t i = 0; i < all_hashes->GetSize(); i++) { |
37 base::DictionaryValue* dictionary = NULL; | 54 base::DictionaryValue* dictionary = NULL; |
38 if (!all_hashes->GetDictionary(i, &dictionary)) | 55 if (!all_hashes->GetDictionary(i, &dictionary)) |
39 return false; | 56 return false; |
40 | 57 |
41 std::string relative_path_utf8; | 58 std::string relative_path_utf8; |
42 if (!dictionary->GetString(kPathKey, &relative_path_utf8)) | 59 if (!dictionary->GetString(kPathKey, &relative_path_utf8)) |
43 return false; | 60 return false; |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
84 base::FilePath path = relative_path.NormalizePathSeparatorsTo('/'); | 101 base::FilePath path = relative_path.NormalizePathSeparatorsTo('/'); |
85 std::map<base::FilePath, HashInfo>::iterator i = data_.find(path); | 102 std::map<base::FilePath, HashInfo>::iterator i = data_.find(path); |
86 if (i == data_.end()) | 103 if (i == data_.end()) |
87 return false; | 104 return false; |
88 HashInfo& info = i->second; | 105 HashInfo& info = i->second; |
89 *block_size = info.first; | 106 *block_size = info.first; |
90 *hashes = info.second; | 107 *hashes = info.second; |
91 return true; | 108 return true; |
92 } | 109 } |
93 | 110 |
94 ComputedHashes::Writer::Writer() { | 111 ComputedHashes::Writer::Writer() : file_list_(new base::ListValue) { |
95 } | 112 } |
Ken Rockot(use gerrit already)
2014/08/05 23:10:32
nit on existing code: Missing blank line.
asargent_no_longer_on_chrome
2014/08/06 04:47:32
Done.
| |
96 ComputedHashes::Writer::~Writer() { | 113 ComputedHashes::Writer::~Writer() { |
97 } | 114 } |
98 | 115 |
99 void ComputedHashes::Writer::AddHashes(const base::FilePath& relative_path, | 116 void ComputedHashes::Writer::AddHashes(const base::FilePath& relative_path, |
100 int block_size, | 117 int block_size, |
101 const std::vector<std::string>& hashes) { | 118 const std::vector<std::string>& hashes) { |
102 base::DictionaryValue* dict = new base::DictionaryValue(); | 119 base::DictionaryValue* dict = new base::DictionaryValue(); |
103 base::ListValue* block_hashes = new base::ListValue(); | 120 base::ListValue* block_hashes = new base::ListValue(); |
104 file_list_.Append(dict); | 121 file_list_->Append(dict); |
105 dict->SetString(kPathKey, | 122 dict->SetString(kPathKey, |
106 relative_path.NormalizePathSeparatorsTo('/').AsUTF8Unsafe()); | 123 relative_path.NormalizePathSeparatorsTo('/').AsUTF8Unsafe()); |
107 dict->SetInteger(kBlockSizeKey, block_size); | 124 dict->SetInteger(kBlockSizeKey, block_size); |
108 dict->Set(kBlockHashesKey, block_hashes); | 125 dict->Set(kBlockHashesKey, block_hashes); |
109 | 126 |
110 for (std::vector<std::string>::const_iterator i = hashes.begin(); | 127 for (std::vector<std::string>::const_iterator i = hashes.begin(); |
111 i != hashes.end(); | 128 i != hashes.end(); |
112 ++i) { | 129 ++i) { |
113 std::string encoded; | 130 std::string encoded; |
114 base::Base64Encode(*i, &encoded); | 131 base::Base64Encode(*i, &encoded); |
115 block_hashes->AppendString(encoded); | 132 block_hashes->AppendString(encoded); |
116 } | 133 } |
117 } | 134 } |
118 | 135 |
119 bool ComputedHashes::Writer::WriteToFile(const base::FilePath& path) { | 136 bool ComputedHashes::Writer::WriteToFile(const base::FilePath& path) { |
120 std::string json; | 137 std::string json; |
121 if (!base::JSONWriter::Write(&file_list_, &json)) | 138 base::DictionaryValue top_dictionary; |
139 top_dictionary.SetInteger(kVersionKey, kVersion); | |
140 top_dictionary.Set(kFileHashesKey, file_list_.release()); | |
141 | |
142 if (!base::JSONWriter::Write(&top_dictionary, &json)) | |
122 return false; | 143 return false; |
123 int written = base::WriteFile(path, json.data(), json.size()); | 144 int written = base::WriteFile(path, json.data(), json.size()); |
124 if (static_cast<unsigned>(written) != json.size()) { | 145 if (static_cast<unsigned>(written) != json.size()) { |
125 LOG(ERROR) << "Error writing " << path.MaybeAsASCII() | 146 LOG(ERROR) << "Error writing " << path.AsUTF8Unsafe() |
126 << " ; write result:" << written << " expected:" << json.size(); | 147 << " ; write result:" << written << " expected:" << json.size(); |
127 return false; | 148 return false; |
128 } | 149 } |
129 return true; | 150 return true; |
130 } | 151 } |
131 | 152 |
153 void ComputedHashes::ComputeHashesForContent(const std::string& contents, | |
154 size_t block_size, | |
155 std::vector<std::string>* hashes) { | |
156 size_t offset = 0; | |
157 do { | |
158 const char* block_start = contents.data() + offset; | |
159 size_t bytes_to_read = std::min(contents.size() - offset, block_size); | |
160 DCHECK(bytes_to_read >= 0); | |
Ken Rockot(use gerrit already)
2014/08/05 23:10:32
Sort of an invalid DCHECK because a size_t is always >= 0.
asargent_no_longer_on_chrome
2014/08/06 04:47:32
Ah, good catch. Done.
| |
161 scoped_ptr<crypto::SecureHash> hash( | |
162 crypto::SecureHash::Create(crypto::SecureHash::SHA256)); | |
163 hash->Update(block_start, bytes_to_read); | |
164 | |
165 hashes->push_back(std::string()); | |
166 std::string* buffer = &(hashes->back()); | |
Ken Rockot(use gerrit already)
2014/08/05 23:10:32
Can you not use a reference here instead? Or is there a reason not to?
asargent_no_longer_on_chrome
2014/08/06 04:47:32
I don't think there's a rule about it one way or another.
| |
167 buffer->resize(crypto::kSHA256Length); | |
168 hash->Finish(string_as_array(buffer), buffer->size()); | |
169 | |
170 if (bytes_to_read == 0) | |
Ken Rockot(use gerrit already)
2014/08/05 23:10:32
nit: Might want to comment to make this escape more obvious.
asargent_no_longer_on_chrome
2014/08/06 04:47:32
Done.
| |
171 break; | |
172 else | |
Ken Rockot(use gerrit already)
2014/08/05 23:10:32
nit: no need for the else
asargent_no_longer_on_chrome
2014/08/06 04:47:32
Done.
| |
173 offset += bytes_to_read; | |
174 } while (offset < contents.size()); | |
175 } | |
176 | |
132 } // namespace extensions | 177 } // namespace extensions |
OLD | NEW |