Chromium Code Reviews

Side by Side Diff: delta_diff_generator.cc

Issue 3597014: AU: reuse scratch blocks in delta generation, tolerate insufficient scratch. (Closed) Base URL: ssh://git@chromiumos-git/update_engine.git
Patch Set: address comments, fix code duplication Created 10 years, 2 months ago
1 // Copyright (c) 2010 The Chromium OS Authors. All rights reserved. 1 // Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "update_engine/delta_diff_generator.h" 5 #include "update_engine/delta_diff_generator.h"
6 6
7 #include <errno.h> 7 #include <errno.h>
8 #include <fcntl.h> 8 #include <fcntl.h>
9 #include <sys/stat.h> 9 #include <sys/stat.h>
10 #include <sys/types.h> 10 #include <sys/types.h>
11 11
12 #include <algorithm> 12 #include <algorithm>
13 #include <map>
13 #include <set> 14 #include <set>
14 #include <string> 15 #include <string>
15 #include <utility> 16 #include <utility>
16 #include <vector> 17 #include <vector>
17 18
18 #include <base/logging.h> 19 #include <base/logging.h>
19 #include <base/string_util.h> 20 #include <base/string_util.h>
20 #include <bzlib.h> 21 #include <bzlib.h>
21 22
22 #include "update_engine/bzip.h" 23 #include "update_engine/bzip.h"
23 #include "update_engine/cycle_breaker.h" 24 #include "update_engine/cycle_breaker.h"
24 #include "update_engine/extent_mapper.h" 25 #include "update_engine/extent_mapper.h"
26 #include "update_engine/extent_ranges.h"
25 #include "update_engine/file_writer.h" 27 #include "update_engine/file_writer.h"
26 #include "update_engine/filesystem_iterator.h" 28 #include "update_engine/filesystem_iterator.h"
27 #include "update_engine/graph_types.h" 29 #include "update_engine/graph_types.h"
28 #include "update_engine/graph_utils.h" 30 #include "update_engine/graph_utils.h"
29 #include "update_engine/payload_signer.h" 31 #include "update_engine/payload_signer.h"
30 #include "update_engine/subprocess.h" 32 #include "update_engine/subprocess.h"
31 #include "update_engine/topological_sort.h" 33 #include "update_engine/topological_sort.h"
32 #include "update_engine/update_metadata.pb.h" 34 #include "update_engine/update_metadata.pb.h"
33 #include "update_engine/utils.h" 35 #include "update_engine/utils.h"
34 36
35 using std::make_pair; 37 using std::make_pair;
38 using std::map;
36 using std::max; 39 using std::max;
37 using std::min; 40 using std::min;
38 using std::set; 41 using std::set;
39 using std::string; 42 using std::string;
40 using std::vector; 43 using std::vector;
41 44
42 namespace chromeos_update_engine { 45 namespace chromeos_update_engine {
43 46
44 typedef DeltaDiffGenerator::Block Block; 47 typedef DeltaDiffGenerator::Block Block;
45 48
(...skipping 82 matching lines...)
128 << ") and also " << vertex << "(" 131 << ") and also " << vertex << "("
129 << graph[vertex].file_name << ")"; 132 << graph[vertex].file_name << ")";
130 } 133 }
131 (*blocks)[block].*access_type = vertex; 134 (*blocks)[block].*access_type = vertex;
132 } 135 }
133 } 136 }
134 } 137 }
135 return true; 138 return true;
136 } 139 }
137 140
138 // For a given regular file which must exist at new_root + path, and may 141 // For a given regular file which must exist at new_root + path, and
139 // exist at old_root + path, creates a new InstallOperation and adds it to 142 // may exist at old_root + path, creates a new InstallOperation and
140 // the graph. Also, populates the 'blocks' array as necessary. 143 // adds it to the graph. Also, populates the |blocks| array as
141 // Also, writes the data necessary to send the file down to the client 144 // necessary, if |blocks| is non-NULL. Also, writes the data
142 // into data_fd, which has length *data_file_size. *data_file_size is 145 // necessary to send the file down to the client into data_fd, which
143 // updated appropriately. 146 // has length *data_file_size. *data_file_size is updated
144 // Returns true on success. 147 // appropriately. If |existing_vertex| is not kInvalidIndex, use that
148 // rather than allocating a new vertex. Returns true on success.
145 bool DeltaReadFile(Graph* graph, 149 bool DeltaReadFile(Graph* graph,
150 Vertex::Index existing_vertex,
146 vector<Block>* blocks, 151 vector<Block>* blocks,
147 const string& old_root, 152 const string& old_root,
148 const string& new_root, 153 const string& new_root,
149 const string& path, // within new_root 154 const string& path, // within new_root
150 int data_fd, 155 int data_fd,
151 off_t* data_file_size) { 156 off_t* data_file_size) {
152 vector<char> data; 157 vector<char> data;
153 DeltaArchiveManifest_InstallOperation operation; 158 DeltaArchiveManifest_InstallOperation operation;
154 159
155 TEST_AND_RETURN_FALSE(DeltaDiffGenerator::ReadFileToDiff(old_root + path, 160 TEST_AND_RETURN_FALSE(DeltaDiffGenerator::ReadFileToDiff(old_root + path,
156 new_root + path, 161 new_root + path,
157 &data, 162 &data,
158 &operation)); 163 &operation));
159 164
160 // Write the data 165 // Write the data
161 if (operation.type() != DeltaArchiveManifest_InstallOperation_Type_MOVE) { 166 if (operation.type() != DeltaArchiveManifest_InstallOperation_Type_MOVE) {
162 operation.set_data_offset(*data_file_size); 167 operation.set_data_offset(*data_file_size);
163 operation.set_data_length(data.size()); 168 operation.set_data_length(data.size());
164 } 169 }
165 170
166 TEST_AND_RETURN_FALSE(utils::WriteAll(data_fd, &data[0], data.size())); 171 TEST_AND_RETURN_FALSE(utils::WriteAll(data_fd, &data[0], data.size()));
167 *data_file_size += data.size(); 172 *data_file_size += data.size();
168 173
169 // Now, insert into graph and blocks vector 174 // Now, insert into graph and blocks vector
170 graph->resize(graph->size() + 1); 175 Vertex::Index vertex = existing_vertex;
171 graph->back().op = operation; 176 if (vertex == Vertex::kInvalidIndex) {
172 CHECK(graph->back().op.has_type()); 177 graph->resize(graph->size() + 1);
173 graph->back().file_name = path; 178 vertex = graph->size() - 1;
179 }
180 (*graph)[vertex].op = operation;
181 CHECK((*graph)[vertex].op.has_type());
182 (*graph)[vertex].file_name = path;
174 183
175 TEST_AND_RETURN_FALSE(AddInstallOpToBlocksVector(graph->back().op, 184 if (blocks)
176 blocks, 185 TEST_AND_RETURN_FALSE(AddInstallOpToBlocksVector((*graph)[vertex].op,
177 *graph, 186 blocks,
178 graph->size() - 1)); 187 *graph,
188 vertex));
179 return true; 189 return true;
180 } 190 }
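
The new |existing_vertex| parameter is what later lets ConvertCutToFullOp() overwrite a node in place rather than growing the graph. A minimal sketch of the two call patterns, assuming a populated Graph, an open data_fd, and a hypothetical path "/bin/ls":

  // Append a brand-new vertex for a file discovered under new_root:
  off_t data_file_size = 0;
  DeltaReadFile(&graph, Vertex::kInvalidIndex, &blocks,
                old_root, new_root, "/bin/ls", data_fd, &data_file_size);
  // Reuse existing vertex index |v| in place; |blocks| may be NULL here,
  // since the blocks array only matters while the graph is first built:
  DeltaReadFile(&graph, v, NULL,
                old_root, new_root, graph[v].file_name,
                data_fd, &data_file_size);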
181 191
182 // For each regular file within new_root, creates a node in the graph, 192 // For each regular file within new_root, creates a node in the graph,
183 // determines the best way to compress it (REPLACE, REPLACE_BZ, COPY, BSDIFF), 193 // determines the best way to compress it (REPLACE, REPLACE_BZ, COPY, BSDIFF),
184 // and writes any necessary data to the end of data_fd. 194 // and writes any necessary data to the end of data_fd.
185 bool DeltaReadFiles(Graph* graph, 195 bool DeltaReadFiles(Graph* graph,
186 vector<Block>* blocks, 196 vector<Block>* blocks,
187 const string& old_root, 197 const string& old_root,
188 const string& new_root, 198 const string& new_root,
189 int data_fd, 199 int data_fd,
190 off_t* data_file_size) { 200 off_t* data_file_size) {
191 set<ino_t> visited_inodes; 201 set<ino_t> visited_inodes;
192 for (FilesystemIterator fs_iter(new_root, 202 for (FilesystemIterator fs_iter(new_root,
193 utils::SetWithValue<string>("/lost+found")); 203 utils::SetWithValue<string>("/lost+found"));
194 !fs_iter.IsEnd(); fs_iter.Increment()) { 204 !fs_iter.IsEnd(); fs_iter.Increment()) {
195 if (!S_ISREG(fs_iter.GetStat().st_mode)) 205 if (!S_ISREG(fs_iter.GetStat().st_mode))
196 continue; 206 continue;
197 207
198 // Make sure we visit each inode only once. 208 // Make sure we visit each inode only once.
199 if (utils::SetContainsKey(visited_inodes, fs_iter.GetStat().st_ino)) 209 if (utils::SetContainsKey(visited_inodes, fs_iter.GetStat().st_ino))
200 continue; 210 continue;
201 visited_inodes.insert(fs_iter.GetStat().st_ino); 211 visited_inodes.insert(fs_iter.GetStat().st_ino);
202 if (fs_iter.GetStat().st_size == 0) 212 if (fs_iter.GetStat().st_size == 0)
203 continue; 213 continue;
204 214
205 LOG(INFO) << "Encoding file " << fs_iter.GetPartialPath(); 215 LOG(INFO) << "Encoding file " << fs_iter.GetPartialPath();
206 216
207 TEST_AND_RETURN_FALSE(DeltaReadFile(graph, 217 TEST_AND_RETURN_FALSE(DeltaReadFile(graph,
218 Vertex::kInvalidIndex,
208 blocks, 219 blocks,
209 old_root, 220 old_root,
210 new_root, 221 new_root,
211 fs_iter.GetPartialPath(), 222 fs_iter.GetPartialPath(),
212 data_fd, 223 data_fd,
213 data_file_size)); 224 data_file_size));
214 } 225 }
215 return true; 226 return true;
216 } 227 }
217 228
218 // Attempts to find |block_count| blocks to use as scratch space. Returns true 229 // This class allocates non-existent temp blocks, starting from
219 // on success. Right now we return exactly as many blocks as are required. 230 // kTempBlockStart. Other code is responsible for converting these
220 // 231 // temp blocks into real blocks, as the client can't read or write to
221 // TODO(adlr): Consider returning all scratch blocks, even if there are extras, 232 // these blocks.
222 // to make it easier for a scratch allocator to find contiguous regions for 233 class DummyExtentAllocator {
223 // specific scratch writes.
224 bool FindScratchSpace(const vector<Block>& blocks,
225 vector<Block>::size_type block_count,
226 vector<Extent>* out) {
227 // Scan |blocks| for blocks that are neither read, nor written. If we don't
228 // find enough of those, look past the end of |blocks| till the end of the
229 // partition. If we don't find |block_count| scratch blocks, return false.
230 //
231 // TODO(adlr): Return blocks that are written by operations that don't have
232 // incoming edges (and thus, can be deferred until all old blocks are read by
233 // other operations).
234 vector<Extent> ret;
235 vector<Block>::size_type blocks_found = 0;
236 const size_t kPartitionBlocks = kRootFSPartitionSize / kBlockSize;
237 for (vector<Block>::size_type i = 0;
238 i < kPartitionBlocks && blocks_found < block_count; i++) {
239 if (i >= blocks.size() ||
240 (blocks[i].reader == Vertex::kInvalidIndex &&
241 blocks[i].writer == Vertex::kInvalidIndex)) {
242 graph_utils::AppendBlockToExtents(&ret, i);
243 blocks_found++;
244 }
245 }
246 LOG(INFO) << "found " << blocks_found << " scratch blocks";
247 if (blocks_found == block_count) {
248 out->swap(ret);
249 return true;
250 }
251 return false;
252 }
253
254 // This class takes a collection of Extents and allows the client to
255 // allocate space from these extents. The client must not request more
256 // space than exists in the source extents. Space is allocated from the
257 // beginning of the source extents on; no consideration is paid to
258 // fragmentation.
259 class LinearExtentAllocator {
260 public: 234 public:
261 explicit LinearExtentAllocator(const vector<Extent>& extents) 235 explicit DummyExtentAllocator()
262 : extents_(extents), 236 : next_block_(kTempBlockStart) {}
263 extent_index_(0),
264 extent_blocks_allocated_(0) {}
265 vector<Extent> Allocate(const uint64_t block_count) { 237 vector<Extent> Allocate(const uint64_t block_count) {
266 vector<Extent> ret; 238 vector<Extent> ret(1);
267 for (uint64_t blocks = 0; blocks < block_count; blocks++) { 239 ret[0].set_start_block(next_block_);
268 CHECK_LT(extent_index_, extents_.size()); 240 ret[0].set_num_blocks(block_count);
269 CHECK_LT(extent_blocks_allocated_, extents_[extent_index_].num_blocks()); 241 next_block_ += block_count;
270 graph_utils::AppendBlockToExtents(
271 &ret,
272 extents_[extent_index_].start_block() + extent_blocks_allocated_);
273 extent_blocks_allocated_++;
274 if (extent_blocks_allocated_ >= extents_[extent_index_].num_blocks()) {
275 extent_blocks_allocated_ = 0;
276 extent_index_++;
277 }
278 }
279 return ret; 242 return ret;
280 } 243 }
281 private: 244 private:
282 const vector<Extent> extents_; 245 uint64_t next_block_;
283 vector<Extent>::size_type extent_index_; // current Extent
284 // number of blocks allocated from the current extent
285 uint64_t extent_blocks_allocated_;
286 }; 246 };
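
Since Allocate() only advances a counter, each request returns a single contiguous extent in the fictitious range. A short sketch of the expected behavior, assuming kTempBlockStart as declared elsewhere in this change:

  DummyExtentAllocator allocator;
  vector<Extent> first = allocator.Allocate(10);
  // first: one Extent, start_block == kTempBlockStart, num_blocks == 10
  vector<Extent> second = allocator.Allocate(5);
  // second: one Extent starting at kTempBlockStart + 10. No real disk
  // blocks are consumed; AssignTempBlocks() later maps these onto real
  // blocks or converts the consuming operation to a full op.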
287 247
288 // Reads blocks from image_path that are not yet marked as being written 248 // Reads blocks from image_path that are not yet marked as being written
289 // in the blocks array. These blocks that remain are non-file-data blocks. 249 // in the blocks array. These blocks that remain are non-file-data blocks.
290 // In the future we might consider intelligent diffing between this data 250 // In the future we might consider intelligent diffing between this data
291 // and data in the previous image, but for now we just bzip2 compress it 251 // and data in the previous image, but for now we just bzip2 compress it
292 // and include it in the update. 252 // and include it in the update.
293 // Creates a new node in the graph to write these blocks and writes the 253 // Creates a new node in the graph to write these blocks and writes the
294 // appropriate blob to blobs_fd. Reads and updates blobs_length. 254 // appropriate blob to blobs_fd. Reads and updates blobs_length.
295 bool ReadUnwrittenBlocks(const vector<Block>& blocks, 255 bool ReadUnwrittenBlocks(const vector<Block>& blocks,
296 int blobs_fd, 256 int blobs_fd,
297 off_t* blobs_length, 257 off_t* blobs_length,
298 const string& image_path, 258 const string& image_path,
299 DeltaArchiveManifest_InstallOperation* out_op) { 259 Vertex* vertex) {
260 DeltaArchiveManifest_InstallOperation* out_op = &vertex->op;
300 int image_fd = open(image_path.c_str(), O_RDONLY, 000); 261 int image_fd = open(image_path.c_str(), O_RDONLY, 000);
301 TEST_AND_RETURN_FALSE_ERRNO(image_fd >= 0); 262 TEST_AND_RETURN_FALSE_ERRNO(image_fd >= 0);
302 ScopedFdCloser image_fd_closer(&image_fd); 263 ScopedFdCloser image_fd_closer(&image_fd);
303 264
304 string temp_file_path; 265 string temp_file_path;
305 TEST_AND_RETURN_FALSE(utils::MakeTempFile("/tmp/CrAU_temp_data.XXXXXX", 266 TEST_AND_RETURN_FALSE(utils::MakeTempFile("/tmp/CrAU_temp_data.XXXXXX",
306 &temp_file_path, 267 &temp_file_path,
307 NULL)); 268 NULL));
308 269
309 FILE* file = fopen(temp_file_path.c_str(), "w"); 270 FILE* file = fopen(temp_file_path.c_str(), "w");
310 TEST_AND_RETURN_FALSE(file); 271 TEST_AND_RETURN_FALSE(file);
311 int err = BZ_OK; 272 int err = BZ_OK;
312 273
313 BZFILE* bz_file = BZ2_bzWriteOpen(&err, 274 BZFILE* bz_file = BZ2_bzWriteOpen(&err,
314 file, 275 file,
315 9, // max compression 276 9, // max compression
316 0, // verbosity 277 0, // verbosity
317 0); // default work factor 278 0); // default work factor
318 TEST_AND_RETURN_FALSE(err == BZ_OK); 279 TEST_AND_RETURN_FALSE(err == BZ_OK);
319 280
320 vector<Extent> extents; 281 vector<Extent> extents;
321 vector<Block>::size_type block_count = 0; 282 vector<Block>::size_type block_count = 0;
322 283
323 LOG(INFO) << "Appending left over blocks to extents"; 284 LOG(INFO) << "Appending left over blocks to extents";
324 for (vector<Block>::size_type i = 0; i < blocks.size(); i++) { 285 for (vector<Block>::size_type i = 0; i < blocks.size(); i++) {
325 if (blocks[i].writer != Vertex::kInvalidIndex) 286 if (blocks[i].writer != Vertex::kInvalidIndex)
326 continue; 287 continue;
288 if (blocks[i].reader != Vertex::kInvalidIndex) {
289 graph_utils::AddReadBeforeDep(vertex, blocks[i].reader, i);
290 }
327 graph_utils::AppendBlockToExtents(&extents, i); 291 graph_utils::AppendBlockToExtents(&extents, i);
328 block_count++; 292 block_count++;
329 } 293 }
330 294
331 // Code will handle 'buf' at any size that's a multiple of kBlockSize, 295 // Code will handle 'buf' at any size that's a multiple of kBlockSize,
332 // so we arbitrarily set it to 1024 * kBlockSize. 296 // so we arbitrarily set it to 1024 * kBlockSize.
333 vector<char> buf(1024 * kBlockSize); 297 vector<char> buf(1024 * kBlockSize);
334 298
335 LOG(INFO) << "Reading left over blocks"; 299 LOG(INFO) << "Reading left over blocks";
336 vector<Block>::size_type blocks_copied_count = 0; 300 vector<Block>::size_type blocks_copied_count = 0;
(...skipping 33 matching lines...)
370 334
371 vector<char> compressed_data; 335 vector<char> compressed_data;
372 LOG(INFO) << "Reading compressed data off disk"; 336 LOG(INFO) << "Reading compressed data off disk";
373 TEST_AND_RETURN_FALSE(utils::ReadFile(temp_file_path, &compressed_data)); 337 TEST_AND_RETURN_FALSE(utils::ReadFile(temp_file_path, &compressed_data));
374 TEST_AND_RETURN_FALSE(unlink(temp_file_path.c_str()) == 0); 338 TEST_AND_RETURN_FALSE(unlink(temp_file_path.c_str()) == 0);
375 339
376 // Add node to graph to write these blocks 340 // Add node to graph to write these blocks
377 out_op->set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ); 341 out_op->set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ);
378 out_op->set_data_offset(*blobs_length); 342 out_op->set_data_offset(*blobs_length);
379 out_op->set_data_length(compressed_data.size()); 343 out_op->set_data_length(compressed_data.size());
344 LOG(INFO) << "Rootfs non-data blocks compressed take up "
345 << compressed_data.size();
380 *blobs_length += compressed_data.size(); 346 *blobs_length += compressed_data.size();
381 out_op->set_dst_length(kBlockSize * block_count); 347 out_op->set_dst_length(kBlockSize * block_count);
382 DeltaDiffGenerator::StoreExtents(extents, out_op->mutable_dst_extents()); 348 DeltaDiffGenerator::StoreExtents(extents, out_op->mutable_dst_extents());
383 349
384 TEST_AND_RETURN_FALSE(utils::WriteAll(blobs_fd, 350 TEST_AND_RETURN_FALSE(utils::WriteAll(blobs_fd,
385 &compressed_data[0], 351 &compressed_data[0],
386 compressed_data.size())); 352 compressed_data.size()));
387 LOG(INFO) << "done with extra blocks"; 353 LOG(INFO) << "done with extra blocks";
388 return true; 354 return true;
389 } 355 }
(...skipping 221 matching lines...)
611 TEST_AND_RETURN_FALSE( 577 TEST_AND_RETURN_FALSE(
612 GatherExtents(new_filename, operation.mutable_dst_extents())); 578 GatherExtents(new_filename, operation.mutable_dst_extents()));
613 operation.set_dst_length(new_data.size()); 579 operation.set_dst_length(new_data.size());
614 580
615 out_data->swap(data); 581 out_data->swap(data);
616 *out_op = operation; 582 *out_op = operation;
617 583
618 return true; 584 return true;
619 } 585 }
620 586
587 namespace {
588
589 // Takes a collection (vector or RepeatedPtrField) of Extent and
590 // returns a vector of the blocks referenced, in order.
591 template<typename T>
592 vector<uint64_t> ExpandExtents(const T& extents) {
593 vector<uint64_t> ret;
594 for (size_t i = 0, e = static_cast<size_t>(extents.size()); i != e; ++i) {
595 const Extent extent = graph_utils::GetElement(extents, i);
596 if (extent.start_block() == kSparseHole) {
597 ret.resize(ret.size() + extent.num_blocks(), kSparseHole);
598 } else {
599 for (uint64_t block = extent.start_block();
600 block < (extent.start_block() + extent.num_blocks()); block++) {
601 ret.push_back(block);
602 }
603 }
604 }
605 return ret;
606 }
607
608 // Takes a vector of blocks and returns an equivalent vector of Extent
609 // objects.
610 vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
611 vector<Extent> new_extents;
612 for (vector<uint64_t>::const_iterator it = blocks.begin(), e = blocks.end();
613 it != e; ++it) {
614 graph_utils::AppendBlockToExtents(&new_extents, *it);
615 }
616 return new_extents;
617 }
618
619 } // namespace {}
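
ExpandExtents() and CompressExtents() are inverses for ordered block runs, which is what SubstituteBlocks() below relies on. A worked example with illustrative block numbers:

  vector<Extent> extents;
  graph_utils::AppendBlockToExtents(&extents, 10);
  graph_utils::AppendBlockToExtents(&extents, 11);
  graph_utils::AppendBlockToExtents(&extents, 12);
  graph_utils::AppendBlockToExtents(&extents, 20);
  // extents: {start_block: 10, num_blocks: 3}, {start_block: 20, num_blocks: 1}
  vector<uint64_t> blocks = ExpandExtents(extents);     // 10, 11, 12, 20
  vector<Extent> round_trip = CompressExtents(blocks);  // same two extents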
620
621 void DeltaDiffGenerator::SubstituteBlocks( 621 void DeltaDiffGenerator::SubstituteBlocks(
622 DeltaArchiveManifest_InstallOperation* op, 622 Vertex* vertex,
623 const vector<Extent>& remove_extents, 623 const vector<Extent>& remove_extents,
624 const vector<Extent>& replace_extents) { 624 const vector<Extent>& replace_extents) {
625 // First, expand out the blocks that op reads from 625 // First, expand out the blocks that op reads from
626 vector<uint64_t> read_blocks; 626 vector<uint64_t> read_blocks = ExpandExtents(vertex->op.src_extents());
627 for (int i = 0; i < op->src_extents_size(); i++) {
628 const Extent& extent = op->src_extents(i);
629 if (extent.start_block() == kSparseHole) {
630 read_blocks.resize(read_blocks.size() + extent.num_blocks(), kSparseHole);
631 } else {
632 for (uint64_t block = extent.start_block();
633 block < (extent.start_block() + extent.num_blocks()); block++) {
634 read_blocks.push_back(block);
635 }
636 }
637 }
638 { 627 {
639 // Expand remove_extents and replace_extents 628 // Expand remove_extents and replace_extents
640 vector<uint64_t> remove_extents_expanded; 629 vector<uint64_t> remove_extents_expanded =
641 for (vector<Extent>::const_iterator it = remove_extents.begin(); 630 ExpandExtents(remove_extents);
642 it != remove_extents.end(); ++it) { 631 vector<uint64_t> replace_extents_expanded =
643 const Extent& extent = *it; 632 ExpandExtents(replace_extents);
644 for (uint64_t block = extent.start_block();
645 block < (extent.start_block() + extent.num_blocks()); block++) {
646 remove_extents_expanded.push_back(block);
647 }
648 }
649 vector<uint64_t> replace_extents_expanded;
650 for (vector<Extent>::const_iterator it = replace_extents.begin();
651 it != replace_extents.end(); ++it) {
652 const Extent& extent = *it;
653 for (uint64_t block = extent.start_block();
654 block < (extent.start_block() + extent.num_blocks()); block++) {
655 replace_extents_expanded.push_back(block);
656 }
657 }
658 CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size()); 633 CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
634 map<uint64_t, uint64_t> conversion;
659 for (vector<uint64_t>::size_type i = 0; 635 for (vector<uint64_t>::size_type i = 0;
660 i < replace_extents_expanded.size(); i++) { 636 i < replace_extents_expanded.size(); i++) {
661 vector<uint64_t>::size_type index = 0; 637 conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
662 CHECK(utils::VectorIndexOf(read_blocks, 638 }
663 remove_extents_expanded[i], 639 utils::ApplyMap(&read_blocks, conversion);
664 &index)); 640 for (Vertex::EdgeMap::iterator it = vertex->out_edges.begin(),
665 CHECK(read_blocks[index] == remove_extents_expanded[i]); 641 e = vertex->out_edges.end(); it != e; ++it) {
666 read_blocks[index] = replace_extents_expanded[i]; 642 vector<uint64_t> write_before_deps_expanded =
643 ExpandExtents(it->second.write_extents);
644 utils::ApplyMap(&write_before_deps_expanded, conversion);
645 it->second.write_extents = CompressExtents(write_before_deps_expanded);
667 } 646 }
668 } 647 }
669 // Convert read_blocks back to extents 648 // Convert read_blocks back to extents
670 op->clear_src_extents(); 649 vertex->op.clear_src_extents();
671 vector<Extent> new_extents; 650 vector<Extent> new_extents = CompressExtents(read_blocks);
672 for (vector<uint64_t>::const_iterator it = read_blocks.begin(); 651 DeltaDiffGenerator::StoreExtents(new_extents,
673 it != read_blocks.end(); ++it) { 652 vertex->op.mutable_src_extents());
674 graph_utils::AppendBlockToExtents(&new_extents, *it);
675 }
676 DeltaDiffGenerator::StoreExtents(new_extents, op->mutable_src_extents());
677 } 653 }
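
The rewrite now funnels every substitution through one old-block to new-block map, applied both to src_extents and to the write-before extents on outgoing edges. The core step in miniature, with hypothetical block numbers and utils::ApplyMap() as used above:

  map<uint64_t, uint64_t> conversion;
  conversion[100] = 500;  // remove extent {100, +2} maps onto
  conversion[101] = 501;  // replace extent {500, +2}
  vector<uint64_t> read_blocks;  // expanded src extents, e.g.:
  read_blocks.push_back(99);
  read_blocks.push_back(100);
  read_blocks.push_back(101);
  read_blocks.push_back(102);
  utils::ApplyMap(&read_blocks, conversion);
  // read_blocks: 99, 500, 501, 102. Blocks with no map entry pass
  // through untouched, since an op typically reads more than just the
  // substituted extents.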
678 654
679 bool DeltaDiffGenerator::CutEdges(Graph* graph, 655 bool DeltaDiffGenerator::CutEdges(Graph* graph,
680 const vector<Block>& blocks, 656 const set<Edge>& edges,
681 const set<Edge>& edges) { 657 vector<CutEdgeVertexes>* out_cuts) {
682 // First, find enough scratch space for the edges we'll be cutting. 658 DummyExtentAllocator scratch_allocator;
683 vector<Block>::size_type blocks_required = 0; 659 vector<CutEdgeVertexes> cuts;
684 for (set<Edge>::const_iterator it = edges.begin(); it != edges.end(); ++it) { 660 cuts.reserve(edges.size());
685 blocks_required += graph_utils::EdgeWeight(*graph, *it);
686 }
687 vector<Extent> scratch_extents;
688 LOG(INFO) << "requesting " << blocks_required << " blocks of scratch";
689 TEST_AND_RETURN_FALSE(
690 FindScratchSpace(blocks, blocks_required, &scratch_extents));
691 LinearExtentAllocator scratch_allocator(scratch_extents);
692 661
693 uint64_t scratch_blocks_used = 0; 662 uint64_t scratch_blocks_used = 0;
694 for (set<Edge>::const_iterator it = edges.begin(); 663 for (set<Edge>::const_iterator it = edges.begin();
695 it != edges.end(); ++it) { 664 it != edges.end(); ++it) {
665 cuts.resize(cuts.size() + 1);
696 vector<Extent> old_extents = 666 vector<Extent> old_extents =
697 (*graph)[it->first].out_edges[it->second].extents; 667 (*graph)[it->first].out_edges[it->second].extents;
698 // Choose some scratch space 668 // Choose some scratch space
699 scratch_blocks_used += graph_utils::EdgeWeight(*graph, *it); 669 scratch_blocks_used += graph_utils::EdgeWeight(*graph, *it);
700 LOG(INFO) << "using " << graph_utils::EdgeWeight(*graph, *it) 670 LOG(INFO) << "using " << graph_utils::EdgeWeight(*graph, *it)
701 << " scratch blocks (" 671 << " scratch blocks ("
702 << scratch_blocks_used << ")"; 672 << scratch_blocks_used << ")";
703 vector<Extent> scratch = 673 cuts.back().tmp_extents =
704 scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, *it)); 674 scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, *it));
705 // create vertex to copy original->scratch 675 // create vertex to copy original->scratch
676 cuts.back().new_vertex = graph->size();
706 graph->resize(graph->size() + 1); 677 graph->resize(graph->size() + 1);
678 cuts.back().old_src = it->first;
679 cuts.back().old_dst = it->second;
680
681 EdgeProperties& cut_edge_properties =
682 (*graph)[it->first].out_edges.find(it->second)->second;
683
684 // This should never happen, as we should only be cutting edges between
685 // real file nodes, and write-before relationships are created from
686 // a real file node to a temp copy node:
687 CHECK(cut_edge_properties.write_extents.empty())
688 << "Can't cut edge that has write-before relationship.";
707 689
708 // make node depend on the copy operation 690 // make node depend on the copy operation
709 (*graph)[it->first].out_edges.insert(make_pair(graph->size() - 1, 691 (*graph)[it->first].out_edges.insert(make_pair(graph->size() - 1,
710 EdgeProperties())); 692 cut_edge_properties));
711 693
712 // Set src/dst extents and other proto variables for copy operation 694 // Set src/dst extents and other proto variables for copy operation
713 graph->back().op.set_type(DeltaArchiveManifest_InstallOperation_Type_MOVE); 695 graph->back().op.set_type(DeltaArchiveManifest_InstallOperation_Type_MOVE);
714 DeltaDiffGenerator::StoreExtents( 696 DeltaDiffGenerator::StoreExtents(
715 (*graph)[it->first].out_edges[it->second].extents, 697 cut_edge_properties.extents,
716 graph->back().op.mutable_src_extents()); 698 graph->back().op.mutable_src_extents());
717 DeltaDiffGenerator::StoreExtents(scratch, 699 DeltaDiffGenerator::StoreExtents(cuts.back().tmp_extents,
718 graph->back().op.mutable_dst_extents()); 700 graph->back().op.mutable_dst_extents());
719 graph->back().op.set_src_length( 701 graph->back().op.set_src_length(
720 graph_utils::EdgeWeight(*graph, *it) * kBlockSize); 702 graph_utils::EdgeWeight(*graph, *it) * kBlockSize);
721 graph->back().op.set_dst_length(graph->back().op.src_length()); 703 graph->back().op.set_dst_length(graph->back().op.src_length());
722 704
723 // make the dest node read from the scratch space 705 // make the dest node read from the scratch space
724 DeltaDiffGenerator::SubstituteBlocks( 706 DeltaDiffGenerator::SubstituteBlocks(
725 &((*graph)[it->second].op), 707 &((*graph)[it->second]),
726 (*graph)[it->first].out_edges[it->second].extents, 708 (*graph)[it->first].out_edges[it->second].extents,
727 scratch); 709 cuts.back().tmp_extents);
728 710
729 // delete the old edge 711 // delete the old edge
730 CHECK_EQ(1, (*graph)[it->first].out_edges.erase(it->second)); 712 CHECK_EQ(1, (*graph)[it->first].out_edges.erase(it->second));
731 713
732 // Add an edge from dst to copy operation 714 // Add an edge from dst to copy operation
733 (*graph)[it->second].out_edges.insert(make_pair(graph->size() - 1, 715 EdgeProperties write_before_edge_properties;
734 EdgeProperties())); 716 write_before_edge_properties.write_extents = cuts.back().tmp_extents;
717 (*graph)[it->second].out_edges.insert(
718 make_pair(graph->size() - 1, write_before_edge_properties));
735 } 719 }
720 out_cuts->swap(cuts);
736 return true; 721 return true;
737 } 722 }
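
Each cut is also recorded in a CutEdgeVertexes entry so AssignTempBlocks() can revisit it, and the new write-before edge keeps the destination node from running before the copy has filled the scratch blocks. The shape of one cut, sketched (an edge X -> Y meaning Y installs before X, per CreateEdges() below):

  //  before:  A --> B     B reads extents X that A will overwrite
  //  after:   A --> C     C: new MOVE node copying X into temp T,
  //                       so C runs before A clobbers X
  //           B --> C     write-before edge (write_extents = T):
  //                       C fills T before B, whose op now reads T
  // where T is cuts.back().tmp_extents: fictitious blocks >= kTempBlockStart.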
738 723
739 // Stores all Extents in 'extents' into 'out'. 724 // Stores all Extents in 'extents' into 'out'.
740 void DeltaDiffGenerator::StoreExtents( 725 void DeltaDiffGenerator::StoreExtents(
741 vector<Extent>& extents, 726 const vector<Extent>& extents,
742 google::protobuf::RepeatedPtrField<Extent>* out) { 727 google::protobuf::RepeatedPtrField<Extent>* out) {
743 for (vector<Extent>::const_iterator it = extents.begin(); 728 for (vector<Extent>::const_iterator it = extents.begin();
744 it != extents.end(); ++it) { 729 it != extents.end(); ++it) {
745 Extent* new_extent = out->Add(); 730 Extent* new_extent = out->Add();
746 *new_extent = *it; 731 *new_extent = *it;
747 } 732 }
748 } 733 }
749 734
750 // Creates all the edges for the graph. Writers of a block point to 735 // Creates all the edges for the graph. Writers of a block point to
751 // readers of the same block. This is because for an edge A->B, B 736 // readers of the same block. This is because for an edge A->B, B
(...skipping 15 matching lines...)
767 // No existing edge. Create one 752 // No existing edge. Create one
768 (*graph)[blocks[i].writer].out_edges.insert( 753 (*graph)[blocks[i].writer].out_edges.insert(
769 make_pair(blocks[i].reader, EdgeProperties())); 754 make_pair(blocks[i].reader, EdgeProperties()));
770 edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader); 755 edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
771 CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end()); 756 CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
772 } 757 }
773 graph_utils::AppendBlockToExtents(&edge_it->second.extents, i); 758 graph_utils::AppendBlockToExtents(&edge_it->second.extents, i);
774 } 759 }
775 } 760 }
776 761
762 namespace {
763
764 class SortCutsByTopoOrderLess {
765 public:
766 SortCutsByTopoOrderLess(vector<vector<Vertex::Index>::size_type>& table)
767 : table_(table) {}
768 bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
769 return table_[a.old_dst] < table_[b.old_dst];
770 }
771 private:
772 vector<vector<Vertex::Index>::size_type>& table_;
773 };
774
775 } // namespace {}
776
777 void DeltaDiffGenerator::GenerateReverseTopoOrderMap(
778 vector<Vertex::Index>& op_indexes,
779 vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
780 vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
781 for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size();
782 i != e; ++i) {
783 Vertex::Index node = op_indexes[i];
784 if (table.size() < (node + 1)) {
785 table.resize(node + 1);
786 }
787 table[node] = i;
788 }
789 reverse_op_indexes->swap(table);
790 }
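
The result is a position table: table[v] answers "where does vertex v sit in the current install order?" in O(1). A small example with hypothetical indexes:

  vector<Vertex::Index> op_indexes;  // a topological order, e.g. 5, 2, 7
  op_indexes.push_back(5);
  op_indexes.push_back(2);
  op_indexes.push_back(7);
  vector<vector<Vertex::Index>::size_type> reverse;
  DeltaDiffGenerator::GenerateReverseTopoOrderMap(op_indexes, &reverse);
  // reverse[5] == 0, reverse[2] == 1, reverse[7] == 2. Slots for
  // vertexes absent from op_indexes are zero-filled, so callers may
  // only index by vertexes that actually appear in the order.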
791
792 void DeltaDiffGenerator::SortCutsByTopoOrder(vector<Vertex::Index>& op_indexes,
793 vector<CutEdgeVertexes>* cuts) {
794 // first, make a reverse lookup table.
795 vector<vector<Vertex::Index>::size_type> table;
796 GenerateReverseTopoOrderMap(op_indexes, &table);
797 SortCutsByTopoOrderLess less(table);
798 sort(cuts->begin(), cuts->end(), less);
799 }
800
801 void DeltaDiffGenerator::MoveFullOpsToBack(Graph* graph,
802 vector<Vertex::Index>* op_indexes) {
803 vector<Vertex::Index> ret;
804 vector<Vertex::Index> full_ops;
805 ret.reserve(op_indexes->size());
806 for (vector<Vertex::Index>::size_type i = 0, e = op_indexes->size(); i != e;
807 ++i) {
808 DeltaArchiveManifest_InstallOperation_Type type =
809 (*graph)[(*op_indexes)[i]].op.type();
810 if (type == DeltaArchiveManifest_InstallOperation_Type_REPLACE ||
811 type == DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ) {
812 full_ops.push_back((*op_indexes)[i]);
813 } else {
814 ret.push_back((*op_indexes)[i]);
815 }
816 }
817 LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
818 << (full_ops.size() + ret.size()) << " total ops.";
819 ret.insert(ret.end(), full_ops.begin(), full_ops.end());
820 op_indexes->swap(ret);
821 }
822
823 namespace {
824
825 template<typename T>
826 bool TempBlocksExistInExtents(const T& extents) {
827 for (int i = 0, e = extents.size(); i < e; ++i) {
828 Extent extent = graph_utils::GetElement(extents, i);
829 uint64_t start = extent.start_block();
830 uint64_t num = extent.num_blocks();
831 if (start == kSparseHole)
832 continue;
833 if (start >= kTempBlockStart ||
834 (start + num) >= kTempBlockStart) {
835 LOG(ERROR) << "temp block!";
836 LOG(ERROR) << "start: " << start << ", num: " << num;
837 LOG(ERROR) << "kTempBlockStart: " << kTempBlockStart;
838 LOG(ERROR) << "returning true";
839 return true;
840 }
841 // check for wrap-around, which would be a bug:
842 CHECK(start <= (start + num));
843 }
844 return false;
845 }
846
847 } // namespace {}
848
849 bool DeltaDiffGenerator::AssignTempBlocks(
850 Graph* graph,
851 const string& new_root,
852 int data_fd,
853 off_t* data_file_size,
854 vector<Vertex::Index>* op_indexes,
855 vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
856 vector<CutEdgeVertexes>& cuts) {
857 CHECK(!cuts.empty());
858 for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0;
859 true ; --i) {
860 LOG(INFO) << "Fixing temp blocks in cut " << i
861 << ": old dst: " << cuts[i].old_dst << " new vertex: "
862 << cuts[i].new_vertex;
863 const uint64_t blocks_needed =
864 graph_utils::BlocksInExtents(cuts[i].tmp_extents);
865 LOG(INFO) << "Scanning for usable blocks (" << blocks_needed << " needed)";
866 // For now, just look for a single op w/ sufficient blocks, not
867 // considering blocks from outgoing read-before deps.
868 Vertex::Index node = cuts[i].old_dst;
869 DeltaArchiveManifest_InstallOperation_Type node_type =
870 (*graph)[node].op.type();
871 if (node_type == DeltaArchiveManifest_InstallOperation_Type_REPLACE ||
872 node_type == DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ) {
873 LOG(INFO) << "This was already converted to full, so skipping.";
874 // Delete the temp node and pointer to it from old src
875 if (!(*graph)[cuts[i].old_src].out_edges.erase(cuts[i].new_vertex)) {
876 LOG(INFO) << "Odd. node " << cuts[i].old_src << " didn't point to "
877 << cuts[i].new_vertex;
878 }
879 (*graph)[cuts[i].new_vertex].valid = false;
880 vector<Vertex::Index>::size_type new_topo_idx =
881 (*reverse_op_indexes)[cuts[i].new_vertex];
882 op_indexes->erase(op_indexes->begin() + new_topo_idx);
883 GenerateReverseTopoOrderMap(*op_indexes, reverse_op_indexes);
884 continue;
885 }
886 bool found_node = false;
887 for (vector<Vertex::Index>::size_type j = (*reverse_op_indexes)[node] + 1,
888 je = op_indexes->size(); j < je; ++j) {
889 Vertex::Index test_node = (*op_indexes)[j];
890 // See if this node has sufficient blocks
891 ExtentRanges ranges;
892 ranges.AddRepeatedExtents((*graph)[test_node].op.dst_extents());
893 ranges.SubtractExtent(ExtentForRange(
894 kTempBlockStart, kSparseHole - kTempBlockStart));
895 ranges.SubtractRepeatedExtents((*graph)[test_node].op.src_extents());
896 // For now, for simplicity, subtract out all blocks in read-before
897 // dependencies.
898 for (Vertex::EdgeMap::const_iterator edge_i =
899 (*graph)[test_node].out_edges.begin(),
900 edge_e = (*graph)[test_node].out_edges.end();
901 edge_i != edge_e; ++edge_i) {
902 ranges.SubtractExtents(edge_i->second.extents);
903 }
904
905 uint64_t blocks_found = ranges.blocks();
906 if (blocks_found < blocks_needed) {
907 if (blocks_found > 0)
908 LOG(INFO) << "insufficient blocks found in topo node " << j
909 << " (node " << (*op_indexes)[j] << "). Found only "
910 << blocks_found;
911 continue;
912 }
913 found_node = true;
914 LOG(INFO) << "Found sufficient blocks in topo node " << j
915 << " (node " << (*op_indexes)[j] << ")";
916 // Sub in the blocks, and make the node supplying the blocks
917 // depend on old_dst.
918 vector<Extent> real_extents =
919 ranges.GetExtentsForBlockCount(blocks_needed);
920
921 // Fix the old dest node w/ the real blocks
922 SubstituteBlocks(&(*graph)[node],
923 cuts[i].tmp_extents,
924 real_extents);
925
926 // Fix the new node w/ the real blocks. Since the new node is just a
927 // copy operation, we can replace all the dest extents w/ the real
928 // blocks.
929 DeltaArchiveManifest_InstallOperation *op =
930 &(*graph)[cuts[i].new_vertex].op;
931 op->clear_dst_extents();
932 StoreExtents(real_extents, op->mutable_dst_extents());
933
934 // Add an edge from the real-block supplier to the old dest block.
935 graph_utils::AddReadBeforeDepExtents(&(*graph)[test_node],
936 node,
937 real_extents);
938 break;
939 }
940 if (!found_node) {
941 // convert to full op
942 LOG(WARNING) << "Failed to find enough temp blocks for cut " << i
943 << " with old dest (graph node " << node
944 << "). Converting to a full op, at the expense of a "
945 << "good compression ratio.";
946 TEST_AND_RETURN_FALSE(ConvertCutToFullOp(graph,
947 cuts[i],
948 new_root,
949 data_fd,
950 data_file_size));
951 // move the full op to the back
952 vector<Vertex::Index> new_op_indexes;
953 for (vector<Vertex::Index>::const_iterator iter_i = op_indexes->begin(),
954 iter_e = op_indexes->end(); iter_i != iter_e; ++iter_i) {
955 if ((*iter_i == cuts[i].old_dst) || (*iter_i == cuts[i].new_vertex))
956 continue;
957 new_op_indexes.push_back(*iter_i);
958 }
959 new_op_indexes.push_back(cuts[i].old_dst);
960 op_indexes->swap(new_op_indexes);
961
962 GenerateReverseTopoOrderMap(*op_indexes, reverse_op_indexes);
963 }
964 if (i == e) {
965 // break out of for() loop
966 break;
967 }
968 }
969 return true;
970 }
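
The donor search above is set arithmetic over block ranges: start from everything the later node writes, then subtract anything that cannot be lent out. Condensed sketch using the ExtentRanges type this change introduces:

  ExtentRanges ranges;
  ranges.AddRepeatedExtents((*graph)[test_node].op.dst_extents());
  ranges.SubtractExtent(ExtentForRange(
      kTempBlockStart, kSparseHole - kTempBlockStart));  // no temp blocks
  ranges.SubtractRepeatedExtents((*graph)[test_node].op.src_extents());
  // (read-before dependency extents are subtracted too, per the loop above)
  if (ranges.blocks() >= blocks_needed) {
    // These real blocks can stand in for the cut's fictitious temp blocks:
    vector<Extent> real = ranges.GetExtentsForBlockCount(blocks_needed);
  }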
971
972 bool DeltaDiffGenerator::NoTempBlocksRemain(const Graph& graph) {
973 size_t idx = 0;
974 for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
975 ++it, ++idx) {
976 if (!it->valid)
977 continue;
978 const DeltaArchiveManifest_InstallOperation& op = it->op;
979 if (TempBlocksExistInExtents(op.dst_extents()) ||
980 TempBlocksExistInExtents(op.src_extents())) {
981 LOG(INFO) << "bad extents in node " << idx;
982 LOG(INFO) << "so yeah";
983 return false;
984 }
985
986 // Check out-edges:
987 for (Vertex::EdgeMap::const_iterator jt = it->out_edges.begin(),
988 je = it->out_edges.end(); jt != je; ++jt) {
989 if (TempBlocksExistInExtents(jt->second.extents) ||
990 TempBlocksExistInExtents(jt->second.write_extents)) {
991 LOG(INFO) << "bad out edge in node " << idx;
992 LOG(INFO) << "so yeah";
993 return false;
994 }
995 }
996 }
997 return true;
998 }
999
777 bool DeltaDiffGenerator::ReorderDataBlobs( 1000 bool DeltaDiffGenerator::ReorderDataBlobs(
778 DeltaArchiveManifest* manifest, 1001 DeltaArchiveManifest* manifest,
779 const std::string& data_blobs_path, 1002 const std::string& data_blobs_path,
780 const std::string& new_data_blobs_path) { 1003 const std::string& new_data_blobs_path) {
781 int in_fd = open(data_blobs_path.c_str(), O_RDONLY, 0); 1004 int in_fd = open(data_blobs_path.c_str(), O_RDONLY, 0);
782 TEST_AND_RETURN_FALSE_ERRNO(in_fd >= 0); 1005 TEST_AND_RETURN_FALSE_ERRNO(in_fd >= 0);
783 ScopedFdCloser in_fd_closer(&in_fd); 1006 ScopedFdCloser in_fd_closer(&in_fd);
784 1007
785 DirectFileWriter writer; 1008 DirectFileWriter writer;
786 TEST_AND_RETURN_FALSE( 1009 TEST_AND_RETURN_FALSE(
(...skipping 20 matching lines...)
807 TEST_AND_RETURN_FALSE(rc == static_cast<ssize_t>(buf.size())); 1030 TEST_AND_RETURN_FALSE(rc == static_cast<ssize_t>(buf.size()));
808 1031
809 op->set_data_offset(out_file_size); 1032 op->set_data_offset(out_file_size);
810 TEST_AND_RETURN_FALSE(writer.Write(&buf[0], buf.size()) == 1033 TEST_AND_RETURN_FALSE(writer.Write(&buf[0], buf.size()) ==
811 static_cast<ssize_t>(buf.size())); 1034 static_cast<ssize_t>(buf.size()));
812 out_file_size += buf.size(); 1035 out_file_size += buf.size();
813 } 1036 }
814 return true; 1037 return true;
815 } 1038 }
816 1039
1040 bool DeltaDiffGenerator::ConvertCutToFullOp(Graph* graph,
1041 const CutEdgeVertexes& cut,
1042 const string& new_root,
1043 int data_fd,
1044 off_t* data_file_size) {
1045 // Drop all incoming edges, keep all outgoing edges
1046
1047 // Keep all outgoing edges
1048 Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
1049 graph_utils::DropWriteBeforeDeps(&out_edges);
1050
1051 TEST_AND_RETURN_FALSE(DeltaReadFile(graph,
1052 cut.old_dst,
1053 NULL,
1054 "/-!@:&*nonexistent_path",
1055 new_root,
1056 (*graph)[cut.old_dst].file_name,
1057 data_fd,
1058 data_file_size));
1059
1060 (*graph)[cut.old_dst].out_edges = out_edges;
1061
1062 // Right now we don't have doubly-linked edges, so we have to scan
1063 // the whole graph.
1064 graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
1065
1066 // Delete temp node
1067 (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
1068 CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
1069 (*graph)[cut.old_dst].out_edges.end());
1070 (*graph)[cut.new_vertex].valid = false;
1071 return true;
1072 }
1073
1074 bool DeltaDiffGenerator::ConvertGraphToDag(Graph* graph,
1075 const string& new_root,
1076 int fd,
1077 off_t* data_file_size,
1078 vector<Vertex::Index>* final_order) {
1079 CycleBreaker cycle_breaker;
1080 LOG(INFO) << "Finding cycles...";
1081 set<Edge> cut_edges;
1082 cycle_breaker.BreakCycles(*graph, &cut_edges);
1083 LOG(INFO) << "done finding cycles";
1084 CheckGraph(*graph);
1085
1086 // Calculate number of scratch blocks needed
1087
1088 LOG(INFO) << "Cutting cycles...";
1089 vector<CutEdgeVertexes> cuts;
1090 TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
1091 LOG(INFO) << "done cutting cycles";
1092 LOG(INFO) << "There are " << cuts.size() << " cuts.";
1093 CheckGraph(*graph);
1094
1095 LOG(INFO) << "Creating initial topological order...";
1096 TopologicalSort(*graph, final_order);
1097 LOG(INFO) << "done with initial topo order";
1098 CheckGraph(*graph);
1099
1100 LOG(INFO) << "Moving full ops to the back";
1101 MoveFullOpsToBack(graph, final_order);
1102 LOG(INFO) << "done moving full ops to back";
1103
1104 vector<vector<Vertex::Index>::size_type> inverse_final_order;
1105 GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
1106
1107 if (!cuts.empty())
1108 TEST_AND_RETURN_FALSE(AssignTempBlocks(graph,
1109 new_root,
1110 fd,
1111 data_file_size,
1112 final_order,
1113 &inverse_final_order,
1114 cuts));
1115 LOG(INFO) << "Making sure all temp blocks have been allocated";
1116 graph_utils::DumpGraph(*graph);
1117 CHECK(NoTempBlocksRemain(*graph));
1118 LOG(INFO) << "done making sure all temp blocks are allocated";
1119 return true;
1120 }
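
ConvertGraphToDag() now owns the ordering pipeline that GenerateDeltaUpdateFile() previously ran inline. Condensed, as a sketch:

  // 1. BreakCycles         pick edges whose removal leaves the graph acyclic
  // 2. CutEdges            splice a temp-copy MOVE node into each cut edge
  // 3. TopologicalSort     produce an initial install order
  // 4. MoveFullOpsToBack   full ops read no old blocks, so running them
  //                        last keeps their dst blocks usable as scratch
  // 5. AssignTempBlocks    back fictitious temp blocks with real donors,
  //                        else convert the destination to a full op
  // 6. NoTempBlocksRemain  CHECK that no fictitious blocks survive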
1121
817 bool DeltaDiffGenerator::GenerateDeltaUpdateFile( 1122 bool DeltaDiffGenerator::GenerateDeltaUpdateFile(
818 const string& old_root, 1123 const string& old_root,
819 const string& old_image, 1124 const string& old_image,
820 const string& new_root, 1125 const string& new_root,
821 const string& new_image, 1126 const string& new_image,
822 const string& old_kernel_part, 1127 const string& old_kernel_part,
823 const string& new_kernel_part, 1128 const string& new_kernel_part,
824 const string& output_path, 1129 const string& output_path,
825 const string& private_key_path) { 1130 const string& private_key_path) {
826 struct stat old_image_stbuf; 1131 struct stat old_image_stbuf;
(...skipping 23 matching lines...)
850 CheckGraph(graph); 1155 CheckGraph(graph);
851 1156
852 const string kTempFileTemplate("/tmp/CrAU_temp_data.XXXXXX"); 1157 const string kTempFileTemplate("/tmp/CrAU_temp_data.XXXXXX");
853 string temp_file_path; 1158 string temp_file_path;
854 off_t data_file_size = 0; 1159 off_t data_file_size = 0;
855 1160
856 LOG(INFO) << "Reading files..."; 1161 LOG(INFO) << "Reading files...";
857 1162
858 vector<DeltaArchiveManifest_InstallOperation> kernel_ops; 1163 vector<DeltaArchiveManifest_InstallOperation> kernel_ops;
859 1164
860 DeltaArchiveManifest_InstallOperation final_op; 1165 vector<Vertex::Index> final_order;
861 { 1166 {
862 int fd; 1167 int fd;
863 TEST_AND_RETURN_FALSE( 1168 TEST_AND_RETURN_FALSE(
864 utils::MakeTempFile(kTempFileTemplate, &temp_file_path, &fd)); 1169 utils::MakeTempFile(kTempFileTemplate, &temp_file_path, &fd));
865 TEST_AND_RETURN_FALSE(fd >= 0); 1170 TEST_AND_RETURN_FALSE(fd >= 0);
866 ScopedFdCloser fd_closer(&fd); 1171 ScopedFdCloser fd_closer(&fd);
867 1172
868 TEST_AND_RETURN_FALSE(DeltaReadFiles(&graph, 1173 TEST_AND_RETURN_FALSE(DeltaReadFiles(&graph,
869 &blocks, 1174 &blocks,
870 old_root, 1175 old_root,
871 new_root, 1176 new_root,
872 fd, 1177 fd,
873 &data_file_size)); 1178 &data_file_size));
1179 LOG(INFO) << "done reading normal files";
874 CheckGraph(graph); 1180 CheckGraph(graph);
875 1181
1182 graph.resize(graph.size() + 1);
876 TEST_AND_RETURN_FALSE(ReadUnwrittenBlocks(blocks, 1183 TEST_AND_RETURN_FALSE(ReadUnwrittenBlocks(blocks,
877 fd, 1184 fd,
878 &data_file_size, 1185 &data_file_size,
879 new_image, 1186 new_image,
880 &final_op)); 1187 &graph.back()));
881 1188
882 // Read kernel partition 1189 // Read kernel partition
883 TEST_AND_RETURN_FALSE(DeltaCompressKernelPartition(old_kernel_part, 1190 TEST_AND_RETURN_FALSE(DeltaCompressKernelPartition(old_kernel_part,
884 new_kernel_part, 1191 new_kernel_part,
885 &kernel_ops, 1192 &kernel_ops,
886 fd, 1193 fd,
887 &data_file_size)); 1194 &data_file_size));
1195
1196 LOG(INFO) << "done reading kernel";
1197 CheckGraph(graph);
1198
1199 LOG(INFO) << "Creating edges...";
1200 CreateEdges(&graph, blocks);
1201 LOG(INFO) << "Done creating edges";
1202 CheckGraph(graph);
1203
1204 TEST_AND_RETURN_FALSE(ConvertGraphToDag(&graph,
1205 new_root,
1206 fd,
1207 &data_file_size,
1208 &final_order));
888 } 1209 }
889 CheckGraph(graph);
890
891 LOG(INFO) << "Creating edges...";
892 CreateEdges(&graph, blocks);
893 CheckGraph(graph);
894
895 CycleBreaker cycle_breaker;
896 LOG(INFO) << "Finding cycles...";
897 set<Edge> cut_edges;
898 cycle_breaker.BreakCycles(graph, &cut_edges);
899 CheckGraph(graph);
900
901 // Calculate number of scratch blocks needed
902
903 LOG(INFO) << "Cutting cycles...";
904 TEST_AND_RETURN_FALSE(CutEdges(&graph, blocks, cut_edges));
905 CheckGraph(graph);
906
907 vector<Vertex::Index> final_order;
908 LOG(INFO) << "Ordering...";
909 TopologicalSort(graph, &final_order);
910 CheckGraph(graph);
911 1210
912 // Convert to protobuf Manifest object 1211 // Convert to protobuf Manifest object
913 DeltaArchiveManifest manifest; 1212 DeltaArchiveManifest manifest;
914 CheckGraph(graph); 1213 CheckGraph(graph);
915 InstallOperationsToManifest(graph, final_order, kernel_ops, &manifest); 1214 InstallOperationsToManifest(graph, final_order, kernel_ops, &manifest);
916 {
917 // Write final operation
918 DeltaArchiveManifest_InstallOperation* op =
919 manifest.add_install_operations();
920 *op = final_op;
921 CHECK(op->has_type());
922 LOG(INFO) << "final op length: " << op->data_length();
923 }
924 1215
925 CheckGraph(graph); 1216 CheckGraph(graph);
926 manifest.set_block_size(kBlockSize); 1217 manifest.set_block_size(kBlockSize);
927 1218
928 // Reorder the data blobs with the newly ordered manifest 1219 // Reorder the data blobs with the newly ordered manifest
929 string ordered_blobs_path; 1220 string ordered_blobs_path;
930 TEST_AND_RETURN_FALSE(utils::MakeTempFile( 1221 TEST_AND_RETURN_FALSE(utils::MakeTempFile(
931 "/tmp/CrAU_temp_data.ordered.XXXXXX", 1222 "/tmp/CrAU_temp_data.ordered.XXXXXX",
932 &ordered_blobs_path, 1223 &ordered_blobs_path,
933 false)); 1224 false));
(...skipping 125 matching lines...)
1059 1350
1060 LOG(INFO) << "All done. Successfully created delta file."; 1351 LOG(INFO) << "All done. Successfully created delta file.";
1061 return true; 1352 return true;
1062 } 1353 }
1063 1354
1064 const char* const kBsdiffPath = "/usr/bin/bsdiff"; 1355 const char* const kBsdiffPath = "/usr/bin/bsdiff";
1065 const char* const kBspatchPath = "/usr/bin/bspatch"; 1356 const char* const kBspatchPath = "/usr/bin/bspatch";
1066 const char* const kDeltaMagic = "CrAU"; 1357 const char* const kDeltaMagic = "CrAU";
1067 1358
1068 }; // namespace chromeos_update_engine 1359 }; // namespace chromeos_update_engine