Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(258)

Side by Side Diff: client/cmd/isolate/exp_archive.go

Issue 2983333002: isolate: Split UploadDeps into smaller chunks (Closed)
Patch Set: rebase Created 3 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2015 The LUCI Authors. 1 // Copyright 2015 The LUCI Authors.
2 // 2 //
3 // Licensed under the Apache License, Version 2.0 (the "License"); 3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License. 4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at 5 // You may obtain a copy of the License at
6 // 6 //
7 // http://www.apache.org/licenses/LICENSE-2.0 7 // http://www.apache.org/licenses/LICENSE-2.0
8 // 8 //
9 // Unless required by applicable law or agreed to in writing, software 9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, 10 // distributed under the License is distributed on an "AS IS" BASIS,
(...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after
180 checker: checker, 180 checker: checker,
181 uploader: uploader, 181 uploader: uploader,
182 files: make(map[string]isolated.File), 182 files: make(map[string]isolated.File),
183 } 183 }
184 } 184 }
185 185
186 func (ut *uploadTracker) Files() map[string]isolated.File { 186 func (ut *uploadTracker) Files() map[string]isolated.File {
187 return ut.files 187 return ut.files
188 } 188 }
189 189
190 func (ut *uploadTracker) UploadDeps(parts partitionedDeps) error { 190 // populateSymlinks adds an isolated.File to files for each provided symlink
191 » // Handle the symlinks. 191 func (ut *uploadTracker) populateSymlinks(symlinks []*Item) error {
192 » for _, item := range parts.links.items { 192 » for _, item := range symlinks {
193 l, err := os.Readlink(item.Path) 193 l, err := os.Readlink(item.Path)
194 if err != nil { 194 if err != nil {
195 return fmt.Errorf("unable to resolve symlink for %q: %v" , item.Path, err) 195 return fmt.Errorf("unable to resolve symlink for %q: %v" , item.Path, err)
196 } 196 }
197 ut.files[item.RelPath] = isolated.SymLink(l) 197 ut.files[item.RelPath] = isolated.SymLink(l)
198 } 198 }
199 return nil
200 }
199 201
 200 » // Handle the small to-be-archived files. 202 // tarAndUploadFiles creates bundles of files, uploads them, and adds each bundle to files.
201 » bundles := ShardItems(parts.filesToArchive.items, archiveMaxSize) 203 func (ut *uploadTracker) tarAndUploadFiles(smallFiles []*Item) error {
204 » bundles := ShardItems(smallFiles, archiveMaxSize)
202 log.Printf("\t%d TAR archives to be isolated", len(bundles)) 205 log.Printf("\t%d TAR archives to be isolated", len(bundles))
203 206
204 for _, bundle := range bundles { 207 for _, bundle := range bundles {
205 bundle := bundle 208 bundle := bundle
206 digest, tarSize, err := bundle.Digest() 209 digest, tarSize, err := bundle.Digest()
207 if err != nil { 210 if err != nil {
208 return err 211 return err
209 } 212 }
210 213
211 log.Printf("Created tar archive %q (%s)", digest, humanize.Bytes (uint64(tarSize))) 214 log.Printf("Created tar archive %q (%s)", digest, humanize.Bytes (uint64(tarSize)))
(...skipping 11 matching lines...) Expand all
223 ut.checker.AddItem(item, false, func(item *Item, ps *isolatedcli ent.PushState) { 226 ut.checker.AddItem(item, false, func(item *Item, ps *isolatedcli ent.PushState) {
224 if ps == nil { 227 if ps == nil {
225 return 228 return
226 } 229 }
227 log.Printf("QUEUED %q for upload", item.RelPath) 230 log.Printf("QUEUED %q for upload", item.RelPath)
228 ut.uploader.Upload(item.RelPath, bundle.Contents, ps, fu nc() { 231 ut.uploader.Upload(item.RelPath, bundle.Contents, ps, fu nc() {
229 log.Printf("UPLOADED %q", item.RelPath) 232 log.Printf("UPLOADED %q", item.RelPath)
230 }) 233 })
231 }) 234 })
232 } 235 }
236 return nil
237 }
233 238
239 // uploadFiles uploads each file and adds it to files.
240 func (ut *uploadTracker) uploadFiles(files []*Item) error {
234 // Handle the large individually-uploaded files. 241 // Handle the large individually-uploaded files.
235 » for _, item := range parts.indivFiles.items { 242 » for _, item := range files {
236 d, err := hashFile(item.Path) 243 d, err := hashFile(item.Path)
237 if err != nil { 244 if err != nil {
238 return err 245 return err
239 } 246 }
240 item.Digest = d 247 item.Digest = d
241 ut.files[item.RelPath] = isolated.BasicFile(item.Digest, int(ite m.Mode), item.Size) 248 ut.files[item.RelPath] = isolated.BasicFile(item.Digest, int(ite m.Mode), item.Size)
242 ut.checker.AddItem(item, false, func(item *Item, ps *isolatedcli ent.PushState) { 249 ut.checker.AddItem(item, false, func(item *Item, ps *isolatedcli ent.PushState) {
243 if ps == nil { 250 if ps == nil {
244 return 251 return
245 } 252 }
246 log.Printf("QUEUED %q for upload", item.RelPath) 253 log.Printf("QUEUED %q for upload", item.RelPath)
247 ut.uploader.UploadFile(item, ps, func() { 254 ut.uploader.UploadFile(item, ps, func() {
248 log.Printf("UPLOADED %q", item.RelPath) 255 log.Printf("UPLOADED %q", item.RelPath)
249 }) 256 })
250 }) 257 })
251 } 258 }
252 return nil 259 return nil
253 } 260 }
254 261
262 func (ut *uploadTracker) UploadDeps(parts partitionedDeps) error {
263 if err := ut.populateSymlinks(parts.links.items); err != nil {
264 return err
265 }
266
267 if err := ut.tarAndUploadFiles(parts.filesToArchive.items); err != nil {
268 return err
269 }
270
271 if err := ut.uploadFiles(parts.indivFiles.items); err != nil {
272 return err
273 }
274 return nil
275 }
276
255 // main contains the core logic for experimental archive. 277 // main contains the core logic for experimental archive.
256 func (c *expArchiveRun) main() error { 278 func (c *expArchiveRun) main() error {
257 // TODO(djd): This func is long and has a lot of internal complexity (li ke, 279 // TODO(djd): This func is long and has a lot of internal complexity (li ke,
258 // such as, archiveCallback). Refactor. 280 // such as, archiveCallback). Refactor.
259 281
260 start := time.Now() 282 start := time.Now()
261 archiveOpts := &c.isolateFlags.ArchiveOptions 283 archiveOpts := &c.isolateFlags.ArchiveOptions
262 // Parse the incoming isolate file. 284 // Parse the incoming isolate file.
263 deps, rootDir, isol, err := isolate.ProcessIsolate(archiveOpts) 285 deps, rootDir, isol, err := isolate.ProcessIsolate(archiveOpts)
264 if err != nil { 286 if err != nil {
(...skipping 145 matching lines...) Expand 10 before | Expand all | Expand 10 after
410 } 432 }
411 433
412 func hashFile(path string) (isolated.HexDigest, error) { 434 func hashFile(path string) (isolated.HexDigest, error) {
413 f, err := os.Open(path) 435 f, err := os.Open(path)
414 if err != nil { 436 if err != nil {
415 return "", err 437 return "", err
416 } 438 }
417 defer f.Close() 439 defer f.Close()
418 return isolated.Hash(f) 440 return isolated.Hash(f)
419 } 441 }
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698