| OLD | NEW |
| 1 // Copyright 2015 The LUCI Authors. | 1 // Copyright 2015 The LUCI Authors. |
| 2 // | 2 // |
| 3 // Licensed under the Apache License, Version 2.0 (the "License"); | 3 // Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 // you may not use this file except in compliance with the License. | 4 // you may not use this file except in compliance with the License. |
| 5 // You may obtain a copy of the License at | 5 // You may obtain a copy of the License at |
| 6 // | 6 // |
| 7 // http://www.apache.org/licenses/LICENSE-2.0 | 7 // http://www.apache.org/licenses/LICENSE-2.0 |
| 8 // | 8 // |
| 9 // Unless required by applicable law or agreed to in writing, software | 9 // Unless required by applicable law or agreed to in writing, software |
| 10 // distributed under the License is distributed on an "AS IS" BASIS, | 10 // distributed under the License is distributed on an "AS IS" BASIS, |
| (...skipping 151 matching lines...) |
| 162 walker := partitioningWalker{fsView: fsView} | 162 walker := partitioningWalker{fsView: fsView} |
| 163 for _, dep := range deps { | 163 for _, dep := range deps { |
| 164 // Try to walk dep. If dep is a file (or symlink), the inner function is called exactly once. | 164 // Try to walk dep. If dep is a file (or symlink), the inner function is called exactly once. |
| 165 if err := filepath.Walk(filepath.Clean(dep), walker.walkFn); err != nil { | 165 if err := filepath.Walk(filepath.Clean(dep), walker.walkFn); err != nil { |
| 166 return partitionedDeps{}, err | 166 return partitionedDeps{}, err |
| 167 } | 167 } |
| 168 } | 168 } |
| 169 return walker.parts, nil | 169 return walker.parts, nil |
| 170 } | 170 } |
| 171 | 171 |
| 172 func uploadDeps(parts partitionedDeps, checker *Checker, uploader *Uploader) (map[string]isolated.File, error) { | 172 type uploadTracker struct { |
| 173 » // Construct a map of the files that constitute the isolate. | 173 » checker *Checker |
| 174 » files := make(map[string]isolated.File) | 174 » uploader *Uploader |
| 175 » files map[string]isolated.File |
| 176 } |
| 175 | 177 |
| 178 func newUploadTracker(checker *Checker, uploader *Uploader) *uploadTracker { |
| 179 return &uploadTracker{ |
| 180 checker: checker, |
| 181 uploader: uploader, |
| 182 files: make(map[string]isolated.File), |
| 183 } |
| 184 } |
| 185 |
| 186 func (ut *uploadTracker) Files() map[string]isolated.File { |
| 187 return ut.files |
| 188 } |
| 189 |
| 190 func (ut *uploadTracker) UploadDeps(parts partitionedDeps) error { |
| 176 // Handle the symlinks. | 191 // Handle the symlinks. |
| 177 for _, item := range parts.links.items { | 192 for _, item := range parts.links.items { |
| 178 l, err := os.Readlink(item.Path) | 193 l, err := os.Readlink(item.Path) |
| 179 if err != nil { | 194 if err != nil { |
| 180 » » » return nil, fmt.Errorf("unable to resolve symlink for %q: %v", item.Path, err) | 195 » » » return fmt.Errorf("unable to resolve symlink for %q: %v", item.Path, err) |
| 181 } | 196 } |
| 182 » » files[item.RelPath] = isolated.SymLink(l) | 197 » » ut.files[item.RelPath] = isolated.SymLink(l) |
| 183 } | 198 } |
| 184 | 199 |
| 185 // Handle the small to-be-archived files. | 200 // Handle the small to-be-archived files. |
| 186 bundles := ShardItems(parts.filesToArchive.items, archiveMaxSize) | 201 bundles := ShardItems(parts.filesToArchive.items, archiveMaxSize) |
| 187 log.Printf("\t%d TAR archives to be isolated", len(bundles)) | 202 log.Printf("\t%d TAR archives to be isolated", len(bundles)) |
| 188 | 203 |
| 189 for _, bundle := range bundles { | 204 for _, bundle := range bundles { |
| 190 bundle := bundle | 205 bundle := bundle |
| 191 digest, tarSize, err := bundle.Digest() | 206 digest, tarSize, err := bundle.Digest() |
| 192 if err != nil { | 207 if err != nil { |
| 193 » » » return nil, err | 208 » » » return err |
| 194 } | 209 } |
| 195 | 210 |
| 196 log.Printf("Created tar archive %q (%s)", digest, humanize.Bytes(uint64(tarSize))) | 211 log.Printf("Created tar archive %q (%s)", digest, humanize.Bytes(uint64(tarSize))) |
| 197 log.Printf("\tcontains %d files (total %s)", len(bundle.Items), humanize.Bytes(uint64(bundle.ItemSize))) | 212 log.Printf("\tcontains %d files (total %s)", len(bundle.Items), humanize.Bytes(uint64(bundle.ItemSize))) |
| 198 // Mint an item for this tar. | 213 // Mint an item for this tar. |
| 199 item := &Item{ | 214 item := &Item{ |
| 200 Path: fmt.Sprintf(".%s.tar", digest), | 215 Path: fmt.Sprintf(".%s.tar", digest), |
| 201 RelPath: fmt.Sprintf(".%s.tar", digest), | 216 RelPath: fmt.Sprintf(".%s.tar", digest), |
| 202 Size: tarSize, | 217 Size: tarSize, |
| 203 Mode: 0644, // Read | 218 Mode: 0644, // Read |
| 204 Digest: digest, | 219 Digest: digest, |
| 205 } | 220 } |
| 206 » » files[item.RelPath] = isolated.TarFile(item.Digest, int(item.Mode), item.Size) | 221 » » ut.files[item.RelPath] = isolated.TarFile(item.Digest, int(item.Mode), item.Size) |
| 207 | 222 |
| 208 » » checker.AddItem(item, false, func(item *Item, ps *isolatedclient.PushState) { | 223 » » ut.checker.AddItem(item, false, func(item *Item, ps *isolatedclient.PushState) { |
| 209 if ps == nil { | 224 if ps == nil { |
| 210 return | 225 return |
| 211 } | 226 } |
| 212 log.Printf("QUEUED %q for upload", item.RelPath) | 227 log.Printf("QUEUED %q for upload", item.RelPath) |
| 213 » » » uploader.Upload(item.RelPath, bundle.Contents, ps, func() { | 228 » » » ut.uploader.Upload(item.RelPath, bundle.Contents, ps, func() { |
| 214 log.Printf("UPLOADED %q", item.RelPath) | 229 log.Printf("UPLOADED %q", item.RelPath) |
| 215 }) | 230 }) |
| 216 }) | 231 }) |
| 217 } | 232 } |
| 218 | 233 |
| 219 // Handle the large individually-uploaded files. | 234 // Handle the large individually-uploaded files. |
| 220 for _, item := range parts.indivFiles.items { | 235 for _, item := range parts.indivFiles.items { |
| 221 d, err := hashFile(item.Path) | 236 d, err := hashFile(item.Path) |
| 222 if err != nil { | 237 if err != nil { |
| 223 » » » return nil, err | 238 » » » return err |
| 224 } | 239 } |
| 225 item.Digest = d | 240 item.Digest = d |
| 226 » » files[item.RelPath] = isolated.BasicFile(item.Digest, int(item.Mode), item.Size) | 241 » » ut.files[item.RelPath] = isolated.BasicFile(item.Digest, int(item.Mode), item.Size) |
| 227 » » checker.AddItem(item, false, func(item *Item, ps *isolatedclient.PushState) { | 242 » » ut.checker.AddItem(item, false, func(item *Item, ps *isolatedclient.PushState) { |
| 228 if ps == nil { | 243 if ps == nil { |
| 229 return | 244 return |
| 230 } | 245 } |
| 231 log.Printf("QUEUED %q for upload", item.RelPath) | 246 log.Printf("QUEUED %q for upload", item.RelPath) |
| 232 » » » uploader.UploadFile(item, ps, func() { | 247 » » » ut.uploader.UploadFile(item, ps, func() { |
| 233 log.Printf("UPLOADED %q", item.RelPath) | 248 log.Printf("UPLOADED %q", item.RelPath) |
| 234 }) | 249 }) |
| 235 }) | 250 }) |
| 236 } | 251 } |
| 237 » return files, nil | 252 » return nil |
| 238 } | 253 } |
| 239 | 254 |
| 240 // main contains the core logic for experimental archive. | 255 // main contains the core logic for experimental archive. |
| 241 func (c *expArchiveRun) main() error { | 256 func (c *expArchiveRun) main() error { |
| 242 // TODO(djd): This func is long and has a lot of internal complexity (like, | 257 // TODO(djd): This func is long and has a lot of internal complexity (like, |
| 243 // such as, archiveCallback). Refactor. | 258 // such as, archiveCallback). Refactor. |
| 244 | 259 |
| 245 start := time.Now() | 260 start := time.Now() |
| 246 archiveOpts := &c.isolateFlags.ArchiveOptions | 261 archiveOpts := &c.isolateFlags.ArchiveOptions |
| 247 // Parse the incoming isolate file. | 262 // Parse the incoming isolate file. |
| (...skipping 25 matching lines...) |
| 273 if err != nil { | 288 if err != nil { |
| 274 return fmt.Errorf("partitioning deps: %v", err) | 289 return fmt.Errorf("partitioning deps: %v", err) |
| 275 } | 290 } |
| 276 | 291 |
| 277 numFiles := len(parts.filesToArchive.items) + len(parts.indivFiles.items) | 292 numFiles := len(parts.filesToArchive.items) + len(parts.indivFiles.items) |
| 278 filesSize := uint64(parts.filesToArchive.totalSize + parts.indivFiles.totalSize) | 293 filesSize := uint64(parts.filesToArchive.totalSize + parts.indivFiles.totalSize) |
| 279 log.Printf("Isolate expanded to %d files (total size %s) and %d symlinks", numFiles, humanize.Bytes(filesSize), len(parts.links.items)) | 294 log.Printf("Isolate expanded to %d files (total size %s) and %d symlinks", numFiles, humanize.Bytes(filesSize), len(parts.links.items)) |
| 280 log.Printf("\t%d files (%s) to be isolated individually", len(parts.indivFiles.items), humanize.Bytes(uint64(parts.indivFiles.totalSize))) | 295 log.Printf("\t%d files (%s) to be isolated individually", len(parts.indivFiles.items), humanize.Bytes(uint64(parts.indivFiles.totalSize))) |
| 281 log.Printf("\t%d files (%s) to be isolated in archives", len(parts.filesToArchive.items), humanize.Bytes(uint64(parts.filesToArchive.totalSize))) | 296 log.Printf("\t%d files (%s) to be isolated in archives", len(parts.filesToArchive.items), humanize.Bytes(uint64(parts.filesToArchive.totalSize))) |
| 282 | 297 |
| 283 » files, err := uploadDeps(parts, checker, uploader) | 298 » tracker := newUploadTracker(checker, uploader) |
| 284 » if err != nil { | 299 » if err := tracker.UploadDeps(parts); err != nil { |
| 285 return err | 300 return err |
| 286 } | 301 } |
| 287 | 302 |
| 303 isol.Files = tracker.Files() |
| 304 |
| 288 // Marshal the isolated file into JSON, and create an Item to describe it. | 305 // Marshal the isolated file into JSON, and create an Item to describe it. |
| 289 isol.Files = files | |
| 290 var isolJSON []byte | 306 var isolJSON []byte |
| 291 isolJSON, err = json.Marshal(isol) | 307 isolJSON, err = json.Marshal(isol) |
| 292 if err != nil { | 308 if err != nil { |
| 293 return err | 309 return err |
| 294 } | 310 } |
| 295 isolItem := &Item{ | 311 isolItem := &Item{ |
| 296 Path: archiveOpts.Isolated, | 312 Path: archiveOpts.Isolated, |
| 297 RelPath: filepath.Base(archiveOpts.Isolated), | 313 RelPath: filepath.Base(archiveOpts.Isolated), |
| 298 Digest: isolated.HashBytes(isolJSON), | 314 Digest: isolated.HashBytes(isolJSON), |
| 299 Size: int64(len(isolJSON)), | 315 Size: int64(len(isolJSON)), |
| (...skipping 94 matching lines...) |
| 394 } | 410 } |
| 395 | 411 |
| 396 func hashFile(path string) (isolated.HexDigest, error) { | 412 func hashFile(path string) (isolated.HexDigest, error) { |
| 397 f, err := os.Open(path) | 413 f, err := os.Open(path) |
| 398 if err != nil { | 414 if err != nil { |
| 399 return "", err | 415 return "", err |
| 400 } | 416 } |
| 401 defer f.Close() | 417 defer f.Close() |
| 402 return isolated.Hash(f) | 418 return isolated.Hash(f) |
| 403 } | 419 } |
| OLD | NEW |
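
Note on the refactor shown above: the CL replaces the free function uploadDeps, which returned (map[string]isolated.File, error), with a stateful uploadTracker that owns the results map and exposes it via Files(), so UploadDeps only returns an error. Below is a minimal, self-contained sketch of that pattern for context; it assumes nothing from the CL itself, and tracker, dep, file, and newTracker are hypothetical stand-in names, not identifiers from this change.

package main

import "fmt"

// dep and file are simplified stand-ins for the CL's partitionedDeps
// entries and isolated.File values; only the shape of the refactor is
// illustrated here.
type dep struct{ relPath, digest string }
type file struct{ digest string }

// tracker mirrors uploadTracker: it owns the accumulated results map
// instead of threading it through return values.
type tracker struct {
	files map[string]file
}

func newTracker() *tracker {
	return &tracker{files: make(map[string]file)}
}

// uploadDeps records each dep into the tracker's map and returns only
// an error, the same signature change UploadDeps makes in the CL.
func (t *tracker) uploadDeps(deps []dep) error {
	for _, d := range deps {
		if d.digest == "" {
			return fmt.Errorf("dep %q has no digest", d.relPath)
		}
		t.files[d.relPath] = file{digest: d.digest}
	}
	return nil
}

// Files exposes the accumulated results, like uploadTracker.Files.
func (t *tracker) Files() map[string]file { return t.files }

func main() {
	t := newTracker()
	if err := t.uploadDeps([]dep{{"a/b.txt", "deadbeef"}}); err != nil {
		fmt.Println("upload failed:", err)
		return
	}
	fmt.Println(t.Files()) // map[a/b.txt:{deadbeef}]
}

One practical effect of this shape, visible in main() in the diff: the call site becomes a two-step sequence (construct the tracker, then UploadDeps, then read Files()), which leaves room to accumulate more state on the tracker in later CLs without changing the UploadDeps signature again.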