Chromium Code Reviews| Index: client/cmd/isolate/exp_archive.go |
| diff --git a/client/cmd/isolate/exp_archive.go b/client/cmd/isolate/exp_archive.go |
| index 19bcb7cfb4de843efcba8564c37828e17b82b67b..03e4a6375ab99352efaa6751028b8dad5252f674 100644 |
| --- a/client/cmd/isolate/exp_archive.go |
| +++ b/client/cmd/isolate/exp_archive.go |
| @@ -13,11 +13,20 @@ import ( |
| "os" |
| "path/filepath" |
| + humanize "github.com/dustin/go-humanize" |
| "github.com/luci/luci-go/client/isolate" |
| "github.com/luci/luci-go/common/isolated" |
| "github.com/maruel/subcommands" |
| ) |
| +const ( |
| + // archiveThreshold is the size (in bytes) used to determine whether to add |
|
mcgreevy
2016/11/22 23:23:47
I've added some nitpicky comments about the wording.
djd-OOO-Apr2017
2016/11/23 00:36:34
In future stages of this change we have more thresholds to add.
mcgreevy
2016/11/23 00:44:13
OK, let's stick with the consts, no func, and see how it goes.
djd-OOO-Apr2017
2016/11/23 00:46:29
(I had done – see ps2)
|
| + // files to a tar archive before uploading. Files smaller than this size will |
|
mcgreevy
2016/11/22 23:23:47
s/an tar/a tar/
djd-OOO-Apr2017
2016/11/23 00:36:34
Done.
|
| + // be combined into archives before |
|
mcgreevy
2016/11/22 23:23:46
s/before uploaded directly to/before being uploaded directly to/
djd-OOO-Apr2017
2016/11/23 00:36:34
Done.
|
| + // uploaded directly to the server. |
| + archiveThreshold = 100e3 // 100kB |
|
mcgreevy
2016/11/22 23:23:47
I'd expect this to be 100 * 1024 ...
djd-OOO-Apr2017
2016/11/23 00:36:34
Without invoking religious wars (https://en.wikipedia.org/wiki/Kibibyte) ...
mcgreevy
2016/11/23 00:44:13
I don't care that much.
|
| +) |
| + |
| var cmdExpArchive = &subcommands.Command{ |
| UsageLine: "exparchive <options>", |
| ShortDesc: "EXPERIMENTAL creates a .isolated file and uploads the tree to an isolate server.", |
| @@ -52,6 +61,14 @@ func (c *expArchiveRun) Parse(a subcommands.Application, args []string) error { |
| return nil |
| } |
// Item represents a file or symlink referenced by an isolate file.
type Item struct {
	Path    string      // Path as produced by filepath.Walk over the dep (may be relative to the walk root — TODO confirm it is absolute).
	RelPath string      // Path relative to the isolate root directory (computed via filepath.Rel).
	Size    int64       // Size in bytes, as reported by os.FileInfo; used to pick the upload strategy.
	Mode    os.FileMode // File mode from os.FileInfo; checked against os.ModeSymlink to detect symlinks.
}
| + |
| func (c *expArchiveRun) main(a subcommands.Application, args []string) error { |
| // Parse the incoming isolate file. |
| deps, rootDir, isol, err := isolate.ProcessIsolate(&c.ArchiveOptions) |
| @@ -60,8 +77,53 @@ func (c *expArchiveRun) main(a subcommands.Application, args []string) error { |
| } |
| log.Printf("Isolate referenced %d deps", len(deps)) |
| - // TODO(djd): actually do something with the isolated. |
| - _ = rootDir |
| + // Walk each of the deps, partioning the results into symlinks and files categorised by size. |
| + var links, smallFiles, largeFiles []*Item |
| + var smallSize, largeSize int64 // Cumulative size of small/large files. |
| + for _, dep := range deps { |
| + // Try to walk dep. If dep is a file (or symlink), the inner function is called exactly once. |
| + err := filepath.Walk(filepath.Clean(dep), func(path string, info os.FileInfo, err error) error { |
| + if err != nil { |
| + return err |
| + } |
| + if info.IsDir() { |
| + return nil |
| + } |
| + |
| + relPath, err := filepath.Rel(rootDir, path) |
| + if err != nil { |
| + return err |
| + } |
| + |
| + item := &Item{ |
| + Path: path, |
| + RelPath: relPath, |
| + Mode: info.Mode(), |
| + Size: info.Size(), |
| + } |
| + |
| + switch { |
| + case item.Mode&os.ModeSymlink == os.ModeSymlink: |
| + links = append(links, item) |
| + case item.Size < archiveThreshold: |
| + smallFiles = append(smallFiles, item) |
| + smallSize += item.Size |
| + default: |
| + largeFiles = append(largeFiles, item) |
| + largeSize += item.Size |
| + } |
| + return nil |
| + }) |
| + if err != nil { |
| + return err |
| + } |
| + } |
| + |
| + log.Printf("Isolate expanded to %d files (total size %s) and %d symlinks", len(smallFiles)+len(largeFiles), humanize.Bytes(uint64(smallSize+largeSize)), len(links)) |
| + log.Printf("\t%d files (%s) to be isolated individually", len(largeFiles), humanize.Bytes(uint64(largeSize))) |
| + log.Printf("\t%d files (%s) to be isolated in archives", len(smallFiles), humanize.Bytes(uint64(smallSize))) |
| + |
| + // TODO(djd): actually do something with the each of links, smallFiles and largeFiles. |
| // Marshal the isolated file into JSON. |
| isolJSON, err := json.Marshal(isol) |