Index: client/cmd/isolate/exp_archive.go
diff --git a/client/cmd/isolate/exp_archive.go b/client/cmd/isolate/exp_archive.go
index b5cf5ccdbe527c6029a52eb37d3c44dd79840e5a..b82522a4656f3484252fccdff2bdc7092b979c2f 100644
--- a/client/cmd/isolate/exp_archive.go
+++ b/client/cmd/isolate/exp_archive.go
@@ -169,57 +169,15 @@ func partitionDeps(deps []string, rootDir string, blacklist []string) (partition
 	return walker.parts, nil
 }
 
-// main contains the core logic for experimental archive.
-func (c *expArchiveRun) main() error {
-	// TODO(djd): This func is long and has a lot of internal complexity (like,
-	// such as, archiveCallback). Refactor.
-
-	start := time.Now()
-	archiveOpts := &c.isolateFlags.ArchiveOptions
-	// Parse the incoming isolate file.
-	deps, rootDir, isol, err := isolate.ProcessIsolate(archiveOpts)
-	if err != nil {
-		return fmt.Errorf("failed to process isolate: %v", err)
-	}
-	log.Printf("Isolate referenced %d deps", len(deps))
-
-	// Set up a background context which is cancelled when this function returns.
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	// Create the isolated client which connects to the isolate server.
-	authCl, err := c.createAuthClient()
-	if err != nil {
-		return err
-	}
-	client := isolatedclient.New(nil, authCl, c.isolatedFlags.ServerURL, c.isolatedFlags.Namespace, nil, nil)
-
-	// Set up a checker and uploader. We limit the uploader to one concurrent
-	// upload, since the uploads are all coming from disk (with the exception of
-	// the isolated JSON itself) and we only want a single goroutine reading from
-	// disk at once.
-	checker := NewChecker(ctx, client)
-	uploader := NewUploader(ctx, client, 1)
-
-	parts, err := partitionDeps(deps, rootDir, c.isolateFlags.ArchiveOptions.Blacklist)
-	if err != nil {
-		return fmt.Errorf("partitioning deps: %v", err)
-	}
-
+func uploadDeps(parts partitionedDeps, checker *Checker, uploader *Uploader) (map[string]isolated.File, error) {
 	// Construct a map of the files that constitute the isolate.
 	files := make(map[string]isolated.File)
 
-	numFiles := len(parts.filesToArchive.items) + len(parts.indivFiles.items)
-	filesSize := uint64(parts.filesToArchive.totalSize + parts.indivFiles.totalSize)
-	log.Printf("Isolate expanded to %d files (total size %s) and %d symlinks", numFiles, humanize.Bytes(filesSize), len(parts.links.items))
-	log.Printf("\t%d files (%s) to be isolated individually", len(parts.indivFiles.items), humanize.Bytes(uint64(parts.indivFiles.totalSize)))
-	log.Printf("\t%d files (%s) to be isolated in archives", len(parts.filesToArchive.items), humanize.Bytes(uint64(parts.filesToArchive.totalSize)))
-
 	// Handle the symlinks.
 	for _, item := range parts.links.items {
 		l, err := os.Readlink(item.Path)
 		if err != nil {
-			return fmt.Errorf("unable to resolve symlink for %q: %v", item.Path, err)
+			return nil, fmt.Errorf("unable to resolve symlink for %q: %v", item.Path, err)
 		}
 		files[item.RelPath] = isolated.SymLink(l)
 	}
@@ -232,7 +190,7 @@ func (c *expArchiveRun) main() error {
 		bundle := bundle
 		digest, tarSize, err := bundle.Digest()
 		if err != nil {
-			return err
+			return nil, err
 		}
 
 		log.Printf("Created tar archive %q (%s)", digest, humanize.Bytes(uint64(tarSize)))
@@ -262,7 +220,7 @@ func (c *expArchiveRun) main() error {
 	for _, item := range parts.indivFiles.items {
 		d, err := hashFile(item.Path)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		item.Digest = d
 		files[item.RelPath] = isolated.BasicFile(item.Digest, int(item.Mode), item.Size)
@@ -276,6 +234,56 @@ func (c *expArchiveRun) main() error {
 			})
 		})
 	}
+	return files, nil
+}
+
+// main contains the core logic for experimental archive.
+func (c *expArchiveRun) main() error {
+	// TODO(djd): This func is long and has a lot of internal complexity (like,
+	// such as, archiveCallback). Refactor.
+
+	start := time.Now()
+	archiveOpts := &c.isolateFlags.ArchiveOptions
+	// Parse the incoming isolate file.
+	deps, rootDir, isol, err := isolate.ProcessIsolate(archiveOpts)
+	if err != nil {
+		return fmt.Errorf("failed to process isolate: %v", err)
+	}
+	log.Printf("Isolate referenced %d deps", len(deps))
+
+	// Set up a background context which is cancelled when this function returns.
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Create the isolated client which connects to the isolate server.
+	authCl, err := c.createAuthClient()
+	if err != nil {
+		return err
+	}
+	client := isolatedclient.New(nil, authCl, c.isolatedFlags.ServerURL, c.isolatedFlags.Namespace, nil, nil)
+
+	// Set up a checker and uploader. We limit the uploader to one concurrent
+	// upload, since the uploads are all coming from disk (with the exception of
+	// the isolated JSON itself) and we only want a single goroutine reading from
+	// disk at once.
+	checker := NewChecker(ctx, client)
+	uploader := NewUploader(ctx, client, 1)
+
+	parts, err := partitionDeps(deps, rootDir, c.isolateFlags.ArchiveOptions.Blacklist)
+	if err != nil {
+		return fmt.Errorf("partitioning deps: %v", err)
+	}
+
+	numFiles := len(parts.filesToArchive.items) + len(parts.indivFiles.items)
+	filesSize := uint64(parts.filesToArchive.totalSize + parts.indivFiles.totalSize)
+	log.Printf("Isolate expanded to %d files (total size %s) and %d symlinks", numFiles, humanize.Bytes(filesSize), len(parts.links.items))
+	log.Printf("\t%d files (%s) to be isolated individually", len(parts.indivFiles.items), humanize.Bytes(uint64(parts.indivFiles.totalSize)))
+	log.Printf("\t%d files (%s) to be isolated in archives", len(parts.filesToArchive.items), humanize.Bytes(uint64(parts.filesToArchive.totalSize)))
+
+	files, err := uploadDeps(parts, checker, uploader)
+	if err != nil {
+		return err
+	}
 
 	// Marshal the isolated file into JSON, and create an Item to describe it.
 	isol.Files = files
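
A minimal sketch, not part of the patch, of how the extracted helper composes with partitionDeps for another caller in this package. It assumes the same package and imports as exp_archive.go; the function name isolateTree is hypothetical, and the Checker and Uploader are expected to be wired up exactly as main() does above (a single concurrent upload, since all reads come from disk).

// isolateTree is an illustrative wrapper over the two steps this diff separates:
// partition the dependency list, then upload everything and collect the
// resulting isolated.File entries keyed by relative path.
func isolateTree(deps []string, rootDir string, blacklist []string, checker *Checker, uploader *Uploader) (map[string]isolated.File, error) {
	parts, err := partitionDeps(deps, rootDir, blacklist)
	if err != nil {
		return nil, fmt.Errorf("partitioning deps: %v", err)
	}
	return uploadDeps(parts, checker, uploader)
}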