Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(324)

Side by Side Diff: deploytool/cmd/luci_deploy/deploy_container_engine.go

Issue 2963503003: [errors] Greatly simplify common/errors package. (Closed)
Patch Set: fix nits Created 3 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 The LUCI Authors. All rights reserved. 1 // Copyright 2016 The LUCI Authors. All rights reserved.
2 // Use of this source code is governed under the Apache License, Version 2.0 2 // Use of this source code is governed under the Apache License, Version 2.0
3 // that can be found in the LICENSE file. 3 // that can be found in the LICENSE file.
4 4
5 package main 5 package main
6 6
7 import ( 7 import (
8 "fmt" 8 "fmt"
9 "sort" 9 "sort"
10 "strconv" 10 "strconv"
(...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after
112 } 112 }
113 113
114 func (d *containerEngineDeployment) stage(w *work, root *managedfs.Dir, params * deployParams) error { 114 func (d *containerEngineDeployment) stage(w *work, root *managedfs.Dir, params * deployParams) error {
115 d.ignoreCurrentVersion = params.ignoreCurrentVersion 115 d.ignoreCurrentVersion = params.ignoreCurrentVersion
116 116
117 // Build a common timestamp suffix for our Docker images. 117 // Build a common timestamp suffix for our Docker images.
118 d.timestampSuffix = strconv.FormatInt(clock.Now(w).Unix(), 10) 118 d.timestampSuffix = strconv.FormatInt(clock.Now(w).Unix(), 10)
119 119
120 podRoot, err := root.EnsureDirectory("pods") 120 podRoot, err := root.EnsureDirectory("pods")
121 if err != nil { 121 if err != nil {
122 » » return errors.Annotate(err).Reason("failed to create pods direct ory").Err() 122 » » return errors.Annotate(err, "failed to create pods directory").E rr()
123 } 123 }
124 124
125 // Stage in parallel. We will stage all pods before we stage any contain ers, 125 // Stage in parallel. We will stage all pods before we stage any contain ers,
126 // as container staging requires some pod staging values to be populated . 126 // as container staging requires some pod staging values to be populated .
127 err = w.RunMulti(func(workC chan<- func() error) { 127 err = w.RunMulti(func(workC chan<- func() error) {
128 // Check and get all Kubernetes contexts in series. 128 // Check and get all Kubernetes contexts in series.
129 // 129 //
130 // These all share the same Kubernetes configuration file, so we don't want 130 // These all share the same Kubernetes configuration file, so we don't want
131 // them to stomp each other if we did them in parallel. 131 // them to stomp each other if we did them in parallel.
132 workC <- func() error { 132 workC <- func() error {
133 for _, name := range d.clusterNames { 133 for _, name := range d.clusterNames {
134 cluster := d.clusters[name] 134 cluster := d.clusters[name]
135 135
136 var err error 136 var err error
137 if cluster.kubeCtx, err = getContainerEngineKube rnetesContext(w, cluster.cluster); err != nil { 137 if cluster.kubeCtx, err = getContainerEngineKube rnetesContext(w, cluster.cluster); err != nil {
138 » » » » » return errors.Annotate(err).Reason("fail ed to get Kubernetes context for %(cluster)q"). 138 » » » » » return errors.Annotate(err, "failed to g et Kubernetes context for %q", cluster.cluster.Name).Err()
139 » » » » » » D("cluster", cluster.cluster.Nam e).Err()
140 } 139 }
141 } 140 }
142 return nil 141 return nil
143 } 142 }
144 143
145 for _, pod := range d.pods { 144 for _, pod := range d.pods {
146 pod := pod 145 pod := pod
147 workC <- func() error { 146 workC <- func() error {
148 // Use the name of this Pod's Component for stag ing directory. 147 // Use the name of this Pod's Component for stag ing directory.
149 name := pod.pod.comp.comp.Name 148 name := pod.pod.comp.comp.Name
150 podDir, err := podRoot.EnsureDirectory(name) 149 podDir, err := podRoot.EnsureDirectory(name)
151 if err != nil { 150 if err != nil {
152 » » » » » return errors.Annotate(err).Reason("fail ed to create pod directory for %(pod)q"). 151 » » » » » return errors.Annotate(err, "failed to c reate pod directory for %q", name).Err()
153 » » » » » » D("pod", name).Err()
154 } 152 }
155 153
156 return pod.stage(w, podDir, params) 154 return pod.stage(w, podDir, params)
157 } 155 }
158 } 156 }
159 }) 157 })
160 if err != nil { 158 if err != nil {
161 return err 159 return err
162 } 160 }
163 161
164 // Now that pods are deployed, deploy our clusters. 162 // Now that pods are deployed, deploy our clusters.
165 clusterRoot, err := root.EnsureDirectory("clusters") 163 clusterRoot, err := root.EnsureDirectory("clusters")
166 if err != nil { 164 if err != nil {
167 » » return errors.Annotate(err).Reason("failed to create clusters di rectory").Err() 165 » » return errors.Annotate(err, "failed to create clusters directory ").Err()
168 } 166 }
169 167
170 return w.RunMulti(func(workC chan<- func() error) { 168 return w.RunMulti(func(workC chan<- func() error) {
171 // Stage each cluster and pod in parallel. 169 // Stage each cluster and pod in parallel.
172 for _, name := range d.clusterNames { 170 for _, name := range d.clusterNames {
173 cluster := d.clusters[name] 171 cluster := d.clusters[name]
174 172
175 workC <- func() error { 173 workC <- func() error {
176 clusterDir, err := clusterRoot.EnsureDirectory(c luster.cluster.Name) 174 clusterDir, err := clusterRoot.EnsureDirectory(c luster.cluster.Name)
177 if err != nil { 175 if err != nil {
178 » » » » » return errors.Annotate(err).Reason("fail ed to create cluster directory for %(cluster)q"). 176 » » » » » return errors.Annotate(err, "failed to c reate cluster directory for %q", cluster.cluster.Name).Err()
179 » » » » » » D("cluster", cluster.cluster.Nam e).Err()
180 } 177 }
181 178
182 return cluster.stage(w, clusterDir) 179 return cluster.stage(w, clusterDir)
183 } 180 }
184 } 181 }
185 }) 182 })
186 } 183 }
187 184
188 func (d *containerEngineDeployment) localBuild(w *work) error { 185 func (d *containerEngineDeployment) localBuild(w *work) error {
189 return w.RunMulti(func(workC chan<- func() error) { 186 return w.RunMulti(func(workC chan<- func() error) {
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
265 } 262 }
266 sort.Strings(c.scopes) 263 sort.Strings(c.scopes)
267 264
 268 // Stage for each deployment pod. 265 // Stage for each deployment pod.
269 return w.RunMulti(func(workC chan<- func() error) { 266 return w.RunMulti(func(workC chan<- func() error) {
270 for _, bp := range c.pods { 267 for _, bp := range c.pods {
271 bp := bp 268 bp := bp
272 workC <- func() error { 269 workC <- func() error {
273 stageDir, err := root.EnsureDirectory(string(bp. sp.pod.comp.comp.title)) 270 stageDir, err := root.EnsureDirectory(string(bp. sp.pod.comp.comp.title))
274 if err != nil { 271 if err != nil {
275 » » » » » return errors.Annotate(err).Reason("fail ed to create staging directory").Err() 272 » » » » » return errors.Annotate(err, "failed to c reate staging directory").Err()
276 } 273 }
277 274
278 return bp.stage(w, stageDir) 275 return bp.stage(w, stageDir)
279 } 276 }
280 } 277 }
281 }) 278 })
282 } 279 }
283 280
284 func (c *containerEngineDeploymentCluster) commit(w *work) error { 281 func (c *containerEngineDeploymentCluster) commit(w *work) error {
285 // Push all pods in parallel. 282 // Push all pods in parallel.
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
320 // Generate our deployment YAML. 317 // Generate our deployment YAML.
321 depYAML := kubeBuildDeploymentYAML(bp.binding, bp.sp.deploymentName, bp. sp.imageMap) 318 depYAML := kubeBuildDeploymentYAML(bp.binding, bp.sp.deploymentName, bp. sp.imageMap)
322 depYAML.Metadata.addAnnotation(kubeManagedByKey, kubeManagedByMe) 319 depYAML.Metadata.addAnnotation(kubeManagedByKey, kubeManagedByMe)
323 depYAML.Metadata.addAnnotation(kubeVersionKey, bp.sp.version.String()) 320 depYAML.Metadata.addAnnotation(kubeVersionKey, bp.sp.version.String())
324 depYAML.Metadata.addAnnotation(kubeSourceVersionKey, comp.source().Revis ion) 321 depYAML.Metadata.addAnnotation(kubeSourceVersionKey, comp.source().Revis ion)
325 depYAML.Spec.Template.Metadata.addLabel("luci/project", string(comp.comp .proj.title)) 322 depYAML.Spec.Template.Metadata.addLabel("luci/project", string(comp.comp .proj.title))
326 depYAML.Spec.Template.Metadata.addLabel("luci/component", string(comp.co mp.title)) 323 depYAML.Spec.Template.Metadata.addLabel("luci/component", string(comp.co mp.title))
327 324
328 deploymentYAML := root.File("deployment.yaml") 325 deploymentYAML := root.File("deployment.yaml")
329 if err := deploymentYAML.GenerateYAML(w, depYAML); err != nil { 326 if err := deploymentYAML.GenerateYAML(w, depYAML); err != nil {
330 » » return errors.Annotate(err).Reason("failed to generate deploymen t YAML").Err() 327 » » return errors.Annotate(err, "failed to generate deployment YAML" ).Err()
331 } 328 }
332 bp.deploymentYAMLPath = deploymentYAML.String() 329 bp.deploymentYAMLPath = deploymentYAML.String()
333 return nil 330 return nil
334 } 331 }
335 332
336 func (bp *containerEngineBoundPod) commit(w *work) error { 333 func (bp *containerEngineBoundPod) commit(w *work) error {
337 kubectl, err := bp.c.kubectl(w) 334 kubectl, err := bp.c.kubectl(w)
338 if err != nil { 335 if err != nil {
339 » » return errors.Annotate(err).Err() 336 » » return errors.Annotate(err, "").Err()
340 } 337 }
341 338
342 // Get the current deployment status for this pod. 339 // Get the current deployment status for this pod.
343 var ( 340 var (
344 kd kubeDeployment 341 kd kubeDeployment
345 currentVersion string 342 currentVersion string
346 ) 343 )
347 switch err := kubectl.getResource(w, fmt.Sprintf("deployments/%s", bp.sp .deploymentName), &kd); err { 344 switch err := kubectl.getResource(w, fmt.Sprintf("deployments/%s", bp.sp .deploymentName), &kd); err {
348 case nil: 345 case nil:
349 // Got deployment status. 346 // Got deployment status.
350 md := kd.Metadata 347 md := kd.Metadata
351 if md == nil { 348 if md == nil {
352 return errors.Reason("current deployment has no metadata ").Err() 349 return errors.Reason("current deployment has no metadata ").Err()
353 } 350 }
354 351
355 // Make sure the current deployment is managed by this tool. 352 // Make sure the current deployment is managed by this tool.
356 v, ok := md.Annotations[kubeManagedByKey].(string) 353 v, ok := md.Annotations[kubeManagedByKey].(string)
357 if !ok { 354 if !ok {
358 return errors.Reason("missing '" + kubeManagedByKey + "' annotation").Err() 355 return errors.Reason("missing '" + kubeManagedByKey + "' annotation").Err()
359 } 356 }
360 if v != kubeManagedByMe { 357 if v != kubeManagedByMe {
361 log.Fields{ 358 log.Fields{
362 "managedBy": v, 359 "managedBy": v,
363 "deployment": bp.sp.deploymentName, 360 "deployment": bp.sp.deploymentName,
364 }.Errorf(w, "Current deployment is not managed.") 361 }.Errorf(w, "Current deployment is not managed.")
365 » » » return errors.Reason("unknown manager %(managedBy)q").D( "managedBy", v).Err() 362 » » » return errors.Reason("unknown manager %q", v).Err()
366 } 363 }
367 364
368 // Is the current deployment tagged at the current version? 365 // Is the current deployment tagged at the current version?
369 currentVersion, ok = md.Annotations[kubeVersionKey].(string) 366 currentVersion, ok = md.Annotations[kubeVersionKey].(string)
370 if !ok { 367 if !ok {
371 return errors.Reason("missing '" + kubeVersionKey + "' a nnotation").Err() 368 return errors.Reason("missing '" + kubeVersionKey + "' a nnotation").Err()
372 } 369 }
373 cloudVersion, err := parseCloudProjectVersion(bp.c.gke.project.V ersionScheme, currentVersion) 370 cloudVersion, err := parseCloudProjectVersion(bp.c.gke.project.V ersionScheme, currentVersion)
374 switch { 371 switch {
375 case err != nil: 372 case err != nil:
376 if !bp.c.gke.ignoreCurrentVersion { 373 if !bp.c.gke.ignoreCurrentVersion {
377 » » » » return errors.Annotate(err).Reason("failed to pa rse current version %(version)q"). 374 » » » » return errors.Annotate(err, "failed to parse cur rent version %q", currentVersion).Err()
378 » » » » » D("version", currentVersion).Err()
379 } 375 }
380 376
381 log.Fields{ 377 log.Fields{
382 log.ErrorKey: err, 378 log.ErrorKey: err,
383 "currentVersion": currentVersion, 379 "currentVersion": currentVersion,
384 }.Warningf(w, "Could not parse current version, but conf igured to ignore this failure.") 380 }.Warningf(w, "Could not parse current version, but conf igured to ignore this failure.")
385 381
386 case cloudVersion.String() == bp.sp.version.String(): 382 case cloudVersion.String() == bp.sp.version.String():
387 if !bp.c.gke.ignoreCurrentVersion { 383 if !bp.c.gke.ignoreCurrentVersion {
388 log.Fields{ 384 log.Fields{
(...skipping 10 matching lines...) Expand all
399 // fallthrough to "kubectl apply" the new configuration. 395 // fallthrough to "kubectl apply" the new configuration.
400 fallthrough 396 fallthrough
401 397
402 case errKubeResourceNotFound: 398 case errKubeResourceNotFound:
403 // No current deployment, create a new one. 399 // No current deployment, create a new one.
404 log.Fields{ 400 log.Fields{
405 "currentVersion": currentVersion, 401 "currentVersion": currentVersion,
406 "deployVersion": bp.sp.version, 402 "deployVersion": bp.sp.version,
407 }.Infof(w, "Deploying new pod configuration.") 403 }.Infof(w, "Deploying new pod configuration.")
408 if err := kubectl.exec("apply", "-f", bp.deploymentYAMLPath).che ck(w); err != nil { 404 if err := kubectl.exec("apply", "-f", bp.deploymentYAMLPath).che ck(w); err != nil {
409 » » » return errors.Annotate(err).Reason("failed to create new deployment configuration").Err() 405 » » » return errors.Annotate(err, "failed to create new deploy ment configuration").Err()
410 } 406 }
411 return nil 407 return nil
412 408
413 default: 409 default:
414 » » return errors.Annotate(err).Reason("failed to get status for dep loyment %(deployment)q"). 410 » » return errors.Annotate(err, "failed to get status for deployment %q", bp.sp.deploymentName).Err()
415 » » » D("deployment", bp.sp.deploymentName).Err()
416 } 411 }
417 } 412 }
418 413
419 // stagedGKEPod is staging information for a Google Container Engine deployed 414 // stagedGKEPod is staging information for a Google Container Engine deployed
420 // Kubernetes Pod. 415 // Kubernetes Pod.
421 type stagedGKEPod struct { 416 type stagedGKEPod struct {
422 *deploy.ContainerEnginePod 417 *deploy.ContainerEnginePod
423 418
424 // gke is the container engine deployment that owns this pod. 419 // gke is the container engine deployment that owns this pod.
425 gke *containerEngineDeployment 420 gke *containerEngineDeployment
(...skipping 16 matching lines...) Expand all
442 func (sp *stagedGKEPod) cloudProject() *layoutDeploymentCloudProject { 437 func (sp *stagedGKEPod) cloudProject() *layoutDeploymentCloudProject {
443 return sp.pod.comp.dep.cloudProject 438 return sp.pod.comp.dep.cloudProject
444 } 439 }
445 440
446 func (sp *stagedGKEPod) stage(w *work, root *managedfs.Dir, params *deployParams ) error { 441 func (sp *stagedGKEPod) stage(w *work, root *managedfs.Dir, params *deployParams ) error {
447 // Calculate the cloud project version for this pod. 442 // Calculate the cloud project version for this pod.
448 if sp.version = params.forceVersion; sp.version == nil { 443 if sp.version = params.forceVersion; sp.version == nil {
449 var err error 444 var err error
450 sp.version, err = makeCloudProjectVersion(sp.cloudProject(), sp. pod.comp.source()) 445 sp.version, err = makeCloudProjectVersion(sp.cloudProject(), sp. pod.comp.source())
451 if err != nil { 446 if err != nil {
452 » » » return errors.Annotate(err).Reason("failed to get cloud version").Err() 447 » » » return errors.Annotate(err, "failed to get cloud version ").Err()
453 } 448 }
454 } 449 }
455 450
456 comp := sp.pod.comp 451 comp := sp.pod.comp
457 sp.deploymentName = fmt.Sprintf("%s--%s", comp.comp.proj.title, comp.com p.title) 452 sp.deploymentName = fmt.Sprintf("%s--%s", comp.comp.proj.title, comp.com p.title)
458 453
459 sp.imageMap = make(map[string]string, len(sp.KubePod.Container)) 454 sp.imageMap = make(map[string]string, len(sp.KubePod.Container))
460 sp.containers = make([]*stagedKubernetesContainer, len(sp.KubePod.Contai ner)) 455 sp.containers = make([]*stagedKubernetesContainer, len(sp.KubePod.Contai ner))
461 for i, kc := range sp.KubePod.Container { 456 for i, kc := range sp.KubePod.Container {
462 skc := stagedKubernetesContainer{ 457 skc := stagedKubernetesContainer{
(...skipping 14 matching lines...) Expand all
477 if skc.needsGoPath() { 472 if skc.needsGoPath() {
478 needsGoPath = true 473 needsGoPath = true
479 break 474 break
480 } 475 }
481 } 476 }
482 if needsGoPath { 477 if needsGoPath {
483 // Build a GOPATH from our sources. 478 // Build a GOPATH from our sources.
484 // Construct a GOPATH for this module. 479 // Construct a GOPATH for this module.
485 goPath, err := root.EnsureDirectory("gopath") 480 goPath, err := root.EnsureDirectory("gopath")
486 if err != nil { 481 if err != nil {
487 » » » return errors.Annotate(err).Reason("failed to create GOP ATH base").Err() 482 » » » return errors.Annotate(err, "failed to create GOPATH bas e").Err()
488 } 483 }
489 if err := stageGoPath(w, comp, goPath); err != nil { 484 if err := stageGoPath(w, comp, goPath); err != nil {
490 » » » return errors.Annotate(err).Reason("failed to stage GOPA TH").Err() 485 » » » return errors.Annotate(err, "failed to stage GOPATH").Er r()
491 } 486 }
492 sp.goPath = []string{goPath.String()} 487 sp.goPath = []string{goPath.String()}
493 } 488 }
494 489
495 // Stage each of our containers. 490 // Stage each of our containers.
496 containersDir, err := root.EnsureDirectory("containers") 491 containersDir, err := root.EnsureDirectory("containers")
497 if err != nil { 492 if err != nil {
498 » » return errors.Annotate(err).Err() 493 » » return errors.Annotate(err, "").Err()
499 } 494 }
500 err = w.RunMulti(func(workC chan<- func() error) { 495 err = w.RunMulti(func(workC chan<- func() error) {
501 // Stage each component. 496 // Stage each component.
502 for _, skc := range sp.containers { 497 for _, skc := range sp.containers {
503 skc := skc 498 skc := skc
504 workC <- func() error { 499 workC <- func() error {
505 containerDir, err := containersDir.EnsureDirecto ry(skc.Name) 500 containerDir, err := containersDir.EnsureDirecto ry(skc.Name)
506 if err != nil { 501 if err != nil {
507 » » » » » return errors.Annotate(err).Err() 502 » » » » » return errors.Annotate(err, "").Err()
508 } 503 }
509 504
510 if err := skc.stage(w, containerDir); err != nil { 505 if err := skc.stage(w, containerDir); err != nil {
511 » » » » » return errors.Annotate(err).Reason("fail ed to stage container %(container)q"). 506 » » » » » return errors.Annotate(err, "failed to s tage container %q", skc.Name).Err()
512 » » » » » » D("container", skc.Name).Err()
513 } 507 }
514 return nil 508 return nil
515 } 509 }
516 } 510 }
517 }) 511 })
518 if err != nil { 512 if err != nil {
519 return err 513 return err
520 } 514 }
521 515
522 if err := root.CleanUp(); err != nil { 516 if err := root.CleanUp(); err != nil {
523 » » return errors.Annotate(err).Reason("failed to cleanup staging ar ea").Err() 517 » » return errors.Annotate(err, "failed to cleanup staging area").Er r()
524 } 518 }
525 return nil 519 return nil
526 } 520 }
527 521
528 func (sp *stagedGKEPod) build(w *work) error { 522 func (sp *stagedGKEPod) build(w *work) error {
529 // Build any containers within this pod. 523 // Build any containers within this pod.
530 return w.RunMulti(func(workC chan<- func() error) { 524 return w.RunMulti(func(workC chan<- func() error) {
531 for _, cont := range sp.containers { 525 for _, cont := range sp.containers {
532 workC <- func() error { 526 workC <- func() error {
533 return cont.build(w) 527 return cont.build(w)
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
570 return true 564 return true
571 default: 565 default:
572 return false 566 return false
573 } 567 }
574 } 568 }
575 569
576 func (skc *stagedKubernetesContainer) stage(w *work, root *managedfs.Dir) error { 570 func (skc *stagedKubernetesContainer) stage(w *work, root *managedfs.Dir) error {
577 // Build each Component. 571 // Build each Component.
578 buildDir, err := root.EnsureDirectory("build") 572 buildDir, err := root.EnsureDirectory("build")
579 if err != nil { 573 if err != nil {
580 » » return errors.Annotate(err).Reason("failed to create build direc tory").Err() 574 » » return errors.Annotate(err, "failed to create build directory"). Err()
581 } 575 }
582 if err := buildComponent(w, skc.pod.pod.comp, buildDir); err != nil { 576 if err := buildComponent(w, skc.pod.pod.comp, buildDir); err != nil {
583 » » return errors.Annotate(err).Reason("failed to build component"). Err() 577 » » return errors.Annotate(err, "failed to build component").Err()
584 } 578 }
585 579
586 switch skc.Type { 580 switch skc.Type {
587 case deploy.KubernetesPod_Container_GO: 581 case deploy.KubernetesPod_Container_GO:
588 // Specify how we are to be built. 582 // Specify how we are to be built.
589 skc.buildFn = func(w *work) error { 583 skc.buildFn = func(w *work) error {
590 path, err := skc.pod.pod.comp.buildPath(skc.GetBuild()) 584 path, err := skc.pod.pod.comp.buildPath(skc.GetBuild())
591 if err != nil { 585 if err != nil {
592 » » » » return errors.Annotate(err).Err() 586 » » » » return errors.Annotate(err, "").Err()
593 } 587 }
594 return skc.buildGo(w, path) 588 return skc.buildGo(w, path)
595 } 589 }
596 590
597 default: 591 default:
598 » » return errors.Reason("unknown Kubernetes pod type %(type)T").D(" type", skc.Type).Err() 592 » » return errors.Reason("unknown Kubernetes pod type %T", skc.Type) .Err()
599 } 593 }
600 return nil 594 return nil
601 } 595 }
602 596
603 func (skc *stagedKubernetesContainer) build(w *work) error { 597 func (skc *stagedKubernetesContainer) build(w *work) error {
604 if f := skc.buildFn; f != nil { 598 if f := skc.buildFn; f != nil {
605 return f(w) 599 return f(w)
606 } 600 }
607 return nil 601 return nil
608 } 602 }
609 603
610 // build builds the image associated with this container. 604 // build builds the image associated with this container.
611 func (skc *stagedKubernetesContainer) buildGo(w *work, entryPath string) error { 605 func (skc *stagedKubernetesContainer) buildGo(w *work, entryPath string) error {
612 gcloud, err := w.tools.gcloud(skc.pod.cloudProject().Name) 606 gcloud, err := w.tools.gcloud(skc.pod.cloudProject().Name)
613 if err != nil { 607 if err != nil {
614 » » return errors.Annotate(err).Reason("could not get gcloud tool"). Err() 608 » » return errors.Annotate(err, "could not get gcloud tool").Err()
615 } 609 }
616 610
617 // Use "aedeploy" to gather GOPATH and build against our root. 611 // Use "aedeploy" to gather GOPATH and build against our root.
618 aedeploy, err := w.tools.aedeploy(skc.pod.goPath) 612 aedeploy, err := w.tools.aedeploy(skc.pod.goPath)
619 if err != nil { 613 if err != nil {
620 » » return errors.Annotate(err).Err() 614 » » return errors.Annotate(err, "").Err()
621 } 615 }
622 616
623 x := gcloud.exec("docker", "--", "build", "-t", skc.image, ".") 617 x := gcloud.exec("docker", "--", "build", "-t", skc.image, ".")
624 return aedeploy.bootstrap(x).cwd(entryPath).check(w) 618 return aedeploy.bootstrap(x).cwd(entryPath).check(w)
625 } 619 }
626 620
627 func (skc *stagedKubernetesContainer) push(w *work) error { 621 func (skc *stagedKubernetesContainer) push(w *work) error {
628 gcloud, err := w.tools.gcloud(skc.pod.cloudProject().Name) 622 gcloud, err := w.tools.gcloud(skc.pod.cloudProject().Name)
629 if err != nil { 623 if err != nil {
630 » » return errors.Annotate(err).Reason("could not get gcloud tool"). Err() 624 » » return errors.Annotate(err, "could not get gcloud tool").Err()
631 } 625 }
632 626
633 if err := gcloud.exec("docker", "--", "push", skc.image).check(w); err ! = nil { 627 if err := gcloud.exec("docker", "--", "push", skc.image).check(w); err ! = nil {
634 » » return errors.Annotate(err).Reason("failed to push Docker image %(image)q"). 628 » » return errors.Annotate(err, "failed to push Docker image %q", sk c.image).Err()
635 » » » D("image", skc.image).Err()
636 } 629 }
637 return nil 630 return nil
638 } 631 }
639 632
640 func getContainerEngineKubernetesContext(w *work, cluster *layoutDeploymentGKECl uster) ( 633 func getContainerEngineKubernetesContext(w *work, cluster *layoutDeploymentGKECl uster) (
641 string, error) { 634 string, error) {
642 // Generate our Kubernetes context name. This is derived from the Google 635 // Generate our Kubernetes context name. This is derived from the Google
643 // Container Engine cluster parameters. 636 // Container Engine cluster parameters.
644 kubeCtx := fmt.Sprintf("gke_%s_%s_%s", cluster.cloudProject.Name, cluste r.Zone, cluster.Name) 637 kubeCtx := fmt.Sprintf("gke_%s_%s_%s", cluster.cloudProject.Name, cluste r.Zone, cluster.Name)
645 638
646 kubectl, err := w.tools.kubectl(kubeCtx) 639 kubectl, err := w.tools.kubectl(kubeCtx)
647 if err != nil { 640 if err != nil {
648 » » return "", errors.Annotate(err).Err() 641 » » return "", errors.Annotate(err, "").Err()
649 } 642 }
650 643
651 // Check if the context is already installed in our Kubernetes configura tion. 644 // Check if the context is already installed in our Kubernetes configura tion.
652 switch has, err := kubectl.hasContext(w); { 645 switch has, err := kubectl.hasContext(w); {
653 case err != nil: 646 case err != nil:
654 » » return "", errors.Annotate(err).Reason("failed to check for Kube rnetes context").Err() 647 » » return "", errors.Annotate(err, "failed to check for Kubernetes context").Err()
655 648
656 case !has: 649 case !has:
657 gcloud, err := w.tools.gcloud(cluster.cloudProject.Name) 650 gcloud, err := w.tools.gcloud(cluster.cloudProject.Name)
658 if err != nil { 651 if err != nil {
659 » » » return "", errors.Annotate(err).Err() 652 » » » return "", errors.Annotate(err, "").Err()
660 } 653 }
661 654
662 // The context isn't cached, we will fetch it via: 655 // The context isn't cached, we will fetch it via:
663 // $ gcloud container clusters get-credentials 656 // $ gcloud container clusters get-credentials
664 x := gcloud.exec( 657 x := gcloud.exec(
665 "container", "clusters", 658 "container", "clusters",
666 "get-credentials", cluster.Name, 659 "get-credentials", cluster.Name,
667 "--zone", cluster.Zone) 660 "--zone", cluster.Zone)
668 if err := x.check(w); err != nil { 661 if err := x.check(w); err != nil {
669 » » » return "", errors.Annotate(err).Reason("failed to get cl uster credentials").Err() 662 » » » return "", errors.Annotate(err, "failed to get cluster c redentials").Err()
670 } 663 }
671 switch has, err = kubectl.hasContext(w); { 664 switch has, err = kubectl.hasContext(w); {
672 case err != nil: 665 case err != nil:
673 » » » return "", errors.Annotate(err).Reason("failed to confir m Kubernetes context").Err() 666 » » » return "", errors.Annotate(err, "failed to confirm Kuber netes context").Err()
674 case !has: 667 case !has:
675 » » » return "", errors.Reason("context %(context)q missing af ter fetching credentials").D("context", kubeCtx).Err() 668 » » » return "", errors.Reason("context %q missing after fetch ing credentials", kubeCtx).Err()
676 } 669 }
677 } 670 }
678 return kubeCtx, nil 671 return kubeCtx, nil
679 } 672 }
OLDNEW
« no previous file with comments | « deploytool/cmd/luci_deploy/deploy_appengine.go ('k') | deploytool/cmd/luci_deploy/kubernetes.go » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698