| OLD | NEW |
| (Empty) |
| 1 // Copyright 2016 The LUCI Authors. All rights reserved. | |
| 2 // Use of this source code is governed under the Apache License, Version 2.0 | |
| 3 // that can be found in the LICENSE file. | |
| 4 | |
| 5 package main | |
| 6 | |
| 7 import ( | |
| 8 "fmt" | |
| 9 "sort" | |
| 10 "strconv" | |
| 11 "strings" | |
| 12 | |
| 13 "github.com/luci/luci-go/common/clock" | |
| 14 "github.com/luci/luci-go/common/errors" | |
| 15 log "github.com/luci/luci-go/common/logging" | |
| 16 "github.com/luci/luci-go/deploytool/api/deploy" | |
| 17 "github.com/luci/luci-go/deploytool/managedfs" | |
| 18 ) | |
| 19 | |
// Annotation keys and values used to mark and version Kubernetes Deployments
// that are managed by this deployment tool.
const (
	// kubeManagedByKey is the Deployment annotation key whose value identifies
	// the manager of a Kubernetes Deployment.
	kubeManagedByKey = "luci.managedBy"
	// kubeManagedByMe is the kubeManagedByKey annotation value indicating that
	// a given Kubernetes Deployment is managed by this deployment tool.
	kubeManagedByMe = "luci-deploytool"

	// kubeDeployToolPrefix namespaces all deploytool-specific annotation keys.
	kubeDeployToolPrefix = "luci.deploytool/"
	// kubeVersionKey annotates the deployed cloud project version.
	kubeVersionKey = kubeDeployToolPrefix + "version"
	// kubeSourceVersionKey annotates the source revision that was deployed.
	kubeSourceVersionKey = kubeDeployToolPrefix + "sourceVersion"
)
| 31 | |
| 32 func isKubeDeployToolKey(v string) bool { return strings.HasPrefix(v, kubeDeploy
ToolPrefix) } | |
| 33 | |
// containerEngineDeployment is a consolidated Google Container Engine
// deployment configuration. It includes staged configurations for specific
// components, as well as global Google Container Engine state for a single
// cloud project.
//
// A container engine deployment is made up of specific clusters within
// Container Engine to independently manage/deploy. Each of these clusters is
// further made up of the Kubernetes pods that are configured to be deployed to
// those clusters. Finally, each pod is broken into specific Kubernetes
// components (Docker images) that the pod is composed of.
//
// Pods/containers are specified as Project Components, and are staged in
// component subdirectories.
//
// Clusters are configured independently. During deployment, pods/containers
// are deployed to clusters.
type containerEngineDeployment struct {
	// project is the cloud project that this deployment is targeting.
	project *layoutDeploymentCloudProject

	// clusters is a map of the container engine clusters that have components
	// being deployed, keyed on cluster name.
	clusters map[string]*containerEngineDeploymentCluster
	// clusterNames is the sorted list of cluster names. It is prepared during
	// staging and used for deterministic iteration order.
	clusterNames []string

	// pods is the set of staged pods. Each pod corresponds to a single Project
	// Component. This is separate from Clusters, since a single Pod may be
	// deployed to multiple Clusters.
	//
	// There will be one Pod entry here per declaration, regardless of how many
	// times it's deployed.
	pods []*stagedGKEPod

	// podMap deduplicates staged pods: it maps each layout pod to its single
	// staged counterpart (see maybeRegisterPod).
	podMap map[*layoutDeploymentGKEPod]*stagedGKEPod

	// ignoreCurrentVersion is true if deployment should generate and push a new
	// version even if its base parameters match the currently-deployed version.
	ignoreCurrentVersion bool

	// timestampSuffix is a timestamp suffix appended to Docker image names. This
	// is used to differentiate Docker images from the same version.
	timestampSuffix string
}
| 80 | |
| 81 func (d *containerEngineDeployment) addCluster(cluster *layoutDeploymentGKEClust
er) *containerEngineDeploymentCluster { | |
| 82 if c := d.clusters[cluster.Name]; c != nil { | |
| 83 return c | |
| 84 } | |
| 85 | |
| 86 c := containerEngineDeploymentCluster{ | |
| 87 gke: d, | |
| 88 cluster: cluster, | |
| 89 } | |
| 90 d.clusters[cluster.Name] = &c | |
| 91 d.clusterNames = append(d.clusterNames, cluster.Name) | |
| 92 return &c | |
| 93 } | |
| 94 | |
| 95 func (d *containerEngineDeployment) maybeRegisterPod(pod *layoutDeploymentGKEPod
) *stagedGKEPod { | |
| 96 // If this is the first time we've seen this pod, add it to our pods lis
t. | |
| 97 if sp := d.podMap[pod]; sp != nil { | |
| 98 return sp | |
| 99 } | |
| 100 | |
| 101 sp := &stagedGKEPod{ | |
| 102 ContainerEnginePod: pod.ContainerEnginePod, | |
| 103 gke: d, | |
| 104 pod: pod, | |
| 105 } | |
| 106 if d.podMap == nil { | |
| 107 d.podMap = make(map[*layoutDeploymentGKEPod]*stagedGKEPod) | |
| 108 } | |
| 109 d.podMap[pod] = sp | |
| 110 d.pods = append(d.pods, sp) | |
| 111 return sp | |
| 112 } | |
| 113 | |
// stage stages this deployment under root: it resolves each cluster's
// Kubernetes context, stages every pod (and its containers), and then stages
// each cluster directory. Pods are fully staged before clusters, since
// cluster staging reads values populated by pod staging.
func (d *containerEngineDeployment) stage(w *work, root *managedfs.Dir, params *deployParams) error {
	d.ignoreCurrentVersion = params.ignoreCurrentVersion

	// Build a common timestamp suffix for our Docker images.
	d.timestampSuffix = strconv.FormatInt(clock.Now(w).Unix(), 10)

	podRoot, err := root.EnsureDirectory("pods")
	if err != nil {
		return errors.Annotate(err).Reason("failed to create pods directory").Err()
	}

	// Stage in parallel. We will stage all pods before we stage any containers,
	// as container staging requires some pod staging values to be populated.
	err = w.RunMulti(func(workC chan<- func() error) {
		// Check and get all Kubernetes contexts in series.
		//
		// These all share the same Kubernetes configuration file, so we don't want
		// them to stomp each other if we did them in parallel.
		workC <- func() error {
			for _, name := range d.clusterNames {
				cluster := d.clusters[name]

				var err error
				if cluster.kubeCtx, err = getContainerEngineKubernetesContext(w, cluster.cluster); err != nil {
					return errors.Annotate(err).Reason("failed to get Kubernetes context for %(cluster)q").
						D("cluster", cluster.cluster.Name).Err()
				}
			}
			return nil
		}

		for _, pod := range d.pods {
			pod := pod // capture per-iteration value for the closure
			workC <- func() error {
				// Use the name of this Pod's Component for staging directory.
				name := pod.pod.comp.comp.Name
				podDir, err := podRoot.EnsureDirectory(name)
				if err != nil {
					return errors.Annotate(err).Reason("failed to create pod directory for %(pod)q").
						D("pod", name).Err()
				}

				return pod.stage(w, podDir)
			}
		}
	})
	if err != nil {
		return err
	}

	// Now that pods are deployed, deploy our clusters.
	clusterRoot, err := root.EnsureDirectory("clusters")
	if err != nil {
		return errors.Annotate(err).Reason("failed to create clusters directory").Err()
	}

	return w.RunMulti(func(workC chan<- func() error) {
		// Stage each cluster and pod in parallel.
		for _, name := range d.clusterNames {
			// cluster is declared inside the loop, so each closure captures its
			// own per-iteration variable.
			cluster := d.clusters[name]

			workC <- func() error {
				clusterDir, err := clusterRoot.EnsureDirectory(cluster.cluster.Name)
				if err != nil {
					return errors.Annotate(err).Reason("failed to create cluster directory for %(cluster)q").
						D("cluster", cluster.cluster.Name).Err()
				}

				return cluster.stage(w, clusterDir)
			}
		}
	})
}
| 187 | |
| 188 func (d *containerEngineDeployment) localBuild(w *work) error { | |
| 189 return w.RunMulti(func(workC chan<- func() error) { | |
| 190 for _, pod := range d.pods { | |
| 191 pod := pod | |
| 192 workC <- func() error { | |
| 193 return pod.build(w) | |
| 194 } | |
| 195 } | |
| 196 }) | |
| 197 } | |
| 198 | |
| 199 func (d *containerEngineDeployment) push(w *work) error { | |
| 200 return w.RunMulti(func(workC chan<- func() error) { | |
| 201 for _, pod := range d.pods { | |
| 202 pod := pod | |
| 203 workC <- func() error { | |
| 204 return pod.push(w) | |
| 205 } | |
| 206 } | |
| 207 }) | |
| 208 } | |
| 209 | |
| 210 func (d *containerEngineDeployment) commit(w *work) error { | |
| 211 // Push all clusters in parallel. | |
| 212 return w.RunMulti(func(workC chan<- func() error) { | |
| 213 for _, name := range d.clusterNames { | |
| 214 cluster := d.clusters[name] | |
| 215 workC <- func() error { | |
| 216 return cluster.commit(w) | |
| 217 } | |
| 218 } | |
| 219 }) | |
| 220 } | |
| 221 | |
// containerEngineDeploymentCluster is the deployment configuration for a
// single Google Container Engine cluster. This includes the cluster's
// aggregate global configuration, as well as any staged pods that are being
// deployed to this cluster.
type containerEngineDeploymentCluster struct {
	// gke is the containerEngineDeployment that owns this cluster.
	gke *containerEngineDeployment

	// cluster is the underlying cluster configuration.
	cluster *layoutDeploymentGKECluster

	// pods is the sorted list of pod deployments in this cluster.
	pods []*containerEngineBoundPod

	// scopes is the set of all scopes across all pods registered to this
	// cluster, regardless of which are being deployed. It is populated and
	// sorted during staging.
	scopes []string

	// kubeCtx is the name of the Kubernetes context, as defined/installed by
	// gcloud.
	kubeCtx string
}
| 244 | |
| 245 func (c *containerEngineDeploymentCluster) attachPod(pod *layoutDeploymentGKEPod
Binding) { | |
| 246 c.pods = append(c.pods, &containerEngineBoundPod{ | |
| 247 sp: c.gke.maybeRegisterPod(pod.pod), | |
| 248 c: c, | |
| 249 binding: pod, | |
| 250 }) | |
| 251 } | |
| 252 | |
// stage stages this cluster under root: it aggregates the OAuth scopes of all
// registered pods and stages each bound pod in its own directory in parallel.
func (c *containerEngineDeploymentCluster) stage(w *work, root *managedfs.Dir) error {
	// Determine which scopes this cluster will need. This is across ALL pods
	// registered with the cluster, not just deployed ones.
	scopeMap := make(map[string]struct{})
	for _, bp := range c.pods {
		for _, scope := range bp.sp.pod.Scopes {
			scopeMap[scope] = struct{}{}
		}
	}
	// Sort for deterministic ordering.
	c.scopes = make([]string, 0, len(scopeMap))
	for scope := range scopeMap {
		c.scopes = append(c.scopes, scope)
	}
	sort.Strings(c.scopes)

	// Stage each deployed pod.
	return w.RunMulti(func(workC chan<- func() error) {
		for _, bp := range c.pods {
			bp := bp // capture per-iteration value for the closure
			workC <- func() error {
				stageDir, err := root.EnsureDirectory(string(bp.sp.pod.comp.comp.title))
				if err != nil {
					return errors.Annotate(err).Reason("failed to create staging directory").Err()
				}

				return bp.stage(w, stageDir)
			}
		}
	})
}
| 283 | |
| 284 func (c *containerEngineDeploymentCluster) commit(w *work) error { | |
| 285 // Push all pods in parallel. | |
| 286 return w.RunMulti(func(workC chan<- func() error) { | |
| 287 for _, bp := range c.pods { | |
| 288 bp := bp | |
| 289 workC <- func() error { | |
| 290 return bp.commit(w) | |
| 291 } | |
| 292 } | |
| 293 }) | |
| 294 } | |
| 295 | |
| 296 func (c *containerEngineDeploymentCluster) kubectl(w *work) (*kubeTool, error) { | |
| 297 return w.tools.kubectl(c.kubeCtx) | |
| 298 } | |
| 299 | |
// containerEngineBoundPod is a single staged pod deployed to a
// specific GKE cluster.
type containerEngineBoundPod struct {
	// sp is the staged pod.
	sp *stagedGKEPod

	// c is the cluster that this pod is deployed to.
	c *containerEngineDeploymentCluster

	// binding is the binding between sp and c.
	binding *layoutDeploymentGKEPodBinding

	// deploymentYAMLPath is the filesystem path to the deployment YAML. It is
	// populated during staging and consumed by commit.
	deploymentYAMLPath string
}
| 315 | |
// stage generates this bound pod's Kubernetes Deployment YAML under root,
// annotated with management/version metadata, and records its path for the
// subsequent commit phase.
func (bp *containerEngineBoundPod) stage(w *work, root *managedfs.Dir) error {
	comp := bp.sp.pod.comp

	// Generate our pod-wide deployment YAML, annotated so that later runs can
	// recognize the Deployment as managed by this tool and compare versions.
	depYAML := kubeBuildDeploymentYAML(bp.binding, bp.sp.deploymentName, bp.sp.imageMap)
	depYAML.Metadata.addAnnotation(kubeManagedByKey, kubeManagedByMe)
	depYAML.Metadata.addAnnotation(kubeVersionKey, bp.sp.version.String())
	depYAML.Metadata.addAnnotation(kubeSourceVersionKey, comp.source().Revision)
	depYAML.Spec.Template.Metadata.addLabel("luci/project", string(comp.comp.proj.title))
	depYAML.Spec.Template.Metadata.addLabel("luci/component", string(comp.comp.title))

	deploymentYAML := root.File("deployment.yaml")
	if err := deploymentYAML.GenerateYAML(w, depYAML); err != nil {
		return errors.Annotate(err).Reason("failed to generate deployment YAML").Err()
	}
	bp.deploymentYAMLPath = deploymentYAML.String()
	return nil
}
| 335 | |
// commit applies this bound pod's staged deployment YAML to the cluster.
//
// If a Deployment already exists, it must be managed by this tool (via the
// kubeManagedByKey annotation). If the deployed version already matches the
// staged version, the commit is skipped unless ignoreCurrentVersion is set.
// If no Deployment exists, a new one is created via "kubectl apply".
func (bp *containerEngineBoundPod) commit(w *work) error {
	kubectl, err := bp.c.kubectl(w)
	if err != nil {
		return errors.Annotate(err).Err()
	}

	// Get the current deployment status for this pod.
	var (
		kd             kubeDeployment
		currentVersion string
	)
	switch err := kubectl.getResource(w, fmt.Sprintf("deployments/%s", bp.sp.deploymentName), &kd); err {
	case nil:
		// Got deployment status.
		md := kd.Metadata
		if md == nil {
			return errors.Reason("current deployment has no metadata").Err()
		}

		// Make sure the current deployment is managed by this tool.
		v, ok := md.Annotations[kubeManagedByKey].(string)
		if !ok {
			return errors.Reason("missing '" + kubeManagedByKey + "' annotation").Err()
		}
		if v != kubeManagedByMe {
			log.Fields{
				"managedBy":  v,
				"deployment": bp.sp.deploymentName,
			}.Errorf(w, "Current deployment is not managed.")
			return errors.Reason("unknown manager %(managedBy)q").D("managedBy", v).Err()
		}

		// Is the current deployment tagged at the current version?
		currentVersion, ok = md.Annotations[kubeVersionKey].(string)
		if !ok {
			return errors.Reason("missing '" + kubeVersionKey + "' annotation").Err()
		}
		cloudVersion, err := parseCloudProjectVersion(bp.c.gke.project.VersionScheme, currentVersion)
		switch {
		case err != nil:
			// Unparseable current version: fatal unless explicitly ignored.
			if !bp.c.gke.ignoreCurrentVersion {
				return errors.Annotate(err).Reason("failed to parse current version %(version)q").
					D("version", currentVersion).Err()
			}

			log.Fields{
				log.ErrorKey:     err,
				"currentVersion": currentVersion,
			}.Warningf(w, "Could not parse current version, but configured to ignore this failure.")

		case cloudVersion.Equals(bp.sp.version):
			// Already at this version: skip unless explicitly ignored.
			if !bp.c.gke.ignoreCurrentVersion {
				log.Fields{
					"version": currentVersion,
				}.Infof(w, "Deployed version matches deployment version; not committing.")
				return nil
			}

			log.Fields{
				"version": currentVersion,
			}.Infof(w, "Deployed version matches deployment version, but configured to deploy anyway.")
		}

		// fallthrough to "kubectl apply" the new configuration.
		fallthrough

	case errKubeResourceNotFound:
		// No current deployment, create a new one.
		log.Fields{
			"currentVersion": currentVersion,
			"deployVersion":  bp.sp.version,
		}.Infof(w, "Deploying new pod configuration.")
		if err := kubectl.exec("apply", "-f", bp.deploymentYAMLPath).check(w); err != nil {
			return errors.Annotate(err).Reason("failed to create new deployment configuration").Err()
		}
		return nil

	default:
		return errors.Annotate(err).Reason("failed to get status for deployment %(deployment)q").
			D("deployment", bp.sp.deploymentName).Err()
	}
}
| 418 | |
// stagedGKEPod is staging information for a Google Container Engine deployed
// Kubernetes Pod.
type stagedGKEPod struct {
	*deploy.ContainerEnginePod

	// gke is the container engine deployment that owns this pod.
	gke *containerEngineDeployment
	// pod is the deployment pod that this is staging.
	pod *layoutDeploymentGKEPod

	// version is the calculated cloud project version.
	version *cloudProjectVersion
	// deploymentName is the name of the Deployment for this Component.
	deploymentName string
	// containers is the set of staged Kubernetes containers.
	containers []*stagedKubernetesContainer
	// goPath is the generated GOPATH for this container's sources. It is empty
	// if no container in this pod needs a GOPATH.
	goPath []string

	// imageMap maps container names to their Docker image names.
	imageMap map[string]string
}
| 441 | |
| 442 func (sp *stagedGKEPod) cloudProject() *layoutDeploymentCloudProject { | |
| 443 return sp.pod.comp.dep.cloudProject | |
| 444 } | |
| 445 | |
// stage stages this pod under root: it computes the pod's cloud project
// version and deployment name, derives a Docker image name for each
// container, generates a shared GOPATH if any container needs one, and then
// stages every container in parallel.
func (sp *stagedGKEPod) stage(w *work, root *managedfs.Dir) error {
	// Calculate the cloud project version for this pod.
	var err error
	sp.version, err = makeCloudProjectVersion(sp.cloudProject(), sp.pod.comp.source())
	if err != nil {
		return errors.Annotate(err).Reason("failed to get cloud version").Err()
	}

	comp := sp.pod.comp
	sp.deploymentName = fmt.Sprintf("%s--%s", comp.comp.proj.title, comp.comp.title)

	// Build a staged container for each configured Kubernetes container. The
	// image tag combines the version with the deployment-wide timestamp suffix
	// to differentiate images of the same version.
	sp.imageMap = make(map[string]string, len(sp.KubePod.Container))
	sp.containers = make([]*stagedKubernetesContainer, len(sp.KubePod.Container))
	for i, kc := range sp.KubePod.Container {
		skc := stagedKubernetesContainer{
			KubernetesPod_Container: kc,
			pod: sp,
			image: fmt.Sprintf("gcr.io/%s/%s:%s-%s",
				sp.gke.project.Name, kc.Name, sp.version.String(), sp.gke.timestampSuffix),
		}

		sp.imageMap[kc.Name] = skc.image
		sp.containers[i] = &skc
	}

	// All files in this pod will share a GOPATH. Generate it, if any of our
	// containers use Go.
	needsGoPath := false
	for _, skc := range sp.containers {
		if skc.needsGoPath() {
			needsGoPath = true
			break
		}
	}
	if needsGoPath {
		// Construct a GOPATH for this module from our sources.
		goPath, err := root.EnsureDirectory("gopath")
		if err != nil {
			return errors.Annotate(err).Reason("failed to create GOPATH base").Err()
		}
		if err := stageGoPath(w, comp, goPath); err != nil {
			return errors.Annotate(err).Reason("failed to stage GOPATH").Err()
		}
		sp.goPath = []string{goPath.String()}
	}

	// Stage each of our containers.
	containersDir, err := root.EnsureDirectory("containers")
	if err != nil {
		return errors.Annotate(err).Err()
	}
	err = w.RunMulti(func(workC chan<- func() error) {
		// Stage each component.
		for _, skc := range sp.containers {
			skc := skc // capture per-iteration value for the closure
			workC <- func() error {
				containerDir, err := containersDir.EnsureDirectory(skc.Name)
				if err != nil {
					return errors.Annotate(err).Err()
				}

				if err := skc.stage(w, containerDir); err != nil {
					return errors.Annotate(err).Reason("failed to stage container %(container)q").
						D("container", skc.Name).Err()
				}
				return nil
			}
		}
	})
	if err != nil {
		return err
	}

	if err := root.CleanUp(); err != nil {
		return errors.Annotate(err).Reason("failed to cleanup staging area").Err()
	}
	return nil
}
| 525 | |
| 526 func (sp *stagedGKEPod) build(w *work) error { | |
| 527 // Build any containers within this pod. | |
| 528 return w.RunMulti(func(workC chan<- func() error) { | |
| 529 for _, cont := range sp.containers { | |
| 530 workC <- func() error { | |
| 531 return cont.build(w) | |
| 532 } | |
| 533 } | |
| 534 }) | |
| 535 } | |
| 536 | |
| 537 func (sp *stagedGKEPod) push(w *work) error { | |
| 538 // Build any containers within this pod. | |
| 539 return w.RunMulti(func(workC chan<- func() error) { | |
| 540 for _, cont := range sp.containers { | |
| 541 workC <- func() error { | |
| 542 return cont.push(w) | |
| 543 } | |
| 544 } | |
| 545 }) | |
| 546 } | |
| 547 | |
// stagedKubernetesContainer is staging information for a single Kubernetes
// Container.
type stagedKubernetesContainer struct {
	*deploy.KubernetesPod_Container

	// pod is the pod that owns this container.
	pod *stagedGKEPod

	// image is the Docker image URI for this container.
	image string
	// remoteImageExists is true if the image already exists on the remote. This
	// is checked during the "build" phase.
	remoteImageExists bool

	// buildFn, if non-nil, builds this container's image. It is installed
	// during staging based on the container type.
	buildFn func(*work) error
}
| 564 | |
| 565 func (skc *stagedKubernetesContainer) needsGoPath() bool { | |
| 566 switch skc.Type { | |
| 567 case deploy.KubernetesPod_Container_GO: | |
| 568 return true | |
| 569 default: | |
| 570 return false | |
| 571 } | |
| 572 } | |
| 573 | |
// stage builds this container's Component into root's "build" directory and
// installs the type-specific build function to run during the build phase.
// Unsupported container types are an error.
func (skc *stagedKubernetesContainer) stage(w *work, root *managedfs.Dir) error {
	// Build each Component.
	buildDir, err := root.EnsureDirectory("build")
	if err != nil {
		return errors.Annotate(err).Reason("failed to create build directory").Err()
	}
	if err := buildComponent(w, skc.pod.pod.comp, buildDir); err != nil {
		return errors.Annotate(err).Reason("failed to build component").Err()
	}

	switch skc.Type {
	case deploy.KubernetesPod_Container_GO:
		// Specify how we are to be built (deferred to the build phase).
		skc.buildFn = func(w *work) error {
			path, err := skc.pod.pod.comp.buildPath(skc.GetBuild())
			if err != nil {
				return errors.Annotate(err).Err()
			}
			return skc.buildGo(w, path)
		}

	default:
		return errors.Reason("unknown Kubernetes pod type %(type)T").D("type", skc.Type).Err()
	}
	return nil
}
| 600 | |
| 601 func (skc *stagedKubernetesContainer) build(w *work) error { | |
| 602 if f := skc.buildFn; f != nil { | |
| 603 return f(w) | |
| 604 } | |
| 605 return nil | |
| 606 } | |
| 607 | |
// buildGo builds the Docker image associated with this Go container via
// "gcloud docker -- build", bootstrapped through "aedeploy" so the pod's
// generated GOPATH is in effect, with entryPath as the working directory.
func (skc *stagedKubernetesContainer) buildGo(w *work, entryPath string) error {
	gcloud, err := w.tools.gcloud(skc.pod.cloudProject().Name)
	if err != nil {
		return errors.Annotate(err).Reason("could not get gcloud tool").Err()
	}

	// Use "aedeploy" to gather GOPATH and build against our root.
	aedeploy, err := w.tools.aedeploy(skc.pod.goPath)
	if err != nil {
		return errors.Annotate(err).Err()
	}

	x := gcloud.exec("docker", "--", "build", "-t", skc.image, ".")
	return aedeploy.bootstrap(x).cwd(entryPath).check(w)
}
| 624 | |
| 625 func (skc *stagedKubernetesContainer) push(w *work) error { | |
| 626 gcloud, err := w.tools.gcloud(skc.pod.cloudProject().Name) | |
| 627 if err != nil { | |
| 628 return errors.Annotate(err).Reason("could not get gcloud tool").
Err() | |
| 629 } | |
| 630 | |
| 631 if err := gcloud.exec("docker", "--", "push", skc.image).check(w); err !
= nil { | |
| 632 return errors.Annotate(err).Reason("failed to push Docker image
%(image)q"). | |
| 633 D("image", skc.image).Err() | |
| 634 } | |
| 635 return nil | |
| 636 } | |
| 637 | |
// getContainerEngineKubernetesContext returns the name of the Kubernetes
// context for cluster, installing it into the local Kubernetes configuration
// via "gcloud container clusters get-credentials" if it is not already
// present. The context name follows gcloud's "gke_<project>_<zone>_<name>"
// convention.
func getContainerEngineKubernetesContext(w *work, cluster *layoutDeploymentGKECluster) (
	string, error) {
	// Generate our Kubernetes context name. This is derived from the Google
	// Container Engine cluster parameters.
	kubeCtx := fmt.Sprintf("gke_%s_%s_%s", cluster.cloudProject.Name, cluster.Zone, cluster.Name)

	kubectl, err := w.tools.kubectl(kubeCtx)
	if err != nil {
		return "", errors.Annotate(err).Err()
	}

	// Check if the context is already installed in our Kubernetes configuration.
	switch has, err := kubectl.hasContext(w); {
	case err != nil:
		return "", errors.Annotate(err).Reason("failed to check for Kubernetes context").Err()

	case !has:
		gcloud, err := w.tools.gcloud(cluster.cloudProject.Name)
		if err != nil {
			return "", errors.Annotate(err).Err()
		}

		// The context isn't cached, we will fetch it via:
		// $ gcloud container clusters get-credentials
		x := gcloud.exec(
			"container", "clusters",
			"get-credentials", cluster.Name,
			"--zone", cluster.Zone)
		if err := x.check(w); err != nil {
			return "", errors.Annotate(err).Reason("failed to get cluster credentials").Err()
		}
		// Re-check: get-credentials should have installed the context.
		switch has, err = kubectl.hasContext(w); {
		case err != nil:
			return "", errors.Annotate(err).Reason("failed to confirm Kubernetes context").Err()
		case !has:
			return "", errors.Reason("context %(context)q missing after fetching credentials").D("context", kubeCtx).Err()
		}
	}
	return kubeCtx, nil
}
| OLD | NEW |