OLD | NEW |
(Empty) | |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 package main |
| 6 |
| 7 /* |
| 8 Generate the tasks.json file. |
| 9 */ |
| 10 |
| 11 import ( |
| 12 "bytes" |
| 13 "encoding/json" |
| 14 "fmt" |
| 15 "io/ioutil" |
| 16 "os" |
| 17 "path" |
| 18 "path/filepath" |
| 19 "sort" |
| 20 "strings" |
| 21 |
| 22 "github.com/skia-dev/glog" |
| 23 "go.skia.org/infra/go/common" |
| 24 "go.skia.org/infra/go/util" |
| 25 "go.skia.org/infra/task_scheduler/go/specs" |
| 26 ) |
| 27 |
const (
	// DEFAULT_OS is the Swarming OS dimension used when a job's parsed name
	// does not include an "os" part (see swarmDimensions).
	DEFAULT_OS = "Ubuntu"

	// Pool for Skia bots.
	POOL_SKIA = "Skia"

	// Name prefix for upload jobs.
	PREFIX_UPLOAD = "Upload"
)
| 37 |
var (
	// "Constants"

	// Top-level list of all jobs to run at each commit.
	JOBS = []string{
		"Build-Ubuntu-GCC-x86_64-Release-GN",
		"Perf-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-GN",
		"Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-GN",
	}

	// UPLOAD_DIMENSIONS are the Swarming dimensions for upload tasks.
	UPLOAD_DIMENSIONS = []string{
		"cpu:x86-64-avx2",
		"gpu:none",
		"os:Ubuntu",
		fmt.Sprintf("pool:%s", POOL_SKIA),
	}

	// Defines the structure of job names. Initialized in main() from
	// builder_name_schema.json; nil until then.
	jobNameSchema *JobNameSchema

	// Caches CIPD package info so that we don't have to re-read VERSION
	// files.
	cipdPackages = map[string]*specs.CipdPackage{}

	// Path to the infra/bots directory. Set in main() once the checkout
	// root is known.
	infrabotsDir = ""
)
| 66 |
| 67 // deriveCompileTaskName returns the name of a compile task based on the given |
| 68 // job name. |
| 69 func deriveCompileTaskName(jobName string, parts map[string]string) string { |
| 70 if parts["role"] == "Housekeeper" { |
| 71 return "Build-Ubuntu-GCC-x86_64-Release-Shared" |
| 72 } else if parts["role"] == "Test" || parts["role"] == "Perf" { |
| 73 task_os := parts["os"] |
| 74 ec := parts["extra_config"] |
| 75 if task_os == "Android" { |
| 76 if ec == "Vulkan" { |
| 77 ec = "Android_Vulkan" |
| 78 } else if !strings.Contains(ec, "GN_Android") { |
| 79 ec = task_os |
| 80 } |
| 81 task_os = "Android" |
| 82 } else if task_os == "iOS" { |
| 83 ec = task_os |
| 84 task_os = "Mac" |
| 85 } else if strings.Contains(task_os, "Win") { |
| 86 task_os = "Win" |
| 87 } |
| 88 name, err := jobNameSchema.MakeJobName(map[string]string{ |
| 89 "role": "Build", |
| 90 "os": task_os, |
| 91 "compiler": parts["compiler"], |
| 92 "target_arch": parts["arch"], |
| 93 "configuration": parts["configuration"], |
| 94 "extra_config": ec, |
| 95 }) |
| 96 if err != nil { |
| 97 glog.Fatal(err) |
| 98 } |
| 99 return name |
| 100 } else { |
| 101 return jobName |
| 102 } |
| 103 } |
| 104 |
| 105 // swarmDimensions generates swarming bot dimensions for the given task. |
| 106 func swarmDimensions(parts map[string]string) []string { |
| 107 if parts["extra_config"] == "SkiaCT" { |
| 108 return []string{ |
| 109 "pool:SkiaCT", |
| 110 } |
| 111 } |
| 112 d := map[string]string{ |
| 113 "pool": POOL_SKIA, |
| 114 } |
| 115 if os, ok := parts["os"]; ok { |
| 116 d["os"] = os |
| 117 } else { |
| 118 d["os"] = DEFAULT_OS |
| 119 } |
| 120 if strings.Contains(d["os"], "Win") { |
| 121 d["os"] = "Windows" |
| 122 } |
| 123 if parts["role"] == "Test" || parts["role"] == "Perf" { |
| 124 if strings.Contains(parts["os"], "Android") { |
| 125 // For Android, the device type is a better dimension |
| 126 // than CPU or GPU. |
| 127 d["device_type"] = map[string]string{ |
| 128 "AndroidOne": "sprout", |
| 129 "GalaxyS3": "m0", // "smdk4x12", Detected i
ncorrectly by swarming? |
| 130 "GalaxyS4": "", // TODO(borenet,kjlubick) |
| 131 "GalaxyS7": "heroqlteatt", |
| 132 "NVIDIA_Shield": "foster", |
| 133 "Nexus10": "manta", |
| 134 "Nexus5": "hammerhead", |
| 135 "Nexus6": "shamu", |
| 136 "Nexus6p": "angler", |
| 137 "Nexus7": "grouper", |
| 138 "Nexus7v2": "flo", |
| 139 "Nexus9": "flounder", |
| 140 "NexusPlayer": "fugu", |
| 141 }[parts["model"]] |
| 142 } else if strings.Contains(parts["os"], "iOS") { |
| 143 d["device"] = map[string]string{ |
| 144 "iPad4": "iPad4,1", |
| 145 }[parts["model"]] |
| 146 // TODO(borenet): Replace this hack with something |
| 147 // better. |
| 148 d["os"] = "iOS-9.2" |
| 149 } else if parts["cpu_or_gpu"] == "CPU" { |
| 150 d["gpu"] = "none" |
| 151 d["cpu"] = map[string]string{ |
| 152 "AVX": "x86-64", |
| 153 "AVX2": "x86-64-avx2", |
| 154 "SSE4": "x86-64", |
| 155 }[parts["cpu_or_gpu_value"]] |
| 156 if strings.Contains(parts["os"], "Win") && parts["cpu_or
_gpu_value"] == "AVX2" { |
| 157 // AVX2 is not correctly detected on Windows. Fa
ll back on other |
| 158 // dimensions to ensure that we correctly target
machines which we know |
| 159 // have AVX2 support. |
| 160 d["cpu"] = "x86-64" |
| 161 d["os"] = "Windows-2008ServerR2-SP1" |
| 162 } |
| 163 } else { |
| 164 d["gpu"] = map[string]string{ |
| 165 "GeForce320M": "10de:08a4", |
| 166 "GT610": "10de:104a", |
| 167 "GTX550Ti": "10de:1244", |
| 168 "GTX660": "10de:11c0", |
| 169 "GTX960": "10de:1401", |
| 170 "HD4000": "8086:0a2e", |
| 171 "HD4600": "8086:0412", |
| 172 "HD7770": "1002:683d", |
| 173 "iHD530": "8086:1912", |
| 174 }[parts["cpu_or_gpu_value"]] |
| 175 } |
| 176 } else { |
| 177 d["gpu"] = "none" |
| 178 } |
| 179 rv := make([]string, 0, len(d)) |
| 180 for k, v := range d { |
| 181 rv = append(rv, fmt.Sprintf("%s:%s", k, v)) |
| 182 } |
| 183 sort.Strings(rv) |
| 184 return rv |
| 185 } |
| 186 |
| 187 // getCipdPackage finds and returns the given CIPD package and version. |
| 188 func getCipdPackage(assetName string) *specs.CipdPackage { |
| 189 if pkg, ok := cipdPackages[assetName]; ok { |
| 190 return pkg |
| 191 } |
| 192 versionFile := path.Join(infrabotsDir, "assets", assetName, "VERSION") |
| 193 contents, err := ioutil.ReadFile(versionFile) |
| 194 if err != nil { |
| 195 glog.Fatal(err) |
| 196 } |
| 197 version := strings.TrimSpace(string(contents)) |
| 198 pkg := &specs.CipdPackage{ |
| 199 Name: fmt.Sprintf("skia/bots/%s", assetName), |
| 200 Path: assetName, |
| 201 Version: fmt.Sprintf("version:%s", version), |
| 202 } |
| 203 if assetName == "win_toolchain" { |
| 204 pkg.Path = "t" // Workaround for path length limit on Windows. |
| 205 } |
| 206 cipdPackages[assetName] = pkg |
| 207 return pkg |
| 208 } |
| 209 |
| 210 // compile generates a compile task. Returns the name of the last task in the |
| 211 // generated chain of tasks, which the Job should add as a dependency. |
| 212 func compile(cfg *specs.TasksCfg, name string, parts map[string]string) string { |
| 213 // Collect the necessary CIPD packages. |
| 214 pkgs := []*specs.CipdPackage{} |
| 215 |
| 216 // Android bots require a toolchain. |
| 217 if strings.Contains(name, "Android") { |
| 218 pkgs = append(pkgs, getCipdPackage("android_sdk")) |
| 219 if strings.Contains(name, "Mac") { |
| 220 pkgs = append(pkgs, getCipdPackage("android_ndk_darwin")
) |
| 221 } else { |
| 222 pkgs = append(pkgs, getCipdPackage("android_ndk_linux")) |
| 223 } |
| 224 } |
| 225 |
| 226 // Clang on Linux. |
| 227 if strings.Contains(name, "Ubuntu") && strings.Contains(name, "Clang") { |
| 228 pkgs = append(pkgs, getCipdPackage("clang_linux")) |
| 229 } |
| 230 |
| 231 // Windows toolchain. |
| 232 if strings.Contains(name, "Win") { |
| 233 pkgs = append(pkgs, getCipdPackage("win_toolchain")) |
| 234 if strings.Contains(name, "Vulkan") { |
| 235 pkgs = append(pkgs, getCipdPackage("win_vulkan_sdk")) |
| 236 } |
| 237 } |
| 238 |
| 239 // Add the task. |
| 240 cfg.Tasks[name] = &specs.TaskSpec{ |
| 241 CipdPackages: pkgs, |
| 242 Dimensions: swarmDimensions(parts), |
| 243 ExtraArgs: []string{ |
| 244 "--workdir", "../../..", "swarm_compile", |
| 245 fmt.Sprintf("buildername=%s", name), |
| 246 "mastername=fake-master", |
| 247 "buildnumber=2", |
| 248 "slavename=fake-buildslave", |
| 249 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLAT
ED_OUTDIR), |
| 250 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), |
| 251 }, |
| 252 Isolate: "compile_skia.isolate", |
| 253 Priority: 0.8, |
| 254 } |
| 255 return name |
| 256 } |
| 257 |
// recreateSKPs generates a RecreateSKPs task. Returns the name of the last
// task in the generated chain of tasks, which the Job should add as a
// dependency.
func recreateSKPs(cfg *specs.TasksCfg, name string) string {
	// TODO: Not yet implemented. Currently returns the job name without
	// adding any TaskSpec to cfg.
	return name
}
| 265 |
// ctSKPs generates a CT SKPs task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
func ctSKPs(cfg *specs.TasksCfg, name string) string {
	// TODO: Not yet implemented. Currently returns the job name without
	// adding any TaskSpec to cfg.
	return name
}
| 272 |
// housekeeper generates a Housekeeper task. Returns the name of the last task
// in the generated chain of tasks, which the Job should add as a dependency.
func housekeeper(cfg *specs.TasksCfg, name, compileTaskName string) string {
	// TODO: Not yet implemented. Currently returns the job name without
	// adding any TaskSpec to cfg; compileTaskName is unused until then.
	return name
}
| 279 |
// doUpload indicates whether the given Job should upload its results.
// Sanitizer, coverage, and Valgrind jobs do not upload.
func doUpload(name string) bool {
	for _, keyword := range []string{
		"ASAN",
		"Coverage",
		"MSAN",
		"TSAN",
		"UBSAN",
		"Valgrind",
	} {
		if strings.Contains(name, keyword) {
			return false
		}
	}
	return true
}
| 297 |
| 298 // test generates a Test task. Returns the name of the last task in the |
| 299 // generated chain of tasks, which the Job should add as a dependency. |
| 300 func test(cfg *specs.TasksCfg, name string, parts map[string]string, compileTask
Name string, pkgs []*specs.CipdPackage) string { |
| 301 cfg.Tasks[name] = &specs.TaskSpec{ |
| 302 CipdPackages: pkgs, |
| 303 Dependencies: []string{compileTaskName}, |
| 304 Dimensions: swarmDimensions(parts), |
| 305 ExtraArgs: []string{ |
| 306 "--workdir", "../../..", "swarm_test", |
| 307 fmt.Sprintf("buildername=%s", name), |
| 308 "mastername=fake-master", |
| 309 "buildnumber=2", |
| 310 "slavename=fake-buildslave", |
| 311 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLAT
ED_OUTDIR), |
| 312 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), |
| 313 }, |
| 314 Isolate: "test_skia.isolate", |
| 315 Priority: 0.8, |
| 316 } |
| 317 // Upload results if necessary. |
| 318 if doUpload(name) { |
| 319 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema
.Sep, name) |
| 320 cfg.Tasks[uploadName] = &specs.TaskSpec{ |
| 321 Dependencies: []string{name}, |
| 322 Dimensions: UPLOAD_DIMENSIONS, |
| 323 ExtraArgs: []string{ |
| 324 "--workdir", "../../..", "upload_dm_results", |
| 325 fmt.Sprintf("buildername=%s", name), |
| 326 "mastername=fake-master", |
| 327 "buildnumber=2", |
| 328 "slavename=fake-buildslave", |
| 329 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDE
R_ISOLATED_OUTDIR), |
| 330 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REV
ISION), |
| 331 }, |
| 332 Isolate: "upload_dm_results.isolate", |
| 333 Priority: 0.8, |
| 334 } |
| 335 return uploadName |
| 336 } |
| 337 return name |
| 338 } |
| 339 |
| 340 // perf generates a Perf task. Returns the name of the last task in the |
| 341 // generated chain of tasks, which the Job should add as a dependency. |
| 342 func perf(cfg *specs.TasksCfg, name string, parts map[string]string, compileTask
Name string, pkgs []*specs.CipdPackage) string { |
| 343 cfg.Tasks[name] = &specs.TaskSpec{ |
| 344 CipdPackages: pkgs, |
| 345 Dependencies: []string{compileTaskName}, |
| 346 Dimensions: swarmDimensions(parts), |
| 347 ExtraArgs: []string{ |
| 348 "--workdir", "../../..", "swarm_perf", |
| 349 fmt.Sprintf("buildername=%s", name), |
| 350 "mastername=fake-master", |
| 351 "buildnumber=2", |
| 352 "slavename=fake-buildslave", |
| 353 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLAT
ED_OUTDIR), |
| 354 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), |
| 355 }, |
| 356 Isolate: "perf_skia.isolate", |
| 357 Priority: 0.8, |
| 358 } |
| 359 // Upload results if necessary. |
| 360 if strings.Contains(name, "Release") && doUpload(name) { |
| 361 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema
.Sep, name) |
| 362 cfg.Tasks[uploadName] = &specs.TaskSpec{ |
| 363 Dependencies: []string{name}, |
| 364 Dimensions: UPLOAD_DIMENSIONS, |
| 365 ExtraArgs: []string{ |
| 366 "--workdir", "../../..", "upload_nano_results", |
| 367 fmt.Sprintf("buildername=%s", name), |
| 368 "mastername=fake-master", |
| 369 "buildnumber=2", |
| 370 "slavename=fake-buildslave", |
| 371 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDE
R_ISOLATED_OUTDIR), |
| 372 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REV
ISION), |
| 373 }, |
| 374 Isolate: "upload_nano_results.isolate", |
| 375 Priority: 0.8, |
| 376 } |
| 377 return uploadName |
| 378 } |
| 379 return name |
| 380 } |
| 381 |
| 382 // process generates tasks and jobs for the given job name. |
| 383 func process(cfg *specs.TasksCfg, name string) { |
| 384 if _, ok := cfg.Jobs[name]; ok { |
| 385 glog.Fatalf("Duplicate job %q", name) |
| 386 } |
| 387 deps := []string{} |
| 388 |
| 389 parts, err := jobNameSchema.ParseJobName(name) |
| 390 if err != nil { |
| 391 glog.Fatal(err) |
| 392 } |
| 393 |
| 394 // RecreateSKPs. |
| 395 if strings.Contains(name, "RecreateSKPs") { |
| 396 deps = append(deps, recreateSKPs(cfg, name)) |
| 397 } |
| 398 |
| 399 // CT bots. |
| 400 if strings.Contains(name, "-CT_") { |
| 401 deps = append(deps, ctSKPs(cfg, name)) |
| 402 } |
| 403 |
| 404 // Compile bots. |
| 405 if parts["role"] == "Build" { |
| 406 deps = append(deps, compile(cfg, name, parts)) |
| 407 } |
| 408 |
| 409 // Any remaining bots need a compile task. |
| 410 compileTaskName := deriveCompileTaskName(name, parts) |
| 411 |
| 412 // Housekeeper. |
| 413 if parts["role"] == "Housekeeper" { |
| 414 deps = append(deps, housekeeper(cfg, name, compileTaskName)) |
| 415 } |
| 416 |
| 417 // Common assets needed by the remaining bots. |
| 418 pkgs := []*specs.CipdPackage{ |
| 419 getCipdPackage("skimage"), |
| 420 getCipdPackage("skp"), |
| 421 getCipdPackage("svg"), |
| 422 } |
| 423 |
| 424 // Test bots. |
| 425 if parts["role"] == "Test" { |
| 426 deps = append(deps, test(cfg, name, parts, compileTaskName, pkgs
)) |
| 427 } |
| 428 |
| 429 // Perf bots. |
| 430 if parts["role"] == "Perf" { |
| 431 deps = append(deps, perf(cfg, name, parts, compileTaskName, pkgs
)) |
| 432 } |
| 433 |
| 434 // Add the Job spec. |
| 435 cfg.Jobs[name] = &specs.JobSpec{ |
| 436 Priority: 0.8, |
| 437 TaskSpecs: deps, |
| 438 } |
| 439 } |
| 440 |
| 441 // getCheckoutRoot returns the path of the root of the Skia checkout, or an |
| 442 // error if it cannot be found. |
| 443 func getCheckoutRoot() string { |
| 444 cwd, err := os.Getwd() |
| 445 if err != nil { |
| 446 glog.Fatal(err) |
| 447 } |
| 448 for { |
| 449 if _, err := os.Stat(cwd); err != nil { |
| 450 glog.Fatal(err) |
| 451 } |
| 452 s, err := os.Stat(path.Join(cwd, ".git")) |
| 453 if err == nil && s.IsDir() { |
| 454 // TODO(borenet): Should we verify that this is a Skia |
| 455 // checkout and not something else? |
| 456 return cwd |
| 457 } |
| 458 cwd = filepath.Clean(path.Join(cwd, "..")) |
| 459 } |
| 460 } |
| 461 |
| 462 // Regenerate the tasks.json file. |
| 463 func main() { |
| 464 common.Init() |
| 465 defer common.LogPanic() |
| 466 |
| 467 // Where are we? |
| 468 root := getCheckoutRoot() |
| 469 infrabotsDir = path.Join(root, "infra", "bots") |
| 470 |
| 471 // Create the JobNameSchema. |
| 472 schema, err := NewJobNameSchema(path.Join(infrabotsDir, "recipe_modules"
, "builder_name_schema", "builder_name_schema.json")) |
| 473 if err != nil { |
| 474 glog.Fatal(err) |
| 475 } |
| 476 jobNameSchema = schema |
| 477 |
| 478 // Create the config. |
| 479 cfg := &specs.TasksCfg{ |
| 480 Jobs: map[string]*specs.JobSpec{}, |
| 481 Tasks: map[string]*specs.TaskSpec{}, |
| 482 } |
| 483 |
| 484 // Create Tasks and Jobs. |
| 485 for _, j := range JOBS { |
| 486 process(cfg, j) |
| 487 } |
| 488 |
| 489 // Validate the config. |
| 490 if err := cfg.Validate(); err != nil { |
| 491 glog.Fatal(err) |
| 492 } |
| 493 |
| 494 // Write the tasks.json file. |
| 495 outFile := path.Join(root, specs.TASKS_CFG_FILE) |
| 496 b, err := json.MarshalIndent(cfg, "", " ") |
| 497 if err != nil { |
| 498 glog.Fatal(err) |
| 499 } |
| 500 // The json package escapes HTML characters, which makes our output |
| 501 // much less readable. Replace the escape characters with the real |
| 502 // character. |
| 503 b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) |
| 504 if err := ioutil.WriteFile(outFile, b, os.ModePerm); err != nil { |
| 505 glog.Fatal(err) |
| 506 } |
| 507 } |
| 508 |
// TODO(borenet): The below really belongs in its own file, probably next to the
// builder_name_schema.json file.

// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format.
type JobNameSchema struct {
	// Schema maps a role (the first part of a job name) to the ordered
	// list of keys naming the remaining parts.
	Schema map[string][]string `json:"builder_name_schema"`
	// Sep is the separator string between job name parts.
	Sep string `json:"builder_name_sep"`
}
| 518 |
| 519 // NewJobNameSchema returns a JobNameSchema instance based on the given JSON |
| 520 // file. |
| 521 func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) { |
| 522 var rv JobNameSchema |
| 523 f, err := os.Open(jsonFile) |
| 524 if err != nil { |
| 525 return nil, err |
| 526 } |
| 527 defer util.Close(f) |
| 528 if err := json.NewDecoder(f).Decode(&rv); err != nil { |
| 529 return nil, err |
| 530 } |
| 531 return &rv, nil |
| 532 } |
| 533 |
| 534 // ParseJobName splits the given Job name into its component parts, according |
| 535 // to the schema. |
| 536 func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) { |
| 537 split := strings.Split(n, s.Sep) |
| 538 if len(split) < 2 { |
| 539 return nil, fmt.Errorf("Invalid job name: %q", n) |
| 540 } |
| 541 role := split[0] |
| 542 split = split[1:] |
| 543 keys, ok := s.Schema[role] |
| 544 if !ok { |
| 545 return nil, fmt.Errorf("Invalid job name; %q is not a valid role
.", role) |
| 546 } |
| 547 extraConfig := "" |
| 548 if len(split) == len(keys)+1 { |
| 549 extraConfig = split[len(split)-1] |
| 550 split = split[:len(split)-1] |
| 551 } |
| 552 if len(split) != len(keys) { |
| 553 return nil, fmt.Errorf("Invalid job name; %q has incorrect numbe
r of parts.", n) |
| 554 } |
| 555 rv := make(map[string]string, len(keys)+2) |
| 556 rv["role"] = role |
| 557 if extraConfig != "" { |
| 558 rv["extra_config"] = extraConfig |
| 559 } |
| 560 for i, k := range keys { |
| 561 rv[k] = split[i] |
| 562 } |
| 563 return rv, nil |
| 564 } |
| 565 |
| 566 // MakeJobName assembles the given parts of a Job name, according to the schema. |
| 567 func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) { |
| 568 role, ok := parts["role"] |
| 569 if !ok { |
| 570 return "", fmt.Errorf("Invalid job parts; jobs must have a role.
") |
| 571 } |
| 572 keys, ok := s.Schema[role] |
| 573 if !ok { |
| 574 return "", fmt.Errorf("Invalid job parts; unknown role %q", role
) |
| 575 } |
| 576 rvParts := make([]string, 0, len(parts)) |
| 577 rvParts = append(rvParts, role) |
| 578 for _, k := range keys { |
| 579 v, ok := parts[k] |
| 580 if !ok { |
| 581 return "", fmt.Errorf("Invalid job parts; missing %q", k
) |
| 582 } |
| 583 rvParts = append(rvParts, v) |
| 584 } |
| 585 if _, ok := parts["extra_config"]; ok { |
| 586 rvParts = append(rvParts, parts["extra_config"]) |
| 587 } |
| 588 return strings.Join(rvParts, s.Sep), nil |
| 589 } |
OLD | NEW |