OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 package main | |
6 | |
7 /* | |
8 Generate the tasks.json file. | |
9 */ | |
10 | |
11 import ( | |
12 "bytes" | |
13 "encoding/json" | |
14 "fmt" | |
15 "io/ioutil" | |
16 "os" | |
17 "path" | |
18 "path/filepath" | |
19 "sort" | |
20 "strings" | |
21 | |
22 "github.com/skia-dev/glog" | |
23 "go.skia.org/infra/go/common" | |
24 "go.skia.org/infra/go/util" | |
25 "go.skia.org/infra/task_scheduler/go/specs" | |
26 ) | |
27 | |
const (
	// DEFAULT_OS is the Swarming OS dimension used when a job name does not
	// specify an OS.
	DEFAULT_OS = "Ubuntu"

	// Pool for Skia bots.
	POOL_SKIA = "Skia"

	// Name prefix for upload jobs.
	PREFIX_UPLOAD = "Upload"
)
37 | |
var (
	// "Constants"

	// Top-level list of all jobs to run at each commit.
	JOBS = []string{
		"Build-Ubuntu-GCC-x86_64-Release-GN",
		"Perf-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-GN",
		"Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-GN",
	}

	// UPLOAD_DIMENSIONS are the Swarming dimensions for upload tasks.
	UPLOAD_DIMENSIONS = []string{
		"cpu:x86-64-avx2",
		"gpu:none",
		"os:Ubuntu",
		fmt.Sprintf("pool:%s", POOL_SKIA),
	}

	// Defines the structure of job names. Populated in main() from the
	// builder_name_schema.json file.
	jobNameSchema *JobNameSchema

	// Caches CIPD package info so that we don't have to re-read VERSION
	// files.
	cipdPackages = map[string]*specs.CipdPackage{}

	// Path to the infra/bots directory. Set in main() once the checkout
	// root is known.
	infrabotsDir = ""
)
66 | |
67 // deriveCompileTaskName returns the name of a compile task based on the given | |
68 // job name. | |
69 func deriveCompileTaskName(jobName string, parts map[string]string) string { | |
70 if parts["role"] == "Housekeeper" { | |
71 return "Build-Ubuntu-GCC-x86_64-Release-Shared" | |
72 } else if parts["role"] == "Test" || parts["role"] == "Perf" { | |
73 task_os := parts["os"] | |
74 ec := parts["extra_config"] | |
75 if task_os == "Android" { | |
76 if ec == "Vulkan" { | |
77 ec = "Android_Vulkan" | |
78 } else if !strings.Contains(ec, "GN_Android") { | |
79 ec = task_os | |
80 } | |
81 task_os = "Android" | |
82 } else if task_os == "iOS" { | |
83 ec = task_os | |
84 task_os = "Mac" | |
85 } else if strings.Contains(task_os, "Win") { | |
86 task_os = "Win" | |
87 } | |
88 name, err := jobNameSchema.MakeJobName(map[string]string{ | |
89 "role": "Build", | |
90 "os": task_os, | |
91 "compiler": parts["compiler"], | |
92 "target_arch": parts["arch"], | |
93 "configuration": parts["configuration"], | |
94 "extra_config": ec, | |
95 }) | |
96 if err != nil { | |
97 glog.Fatal(err) | |
98 } | |
99 return name | |
100 } else { | |
101 return jobName | |
102 } | |
103 } | |
104 | |
105 // swarmDimensions generates swarming bot dimensions for the given task. | |
106 func swarmDimensions(parts map[string]string) []string { | |
107 if parts["extra_config"] == "SkiaCT" { | |
108 return []string{ | |
109 "pool:SkiaCT", | |
110 } | |
111 } | |
112 d := map[string]string{ | |
113 "pool": POOL_SKIA, | |
114 } | |
115 if os, ok := parts["os"]; ok { | |
116 d["os"] = os | |
117 } else { | |
118 d["os"] = DEFAULT_OS | |
119 } | |
120 if strings.Contains(d["os"], "Win") { | |
121 d["os"] = "Windows" | |
122 } | |
123 if parts["role"] == "Test" || parts["role"] == "Perf" { | |
124 if strings.Contains(parts["os"], "Android") { | |
125 // For Android, the device type is a better dimension | |
126 // than CPU or GPU. | |
127 d["device_type"] = map[string]string{ | |
128 "AndroidOne": "sprout", | |
129 "GalaxyS3": "m0", // "smdk4x12", Detected i ncorrectly by swarming? | |
130 "GalaxyS4": "", // TODO(borenet,kjlubick) | |
131 "GalaxyS7": "heroqlteatt", | |
132 "NVIDIA_Shield": "foster", | |
133 "Nexus10": "manta", | |
134 "Nexus5": "hammerhead", | |
135 "Nexus6": "shamu", | |
136 "Nexus6p": "angler", | |
137 "Nexus7": "grouper", | |
138 "Nexus7v2": "flo", | |
139 "Nexus9": "flounder", | |
140 "NexusPlayer": "fugu", | |
141 }[parts["model"]] | |
142 } else if strings.Contains(parts["os"], "iOS") { | |
143 d["device"] = map[string]string{ | |
144 "iPad4": "iPad4,1", | |
145 }[parts["model"]] | |
146 // TODO(borenet): Replace this hack with something | |
147 // better. | |
148 d["os"] = "iOS-9.2" | |
149 } else if parts["cpu_or_gpu"] == "CPU" { | |
150 d["gpu"] = "none" | |
151 d["cpu"] = map[string]string{ | |
152 "AVX": "x86-64", | |
153 "AVX2": "x86-64-avx2", | |
154 "SSE4": "x86-64", | |
155 }[parts["cpu_or_gpu_value"]] | |
156 if strings.Contains(parts["os"], "Win") && parts["cpu_or _gpu_value"] == "AVX2" { | |
157 // AVX2 is not correctly detected on Windows. Fa ll back on other | |
158 // dimensions to ensure that we correctly target machines which we know | |
159 // have AVX2 support. | |
160 d["cpu"] = "x86-64" | |
161 d["os"] = "Windows-2008ServerR2-SP1" | |
162 } | |
163 } else { | |
164 d["gpu"] = map[string]string{ | |
165 "GeForce320M": "10de:08a4", | |
166 "GT610": "10de:104a", | |
167 "GTX550Ti": "10de:1244", | |
168 "GTX660": "10de:11c0", | |
169 "GTX960": "10de:1401", | |
170 "HD4000": "8086:0a2e", | |
171 "HD4600": "8086:0412", | |
172 "HD7770": "1002:683d", | |
173 "iHD530": "8086:1912", | |
174 }[parts["cpu_or_gpu_value"]] | |
175 } | |
176 } else { | |
177 d["gpu"] = "none" | |
178 } | |
179 rv := make([]string, 0, len(d)) | |
180 for k, v := range d { | |
181 rv = append(rv, fmt.Sprintf("%s:%s", k, v)) | |
182 } | |
183 sort.Strings(rv) | |
184 return rv | |
185 } | |
186 | |
187 // getCipdPackage finds and returns the given CIPD package and version. | |
188 func getCipdPackage(assetName string) *specs.CipdPackage { | |
189 if pkg, ok := cipdPackages[assetName]; ok { | |
190 return pkg | |
191 } | |
192 versionFile := path.Join(infrabotsDir, "assets", assetName, "VERSION") | |
193 contents, err := ioutil.ReadFile(versionFile) | |
194 if err != nil { | |
195 glog.Fatal(err) | |
196 } | |
197 version := strings.TrimSpace(string(contents)) | |
198 pkg := &specs.CipdPackage{ | |
199 Name: fmt.Sprintf("skia/bots/%s", assetName), | |
200 Path: assetName, | |
201 Version: fmt.Sprintf("version:%s", version), | |
202 } | |
203 if assetName == "win_toolchain" { | |
204 pkg.Path = "t" // Workaround for path length limit on Windows. | |
205 } | |
206 cipdPackages[assetName] = pkg | |
207 return pkg | |
208 } | |
209 | |
210 // compile generates a compile task. Returns the name of the last task in the | |
211 // generated chain of tasks, which the Job should add as a dependency. | |
212 func compile(cfg *specs.TasksCfg, name string, parts map[string]string) string { | |
213 // Collect the necessary CIPD packages. | |
214 pkgs := []*specs.CipdPackage{} | |
215 | |
216 // Android bots require a toolchain. | |
217 if strings.Contains(name, "Android") { | |
218 pkgs = append(pkgs, getCipdPackage("android_sdk")) | |
219 if strings.Contains(name, "Mac") { | |
220 pkgs = append(pkgs, getCipdPackage("android_ndk_darwin") ) | |
221 } else { | |
222 pkgs = append(pkgs, getCipdPackage("android_ndk_linux")) | |
223 } | |
224 } | |
225 | |
226 // Clang on Linux. | |
227 if strings.Contains(name, "Ubuntu") && strings.Contains(name, "Clang") { | |
228 pkgs = append(pkgs, getCipdPackage("clang_linux")) | |
229 } | |
230 | |
231 // Windows toolchain. | |
232 if strings.Contains(name, "Win") { | |
233 pkgs = append(pkgs, getCipdPackage("win_toolchain")) | |
234 if strings.Contains(name, "Vulkan") { | |
235 pkgs = append(pkgs, getCipdPackage("win_vulkan_sdk")) | |
236 } | |
237 } | |
238 | |
239 // Add the task. | |
240 cfg.Tasks[name] = &specs.TaskSpec{ | |
241 CipdPackages: pkgs, | |
242 Dimensions: swarmDimensions(parts), | |
243 ExtraArgs: []string{ | |
244 "--workdir", "../../..", "swarm_compile", | |
245 fmt.Sprintf("buildername=%s", name), | |
246 "mastername=fake-master", | |
247 "buildnumber=2", | |
248 "slavename=fake-buildslave", | |
249 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLAT ED_OUTDIR), | |
250 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), | |
251 }, | |
252 Isolate: "compile_skia.isolate", | |
253 Priority: 0.8, | |
254 } | |
255 return name | |
256 } | |
257 | |
// recreateSKPs generates a RecreateSKPs task. Returns the name of the last
// task in the generated chain of tasks, which the Job should add as a
// dependency.
//
// Currently a stub: no task is added to cfg and the job name is returned
// unchanged.
func recreateSKPs(cfg *specs.TasksCfg, name string) string {
	// TODO
	return name
}
265 | |
// ctSKPs generates a CT SKPs task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
//
// Currently a stub: no task is added to cfg and the job name is returned
// unchanged.
func ctSKPs(cfg *specs.TasksCfg, name string) string {
	// TODO
	return name
}
272 | |
// housekeeper generates a Housekeeper task. Returns the name of the last task
// in the generated chain of tasks, which the Job should add as a dependency.
//
// Currently a stub: no task is added to cfg, compileTaskName is unused, and
// the job name is returned unchanged.
func housekeeper(cfg *specs.TasksCfg, name, compileTaskName string) string {
	// TODO
	return name
}
279 | |
280 // test generates a Test task. Returns the name of the last task in the | |
281 // generated chain of tasks, which the Job should add as a dependency. | |
282 func test(cfg *specs.TasksCfg, name string, parts map[string]string, compileTask Name string, pkgs []*specs.CipdPackage) string { | |
283 cfg.Tasks[name] = &specs.TaskSpec{ | |
284 CipdPackages: pkgs, | |
285 Dependencies: []string{compileTaskName}, | |
286 Dimensions: swarmDimensions(parts), | |
287 ExtraArgs: []string{ | |
288 "--workdir", "../../..", "swarm_test", | |
289 fmt.Sprintf("buildername=%s", name), | |
290 "mastername=fake-master", | |
291 "buildnumber=2", | |
292 "slavename=fake-buildslave", | |
293 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLAT ED_OUTDIR), | |
294 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), | |
295 }, | |
296 Isolate: "test_skia.isolate", | |
297 Priority: 0.8, | |
298 } | |
299 // Upload results if necessary. | |
300 skipUploadBots := []string{ | |
301 "ASAN", | |
302 "Coverage", | |
303 "MSAN", | |
304 "TSAN", | |
305 "UBSAN", | |
306 "Valgrind", | |
307 } | |
308 upload := true | |
309 for _, s := range skipUploadBots { | |
310 if strings.Contains(name, s) { | |
311 upload = false | |
312 break | |
313 } | |
314 } | |
315 if upload { | |
316 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema .Sep, name) | |
317 cfg.Tasks[uploadName] = &specs.TaskSpec{ | |
318 Dependencies: []string{name}, | |
319 Dimensions: UPLOAD_DIMENSIONS, | |
320 ExtraArgs: []string{ | |
321 "--workdir", "../../..", "upload_dm_results", | |
322 fmt.Sprintf("buildername=%s", name), | |
323 "mastername=fake-master", | |
324 "buildnumber=2", | |
325 "slavename=fake-buildslave", | |
326 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDE R_ISOLATED_OUTDIR), | |
327 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REV ISION), | |
328 }, | |
329 Isolate: "upload_dm_results.isolate", | |
330 Priority: 0.8, | |
331 } | |
332 return uploadName | |
333 } | |
334 return name | |
335 } | |
336 | |
337 // perf generates a Perf task. Returns the name of the last task in the | |
338 // generated chain of tasks, which the Job should add as a dependency. | |
339 func perf(cfg *specs.TasksCfg, name string, parts map[string]string, compileTask Name string, pkgs []*specs.CipdPackage) string { | |
340 cfg.Tasks[name] = &specs.TaskSpec{ | |
341 CipdPackages: pkgs, | |
342 Dependencies: []string{compileTaskName}, | |
343 Dimensions: swarmDimensions(parts), | |
344 ExtraArgs: []string{ | |
345 "--workdir", "../../..", "swarm_perf", | |
346 fmt.Sprintf("buildername=%s", name), | |
347 "mastername=fake-master", | |
348 "buildnumber=2", | |
349 "slavename=fake-buildslave", | |
350 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLAT ED_OUTDIR), | |
351 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION), | |
352 }, | |
353 Isolate: "perf_skia.isolate", | |
354 Priority: 0.8, | |
355 } | |
356 if !strings.Contains(name, "Release") { | |
357 return name | |
358 } | |
359 // Upload results if necessary. | |
360 skipUploadBots := []string{ | |
dogben
2016/09/30 19:25:29
Maybe share this with test().
borenet
2016/09/30 19:31:21
Done.
| |
361 "ASAN", | |
362 "Coverage", | |
363 "MSAN", | |
364 "TSAN", | |
365 "UBSAN", | |
366 "Valgrind", | |
367 } | |
368 upload := true | |
369 for _, s := range skipUploadBots { | |
370 if strings.Contains(name, s) { | |
371 upload = false | |
372 break | |
373 } | |
374 } | |
375 if upload { | |
376 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema .Sep, name) | |
377 cfg.Tasks[uploadName] = &specs.TaskSpec{ | |
378 Dependencies: []string{name}, | |
379 Dimensions: UPLOAD_DIMENSIONS, | |
380 ExtraArgs: []string{ | |
381 "--workdir", "../../..", "upload_nano_results", | |
382 fmt.Sprintf("buildername=%s", name), | |
383 "mastername=fake-master", | |
384 "buildnumber=2", | |
385 "slavename=fake-buildslave", | |
386 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDE R_ISOLATED_OUTDIR), | |
387 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REV ISION), | |
388 }, | |
389 Isolate: "upload_nano_results.isolate", | |
390 Priority: 0.8, | |
391 } | |
392 return uploadName | |
393 } | |
394 return name | |
395 } | |
396 | |
397 // process generates tasks and jobs for the given job name. | |
398 func process(cfg *specs.TasksCfg, name string) { | |
399 if _, ok := cfg.Jobs[name]; ok { | |
400 glog.Fatalf("Duplicate job %q", name) | |
401 } | |
402 deps := []string{} | |
403 | |
404 parts, err := jobNameSchema.ParseJobName(name) | |
405 if err != nil { | |
406 glog.Fatal(err) | |
407 } | |
408 | |
409 // RecreateSKPs. | |
410 if strings.Contains(name, "RecreateSKPs") { | |
411 deps = append(deps, recreateSKPs(cfg, name)) | |
412 } | |
413 | |
414 // CT bots. | |
415 if strings.Contains(name, "-CT_") { | |
416 deps = append(deps, ctSKPs(cfg, name)) | |
417 } | |
418 | |
419 // Compile bots. | |
420 if parts["role"] == "Build" { | |
421 deps = append(deps, compile(cfg, name, parts)) | |
422 } | |
423 | |
424 // Any remaining bots need a compile task. | |
425 compileTaskName := deriveCompileTaskName(name, parts) | |
426 | |
427 // Housekeeper. | |
428 if parts["role"] == "Housekeeper" { | |
429 deps = append(deps, housekeeper(cfg, name, compileTaskName)) | |
430 } | |
431 | |
432 // Common assets needed by the remaining bots. | |
433 pkgs := []*specs.CipdPackage{ | |
434 getCipdPackage("skimage"), | |
435 getCipdPackage("skp"), | |
436 getCipdPackage("svg"), | |
437 } | |
438 | |
439 // Test bots. | |
440 if parts["role"] == "Test" { | |
441 deps = append(deps, test(cfg, name, parts, compileTaskName, pkgs )) | |
442 } | |
443 | |
444 // Perf bots. | |
445 if parts["role"] == "Perf" { | |
446 deps = append(deps, perf(cfg, name, parts, compileTaskName, pkgs )) | |
447 } | |
448 | |
449 // Add the Job spec. | |
450 cfg.Jobs[name] = &specs.JobSpec{ | |
451 Priority: 0.8, | |
452 TaskSpecs: deps, | |
453 } | |
454 } | |
455 | |
456 // getCheckoutRoot returns the path of the root of the Skia checkout, or an | |
457 // error if it cannot be found. | |
458 func getCheckoutRoot() string { | |
459 cwd, err := os.Getwd() | |
460 if err != nil { | |
461 glog.Fatal(err) | |
462 } | |
463 for { | |
464 if _, err := os.Stat(cwd); err != nil { | |
465 glog.Fatal(err) | |
466 } | |
467 s, err := os.Stat(path.Join(cwd, ".git")) | |
468 if err == nil && s.IsDir() { | |
469 // TODO(borenet): Should we verify that this is a Skia | |
470 // checkout and not something else? | |
471 return cwd | |
472 } | |
473 cwd = filepath.Clean(path.Join(cwd, "..")) | |
474 } | |
475 } | |
476 | |
// Regenerate the tasks.json file.
//
// main locates the checkout root, loads the builder name schema, generates a
// Task and Job spec for every entry in JOBS, validates the resulting config,
// and writes it to the tasks.json file at the checkout root.
func main() {
	common.Init()
	defer common.LogPanic()

	// Where are we? infrabotsDir is a package-level var used by
	// getCipdPackage to locate asset VERSION files.
	root := getCheckoutRoot()
	infrabotsDir = path.Join(root, "infra", "bots")

	// Create the JobNameSchema.
	schema, err := NewJobNameSchema(path.Join(infrabotsDir, "recipe_modules", "builder_name_schema", "builder_name_schema.json"))
	if err != nil {
		glog.Fatal(err)
	}
	jobNameSchema = schema

	// Create the config.
	cfg := &specs.TasksCfg{
		Jobs:  map[string]*specs.JobSpec{},
		Tasks: map[string]*specs.TaskSpec{},
	}

	// Create Tasks and Jobs.
	for _, j := range JOBS {
		process(cfg, j)
	}

	// Validate the config.
	if err := cfg.Validate(); err != nil {
		glog.Fatal(err)
	}

	// Write the tasks.json file.
	outFile := path.Join(root, specs.TASKS_CFG_FILE)
	b, err := json.MarshalIndent(cfg, "", " ")
	if err != nil {
		glog.Fatal(err)
	}
	// The json package escapes HTML characters, which makes our output
	// much less readable. Replace the escape characters with the real
	// character.
	b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
	if err := ioutil.WriteFile(outFile, b, os.ModePerm); err != nil {
		glog.Fatal(err)
	}
}
523 | |
// TODO(borenet): The below really belongs in its own file, probably next to the
// builder_name_schema.json file.

// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format. It is loaded from the builder_name_schema.json file.
type JobNameSchema struct {
	// Schema maps a role (the first part of a job name) to the ordered
	// list of part names which follow it.
	Schema map[string][]string `json:"builder_name_schema"`
	// Sep is the separator between the parts of a job name.
	Sep string `json:"builder_name_sep"`
}
533 | |
534 // NewJobNameSchema returns a JobNameSchema instance based on the given JSON | |
535 // file. | |
536 func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) { | |
537 var rv JobNameSchema | |
538 f, err := os.Open(jsonFile) | |
539 if err != nil { | |
540 return nil, err | |
541 } | |
542 defer util.Close(f) | |
543 if err := json.NewDecoder(f).Decode(&rv); err != nil { | |
544 return nil, err | |
545 } | |
546 return &rv, nil | |
547 } | |
548 | |
549 // ParseJobName splits the given Job name into its component parts, according | |
550 // to the schema. | |
551 func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) { | |
552 split := strings.Split(n, s.Sep) | |
553 if len(split) < 2 { | |
554 return nil, fmt.Errorf("Invalid job name: %q", n) | |
555 } | |
556 role := split[0] | |
557 split = split[1:] | |
558 keys, ok := s.Schema[role] | |
559 if !ok { | |
560 return nil, fmt.Errorf("Invalid job name; %q is not a valid role .", role) | |
561 } | |
562 extraConfig := "" | |
563 if len(split) == len(keys)+1 { | |
564 extraConfig = split[len(split)-1] | |
565 split = split[:len(split)-1] | |
566 } | |
567 if len(split) != len(keys) { | |
568 return nil, fmt.Errorf("Invalid job name; %q has incorrect numbe r of parts.", n) | |
569 } | |
570 rv := make(map[string]string, len(keys)+2) | |
571 rv["role"] = role | |
572 if extraConfig != "" { | |
573 rv["extra_config"] = extraConfig | |
574 } | |
575 for i, k := range keys { | |
576 rv[k] = split[i] | |
577 } | |
578 return rv, nil | |
579 } | |
580 | |
581 // MakeJobName assembles the given parts of a Job name, according to the schema. | |
582 func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) { | |
583 role, ok := parts["role"] | |
584 if !ok { | |
585 return "", fmt.Errorf("Invalid job parts; jobs must have a role. ") | |
586 } | |
587 keys, ok := s.Schema[role] | |
588 if !ok { | |
589 return "", fmt.Errorf("Invalid job parts; unknown role %q", role ) | |
590 } | |
591 rvParts := make([]string, 0, len(parts)) | |
592 rvParts = append(rvParts, role) | |
593 for _, k := range keys { | |
594 v, ok := parts[k] | |
595 if !ok { | |
596 return "", fmt.Errorf("Invalid job parts; missing %q", k ) | |
597 } | |
598 rvParts = append(rvParts, v) | |
599 } | |
600 if _, ok := parts["extra_config"]; ok { | |
601 rvParts = append(rvParts, parts["extra_config"]) | |
602 } | |
603 return strings.Join(rvParts, s.Sep), nil | |
604 } | |
OLD | NEW |