OLD | NEW |
1 package aggregator | 1 package aggregator |
2 | 2 |
3 import ( | 3 import ( |
4 "bytes" | 4 "bytes" |
5 "crypto/sha1" | 5 "crypto/sha1" |
6 "fmt" | 6 "fmt" |
7 "io" | 7 "io" |
8 "io/ioutil" | 8 "io/ioutil" |
9 "os" | 9 "os" |
10 "path/filepath" | 10 "path/filepath" |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
65 aggregationShutdown chan bool | 65 aggregationShutdown chan bool |
66 aggregationWaitGroup *sync.WaitGroup | 66 aggregationWaitGroup *sync.WaitGroup |
67 // These three counts are used to determine if there is any pending work
. | 67 // These three counts are used to determine if there is any pending work
. |
68 // There is no pending work if all three of these values are equal and t
he | 68 // There is no pending work if all three of these values are equal and t
he |
69 // work queues are empty. | 69 // work queues are empty. |
70 analysisCount int64 | 70 analysisCount int64 |
71 uploadCount int64 | 71 uploadCount int64 |
72 bugReportCount int64 | 72 bugReportCount int64 |
73 } | 73 } |
74 | 74 |
75 // AnalysisPackage is a generic holder for the functions needed to analyze | |
76 type AnalysisPackage struct { | |
77 Setup func(workingDirPath string) error | |
78 Analyze func(workingDirPath, pathToFile string) (uploadPackage, error) | |
79 } | |
80 | |
const (
	// BAD_FUZZ marks a fuzz that crashed or otherwise misbehaved under analysis.
	BAD_FUZZ = "bad"
	// GREY_FUZZ marks a fuzz whose analysis runs all terminated gracefully.
	GREY_FUZZ = "grey"
)
85 | 79 |
| 80 var ( |
| 81 CLANG_DEBUG = common.TEST_HARNESS_NAME + "_clang_debug" |
| 82 CLANG_RELEASE = common.TEST_HARNESS_NAME + "_clang_release" |
| 83 ASAN_DEBUG = common.TEST_HARNESS_NAME + "_asan_debug" |
| 84 ASAN_RELEASE = common.TEST_HARNESS_NAME + "_asan_release" |
| 85 ) |
| 86 |
86 // uploadPackage is a struct containing all the pieces of a fuzz that need to be
uploaded to GCS | 87 // uploadPackage is a struct containing all the pieces of a fuzz that need to be
uploaded to GCS |
87 type uploadPackage struct { | 88 type uploadPackage struct { |
88 » Name string | 89 » Data data.GCSPackage |
89 » FilePath string | 90 » FilePath string |
90 » DebugDump string | |
91 » DebugErr string | |
92 » ReleaseDump string | |
93 » ReleaseErr string | |
94 » FileType string | |
95 // Must be BAD_FUZZ or GREY_FUZZ | 91 // Must be BAD_FUZZ or GREY_FUZZ |
96 FuzzType string | 92 FuzzType string |
97 } | 93 } |
98 | 94 |
// bugReportingPackage is a struct containing the pieces of a fuzz that may need to have
// a bug filed or updated.
type bugReportingPackage struct {
	// FuzzName identifies the fuzz in GCS.
	FuzzName string
	// CommitHash is the Skia revision the fuzz was analyzed against.
	CommitHash string
	// IsBadFuzz is true when the fuzz was classified BAD_FUZZ.
	IsBadFuzz bool
}
106 | 102 |
107 // StartAggregator creates and starts a Aggregator. | 103 // StartAggregator creates and starts a Aggregator. |
108 // If there is a problem starting up, an error is returned. Other errors will b
e logged. | 104 // If there is a problem starting up, an error is returned. Other errors will b
e logged. |
109 func StartAggregator(s *storage.Client, category string) (*Aggregator, error) { | 105 func StartAggregator(s *storage.Client, category string) (*Aggregator, error) { |
110 b := Aggregator{ | 106 b := Aggregator{ |
111 Category: category, | 107 Category: category, |
112 storageClient: s, | 108 storageClient: s, |
113 fuzzPath: filepath.Join(config.Aggregator.FuzzPath, ca
tegory), | 109 fuzzPath: filepath.Join(config.Aggregator.FuzzPath, ca
tegory), |
114 executablePath: filepath.Join(config.Aggregator.ExecutablePa
th, category), | 110 executablePath: filepath.Join(config.Aggregator.ExecutablePa
th, category), |
115 forAnalysis: make(chan string, 10000), | 111 forAnalysis: make(chan string, 10000), |
116 forUpload: make(chan uploadPackage, 100), | 112 forUpload: make(chan uploadPackage, 100), |
117 forBugReporting: make(chan bugReportingPackage, 100), | 113 forBugReporting: make(chan bugReportingPackage, 100), |
118 MakeBugOnBadFuzz: false, | 114 MakeBugOnBadFuzz: false, |
| 115 UploadGreyFuzzes: false, |
119 monitoringShutdown: make(chan bool, 2), | 116 monitoringShutdown: make(chan bool, 2), |
120 // aggregationShutdown needs to be created with a calculated cap
acity in start | 117 // aggregationShutdown needs to be created with a calculated cap
acity in start |
121 } | 118 } |
122 | 119 |
123 return &b, b.start() | 120 return &b, b.start() |
124 } | 121 } |
125 | 122 |
126 // start starts up the Aggregator. It refreshes all status it needs and builds
a debug and a | 123 // start starts up the Aggregator. It refreshes all status it needs and builds
a debug and a |
127 // release version of Skia for use in analysis. It then spawns the aggregation
pipeline and a | 124 // release version of Skia for use in analysis. It then spawns the aggregation
pipeline and a |
128 // monitoring thread. | 125 // monitoring thread. |
129 func (agg *Aggregator) start() error { | 126 func (agg *Aggregator) start() error { |
130 // Set the wait groups to fresh | 127 // Set the wait groups to fresh |
131 agg.monitoringWaitGroup = &sync.WaitGroup{} | 128 agg.monitoringWaitGroup = &sync.WaitGroup{} |
132 agg.aggregationWaitGroup = &sync.WaitGroup{} | 129 agg.aggregationWaitGroup = &sync.WaitGroup{} |
133 agg.analysisCount = 0 | 130 agg.analysisCount = 0 |
134 agg.uploadCount = 0 | 131 agg.uploadCount = 0 |
135 agg.bugReportCount = 0 | 132 agg.bugReportCount = 0 |
136 » if _, err := fileutil.EnsureDirExists(agg.fuzzPath); err != nil { | 133 » if err := agg.buildAnalysisBinaries(); err != nil { |
137 » » return err | |
138 » } | |
139 » if _, err := fileutil.EnsureDirExists(agg.executablePath); err != nil { | |
140 » » return err | |
141 » } | |
142 » if err := common.BuildClangHarness("Debug", true); err != nil { | |
143 » » return err | |
144 » } | |
145 » if err := common.BuildClangHarness("Release", true); err != nil { | |
146 return err | 134 return err |
147 } | 135 } |
148 | 136 |
149 agg.monitoringWaitGroup.Add(1) | 137 agg.monitoringWaitGroup.Add(1) |
150 go agg.scanForNewCandidates() | 138 go agg.scanForNewCandidates() |
151 | 139 |
152 numAnalysisProcesses := config.Aggregator.NumAnalysisProcesses | 140 numAnalysisProcesses := config.Aggregator.NumAnalysisProcesses |
153 if numAnalysisProcesses <= 0 { | 141 if numAnalysisProcesses <= 0 { |
154 // TODO(kjlubick): Actually make this smart based on the number
of cores | 142 // TODO(kjlubick): Actually make this smart based on the number
of cores |
155 numAnalysisProcesses = 20 | 143 numAnalysisProcesses = 20 |
(...skipping 15 matching lines...) Expand all Loading... |
171 } | 159 } |
172 agg.aggregationWaitGroup.Add(1) | 160 agg.aggregationWaitGroup.Add(1) |
173 go agg.waitForBugReporting() | 161 go agg.waitForBugReporting() |
174 agg.aggregationShutdown = make(chan bool, numAnalysisProcesses+numUpload
Processes+1) | 162 agg.aggregationShutdown = make(chan bool, numAnalysisProcesses+numUpload
Processes+1) |
175 // start background routine to monitor queue details | 163 // start background routine to monitor queue details |
176 agg.monitoringWaitGroup.Add(1) | 164 agg.monitoringWaitGroup.Add(1) |
177 go agg.monitorStatus(numAnalysisProcesses, numUploadProcesses) | 165 go agg.monitorStatus(numAnalysisProcesses, numUploadProcesses) |
178 return nil | 166 return nil |
179 } | 167 } |
180 | 168 |
| 169 // buildAnalysisBinaries creates the 4 executables we need to perform analysis a
nd makes a copy of |
| 170 // them in the executablePath. We need (Debug,Release) x (Clang,ASAN). The cop
ied binaries have |
| 171 // a suffix like _clang_debug |
| 172 func (agg *Aggregator) buildAnalysisBinaries() error { |
| 173 if _, err := fileutil.EnsureDirExists(agg.fuzzPath); err != nil { |
| 174 return err |
| 175 } |
| 176 if _, err := fileutil.EnsureDirExists(agg.executablePath); err != nil { |
| 177 return err |
| 178 } |
| 179 if err := common.BuildClangHarness("Debug", true); err != nil { |
| 180 return err |
| 181 } |
| 182 outPath := filepath.Join(config.Generator.SkiaRoot, "out") |
| 183 if err := fileutil.CopyExecutable(filepath.Join(outPath, "Debug", common
.TEST_HARNESS_NAME), filepath.Join(agg.executablePath, CLANG_DEBUG)); err != nil
{ |
| 184 return err |
| 185 } |
| 186 if err := common.BuildClangHarness("Release", true); err != nil { |
| 187 return err |
| 188 } |
| 189 if err := fileutil.CopyExecutable(filepath.Join(outPath, "Release", comm
on.TEST_HARNESS_NAME), filepath.Join(agg.executablePath, CLANG_RELEASE)); err !=
nil { |
| 190 return err |
| 191 } |
| 192 if err := common.BuildASANHarness("Debug", false); err != nil { |
| 193 return err |
| 194 } |
| 195 if err := fileutil.CopyExecutable(filepath.Join(outPath, "Debug", common
.TEST_HARNESS_NAME), filepath.Join(agg.executablePath, ASAN_DEBUG)); err != nil
{ |
| 196 return err |
| 197 } |
| 198 if err := common.BuildASANHarness("Release", false); err != nil { |
| 199 return err |
| 200 } |
| 201 if err := fileutil.CopyExecutable(filepath.Join(outPath, "Release", comm
on.TEST_HARNESS_NAME), filepath.Join(agg.executablePath, ASAN_RELEASE)); err !=
nil { |
| 202 return err |
| 203 } |
| 204 return nil |
| 205 } |
| 206 |
181 // scanForNewCandidates runs scanHelper once every config.Aggregator.RescanPerio
d, which scans the | 207 // scanForNewCandidates runs scanHelper once every config.Aggregator.RescanPerio
d, which scans the |
182 // config.Generator.AflOutputPath for new fuzzes. If scanHelper returns an erro
r, this method | 208 // config.Generator.AflOutputPath for new fuzzes. If scanHelper returns an erro
r, this method |
183 // will terminate. | 209 // will terminate. |
184 func (agg *Aggregator) scanForNewCandidates() { | 210 func (agg *Aggregator) scanForNewCandidates() { |
185 defer agg.monitoringWaitGroup.Done() | 211 defer agg.monitoringWaitGroup.Done() |
186 | 212 |
187 alreadyFoundFuzzes := &SortedStringSlice{} | 213 alreadyFoundFuzzes := &SortedStringSlice{} |
188 // time.Tick does not fire immediately, so we fire it manually once. | 214 // time.Tick does not fire immediately, so we fire it manually once. |
189 if err := agg.scanHelper(alreadyFoundFuzzes); err != nil { | 215 if err := agg.scanHelper(alreadyFoundFuzzes); err != nil { |
190 glog.Errorf("[%s] Scanner terminated due to error: %v", agg.Cate
gory, err) | 216 glog.Errorf("[%s] Scanner terminated due to error: %v", agg.Cate
gory, err) |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
238 // -hangs/ | 264 // -hangs/ |
239 // -queue/ | 265 // -queue/ |
240 // -fuzzer_stats | 266 // -fuzzer_stats |
241 // -fuzzer1/ | 267 // -fuzzer1/ |
242 // ... | 268 // ... |
243 func (agg *Aggregator) findBadFuzzPaths(alreadyFoundFuzzes *SortedStringSlice) (
[]string, error) { | 269 func (agg *Aggregator) findBadFuzzPaths(alreadyFoundFuzzes *SortedStringSlice) (
[]string, error) { |
244 badFuzzPaths := make([]string, 0) | 270 badFuzzPaths := make([]string, 0) |
245 | 271 |
246 scanPath := filepath.Join(config.Generator.AflOutputPath, agg.Category) | 272 scanPath := filepath.Join(config.Generator.AflOutputPath, agg.Category) |
247 aflDir, err := os.Open(scanPath) | 273 aflDir, err := os.Open(scanPath) |
| 274 if os.IsNotExist(err) { |
| 275 glog.Warningf("Path to scan %s does not exist. Returning 0 foun
d fuzzes", scanPath) |
| 276 return []string{}, nil |
| 277 } |
248 if err != nil { | 278 if err != nil { |
249 return nil, err | 279 return nil, err |
250 } | 280 } |
251 defer util.Close(aflDir) | 281 defer util.Close(aflDir) |
252 | 282 |
253 fuzzerFolders, err := aflDir.Readdir(-1) | 283 fuzzerFolders, err := aflDir.Readdir(-1) |
254 if err != nil { | 284 if err != nil { |
255 return nil, err | 285 return nil, err |
256 } | 286 } |
257 | 287 |
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
350 // in that directory. | 380 // in that directory. |
351 func (agg *Aggregator) setupAnalysis(workingDirPath string) error { | 381 func (agg *Aggregator) setupAnalysis(workingDirPath string) error { |
352 // Delete all previous executables to get a clean start | 382 // Delete all previous executables to get a clean start |
353 if err := os.RemoveAll(workingDirPath); err != nil && !os.IsNotExist(err
) { | 383 if err := os.RemoveAll(workingDirPath); err != nil && !os.IsNotExist(err
) { |
354 return err | 384 return err |
355 } | 385 } |
356 if err := os.MkdirAll(workingDirPath, 0755); err != nil { | 386 if err := os.MkdirAll(workingDirPath, 0755); err != nil { |
357 return err | 387 return err |
358 } | 388 } |
359 | 389 |
360 » // make a copy of the debug and release executables | 390 » // make a copy of the 4 executables that were made in buildAnalysisBinar
ies() |
361 » basePath := filepath.Join(config.Generator.SkiaRoot, "out") | 391 » if err := fileutil.CopyExecutable(filepath.Join(agg.executablePath, CLAN
G_DEBUG), filepath.Join(workingDirPath, CLANG_DEBUG)); err != nil { |
362 » if err := fileutil.CopyExecutable(filepath.Join(basePath, "Debug", commo
n.TEST_HARNESS_NAME), filepath.Join(workingDirPath, common.TEST_HARNESS_NAME+"_d
ebug")); err != nil { | |
363 return err | 392 return err |
364 } | 393 } |
365 » if err := fileutil.CopyExecutable(filepath.Join(basePath, "Release", com
mon.TEST_HARNESS_NAME), filepath.Join(workingDirPath, common.TEST_HARNESS_NAME+"
_release")); err != nil { | 394 » if err := fileutil.CopyExecutable(filepath.Join(agg.executablePath, CLAN
G_RELEASE), filepath.Join(workingDirPath, CLANG_RELEASE)); err != nil { |
| 395 » » return err |
| 396 » } |
| 397 » if err := fileutil.CopyExecutable(filepath.Join(agg.executablePath, ASAN
_DEBUG), filepath.Join(workingDirPath, ASAN_DEBUG)); err != nil { |
| 398 » » return err |
| 399 » } |
| 400 » if err := fileutil.CopyExecutable(filepath.Join(agg.executablePath, ASAN
_RELEASE), filepath.Join(workingDirPath, ASAN_RELEASE)); err != nil { |
366 return err | 401 return err |
367 } | 402 } |
368 return nil | 403 return nil |
369 } | 404 } |
370 | 405 |
371 // analyze simply invokes performAnalysis with a fuzz under both the Debug and R
elease build. Upon | 406 // analyze simply invokes performAnalysis with a fuzz under both the Debug and R
elease build. Upon |
372 // completion, it checks to see if the fuzz is a grey fuzz and sets the FuzzType
accordingly. | 407 // completion, it checks to see if the fuzz is a grey fuzz and sets the FuzzType
accordingly. |
373 func (agg *Aggregator) analyze(workingDirPath, fileName string) (uploadPackage,
error) { | 408 func (agg *Aggregator) analyze(workingDirPath, fileName string) (uploadPackage,
error) { |
374 upload := uploadPackage{ | 409 upload := uploadPackage{ |
375 » » Name: fileName, | 410 » » Data: data.GCSPackage{ |
376 » » FileType: agg.Category, | 411 » » » Name: fileName, |
| 412 » » » FuzzCategory: agg.Category, |
| 413 » » }, |
377 FuzzType: BAD_FUZZ, | 414 FuzzType: BAD_FUZZ, |
378 FilePath: filepath.Join(agg.fuzzPath, fileName), | 415 FilePath: filepath.Join(agg.fuzzPath, fileName), |
379 } | 416 } |
380 | 417 |
381 » if dump, stderr, err := agg.performAnalysis(workingDirPath, common.TEST_
HARNESS_NAME, upload.FilePath, true); err != nil { | 418 » if dump, stderr, err := agg.performAnalysis(workingDirPath, CLANG_DEBUG,
upload.FilePath); err != nil { |
382 return upload, err | 419 return upload, err |
383 } else { | 420 } else { |
384 » » upload.DebugDump = dump | 421 » » upload.Data.Debug.Dump = dump |
385 » » upload.DebugErr = stderr | 422 » » upload.Data.Debug.StdErr = stderr |
386 } | 423 } |
387 » if dump, stderr, err := agg.performAnalysis(workingDirPath, common.TEST_
HARNESS_NAME, upload.FilePath, false); err != nil { | 424 » if dump, stderr, err := agg.performAnalysis(workingDirPath, CLANG_RELEAS
E, upload.FilePath); err != nil { |
388 return upload, err | 425 return upload, err |
389 } else { | 426 } else { |
390 » » upload.ReleaseDump = dump | 427 » » upload.Data.Release.Dump = dump |
391 » » upload.ReleaseErr = stderr | 428 » » upload.Data.Release.StdErr = stderr |
392 } | 429 } |
393 » if r := data.ParseFuzzResult(upload.DebugDump, upload.DebugErr, upload.R
eleaseDump, upload.ReleaseErr); r.Flags == data.DebugFailedGracefully|data.Relea
seFailedGracefully { | 430 » // AddressSanitizer only outputs to stderr |
| 431 » if _, stderr, err := agg.performAnalysis(workingDirPath, ASAN_DEBUG, upl
oad.FilePath); err != nil { |
| 432 » » return upload, err |
| 433 » } else { |
| 434 » » upload.Data.Debug.Asan = stderr |
| 435 » } |
| 436 » if _, stderr, err := agg.performAnalysis(workingDirPath, ASAN_RELEASE, u
pload.FilePath); err != nil { |
| 437 » » return upload, err |
| 438 » } else { |
| 439 » » upload.Data.Release.Asan = stderr |
| 440 » } |
| 441 » if r := data.ParseGCSPackage(upload.Data); r.Debug.Flags == data.Termina
tedGracefully && r.Release.Flags == data.TerminatedGracefully { |
394 upload.FuzzType = GREY_FUZZ | 442 upload.FuzzType = GREY_FUZZ |
395 } | 443 } |
396 return upload, nil | 444 return upload, nil |
397 } | 445 } |
398 | 446 |
399 // performAnalysis executes a command from the working dir specified using | 447 // performAnalysis executes a command from the working dir specified using |
400 // AnalysisArgs for a given fuzz category. The crash dumps (which | 448 // AnalysisArgs for a given fuzz category. The crash dumps (which |
401 // come via standard out) and standard errors are recorded as strings. | 449 // come via standard out) and standard errors are recorded as strings. |
402 func (agg *Aggregator) performAnalysis(workingDirPath, baseExecutableName, pathT
oFile string, isDebug bool) (string, string, error) { | 450 func (agg *Aggregator) performAnalysis(workingDirPath, executableName, pathToFil
e string) (string, string, error) { |
403 » suffix := "_release" | |
404 » if isDebug { | |
405 » » suffix = "_debug" | |
406 » } | |
407 | |
408 » pathToExecutable := fmt.Sprintf("./%s%s", baseExecutableName, suffix) | |
409 | 451 |
410 var dump bytes.Buffer | 452 var dump bytes.Buffer |
411 var stdErr bytes.Buffer | 453 var stdErr bytes.Buffer |
412 | 454 |
413 // GNU timeout is used instead of the option on exec.Command because exp
erimentation with the | 455 // GNU timeout is used instead of the option on exec.Command because exp
erimentation with the |
414 // latter showed evidence of that way leaking processes, which led to OO
M errors. | 456 // latter showed evidence of that way leaking processes, which led to OO
M errors. |
415 cmd := &exec.Command{ | 457 cmd := &exec.Command{ |
416 » » Name: "timeout", | 458 » » Name: "timeout", |
417 » » Args: common.AnalysisArgsFor(agg.Category, pathToExecutable
, pathToFile), | 459 » » Args: common.AnalysisArgsFor(agg.Category, "./"+executabl
eName, pathToFile), |
418 » » LogStdout: false, | 460 » » LogStdout: false, |
419 » » LogStderr: false, | 461 » » LogStderr: false, |
420 » » Stdout: &dump, | 462 » » Stdout: &dump, |
421 » » Stderr: &stdErr, | 463 » » Stderr: &stdErr, |
422 » » Dir: workingDirPath, | 464 » » Dir: workingDirPath, |
| 465 » » InheritPath: true, |
| 466 » » Env: []string{common.ASAN_OPTIONS}, |
423 } | 467 } |
424 | 468 |
425 //errors are fine/expected from this, as we are dealing with bad fuzzes | 469 //errors are fine/expected from this, as we are dealing with bad fuzzes |
426 if err := exec.Run(cmd); err != nil { | 470 if err := exec.Run(cmd); err != nil { |
427 return dump.String(), stdErr.String(), nil | 471 return dump.String(), stdErr.String(), nil |
428 } | 472 } |
429 return dump.String(), stdErr.String(), nil | 473 return dump.String(), stdErr.String(), nil |
430 } | 474 } |
431 | 475 |
432 // calcuateHash calculates the sha1 hash of a file, given its path. It returns
both the hash as a | 476 // calcuateHash calculates the sha1 hash of a file, given its path. It returns
both the hash as a |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
464 // them. If any unrecoverable errors happen, this method terminates. | 508 // them. If any unrecoverable errors happen, this method terminates. |
465 func (agg *Aggregator) waitForUploads(identifier int) { | 509 func (agg *Aggregator) waitForUploads(identifier int) { |
466 defer agg.aggregationWaitGroup.Done() | 510 defer agg.aggregationWaitGroup.Done() |
467 defer go_metrics.GetOrRegisterCounter("upload_process_count", go_metrics
.DefaultRegistry).Dec(int64(1)) | 511 defer go_metrics.GetOrRegisterCounter("upload_process_count", go_metrics
.DefaultRegistry).Dec(int64(1)) |
468 glog.Infof("[%s] Spawning uploader %d", agg.Category, identifier) | 512 glog.Infof("[%s] Spawning uploader %d", agg.Category, identifier) |
469 for { | 513 for { |
470 select { | 514 select { |
471 case p := <-agg.forUpload: | 515 case p := <-agg.forUpload: |
472 atomic.AddInt64(&agg.uploadCount, int64(1)) | 516 atomic.AddInt64(&agg.uploadCount, int64(1)) |
473 if !agg.UploadGreyFuzzes && p.FuzzType == GREY_FUZZ { | 517 if !agg.UploadGreyFuzzes && p.FuzzType == GREY_FUZZ { |
474 » » » » glog.Infof("[%s] Skipping upload of grey fuzz %s
", agg.Category, p.Name) | 518 » » » » glog.Infof("[%s] Skipping upload of grey fuzz %s
", agg.Category, p.Data.Name) |
475 continue | 519 continue |
476 } | 520 } |
477 if err := agg.upload(p); err != nil { | 521 if err := agg.upload(p); err != nil { |
478 glog.Errorf("[%s] Uploader %d terminated due to
error: %s", agg.Category, identifier, err) | 522 glog.Errorf("[%s] Uploader %d terminated due to
error: %s", agg.Category, identifier, err) |
479 return | 523 return |
480 } | 524 } |
481 agg.forBugReporting <- bugReportingPackage{ | 525 agg.forBugReporting <- bugReportingPackage{ |
482 » » » » FuzzName: p.Name, | 526 » » » » FuzzName: p.Data.Name, |
483 CommitHash: config.Generator.SkiaVersion.Hash, | 527 CommitHash: config.Generator.SkiaVersion.Hash, |
484 IsBadFuzz: p.FuzzType == BAD_FUZZ, | 528 IsBadFuzz: p.FuzzType == BAD_FUZZ, |
485 } | 529 } |
486 case <-agg.aggregationShutdown: | 530 case <-agg.aggregationShutdown: |
487 glog.Infof("[%s] Uploader %d recieved shutdown signal",
agg.Category, identifier) | 531 glog.Infof("[%s] Uploader %d recieved shutdown signal",
agg.Category, identifier) |
488 return | 532 return |
489 } | 533 } |
490 } | 534 } |
491 } | 535 } |
492 | 536 |
493 // upload breaks apart the uploadPackage into its constituant parts and uploads
them to GCS using | 537 // upload breaks apart the uploadPackage into its constituant parts and uploads
them to GCS using |
494 // some helper methods. | 538 // some helper methods. |
495 func (agg *Aggregator) upload(p uploadPackage) error { | 539 func (agg *Aggregator) upload(p uploadPackage) error { |
496 » glog.Infof("[%s] uploading %s with file %s and analysis bytes %d;%d;%d;%
d", agg.Category, p.Name, p.FilePath, len(p.DebugDump), len(p.DebugErr), len(p.R
eleaseDump), len(p.ReleaseErr)) | 540 » glog.Infof("[%s] uploading %s with file %s and analysis bytes %d;%d;%d|%
d;%d;%d", agg.Category, p.Data.Name, p.FilePath, len(p.Data.Debug.Asan), len(p.D
ata.Debug.Dump), len(p.Data.Debug.StdErr), len(p.Data.Release.Asan), len(p.Data.
Release.Dump), len(p.Data.Release.StdErr)) |
497 | 541 |
498 » if err := agg.uploadBinaryFromDisk(p, p.Name, p.FilePath); err != nil { | 542 » if err := agg.uploadBinaryFromDisk(p, p.Data.Name, p.FilePath); err != n
il { |
499 return err | 543 return err |
500 } | 544 } |
501 » if err := agg.uploadString(p, p.Name+"_debug.dump", p.DebugDump); err !=
nil { | 545 » if err := agg.uploadString(p, p.Data.Name+"_debug.asan", p.Data.Debug.As
an); err != nil { |
502 return err | 546 return err |
503 } | 547 } |
504 » if err := agg.uploadString(p, p.Name+"_debug.err", p.DebugErr); err != n
il { | 548 » if err := agg.uploadString(p, p.Data.Name+"_debug.dump", p.Data.Debug.Du
mp); err != nil { |
505 return err | 549 return err |
506 } | 550 } |
507 » if err := agg.uploadString(p, p.Name+"_release.dump", p.ReleaseDump); er
r != nil { | 551 » if err := agg.uploadString(p, p.Data.Name+"_debug.err", p.Data.Debug.Std
Err); err != nil { |
508 return err | 552 return err |
509 } | 553 } |
510 » return agg.uploadString(p, p.Name+"_release.err", p.ReleaseErr) | 554 » if err := agg.uploadString(p, p.Data.Name+"_release.asan", p.Data.Releas
e.Asan); err != nil { |
| 555 » » return err |
| 556 » } |
| 557 » if err := agg.uploadString(p, p.Data.Name+"_release.dump", p.Data.Releas
e.Dump); err != nil { |
| 558 » » return err |
| 559 » } |
| 560 » return agg.uploadString(p, p.Data.Name+"_release.err", p.Data.Release.St
dErr) |
511 } | 561 } |
512 | 562 |
513 // uploadBinaryFromDisk uploads a binary file on disk to GCS, returning an error
if anything | 563 // uploadBinaryFromDisk uploads a binary file on disk to GCS, returning an error
if anything |
514 // goes wrong. | 564 // goes wrong. |
515 func (agg *Aggregator) uploadBinaryFromDisk(p uploadPackage, fileName, filePath
string) error { | 565 func (agg *Aggregator) uploadBinaryFromDisk(p uploadPackage, fileName, filePath
string) error { |
516 » name := fmt.Sprintf("%s/%s/%s/%s/%s", p.FileType, config.Generator.SkiaV
ersion.Hash, p.FuzzType, p.Name, fileName) | 566 » name := fmt.Sprintf("%s/%s/%s/%s/%s", p.Data.FuzzCategory, config.Genera
tor.SkiaVersion.Hash, p.FuzzType, p.Data.Name, fileName) |
517 w := agg.storageClient.Bucket(config.GS.Bucket).Object(name).NewWriter(c
ontext.Background()) | 567 w := agg.storageClient.Bucket(config.GS.Bucket).Object(name).NewWriter(c
ontext.Background()) |
518 defer util.Close(w) | 568 defer util.Close(w) |
519 // We set the encoding to avoid accidental crashes if Chrome were to try
to render a fuzzed png | 569 // We set the encoding to avoid accidental crashes if Chrome were to try
to render a fuzzed png |
520 // or svg or something. | 570 // or svg or something. |
521 w.ObjectAttrs.ContentEncoding = "application/octet-stream" | 571 w.ObjectAttrs.ContentEncoding = "application/octet-stream" |
522 | 572 |
523 f, err := os.Open(filePath) | 573 f, err := os.Open(filePath) |
524 if err != nil { | 574 if err != nil { |
525 return fmt.Errorf("There was a problem reading %s for uploading
: %s", filePath, err) | 575 return fmt.Errorf("There was a problem reading %s for uploading
: %s", filePath, err) |
526 } | 576 } |
527 | 577 |
528 if n, err := io.Copy(w, f); err != nil { | 578 if n, err := io.Copy(w, f); err != nil { |
529 return fmt.Errorf("There was a problem uploading binary file %s.
Only uploaded %d bytes : %s", name, n, err) | 579 return fmt.Errorf("There was a problem uploading binary file %s.
Only uploaded %d bytes : %s", name, n, err) |
530 } | 580 } |
531 return nil | 581 return nil |
532 } | 582 } |
533 | 583 |
534 // uploadBinaryFromDisk uploads the contents of a string as a file to GCS, retur
ning an error if | 584 // uploadBinaryFromDisk uploads the contents of a string as a file to GCS, retur
ning an error if |
535 // anything goes wrong. | 585 // anything goes wrong. |
536 func (agg *Aggregator) uploadString(p uploadPackage, fileName, contents string)
error { | 586 func (agg *Aggregator) uploadString(p uploadPackage, fileName, contents string)
error { |
537 » name := fmt.Sprintf("%s/%s/%s/%s/%s", p.FileType, config.Generator.SkiaV
ersion.Hash, p.FuzzType, p.Name, fileName) | 587 » name := fmt.Sprintf("%s/%s/%s/%s/%s", p.Data.FuzzCategory, config.Genera
tor.SkiaVersion.Hash, p.FuzzType, p.Data.Name, fileName) |
538 w := agg.storageClient.Bucket(config.GS.Bucket).Object(name).NewWriter(c
ontext.Background()) | 588 w := agg.storageClient.Bucket(config.GS.Bucket).Object(name).NewWriter(c
ontext.Background()) |
539 defer util.Close(w) | 589 defer util.Close(w) |
540 w.ObjectAttrs.ContentEncoding = "text/plain" | 590 w.ObjectAttrs.ContentEncoding = "text/plain" |
541 | 591 |
542 if n, err := w.Write([]byte(contents)); err != nil { | 592 if n, err := w.Write([]byte(contents)); err != nil { |
543 return fmt.Errorf("There was a problem uploading %s. Only uploa
ded %d bytes: %s", name, n, err) | 593 return fmt.Errorf("There was a problem uploading %s. Only uploa
ded %d bytes: %s", name, n, err) |
544 } | 594 } |
545 return nil | 595 return nil |
546 } | 596 } |
547 | 597 |
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
645 } | 695 } |
646 glog.Infof("[%s] Waiting %s for the aggregator's queues to be em
pty", agg.Category, config.Aggregator.StatusPeriod) | 696 glog.Infof("[%s] Waiting %s for the aggregator's queues to be em
pty", agg.Category, config.Aggregator.StatusPeriod) |
647 } | 697 } |
648 } | 698 } |
649 | 699 |
650 // ForceAnalysis directly adds the given path to the analysis queue, where it wi
ll be analyzed, | 700 // ForceAnalysis directly adds the given path to the analysis queue, where it wi
ll be analyzed, |
651 // uploaded and possibly bug reported. | 701 // uploaded and possibly bug reported. |
652 func (agg *Aggregator) ForceAnalysis(path string) { | 702 func (agg *Aggregator) ForceAnalysis(path string) { |
653 agg.forAnalysis <- path | 703 agg.forAnalysis <- path |
654 } | 704 } |
OLD | NEW |