OLD | NEW |
(Empty) | |
| 1 // run_chromium_analysis is an application that runs the specified benchmark ove
r |
| 2 // CT's webpage archives. It is intended to be run on swarming bots. |
| 3 package main |
| 4 |
| 5 import ( |
| 6 "bytes" |
| 7 "flag" |
| 8 "io/ioutil" |
| 9 "path/filepath" |
| 10 "sync" |
| 11 "time" |
| 12 |
| 13 "github.com/skia-dev/glog" |
| 14 |
| 15 "strings" |
| 16 |
| 17 "go.skia.org/infra/ct/go/util" |
| 18 "go.skia.org/infra/ct/go/worker_scripts/worker_common" |
| 19 "go.skia.org/infra/go/common" |
| 20 skutil "go.skia.org/infra/go/util" |
| 21 ) |
| 22 |
const (
	// WORKER_POOL_SIZE is the number of goroutines that run benchmarks in
	// parallel on this bot (see the worker-pool loop in main).
	WORKER_POOL_SIZE = 10
)
| 27 |
var (
	// Command-line flags for this worker. chromium_build, run_id and
	// benchmark_name are required and are validated at the top of main;
	// the rest have usable defaults.
	startRange         = flag.Int("start_range", 1, "The number this worker will run benchmarks from.")
	num                = flag.Int("num", 100, "The total number of benchmarks to run starting from the start_range.")
	pagesetType        = flag.String("pageset_type", util.PAGESET_TYPE_MOBILE_10k, "The type of pagesets to analyze. Eg: 10k, Mobile10k, All.")
	chromiumBuild      = flag.String("chromium_build", "", "The chromium build to use.")
	runID              = flag.String("run_id", "", "The unique run id (typically requester + timestamp).")
	benchmarkName      = flag.String("benchmark_name", "", "The telemetry benchmark to run on this worker.")
	benchmarkExtraArgs = flag.String("benchmark_extra_args", "", "The extra arguments that are passed to the specified benchmark.")
	browserExtraArgs   = flag.String("browser_extra_args", "", "The extra arguments that are passed to the browser while running the benchmark.")
	chromeCleanerTimer = flag.Duration("cleaner_timer", 15*time.Minute, "How often all chrome processes will be killed on this slave.")
)
| 39 |
| 40 func main() { |
| 41 defer common.LogPanic() |
| 42 worker_common.Init() |
| 43 defer util.TimeTrack(time.Now(), "Running Chromium Analysis") |
| 44 defer glog.Flush() |
| 45 |
| 46 // Validate required arguments. |
| 47 if *chromiumBuild == "" { |
| 48 glog.Error("Must specify --chromium_build") |
| 49 return |
| 50 } |
| 51 if *runID == "" { |
| 52 glog.Error("Must specify --run_id") |
| 53 return |
| 54 } |
| 55 if *benchmarkName == "" { |
| 56 glog.Error("Must specify --benchmark_name") |
| 57 return |
| 58 } |
| 59 |
| 60 // Reset the local chromium checkout. |
| 61 if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil { |
| 62 glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err) |
| 63 return |
| 64 } |
| 65 // Sync the local chromium checkout. |
| 66 if err := util.SyncDir(util.ChromiumSrcDir); err != nil { |
| 67 glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir
, err) |
| 68 return |
| 69 } |
| 70 |
| 71 // Instantiate GsUtil object. |
| 72 gs, err := util.NewGsUtil(nil) |
| 73 if err != nil { |
| 74 glog.Error(err) |
| 75 return |
| 76 } |
| 77 |
| 78 // Download the benchmark patch for this run from Google storage. |
| 79 benchmarkPatchName := *runID + ".benchmark.patch" |
| 80 tmpDir, err := ioutil.TempDir("", "patches") |
| 81 if err != nil { |
| 82 glog.Errorf("Could not create a temp dir: %s", err) |
| 83 return |
| 84 } |
| 85 defer skutil.RemoveAll(tmpDir) |
| 86 benchmarkPatchLocalPath := filepath.Join(tmpDir, benchmarkPatchName) |
| 87 remotePatchesDir := filepath.Join(util.ChromiumAnalysisRunsDir, *runID) |
| 88 benchmarkPatchRemotePath := filepath.Join(remotePatchesDir, benchmarkPat
chName) |
| 89 respBody, err := gs.GetRemoteFileContents(benchmarkPatchRemotePath) |
| 90 if err != nil { |
| 91 glog.Errorf("Could not fetch %s: %s", benchmarkPatchRemotePath,
err) |
| 92 return |
| 93 } |
| 94 defer skutil.Close(respBody) |
| 95 buf := new(bytes.Buffer) |
| 96 if _, err := buf.ReadFrom(respBody); err != nil { |
| 97 glog.Errorf("Could not read from %s: %s", benchmarkPatchRemotePa
th, err) |
| 98 return |
| 99 } |
| 100 if err := ioutil.WriteFile(benchmarkPatchLocalPath, buf.Bytes(), 0666);
err != nil { |
| 101 glog.Errorf("Unable to create file %s: %s", benchmarkPatchLocalP
ath, err) |
| 102 return |
| 103 } |
| 104 // Apply benchmark patch to the local chromium checkout. |
| 105 if buf.Len() > 10 { |
| 106 if err := util.ApplyPatch(benchmarkPatchLocalPath, util.Chromium
SrcDir); err != nil { |
| 107 glog.Errorf("Could not apply Telemetry's patch in %s: %s
", util.ChromiumSrcDir, err) |
| 108 return |
| 109 } |
| 110 } |
| 111 |
| 112 // Download the specified chromium build. |
| 113 if err := gs.DownloadChromiumBuild(*chromiumBuild); err != nil { |
| 114 glog.Error(err) |
| 115 return |
| 116 } |
| 117 //Delete the chromium build to save space when we are done. |
| 118 defer skutil.RemoveAll(filepath.Join(util.ChromiumBuildsDir, *chromiumBu
ild)) |
| 119 |
| 120 chromiumBinary := filepath.Join(util.ChromiumBuildsDir, *chromiumBuild,
util.BINARY_CHROME) |
| 121 |
| 122 // Download pagesets if they do not exist locally. |
| 123 pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType) |
| 124 if _, err := gs.DownloadSwarmingArtifacts(pathToPagesets, util.PAGESETS_
DIR_NAME, *pagesetType, *startRange, *num); err != nil { |
| 125 glog.Error(err) |
| 126 return |
| 127 } |
| 128 defer skutil.RemoveAll(pathToPagesets) |
| 129 |
| 130 // Download archives if they do not exist locally. |
| 131 pathToArchives := filepath.Join(util.WebArchivesDir, *pagesetType) |
| 132 if _, err := gs.DownloadSwarmingArtifacts(pathToArchives, util.WEB_ARCHI
VES_DIR_NAME, *pagesetType, *startRange, *num); err != nil { |
| 133 glog.Error(err) |
| 134 return |
| 135 } |
| 136 defer skutil.RemoveAll(pathToArchives) |
| 137 |
| 138 // Establish nopatch output paths. |
| 139 localOutputDir := filepath.Join(util.StorageDir, util.BenchmarkRunsDir,
*runID) |
| 140 skutil.RemoveAll(localOutputDir) |
| 141 skutil.MkdirAll(localOutputDir, 0700) |
| 142 defer skutil.RemoveAll(localOutputDir) |
| 143 remoteDir := filepath.Join(util.BenchmarkRunsDir, *runID) |
| 144 |
| 145 // Construct path to CT's python scripts. |
| 146 pathToPyFiles := util.GetPathToPyFiles(!*worker_common.Local) |
| 147 |
| 148 fileInfos, err := ioutil.ReadDir(pathToPagesets) |
| 149 if err != nil { |
| 150 glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPage
sets, err) |
| 151 return |
| 152 } |
| 153 |
| 154 glog.Infoln("===== Going to run the task with parallel chrome processes
=====") |
| 155 |
| 156 // Create channel that contains all pageset file names. This channel wil
l |
| 157 // be consumed by the worker pool. |
| 158 pagesetRequests := util.GetClosedChannelOfPagesets(fileInfos) |
| 159 |
| 160 var wg sync.WaitGroup |
| 161 // Use a RWMutex for the chromeProcessesCleaner goroutine to communicate
to |
| 162 // the workers (acting as "readers") when it wants to be the "writer" an
d |
| 163 // kill all zombie chrome processes. |
| 164 var mutex sync.RWMutex |
| 165 |
| 166 // Loop through workers in the worker pool. |
| 167 for i := 0; i < WORKER_POOL_SIZE; i++ { |
| 168 // Increment the WaitGroup counter. |
| 169 wg.Add(1) |
| 170 |
| 171 // Create and run a goroutine closure that runs the benchmark. |
| 172 go func() { |
| 173 // Decrement the WaitGroup counter when the goroutine co
mpletes. |
| 174 defer wg.Done() |
| 175 |
| 176 for pagesetName := range pagesetRequests { |
| 177 |
| 178 mutex.RLock() |
| 179 if err := util.RunBenchmark(pagesetName, pathToP
agesets, pathToPyFiles, localOutputDir, *chromiumBuild, chromiumBinary, *runID,
*browserExtraArgs, *benchmarkName, "Linux", *benchmarkExtraArgs, *pagesetType, -
1); err != nil { |
| 180 glog.Errorf("Error while running withpat
ch benchmark: %s", err) |
| 181 return |
| 182 } |
| 183 mutex.RUnlock() |
| 184 } |
| 185 }() |
| 186 } |
| 187 |
| 188 if !*worker_common.Local { |
| 189 // Start the cleaner. |
| 190 go util.ChromeProcessesCleaner(&mutex, *chromeCleanerTimer) |
| 191 } |
| 192 |
| 193 // Wait for all spawned goroutines to complete. |
| 194 wg.Wait() |
| 195 |
| 196 // If "--output-format=csv-pivot-table" was specified then merge all CSV
files and upload. |
| 197 if strings.Contains(*benchmarkExtraArgs, "--output-format=csv-pivot-tabl
e") { |
| 198 if err := util.MergeUploadCSVFilesOnWorkers(localOutputDir, path
ToPyFiles, *runID, remoteDir, gs, *startRange); err != nil { |
| 199 glog.Errorf("Error while processing withpatch CSV files:
%s", err) |
| 200 return |
| 201 } |
| 202 } |
| 203 } |
OLD | NEW |