Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(25)

Side by Side Diff: dm/DM.cpp

Issue 847273005: More natural way to serialize GPU tasks and tests. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: always stack-scope GrContextFactories Created 5 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « bench/nanobench.cpp ('k') | dm/DMGpuSupport.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #include "CrashHandler.h" 1 #include "CrashHandler.h"
2 #include "DMJsonWriter.h" 2 #include "DMJsonWriter.h"
3 #include "DMSrcSink.h" 3 #include "DMSrcSink.h"
4 #include "OverwriteLine.h" 4 #include "OverwriteLine.h"
5 #include "ProcStats.h" 5 #include "ProcStats.h"
6 #include "SkBBHFactory.h" 6 #include "SkBBHFactory.h"
7 #include "SkCommonFlags.h" 7 #include "SkCommonFlags.h"
8 #include "SkForceLinking.h" 8 #include "SkForceLinking.h"
9 #include "SkGraphics.h" 9 #include "SkGraphics.h"
10 #include "SkMD5.h" 10 #include "SkMD5.h"
(...skipping 335 matching lines...) Expand 10 before | Expand all | Expand 10 after
346 static void run_enclave(SkTArray<Task>* tasks) { 346 static void run_enclave(SkTArray<Task>* tasks) {
347 for (int i = 0; i < tasks->count(); i++) { 347 for (int i = 0; i < tasks->count(); i++) {
348 Task::Run(tasks->begin() + i); 348 Task::Run(tasks->begin() + i);
349 } 349 }
350 } 350 }
351 351
352 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~*/ 352 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~*/
353 353
354 // Unit tests don't fit so well into the Src/Sink model, so we give them special treatment. 354 // Unit tests don't fit so well into the Src/Sink model, so we give them special treatment.
355 355
356 static SkTDArray<skiatest::Test> gCPUTests, gGPUTests; 356 static SkTDArray<skiatest::Test> gThreadedTests, gGPUTests;
357 357
358 static void gather_tests() { 358 static void gather_tests() {
359 if (!FLAGS_src.contains("tests")) { 359 if (!FLAGS_src.contains("tests")) {
360 return; 360 return;
361 } 361 }
362 for (const skiatest::TestRegistry* r = skiatest::TestRegistry::Head(); r; 362 for (const skiatest::TestRegistry* r = skiatest::TestRegistry::Head(); r;
363 r = r->next()) { 363 r = r->next()) {
364 // Despite its name, factory() is returning a reference to 364 // Despite its name, factory() is returning a reference to
365 // link-time static const POD data. 365 // link-time static const POD data.
366 const skiatest::Test& test = r->factory(); 366 const skiatest::Test& test = r->factory();
367 if (SkCommandLineFlags::ShouldSkip(FLAGS_match, test.name)) { 367 if (SkCommandLineFlags::ShouldSkip(FLAGS_match, test.name)) {
368 continue; 368 continue;
369 } 369 }
370 if (test.needsGpu && gpu_supported()) { 370 if (test.needsGpu && gpu_supported()) {
371 gGPUTests.push(test); 371 (FLAGS_gpu_threading ? gThreadedTests : gGPUTests).push(test);
372 } else if (!test.needsGpu && FLAGS_cpu) { 372 } else if (!test.needsGpu && FLAGS_cpu) {
373 gCPUTests.push(test); 373 gThreadedTests.push(test);
374 } 374 }
375 } 375 }
376 } 376 }
377 377
378 static void run_test(skiatest::Test* test) { 378 static void run_test(skiatest::Test* test) {
379 struct : public skiatest::Reporter { 379 struct : public skiatest::Reporter {
380 void reportFailed(const skiatest::Failure& failure) SK_OVERRIDE { 380 void reportFailed(const skiatest::Failure& failure) SK_OVERRIDE {
381 fail(failure.toString()); 381 fail(failure.toString());
382 JsonWriter::AddTestFailure(failure); 382 JsonWriter::AddTestFailure(failure);
383 } 383 }
384 bool allowExtendedTest() const SK_OVERRIDE { 384 bool allowExtendedTest() const SK_OVERRIDE {
385 return FLAGS_pathOpsExtended; 385 return FLAGS_pathOpsExtended;
386 } 386 }
387 bool verbose() const SK_OVERRIDE { return FLAGS_veryVerbose; } 387 bool verbose() const SK_OVERRIDE { return FLAGS_veryVerbose; }
388 } reporter; 388 } reporter;
389 WallTimer timer; 389 WallTimer timer;
390 timer.start(); 390 timer.start();
391 if (!FLAGS_dryRun) { 391 if (!FLAGS_dryRun) {
392 test->proc(&reporter, GetThreadLocalGrContextFactory()); 392 GrContextFactory factory;
393 test->proc(&reporter, &factory);
393 } 394 }
394 timer.end(); 395 timer.end();
395 done(timer.fWall, "unit", "test", test->name); 396 done(timer.fWall, "unit", "test", test->name);
396 } 397 }
397 398
398 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~*/ 399 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~*/
399 400
401 // If we're isolating all GPU-bound work to one thread (the default), this function runs all that.
402 static void run_enclave_and_gpu_tests(SkTArray<Task>* tasks) {
403 run_enclave(tasks);
404 for (int i = 0; i < gGPUTests.count(); i++) {
405 run_test(&gGPUTests[i]);
406 }
407 }
408
400 int dm_main(); 409 int dm_main();
401 int dm_main() { 410 int dm_main() {
402 SetupCrashHandler(); 411 SetupCrashHandler();
403 SkAutoGraphics ag; 412 SkAutoGraphics ag;
404 SkTaskGroup::Enabler enabled(FLAGS_threads); 413 SkTaskGroup::Enabler enabled(FLAGS_threads);
405 414
406 gather_srcs(); 415 gather_srcs();
407 gather_sinks(); 416 gather_sinks();
408 gather_tests(); 417 gather_tests();
409 418
410 gPending = gSrcs.count() * gSinks.count() + gCPUTests.count() + gGPUTests.count(); 419 gPending = gSrcs.count() * gSinks.count() + gThreadedTests.count() + gGPUTests.count();
411 SkDebugf("%d srcs * %d sinks + %d tests == %d tasks\n", 420 SkDebugf("%d srcs * %d sinks + %d tests == %d tasks\n",
412 gSrcs.count(), gSinks.count(), gCPUTests.count() + gGPUTests.count(), gPending); 421 gSrcs.count(), gSinks.count(), gThreadedTests.count() + gGPUTests.count(), gPending);
413 422
414 // We try to exploit as much parallelism as is safe. Most Src/Sink pairs run on any thread, 423 // We try to exploit as much parallelism as is safe. Most Src/Sink pairs run on any thread,
415 // but Sinks that identify as part of a particular enclave run serially on a single thread. 424 // but Sinks that identify as part of a particular enclave run serially on a single thread.
416 // CPU tests run on any thread. GPU tests depend on --gpu_threading. 425 // CPU tests run on any thread. GPU tests depend on --gpu_threading.
417 SkTArray<Task> enclaves[kNumEnclaves]; 426 SkTArray<Task> enclaves[kNumEnclaves];
418 for (int j = 0; j < gSinks.count(); j++) { 427 for (int j = 0; j < gSinks.count(); j++) {
419 SkTArray<Task>& tasks = enclaves[gSinks[j]->enclave()]; 428 SkTArray<Task>& tasks = enclaves[gSinks[j]->enclave()];
420 for (int i = 0; i < gSrcs.count(); i++) { 429 for (int i = 0; i < gSrcs.count(); i++) {
421 tasks.push_back(Task(gSrcs[i], gSinks[j])); 430 tasks.push_back(Task(gSrcs[i], gSinks[j]));
422 } 431 }
423 } 432 }
424 433
425 SK_COMPILE_ASSERT(kAnyThread_Enclave == 0, AnyThreadZero);
426 SkTaskGroup tg; 434 SkTaskGroup tg;
427 tg.batch( Task::Run, enclaves[0].begin(), enclaves[0].count()); 435 tg.batch(run_test, gThreadedTests.begin(), gThreadedTests.count());
428 tg.batch(run_enclave, enclaves+1, kNumEnclaves-1); 436 for (int i = 0; i < kNumEnclaves; i++) {
429 tg.batch( run_test, gCPUTests.begin(), gCPUTests.count()); 437 switch(i) {
430 if (FLAGS_gpu_threading) { 438 case kAnyThread_Enclave:
431 tg.batch(run_test, gGPUTests.begin(), gGPUTests.count()); 439 tg.batch(Task::Run, enclaves[i].begin(), enclaves[i].count());
432 #if !defined(SK_BUILD_FOR_WIN32) 440 break;
433 } else { 441 case kGPU_Enclave:
434 for (int i = 0; i < gGPUTests.count(); i++) { 442 tg.add(run_enclave_and_gpu_tests, &enclaves[i]);
435 run_test(&gGPUTests[i]); 443 break;
436 } 444 default:
437 #endif 445 tg.add(run_enclave, &enclaves[i]);
446 break;
438 } 447 }
448 }
439 tg.wait(); 449 tg.wait();
440 // At this point we're back in single-threaded land. 450 // At this point we're back in single-threaded land.
441 451
442 // This is not ideal for parallelism, but Windows seems crash-prone if we run
443 // these GPU tests in parallel with any GPU Src/Sink work. Everyone else seems fine.
444 #if defined(SK_BUILD_FOR_WIN32)
445 for (int i = 0; i < gGPUTests.count(); i++) {
446 run_test(&gGPUTests[i]);
447 }
448 #endif
449
450 SkDebugf("\n"); 452 SkDebugf("\n");
451 JsonWriter::DumpJson(); 453 JsonWriter::DumpJson();
452 454
453 if (gFailures.count() > 0) { 455 if (gFailures.count() > 0) {
454 SkDebugf("Failures:\n"); 456 SkDebugf("Failures:\n");
455 for (int i = 0; i < gFailures.count(); i++) { 457 for (int i = 0; i < gFailures.count(); i++) {
456 SkDebugf("\t%s\n", gFailures[i].c_str()); 458 SkDebugf("\t%s\n", gFailures[i].c_str());
457 } 459 }
458 SkDebugf("%d failures\n", gFailures.count()); 460 SkDebugf("%d failures\n", gFailures.count());
459 return 1; 461 return 1;
460 } 462 }
461 if (gPending > 0) { 463 if (gPending > 0) {
462 SkDebugf("Hrm, we didn't seem to run everything we intended to! Please file a bug.\n"); 464 SkDebugf("Hrm, we didn't seem to run everything we intended to! Please file a bug.\n");
463 return 1; 465 return 1;
464 } 466 }
465 return 0; 467 return 0;
466 } 468 }
467 469
468 #if !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_NACL) 470 #if !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_NACL)
469 int main(int argc, char** argv) { 471 int main(int argc, char** argv) {
470 SkCommandLineFlags::Parse(argc, argv); 472 SkCommandLineFlags::Parse(argc, argv);
471 return dm_main(); 473 return dm_main();
472 } 474 }
473 #endif 475 #endif
OLDNEW
« no previous file with comments | « bench/nanobench.cpp ('k') | dm/DMGpuSupport.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698