Index: gm/gmmain.cpp
===================================================================
--- gm/gmmain.cpp	(revision 8344)
+++ gm/gmmain.cpp	(working copy)
@@ -86,6 +86,7 @@
 using namespace skiagm;
+// EPOGER: we don't need FailRec anymore, just an SkString for each entry?
 struct FailRec {
     SkString fName;
     bool fIsPixelError;
@@ -190,6 +191,7 @@
         fUseFileHierarchy = false;
         fIgnorableErrorCombination.add(kMissingExpectations_ErrorType);
         fMismatchPath = NULL;
+        fTestsRun = 0;
     }
     SkString make_name(const char shortName[], const char configName[]) {
@@ -243,34 +245,75 @@
     }
     /**
-     * Records the errors encountered in fFailedTests, except for any error
-     * types we want to ignore.
+     * Records the errors encountered in fFailedTests.
+     *
+     * We even record errors that we regard as "ignorable"; we can filter them
+     * out later.
      */
+    // EPOGER: rename to RecordResult(), because it increments fTestsRun?
     void RecordError(const ErrorCombination& errorCombination, const SkString& name,
                      const char renderModeDescriptor []) {
-        // The common case: no error means nothing to record.
+        fTestsRun++;
         if (errorCombination.isEmpty()) {
             return;
         }
+        SkString fullName = make_name(name.c_str(), renderModeDescriptor);
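+        // A single test may fail with multiple error types; record its name under each one.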
+        for (int typeInt = 0; typeInt <= kLast_ErrorType; typeInt++) {
+            ErrorType type = static_cast<ErrorType>(typeInt);
+            if (errorCombination.includes(type)) {
+                fFailedTests[type].push_back(fullName);
+            }
+        }
+    }
-        // If only certain error type(s) were reported, we know we can ignore them.
-        if (errorCombination.minus(fIgnorableErrorCombination).isEmpty()) {
-            return;
+    /**
+     * Return the number of significant (non-ignorable) errors we have
+     * encountered so far.
+     */
+    int NumSignificantErrors() {
+        int significantErrors = 0;
+        for (int typeInt = 0; typeInt <= kLast_ErrorType; typeInt++) {
+            ErrorType type = static_cast<ErrorType>(typeInt);
+            if (!fIgnorableErrorCombination.includes(type)) {
+                significantErrors += fFailedTests[type].count();
+            }
         }
-
-        FailRec& rec = fFailedTests.push_back(make_name(name.c_str(), renderModeDescriptor));
-        rec.fIsPixelError = errorCombination.includes(kImageMismatch_ErrorType);
+        return significantErrors;
     }
-    // List contents of fFailedTests via SkDebug.
+    /**
+     * List contents of fFailedTests to stdout.
+     */
     void ListErrors() {
-        for (int i = 0; i < fFailedTests.count(); ++i) {
-            if (fFailedTests[i].fIsPixelError) {
-                gm_fprintf(stderr, "\t\t%s pixel_error\n", fFailedTests[i].fName.c_str());
+        // First, print a single summary line.
+        SkString summary;
+        summary.appendf("Ran %d tests:", fTestsRun);
+        for (int typeInt = 0; typeInt <= kLast_ErrorType; typeInt++) {
+            ErrorType type = static_cast<ErrorType>(typeInt);
+            summary.appendf(" %s=%d", getErrorTypeName(type), fFailedTests[type].count());
+        }
+        gm_fprintf(stdout, "%s\n", summary.c_str());
+
+        // Now, for each failure type, list the tests that failed that way.
+        for (int typeInt = 0; typeInt <= kLast_ErrorType; typeInt++) {
+            SkString line;
+            ErrorType type = static_cast<ErrorType>(typeInt);
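+            // Prefix each line to show whether this error type is ignorable ("[ ]")
+            // or significant ("[*]"); see the legend printed below.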
+            if (fIgnorableErrorCombination.includes(type)) {
+                line.append("[ ] ");
             } else {
-                gm_fprintf(stderr, "\t\t%s\n", fFailedTests[i].fName.c_str());
+                line.append("[*] ");
             }
+
+            SkTArray<FailRec>* failedTestsOfThisType = &fFailedTests[type];
+            int count = failedTestsOfThisType->count();
+            line.appendf("%d %s:", count, getErrorTypeName(type));
+            for (int i = 0; i < count; ++i) {
+                line.append(" ");
+                line.append((*failedTestsOfThisType)[i].fName);
+            }
+            gm_fprintf(stdout, "%s\n", line.c_str());
         }
+        gm_fprintf(stdout, "(results marked with [*] will cause nonzero return value)\n");
     }
 static bool write_document(const SkString& path,
@@ -510,6 +553,7 @@
     } else {
         gm_fprintf(stderr, "FAILED to write %s\n", path.c_str());
         ErrorCombination errors(kWritingReferenceImage_ErrorType);
+        // EPOGER: don't call RecordError() here? Instead, the caller should call
+        // RecordError() (or maybe RecordResult()) exactly ONCE, to properly record
+        // one attempt, one partial failure.
         RecordError(errors, name, renderModeDescriptor);
         return errors;
     }
@@ -761,6 +805,7 @@
      * @param actualBitmap actual bitmap generated by this run
      * @param referenceBitmap bitmap we expected to be generated
      */
+    // EPOGER: since this is separate from compare_test_results_to_stored_expectations(),
+    // it should give me the ability to signal different error types for renderModeDiscrepancy!
     ErrorCombination compare_test_results_to_reference_bitmap(
         GM* gm, const ConfigData& gRec, const char renderModeDescriptor [],
         SkBitmap& actualBitmap, const SkBitmap* referenceBitmap) {
@@ -880,6 +925,7 @@
         // ('image-surface gm test is failing in "deferred" mode,
         // and gm is not reporting the failure')
         if (errors.isEmpty()) {
+            // EPOGER: for cases like this, return some new ErrorType (TestSkipped?) so we see that they happened
            return kEmpty_ErrorCombination;
         }
         return compare_test_results_to_reference_bitmap(
@@ -953,8 +999,9 @@
     const char* fMismatchPath;
-    // information about all failed tests we have encountered so far
-    SkTArray<FailRec> fFailedTests;
+    // collection of tests that have failed with each ErrorType
+    SkTArray<FailRec> fFailedTests[kLast_ErrorType+1];
+    int fTestsRun;
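+    // number of tests run so far; RecordError() increments this even when a test passes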
     // Where to read expectations (expected image checksums, etc.) from.
     // If unset, we don't do comparisons.
@@ -1266,6 +1313,7 @@
     uint32_t gmFlags = gm->getFlags();
     // run the picture centric GM steps
+    // EPOGER: record a different error code if we skipped this mode?
     if (!(gmFlags & GM::kSkipPicture_Flag)) {
         ErrorCombination pictErrors;
@@ -1359,6 +1407,45 @@
     return errorsForAllModes;
 }
+/**
+ * Return a list of all entries in an array of strings as a single string
+ * of this form:
+ *    "item1", "item2", "item3"
+ */
+SkString list_all(const SkTArray<SkString>& stringArray);
+SkString list_all(const SkTArray<SkString>& stringArray) {
+    SkString total;
+    for (int i = 0; i < stringArray.count(); i++) {
+        if (i > 0) {
+            total.append(", ");
+        }
+        total.append("\"");
+        total.append(stringArray[i]);
+        total.append("\"");
+    }
+    return total;
+}
+
+/**
+ * Return a list of configuration names, as a single string of this form:
+ *    "item1", "item2", "item3"
+ *
+ * @param configs configurations, as a list of indices into gRec
+ */
+SkString list_all_config_names(const SkTDArray<size_t>& configs);
+SkString list_all_config_names(const SkTDArray<size_t>& configs) {
+    SkString total;
+    for (int i = 0; i < configs.count(); i++) {
+        if (i > 0) {
+            total.append(", ");
+        }
+        total.append("\"");
+        total.append(gRec[configs[i]].fName);
+        total.append("\"");
+    }
+    return total;
+}
+
 int tool_main(int argc, char** argv);
 int tool_main(int argc, char** argv) {
@@ -1540,12 +1627,7 @@
         moduloRemainder = -1;
     }
-    // Accumulate success of all tests.
-    int testsRun = 0;
-    int testsPassed = 0;
-    int testsFailed = 0;
-    int testsMissingReferenceImages = 0;
-
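+    // Number of GM objects we have run; used to compute the expected total number of tests.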
+    int gmsRun = 0;
     int gmIndex = -1;
     SkString moduloStr;
@@ -1585,43 +1667,66 @@
             continue;
         }
+        gmsRun++;
         SkISize size = gm->getISize();
         gm_fprintf(stdout, "%sdrawing... %s [%d %d]\n", moduloStr.c_str(), shortName,
                    size.width(), size.height());
-        ErrorCombination testErrors;
-        testErrors.add(run_multiple_configs(gmmain, gm, configs, grFactory));
+        run_multiple_configs(gmmain, gm, configs, grFactory);
         SkBitmap comparisonBitmap;
         const ConfigData compareConfig =
             { SkBitmap::kARGB_8888_Config, kRaster_Backend, kDontCare_GLContextType, 0, kRW_ConfigFlag, "comparison", false };
-        testErrors.add(gmmain.generate_image(
-            gm, compareConfig, NULL, NULL, &comparisonBitmap, false));
+        gmmain.generate_image(gm, compareConfig, NULL, NULL, &comparisonBitmap, false);
         // TODO(epoger): only run this if gmmain.generate_image() succeeded?
         // Otherwise, what are we comparing against?
-        testErrors.add(run_multiple_modes(gmmain, gm, compareConfig, comparisonBitmap,
-                                          tileGridReplayScales));
+        run_multiple_modes(gmmain, gm, compareConfig, comparisonBitmap, tileGridReplayScales);
-        // Update overall results.
-        // We only tabulate the particular error types that we currently
-        // care about (e.g., missing reference images). Later on, if we
-        // want to also tabulate other error types, we can do so.
-        testsRun++;
-        if (!gmmain.fExpectationsSource.get() ||
-            (testErrors.includes(kMissingExpectations_ErrorType))) {
-            testsMissingReferenceImages++;
+        SkDELETE(gm);
+    }
+
+    // Assemble the list of modes we ran each test through.
+    //
+    // TODO(epoger): Instead of assembling this list of modes here,
+    // can/should we assemble it as we actually run the tests in
+    // run_multiple_modes()?
+    SkTArray<SkString> modes;
+    if (FLAGS_replay) {
+        modes.push_back(SkString("replay"));
+    }
+    if (FLAGS_serialize) {
+        modes.push_back(SkString("serialize"));
+    }
+    if (FLAGS_rtree) {
+        modes.push_back(SkString("rtree"));
+    }
+    if (FLAGS_tileGrid) {
+        for (int i = 0; i < tileGridReplayScales.count(); i++) {
+            SkString modeName("tileGrid");
+            modeName.appendf("%f", tileGridReplayScales[i]);
+            modes.push_back(modeName);
         }
-        if (testErrors.minus(gmmain.fIgnorableErrorCombination).isEmpty()) {
-            testsPassed++;
-        } else {
-            testsFailed++;
+    }
+    if (FLAGS_pipe) {
+        for (size_t i = 0; i < SK_ARRAY_COUNT(gPipeWritingFlagCombos); i++) {
+            SkString modeName("pipe");
+            modeName.append(gPipeWritingFlagCombos[i].name);
+            modes.push_back(modeName);
         }
+    }
+    if (FLAGS_tiledPipe) {
+        for (size_t i = 0; i < SK_ARRAY_COUNT(gPipeWritingFlagCombos); i++) {
+            SkString modeName("tiledPipe");
+            modeName.append(gPipeWritingFlagCombos[i].name);
+            modes.push_back(modeName);
+        }
+    }
-        SkDELETE(gm);
-    }
-    gm_fprintf(stdout, "Ran %d tests: %d passed, %d failed, %d missing reference images\n",
-               testsRun, testsPassed, testsFailed, testsMissingReferenceImages);
+    // Output summary to stdout.
+    gm_fprintf(stdout, "Ran %d GMs, each with %d configs [%s] and %d modes [%s], "
+               "so there should be a total of %d tests\n",
+               gmsRun, configs.count(), list_all_config_names(configs).c_str(),
+               modes.count(), list_all(modes).c_str(),
+               gmsRun * (configs.count() + modes.count()));
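+    // Note: GMs with skip flags (e.g. GM::kSkipPicture_Flag) will run fewer tests than
+    // this estimate predicts.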
+    // EPOGER: what if the expected total number of tests is wrong?
     gmmain.ListErrors();
     if (FLAGS_writeJsonSummaryPath.count() == 1) {
@@ -1661,7 +1766,7 @@
 #endif
     SkGraphics::Term();
-    return (0 == testsFailed) ? 0 : -1;
+    return (0 == gmmain.NumSignificantErrors()) ? 0 : -1;
 }
 void GMMain::installFilter(SkCanvas* canvas) {