| OLD | NEW |
| 1 #!/bin/bash | 1 #!/bin/bash |
| 2 | 2 |
| 3 # Self-tests for gm, based on tools/tests/run.sh | 3 # Self-tests for gm, based on tools/tests/run.sh |
| 4 # | 4 # |
| 5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit, | 5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit, |
| 6 # so make sure that they still pass when you make changes to gm! | 6 # so make sure that they still pass when you make changes to gm! |
| 7 # | 7 # |
| 8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh | 8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh |
| 9 # | 9 # |
| 10 # TODO: because this is written as a shell script (instead of, say, Python) | 10 # TODO: because this is written as a shell script (instead of, say, Python) |
| (...skipping 204 matching lines...) |
| 215 gm_test "--hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/images/empty-dir"
"$GM_OUTPUTS/nonverbose" | 215 gm_test "--hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/images/empty-dir"
"$GM_OUTPUTS/nonverbose" |
| 216 | 216 |
| 217 # Add pdf to the list of configs. | 217 # Add pdf to the list of configs. |
| 218 gm_test "--verbose --hierarchy --match selftest1 $CONFIGS pdf -r $GM_INPUTS/json
/identical-bytes.json" "$GM_OUTPUTS/add-config-pdf" | 218 gm_test "--verbose --hierarchy --match selftest1 $CONFIGS pdf -r $GM_INPUTS/json
/identical-bytes.json" "$GM_OUTPUTS/add-config-pdf" |
| 219 | 219 |
| 220 # Test what happens if run without -r (no expected-results.json to compare | 220 # Test what happens if run without -r (no expected-results.json to compare |
| 221 # against). | 221 # against). |
| 222 gm_test "--verbose --hierarchy --match selftest1 $CONFIGS" "$GM_OUTPUTS/no-readp
ath" | 222 gm_test "--verbose --hierarchy --match selftest1 $CONFIGS" "$GM_OUTPUTS/no-readp
ath" |
| 223 | 223 |
| 224 # Test what happens if a subset of the renderModes fail (e.g. pipe) | 224 # Test what happens if a subset of the renderModes fail (e.g. pipe) |
| 225 gm_test "--pipe --simulatePipePlaybackFailure --verbose --hierarchy --match self
test1 $CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playb
ack-failure" | 225 gm_test "--simulatePipePlaybackFailure --verbose --hierarchy --match selftest1 $
CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playback-fai
lure" |
| 226 | 226 |
| 227 # Confirm that IntentionallySkipped tests are recorded as such. | 227 # Confirm that IntentionallySkipped tests are recorded as such. |
| 228 gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUT
S/intentionally-skipped-tests" | 228 gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUT
S/intentionally-skipped-tests" |
| 229 | 229 |
| 230 # Ignore some error types (including ExpectationsMismatch) | 230 # Ignore some error types (including ExpectationsMismatch) |
| 231 gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierar
chy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OU
TPUTS/ignore-expectations-mismatch" | 231 gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierar
chy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OU
TPUTS/ignore-expectations-mismatch" |
| 232 | 232 |
| 233 # Test non-hierarchical mode. | 233 # Test non-hierarchical mode. |
| 234 gm_test "--verbose --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixel
s-no-hierarchy.json" "$GM_OUTPUTS/no-hierarchy" | 234 gm_test "--verbose --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixel
s-no-hierarchy.json" "$GM_OUTPUTS/no-hierarchy" |
| 235 | 235 |
| (...skipping 10 matching lines...) |
| 246 for CASE in $FAILING_CASES; do | 246 for CASE in $FAILING_CASES; do |
| 247 assert_fails "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPE
CTED_SUBDIR/json-summary.txt" | 247 assert_fails "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPE
CTED_SUBDIR/json-summary.txt" |
| 248 done | 248 done |
| 249 | 249 |
| 250 if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then | 250 if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then |
| 251 echo "All tests passed." | 251 echo "All tests passed." |
| 252 exit 0 | 252 exit 0 |
| 253 else | 253 else |
| 254 exit 1 | 254 exit 1 |
| 255 fi | 255 fi |
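
The diff relies on two helpers, gm_test and assert_fails, plus the ENCOUNTERED_ANY_ERRORS flag, all of which are defined in the lines skipped from this view. As a rough orientation only, here is a minimal bash sketch of how an assert_fails helper of this shape could work; it is an illustrative assumption, not the actual definition from gm/tests/run.sh.

#!/bin/bash
# Sketch only: the real helper lives in the portion of run.sh elided above.
ENCOUNTERED_ANY_ERRORS=0

# assert_fails COMMAND: run COMMAND and expect a nonzero exit code.
# If the command unexpectedly succeeds, mark the whole self-test run as failed.
assert_fails() {
  echo "assert_fails: $1"
  $1
  if [ $? -eq 0 ]; then
    echo "expected failure, but command succeeded: $1"
    ENCOUNTERED_ANY_ERRORS=1
  fi
}

# Example usage, mirroring line 247 of the diff (path is a placeholder):
# assert_fails "python gm/display_json_results.py some-failing-case/json-summary.txt"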