| OLD | NEW |
| 1 #!/bin/bash | 1 #!/bin/bash |
| 2 | 2 |
| 3 # Self-tests for gm, based on tools/tests/run.sh | 3 # Self-tests for gm, based on tools/tests/run.sh |
| 4 # | 4 # |
| 5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit, | 5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit, |
| 6 # so make sure that they still pass when you make changes to gm! | 6 # so make sure that they still pass when you make changes to gm! |
| 7 # | 7 # |
| 8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh | 8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh |
| 9 # | 9 # |
| 10 # TODO: because this is written as a shell script (instead of, say, Python) | 10 # TODO: because this is written as a shell script (instead of, say, Python) |
| (...skipping 207 matching lines...) |
| 218 gm_test "--hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/images/empty-dir" "$GM_OUTPUTS/nonverbose" | 218 gm_test "--hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/images/empty-dir" "$GM_OUTPUTS/nonverbose" |
| 219 | 219 |
| 220 # Add pdf to the list of configs. | 220 # Add pdf to the list of configs. |
| 221 gm_test "--verbose --hierarchy --match selftest1 $CONFIGS pdf -r $GM_INPUTS/json/identical-bytes.json" "$GM_OUTPUTS/add-config-pdf" | 221 gm_test "--verbose --hierarchy --match selftest1 $CONFIGS pdf -r $GM_INPUTS/json/identical-bytes.json" "$GM_OUTPUTS/add-config-pdf" |
| 222 | 222 |
| 223 # Test what happens if run without -r (no expected-results.json to compare | 223 # Test what happens if run without -r (no expected-results.json to compare |
| 224 # against). | 224 # against). |
| 225 gm_test "--verbose --hierarchy --match selftest1 $CONFIGS" "$GM_OUTPUTS/no-readpath" | 225 gm_test "--verbose --hierarchy --match selftest1 $CONFIGS" "$GM_OUTPUTS/no-readpath" |
| 226 | 226 |
| 227 # Test what happens if a subset of the renderModes fail (e.g. pipe) | 227 # Test what happens if a subset of the renderModes fail (e.g. pipe) |
| 228 gm_test "--simulatePipePlaybackFailure --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playback-failure" | 228 gm_test "--pipe --simulatePipePlaybackFailure --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playback-failure" |
| 229 | 229 |
| 230 # Confirm that IntentionallySkipped tests are recorded as such. | 230 # Confirm that IntentionallySkipped tests are recorded as such. |
| 231 gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUTS/intentionally-skipped-tests" | 231 gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUTS/intentionally-skipped-tests" |
| 232 | 232 |
| 233 # Ignore some error types (including ExpectationsMismatch) | 233 # Ignore some error types (including ExpectationsMismatch) |
| 234 gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OUTPUTS/ignore-expectations-mismatch" | 234 gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OUTPUTS/ignore-expectations-mismatch" |
| 235 | 235 |
| 236 # Test non-hierarchical mode. | 236 # Test non-hierarchical mode. |
| 237 gm_test "--verbose --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels-no-hierarchy.json" "$GM_OUTPUTS/no-hierarchy" | 237 gm_test "--verbose --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels-no-hierarchy.json" "$GM_OUTPUTS/no-hierarchy" |
| 238 | 238 |
| (...skipping 10 matching lines...) |
| 249 for CASE in $FAILING_CASES; do | 249 for CASE in $FAILING_CASES; do |
| 250 assert_fails "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" | 250 assert_fails "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" |
| 251 done | 251 done |
| 252 | 252 |
| 253 if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then | 253 if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then |
| 254 echo "All tests passed." | 254 echo "All tests passed." |
| 255 exit 0 | 255 exit 0 |
| 256 else | 256 else |
| 257 exit 1 | 257 exit 1 |
| 258 fi | 258 fi |
| OLD | NEW |
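
A minimal sketch, not part of the CL itself, of how these self-tests are typically exercised locally. It assumes the script shown above lives at gm/tests/run.sh (its header references tools/tests/run.sh and gm/tests/rebaseline.sh); the exact invocation paths are assumptions:

  #!/bin/bash
  # Run the gm self-tests; per the script above, the run exits 0 only when
  # ENCOUNTERED_ANY_ERRORS stays 0, i.e. every case matched its checked-in
  # expectations under gm/tests/outputs.
  gm/tests/run.sh

  # If gm's behavior changed intentionally (for example, now that --pipe is
  # passed alongside --simulatePipePlaybackFailure on line 228, presumably so
  # the pipe render mode is actually exercised), regenerate the baselines as
  # the header comment suggests, then re-run the self-tests:
  gm/tests/rebaseline.sh
  gm/tests/run.sh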