OLD | NEW |
---|---|
1 #!/bin/bash | 1 #!/bin/bash |
2 | 2 |
3 # Self-tests for gm, based on tools/tests/run.sh | 3 # Self-tests for gm, based on tools/tests/run.sh |
4 # | 4 # |
5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit, | 5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit, |
6 # so make sure that they still pass when you make changes to gm! | 6 # so make sure that they still pass when you make changes to gm! |
7 # | 7 # |
8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh | 8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh |
9 # | 9 # |
10 # TODO: because this is written as a shell script (instead of, say, Python) | 10 # TODO: because this is written as a shell script (instead of, say, Python) |
(...skipping 26 matching lines...) | |
37 echo "compare_directories requires exactly 2 parameters, got $#" | 37 echo "compare_directories requires exactly 2 parameters, got $#" |
38 exit 1 | 38 exit 1 |
39 fi | 39 fi |
40 diff -r --exclude=.* $1 $2 | 40 diff -r --exclude=.* $1 $2 |
41 if [ $? != 0 ]; then | 41 if [ $? != 0 ]; then |
42 echo "failed in: compare_directories $1 $2" | 42 echo "failed in: compare_directories $1 $2" |
43 ENCOUNTERED_ANY_ERRORS=1 | 43 ENCOUNTERED_ANY_ERRORS=1 |
44 fi | 44 fi |
45 } | 45 } |
46 | 46 |
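A minimal usage sketch of compare_directories (the "some-test" directory name is hypothetical; in the tests below, $2/$OUTPUT_EXPECTED_SUBDIR and $2/$OUTPUT_ACTUAL_SUBDIR play these two roles):

    # Recursively diff baselines against fresh output; dotfile entries
    # (e.g. .svn) are excluded. A mismatch sets ENCOUNTERED_ANY_ERRORS=1
    # but does not abort the rest of the run.
    compare_directories "$GM_OUTPUTS/some-test/$OUTPUT_EXPECTED_SUBDIR" \
                        "$GM_OUTPUTS/some-test/$OUTPUT_ACTUAL_SUBDIR"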
47 # Run a command, and validate that it succeeds (returns 0). | |
48 function assert_passes { | |
49 COMMAND="$1" | |
50 OUTPUT=$($COMMAND 2>&1) | |
51 if [ $? != 0 ]; then | |
52 echo "This command was supposed to pass, but failed: [$COMMAND]" | |
53 echo "$OUTPUT" | |
54 ENCOUNTERED_ANY_ERRORS=1 | |
55 fi | |
56 } | |
57 | |
58 # Run a command, and validate that it fails (returns nonzero). | |
59 function assert_fails { | |
60 COMMAND="$1" | |
61 OUTPUT=$($COMMAND 2>&1) | |
62 if [ $? == 0 ]; then | |
63 echo "This command was supposed to fail, but passed: [$COMMAND]" | |
64 echo "$OUTPUT" | |
65 ENCOUNTERED_ANY_ERRORS=1 | |
66 fi | |
67 } | |
68 | |
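A minimal sketch of how the two assert helpers behave (the commands here are stand-ins; because $COMMAND is expanded unquoted and word-split, the helpers suit simple commands without embedded quoting):

    assert_passes "true"   # exits 0: silent, no error recorded
    assert_fails  "false"  # exits nonzero: the expected failure, also silent
    assert_passes "false"  # prints a diagnostic and sets ENCOUNTERED_ANY_ERRORS=1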
47 # Run gm... | 69 # Run gm... |
48 # - with the arguments in $1 | 70 # - with the arguments in $1 |
49 # - writing stdout into $2/$OUTPUT_ACTUAL_SUBDIR/stdout | 71 # - writing stdout into $2/$OUTPUT_ACTUAL_SUBDIR/stdout |
50 # - writing json summary into $2/$OUTPUT_ACTUAL_SUBDIR/json-summary.txt | 72 # - writing json summary into $2/$OUTPUT_ACTUAL_SUBDIR/json-summary.txt |
51 # - writing return value into $2/$OUTPUT_ACTUAL_SUBDIR/return_value | 73 # - writing return value into $2/$OUTPUT_ACTUAL_SUBDIR/return_value |
52 # Then compare all of those against $2/$OUTPUT_EXPECTED_SUBDIR . | 74 # Then compare all of those against $2/$OUTPUT_EXPECTED_SUBDIR . |
53 function gm_test { | 75 function gm_test { |
54 if [ $# != 2 ]; then | 76 if [ $# != 2 ]; then |
55 echo "gm_test requires exactly 2 parameters, got $#" | 77 echo "gm_test requires exactly 2 parameters, got $#" |
56 exit 1 | 78 exit 1 |
(...skipping 122 matching lines...) | |
179 | 201 |
180 # Test what happens if a subset of the renderModes fail (e.g. pipe) | 202 # Test what happens if a subset of the renderModes fail (e.g. pipe) |
181 gm_test "--simulatePipePlaybackFailure --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playback-failure" | 203 gm_test "--simulatePipePlaybackFailure --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playback-failure" |
182 | 204 |
183 # Confirm that IntentionallySkipped tests are recorded as such. | 205 # Confirm that IntentionallySkipped tests are recorded as such. |
184 gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUTS/intentionally-skipped-tests" | 206 gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUTS/intentionally-skipped-tests" |
185 | 207 |
186 # Ignore some error types (including ExpectationsMismatch) | 208 # Ignore some error types (including ExpectationsMismatch) |
187 gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OUTPUTS/ignore-expectations-mismatch" | 209 gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OUTPUTS/ignore-expectations-mismatch" |
188 | 210 |
211 # Exercise confirm_no_failures_in_json.py | |
rmistry 2013/05/13 12:25:39: Everything looks the same in the below 3 lines exc…
epoger 2013/05/14 15:03:00: Made it two loops (one for passing cases, one for…
212 assert_passes "python gm/confirm_no_failures_in_json.py $GM_OUTPUTS/compared-against-identical-bytes-json/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" | |
213 assert_passes "python gm/confirm_no_failures_in_json.py $GM_OUTPUTS/compared-against-identical-pixels-json/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" | |
214 assert_fails "python gm/confirm_no_failures_in_json.py $GM_OUTPUTS/compared-against-different-pixels-json/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" | |
215 | |
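The truncated reply above suggests folding these three calls into two loops. A sketch of that shape, assuming the second loop covers the failing case (the loop structure is an assumption based on the truncated comment; the filenames are the ones used above):

    for CASE in compared-against-identical-bytes-json \
                compared-against-identical-pixels-json; do
      assert_passes "python gm/confirm_no_failures_in_json.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt"
    done
    for CASE in compared-against-different-pixels-json; do
      assert_fails "python gm/confirm_no_failures_in_json.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt"
    done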
189 if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then | 216 if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then |
190 echo "All tests passed." | 217 echo "All tests passed." |
191 exit 0 | 218 exit 0 |
192 else | 219 else |
193 exit 1 | 220 exit 1 |
194 fi | 221 fi |