Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(25)

Side by Side Diff: gm/tests/run.sh

Issue 15100003: Add gm/confirm_no_failures_in_json.py (Closed) Base URL: http://skia.googlecode.com/svn/trunk/
Patch Set: sync_to_r9118 Created 7 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « gm/gm_expectations.cpp ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/bin/bash 1 #!/bin/bash
2 2
3 # Self-tests for gm, based on tools/tests/run.sh 3 # Self-tests for gm, based on tools/tests/run.sh
4 # 4 #
5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit, 5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit,
6 # so make sure that they still pass when you make changes to gm! 6 # so make sure that they still pass when you make changes to gm!
7 # 7 #
8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh 8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh
9 # 9 #
10 # TODO: because this is written as a shell script (instead of, say, Python) 10 # TODO: because this is written as a shell script (instead of, say, Python)
(...skipping 26 matching lines...) Expand all
37 echo "compare_directories requires exactly 2 parameters, got $#" 37 echo "compare_directories requires exactly 2 parameters, got $#"
38 exit 1 38 exit 1
39 fi 39 fi
40 diff -r --exclude=.* $1 $2 40 diff -r --exclude=.* $1 $2
41 if [ $? != 0 ]; then 41 if [ $? != 0 ]; then
42 echo "failed in: compare_directories $1 $2" 42 echo "failed in: compare_directories $1 $2"
43 ENCOUNTERED_ANY_ERRORS=1 43 ENCOUNTERED_ANY_ERRORS=1
44 fi 44 fi
45 } 45 }
46 46
# Run a command, and validate that it succeeds (returns 0).
# Arguments: $1 - the full command line to run, as a single string
# Globals:   ENCOUNTERED_ANY_ERRORS (set to 1 on failure)
# Outputs:   on failure, a diagnostic plus the command's combined output
function assert_passes {
  COMMAND="$1"
  # $COMMAND is intentionally unquoted: the string holds an entire command
  # line and relies on word-splitting to separate program and arguments.
  # Checking the assignment directly avoids the fragile $?-after-assignment
  # pattern.
  if ! OUTPUT=$($COMMAND 2>&1); then
    echo "This command was supposed to pass, but failed: [$COMMAND]"
    # Quoted so the captured output keeps its newlines and spacing, and is
    # not glob-expanded.
    echo "$OUTPUT"
    ENCOUNTERED_ANY_ERRORS=1
  fi
}
57
# Run a command, and validate that it fails (returns nonzero).
# Arguments: $1 - the full command line to run, as a single string
# Globals:   ENCOUNTERED_ANY_ERRORS (set to 1 if the command unexpectedly
#            succeeds)
# Outputs:   on unexpected success, a diagnostic plus the command's output
function assert_fails {
  COMMAND="$1"
  # Intentionally unquoted (see assert_passes): the whole command line is
  # word-split into program and arguments.
  if OUTPUT=$($COMMAND 2>&1); then
    echo "This command was supposed to fail, but passed: [$COMMAND]"
    # Quoted to preserve the output's internal whitespace verbatim.
    echo "$OUTPUT"
    ENCOUNTERED_ANY_ERRORS=1
  fi
}
68
47 # Run gm... 69 # Run gm...
48 # - with the arguments in $1 70 # - with the arguments in $1
49 # - writing stdout into $2/$OUTPUT_ACTUAL_SUBDIR/stdout 71 # - writing stdout into $2/$OUTPUT_ACTUAL_SUBDIR/stdout
50 # - writing json summary into $2/$OUTPUT_ACTUAL_SUBDIR/json-summary.txt 72 # - writing json summary into $2/$OUTPUT_ACTUAL_SUBDIR/json-summary.txt
51 # - writing return value into $2/$OUTPUT_ACTUAL_SUBDIR/return_value 73 # - writing return value into $2/$OUTPUT_ACTUAL_SUBDIR/return_value
52 # Then compare all of those against $2/$OUTPUT_EXPECTED_SUBDIR . 74 # Then compare all of those against $2/$OUTPUT_EXPECTED_SUBDIR .
53 function gm_test { 75 function gm_test {
54 if [ $# != 2 ]; then 76 if [ $# != 2 ]; then
55 echo "gm_test requires exactly 2 parameters, got $#" 77 echo "gm_test requires exactly 2 parameters, got $#"
56 exit 1 78 exit 1
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after
179 201
180 # Test what happens if a subset of the renderModes fail (e.g. pipe) 202 # Test what happens if a subset of the renderModes fail (e.g. pipe)
181 gm_test "--simulatePipePlaybackFailure --verbose --hierarchy --match selftest1 $ CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playback-fai lure" 203 gm_test "--simulatePipePlaybackFailure --verbose --hierarchy --match selftest1 $ CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playback-fai lure"
182 204
183 # Confirm that IntentionallySkipped tests are recorded as such. 205 # Confirm that IntentionallySkipped tests are recorded as such.
184 gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUT S/intentionally-skipped-tests" 206 gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUT S/intentionally-skipped-tests"
185 207
186 # Ignore some error types (including ExpectationsMismatch) 208 # Ignore some error types (including ExpectationsMismatch)
187 gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierar chy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OU TPUTS/ignore-expectations-mismatch" 209 gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierar chy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OU TPUTS/ignore-expectations-mismatch"
188 210
# Exercise confirm_no_failures_in_json.py: the summaries recorded for
# known-good cases must be accepted, and the known-bad case rejected.
PASSING_CASES="compared-against-identical-bytes-json compared-against-identical-pixels-json"
FAILING_CASES="compared-against-different-pixels-json"
for CASE in $PASSING_CASES; do
  assert_passes "python gm/confirm_no_failures_in_json.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt"
done
for CASE in $FAILING_CASES; do
  assert_fails "python gm/confirm_no_failures_in_json.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt"
done
# Report overall status: nonzero exit if any check above recorded an error.
if [ "$ENCOUNTERED_ANY_ERRORS" != 0 ]; then
  exit 1
fi
echo "All tests passed."
exit 0
OLDNEW
« no previous file with comments | « gm/gm_expectations.cpp ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698