Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(246)

Side by Side Diff: gm/tests/run.sh

Issue 13979017: GM self-tests: always run all tests (don't stop at first failure) (Closed) Base URL: http://skia.googlecode.com/svn/trunk/
Patch Set: Created 7 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « gm/tests/rebaseline.sh ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
#!/bin/bash

# Self-tests for gm, based on tools/tests/run.sh
#
# These tests are run by the Skia_PerCommit_House_Keeping bot at every commit,
# so make sure that they still pass when you make changes to gm!
#
# To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh
#
# TODO: because this is written as a shell script (instead of, say, Python)
# it only runs on Linux and Mac.
# See https://code.google.com/p/skia/issues/detail?id=677
# ('make tools/tests/run.sh work cross-platform')
# Ideally, these tests should pass on all development platforms...
# otherwise, how can developers be expected to test them before committing a
# change?

# cd into .../trunk so all the paths will work.
# Quote the substitutions and bail out if the cd fails, so the tests can
# never run against an unexpected working directory.
cd "$(dirname "$0")/../.." || exit 1

# TODO(epoger): make it look in Release and/or Debug
GM_BINARY=out/Debug/gm

OUTPUT_ACTUAL_SUBDIR=output-actual
OUTPUT_EXPECTED_SUBDIR=output-expected
CONFIGS="--config 8888 565"

# Set to 1 as soon as any test fails; only checked at the very end, so that
# ALL tests run even when an early one fails.
ENCOUNTERED_ANY_ERRORS=0
# Compare contents of all files within directories $1 and $2,
# EXCEPT for any dotfiles.
# If there are any differences, a description is written to stdout and
# ENCOUNTERED_ANY_ERRORS is set to 1, so that the overall failure is
# reported at the very end; we deliberately do NOT exit here, so that all
# remaining tests still run after the first failure.
# Otherwise, we write nothing to stdout and return.
function compare_directories {
  if [ $# != 2 ]; then
    # Wrong arity means a bug in this script itself, not a test failure,
    # so exiting immediately is appropriate here.
    echo "compare_directories requires exactly 2 parameters, got $#"
    exit 1
  fi
  # Quote the exclude pattern so the shell cannot glob-expand it, and quote
  # the directory arguments in case they ever contain spaces.
  diff -r --exclude='.*' "$1" "$2"
  if [ $? != 0 ]; then
    echo "failed in: compare_directories $1 $2"
    # Record the failure but keep going, so one broken test does not mask
    # the results of the others.
    ENCOUNTERED_ANY_ERRORS=1
  fi
}
44 46
45 # Run gm... 47 # Run gm...
46 # - with the arguments in $1 48 # - with the arguments in $1
47 # - writing stdout into $2/$OUTPUT_ACTUAL_SUBDIR/stdout 49 # - writing stdout into $2/$OUTPUT_ACTUAL_SUBDIR/stdout
48 # - writing json summary into $2/$OUTPUT_ACTUAL_SUBDIR/json-summary.txt 50 # - writing json summary into $2/$OUTPUT_ACTUAL_SUBDIR/json-summary.txt
49 # - writing return value into $2/$OUTPUT_ACTUAL_SUBDIR/return_value 51 # - writing return value into $2/$OUTPUT_ACTUAL_SUBDIR/return_value
50 # Then compare all of those against $2/$OUTPUT_EXPECTED_SUBDIR . 52 # Then compare all of those against $2/$OUTPUT_EXPECTED_SUBDIR .
51 function gm_test { 53 function gm_test {
52 if [ $# != 2 ]; then 54 if [ $# != 2 ]; then
53 echo "gm_test requires exactly 2 parameters, got $#" 55 echo "gm_test requires exactly 2 parameters, got $#"
54 exit 1 56 exit 1
borenet 2013/04/26 16:04:09 What about here?
epoger 2013/04/26 16:10:55 In this case, there was an error within the run.sh
55 fi 57 fi
56 GM_ARGS="$1" 58 GM_ARGS="$1"
57 ACTUAL_OUTPUT_DIR="$2/$OUTPUT_ACTUAL_SUBDIR" 59 ACTUAL_OUTPUT_DIR="$2/$OUTPUT_ACTUAL_SUBDIR"
58 EXPECTED_OUTPUT_DIR="$2/$OUTPUT_EXPECTED_SUBDIR" 60 EXPECTED_OUTPUT_DIR="$2/$OUTPUT_EXPECTED_SUBDIR"
59 JSON_SUMMARY_FILE="$ACTUAL_OUTPUT_DIR/json-summary.txt" 61 JSON_SUMMARY_FILE="$ACTUAL_OUTPUT_DIR/json-summary.txt"
60 62
61 rm -rf $ACTUAL_OUTPUT_DIR 63 rm -rf $ACTUAL_OUTPUT_DIR
62 mkdir -p $ACTUAL_OUTPUT_DIR 64 mkdir -p $ACTUAL_OUTPUT_DIR
63 COMMAND="$GM_BINARY $GM_ARGS --writeJsonSummaryPath $JSON_SUMMARY_FILE" 65 COMMAND="$GM_BINARY $GM_ARGS --writeJsonSummaryPath $JSON_SUMMARY_FILE"
64 echo "$COMMAND" >$ACTUAL_OUTPUT_DIR/command_line 66 echo "$COMMAND" >$ACTUAL_OUTPUT_DIR/command_line
(...skipping 16 matching lines...) Expand all
81 # Create input dir (at path $1) with expectations (both image and json) 83 # Create input dir (at path $1) with expectations (both image and json)
82 # that gm will match or mismatch as appropriate. 84 # that gm will match or mismatch as appropriate.
83 # 85 #
84 # We used to check these files into SVN, but then we needed to rebaseline them 86 # We used to check these files into SVN, but then we needed to rebaseline them
85 # when our drawing changed at all... so, as proposed in 87 # when our drawing changed at all... so, as proposed in
86 # http://code.google.com/p/skia/issues/detail?id=1068 , we generate them 88 # http://code.google.com/p/skia/issues/detail?id=1068 , we generate them
87 # new each time. 89 # new each time.
88 function create_inputs_dir { 90 function create_inputs_dir {
89 if [ $# != 1 ]; then 91 if [ $# != 1 ]; then
90 echo "create_inputs_dir requires exactly 1 parameter, got $#" 92 echo "create_inputs_dir requires exactly 1 parameter, got $#"
91 exit 1 93 exit 1
borenet 2013/04/26 16:04:09 And here?
epoger 2013/04/26 16:10:55 In this case, there was an error within the run.sh
92 fi 94 fi
93 INPUTS_DIR="$1" 95 INPUTS_DIR="$1"
94 IMAGES_DIR=$INPUTS_DIR/images 96 IMAGES_DIR=$INPUTS_DIR/images
95 JSON_DIR=$INPUTS_DIR/json 97 JSON_DIR=$INPUTS_DIR/json
96 mkdir -p $IMAGES_DIR $JSON_DIR 98 mkdir -p $IMAGES_DIR $JSON_DIR
97 99
98 mkdir -p $IMAGES_DIR/identical-bytes 100 mkdir -p $IMAGES_DIR/identical-bytes
99 # Run GM to write out the images actually generated. 101 # Run GM to write out the images actually generated.
100 $GM_BINARY --hierarchy --match selftest1 $CONFIGS \ 102 $GM_BINARY --hierarchy --match selftest1 $CONFIGS \
101 -w $IMAGES_DIR/identical-bytes 103 -w $IMAGES_DIR/identical-bytes
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
164 166
# Test what happens if a subset of the renderModes fail (e.g. pipe)
gm_test "--simulatePipePlaybackFailure --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playback-failure"

# Confirm that IntentionallySkipped tests are recorded as such.
gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUTS/intentionally-skipped-tests"

# Ignore some error types (including ExpectationsMismatch)
gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OUTPUTS/ignore-expectations-mismatch"

# Report overall success only if every test above passed; the individual
# tests record failures in ENCOUNTERED_ANY_ERRORS instead of exiting early,
# so the first failing test no longer hides the rest.
if [ "$ENCOUNTERED_ANY_ERRORS" = 0 ]; then
  echo "All tests passed."
  exit 0
else
  exit 1
fi
OLDNEW
« no previous file with comments | « gm/tests/rebaseline.sh ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698