| OLD | NEW |
|---|---|
| 1 #!/bin/bash | 1 #!/bin/bash |
| 2 | 2 |
| 3 # Self-tests for gm, based on tools/tests/run.sh | 3 # Self-tests for gm, based on tools/tests/run.sh |
| 4 # | 4 # |
| 5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit, | 5 # These tests are run by the Skia_PerCommit_House_Keeping bot at every commit, |
| 6 # so make sure that they still pass when you make changes to gm! | 6 # so make sure that they still pass when you make changes to gm! |
| 7 # | 7 # |
| 8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh | 8 # To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh |
| 9 # | 9 # |
| 10 # TODO: because this is written as a shell script (instead of, say, Python) | 10 # TODO: because this is written as a shell script (instead of, say, Python) |
| (...skipping 29 matching lines...) | |
| 40 diff -r --exclude=.* $1 $2 | 40 diff -r --exclude=.* $1 $2 |
| 41 if [ $? != 0 ]; then | 41 if [ $? != 0 ]; then |
| 42 echo "failed in: compare_directories $1 $2" | 42 echo "failed in: compare_directories $1 $2" |
| 43 ENCOUNTERED_ANY_ERRORS=1 | 43 ENCOUNTERED_ANY_ERRORS=1 |
| 44 fi | 44 fi |
| 45 } | 45 } |
| 46 | 46 |
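
For context, `compare_directories` hinges entirely on diff's exit status: `diff -r` walks both trees and returns non-zero when they differ, and `--exclude=.*` skips dotfiles such as `.svn` metadata. A standalone sketch of the same helper with the arguments quoted (the script under review leaves `$1` and `$2` unquoted, so paths containing spaces would break); the directory names in the usage line are hypothetical:

```bash
#!/bin/bash
# Sketch of compare_directories with quoted arguments.
ENCOUNTERED_ANY_ERRORS=0

function compare_directories {
  # --exclude=.* skips dotfiles (e.g. .svn metadata) in both trees;
  # diff -r returns non-zero if anything differs.
  if ! diff -r --exclude=.* "$1" "$2"; then
    echo "failed in: compare_directories $1 $2"
    ENCOUNTERED_ANY_ERRORS=1
  fi
}

# Hypothetical usage: compare an actual output tree against a baseline.
compare_directories some-case/output-expected some-case/output-actual
```

| OLD | NEW |
|---|---|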
| 47 # Run a command, and validate that it succeeds (returns 0). | 47 # Run a command, and validate that it succeeds (returns 0). |
| 48 function assert_passes { | 48 function assert_passes { |
| 49 COMMAND="$1" | 49 COMMAND="$1" |
| 50 OUTPUT=$($COMMAND 2>&1) | 50 echo |
| 51 echo "assert_passes $COMMAND ..." | |
|
epoger
2014/01/24 17:35:48
I think it's better for the output to include more
| |
| 52 $COMMAND | |
| 51 if [ $? != 0 ]; then | 53 if [ $? != 0 ]; then |
| 52 echo "This command was supposed to pass, but failed: [$COMMAND]" | 54 echo "This command was supposed to pass, but failed: [$COMMAND]" |
| 53 echo $OUTPUT | |
| 54 ENCOUNTERED_ANY_ERRORS=1 | 55 ENCOUNTERED_ANY_ERRORS=1 |
| 55 fi | 56 fi |
| 56 } | 57 } |
| 57 | 58 |
| 58 # Run a command, and validate that it fails (returns nonzero). | 59 # Run a command, and validate that it fails (returns nonzero). |
| 59 function assert_fails { | 60 function assert_fails { |
| 60 COMMAND="$1" | 61 COMMAND="$1" |
| 61 OUTPUT=$($COMMAND 2>&1) | 62 echo |
| 63 echo "assert_fails $COMMAND ..." | |
| 64 $COMMAND | |
| 62 if [ $? == 0 ]; then | 65 if [ $? == 0 ]; then |
| 63 echo "This command was supposed to fail, but passed: [$COMMAND]" | 66 echo "This command was supposed to fail, but passed: [$COMMAND]" |
| 64 echo $OUTPUT | |
| 65 ENCOUNTERED_ANY_ERRORS=1 | 67 ENCOUNTERED_ANY_ERRORS=1 |
| 66 fi | 68 fi |
| 67 } | 69 } |
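
The change in both helpers trades captured output for live output: the old code ran `OUTPUT=$($COMMAND 2>&1)` and echoed the transcript only on failure, while the new code prints a banner and runs the command directly, so its output streams as it happens (the direction epoger's comment argues for). If both behaviors were wanted, live streaming plus a saved transcript, a `tee`-based variant is possible. A sketch, assuming bash so that `PIPESTATUS` is available (`$COMMAND | tee` alone would report tee's exit status, not the command's); this is not the version the review settled on:

```bash
# Hypothetical variant: stream output live AND keep a transcript.
function assert_passes {
  COMMAND="$1"
  echo
  echo "assert_passes $COMMAND ..."
  $COMMAND 2>&1 | tee /tmp/assert-transcript.txt
  # PIPESTATUS[0] is the command's status; plain $? would be tee's.
  if [ "${PIPESTATUS[0]}" != "0" ]; then
    echo "This command was supposed to pass, but failed: [$COMMAND]"
    ENCOUNTERED_ANY_ERRORS=1
  fi
}
```

| OLD | NEW |
|---|---|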
| 68 | 70 |
| 69 # Run gm... | 71 # Run gm... |
| 70 # - with the arguments in $1 | 72 # - with the arguments in $1 |
| 71 # - writing stdout into $2/$OUTPUT_ACTUAL_SUBDIR/stdout | 73 # - writing stdout into $2/$OUTPUT_ACTUAL_SUBDIR/stdout |
| 72 # - writing json summary into $2/$OUTPUT_ACTUAL_SUBDIR/json-summary.txt | 74 # - writing json summary into $2/$OUTPUT_ACTUAL_SUBDIR/json-summary.txt |
| 73 # - writing return value into $2/$OUTPUT_ACTUAL_SUBDIR/return_value | 75 # - writing return value into $2/$OUTPUT_ACTUAL_SUBDIR/return_value |
| 74 # Then compare all of those against $2/$OUTPUT_EXPECTED_SUBDIR . | 76 # Then compare all of those against $2/$OUTPUT_EXPECTED_SUBDIR . |
| (...skipping 192 matching lines...) | |
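
The body of the function described by the comment above is elided (192 lines skipped), but its header comment pins down the contract: run gm with the arguments in `$1`; capture stdout, the JSON summary, and the return value under `$2/$OUTPUT_ACTUAL_SUBDIR`; then diff that tree against `$2/$OUTPUT_EXPECTED_SUBDIR`. A minimal sketch of a harness matching that comment, not the real body; `$GM_BINARY` and the JSON flag name are assumptions, and the actual function does considerably more:

```bash
# Sketch only: matches the header comment, not the elided implementation.
# $GM_BINARY, $OUTPUT_ACTUAL_SUBDIR and $OUTPUT_EXPECTED_SUBDIR are
# assumed to be defined elsewhere; the JSON flag name is a guess.
function gm_test {
  GM_ARGS="$1"
  ACTUAL_DIR="$2/$OUTPUT_ACTUAL_SUBDIR"
  mkdir -p "$ACTUAL_DIR"
  $GM_BINARY $GM_ARGS \
    --writeJsonSummaryPath "$ACTUAL_DIR/json-summary.txt" \
    > "$ACTUAL_DIR/stdout"
  echo $? > "$ACTUAL_DIR/return_value"
  compare_directories "$2/$OUTPUT_EXPECTED_SUBDIR" "$ACTUAL_DIR"
}
```

| OLD | NEW |
|---|---|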
| 267 # Exercise display_json_results.py | 269 # Exercise display_json_results.py |
| 268 PASSING_CASES="compared-against-identical-bytes-json compared-against-identical-pixels-json" | 270 PASSING_CASES="compared-against-identical-bytes-json compared-against-identical-pixels-json" |
| 269 FAILING_CASES="compared-against-different-pixels-json" | 271 FAILING_CASES="compared-against-different-pixels-json" |
| 270 for CASE in $PASSING_CASES; do | 272 for CASE in $PASSING_CASES; do |
| 271 assert_passes "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" | 273 assert_passes "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" |
| 272 done | 274 done |
| 273 for CASE in $FAILING_CASES; do | 275 for CASE in $FAILING_CASES; do |
| 274 assert_fails "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" | 276 assert_fails "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" |
| 275 done | 277 done |
| 276 | 278 |
| | 279 # Exercise all rebaseline_server unittests. |
| | 280 assert_passes "python gm/rebaseline_server/test_all.py" |
| | 281 |
| 277 if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then | 282 if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then |
| 278 echo "All tests passed." | 283 echo "All tests passed." |
| 279 exit 0 | 284 exit 0 |
| 280 else | 285 else |
| 281 exit 1 | 286 exit 1 |
| 282 fi | 287 fi |
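
Worth noting about the closing block: the script deliberately accumulates failures in `ENCOUNTERED_ANY_ERRORS` instead of exiting on the first one, so a single run reports every broken case before the bot goes red. Distilled to its essentials (`run_check` is a hypothetical name, not from the script):

```bash
#!/bin/bash
# The aggregation idiom used throughout run.sh, in isolation:
# run every check, remember whether any failed, and fold the
# answer into one exit status at the end.
ENCOUNTERED_ANY_ERRORS=0

function run_check {
  if ! "$@"; then
    echo "check failed: $*"
    ENCOUNTERED_ANY_ERRORS=1
  fi
}

run_check true
run_check false   # reported, but the run continues

exit $ENCOUNTERED_ANY_ERRORS
```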