Chromium Code Reviews | Index: gm/tests/run.sh |
| diff --git a/gm/tests/run.sh b/gm/tests/run.sh |
| index 7e1f03dcb17c56d31bfffb3e315196294f286163..29074e5f1dafccfcb0f9e1e2cd5ef52632930dc8 100755 |
| --- a/gm/tests/run.sh |
| +++ b/gm/tests/run.sh |
| @@ -47,10 +47,11 @@ function compare_directories { |
| # Run a command, and validate that it succeeds (returns 0). |
| function assert_passes { |
| COMMAND="$1" |
| - OUTPUT=$($COMMAND 2>&1) |
| + echo |
| + echo "assert_passes $COMMAND ..." |
|
epoger
2014/01/24 17:35:48
I think it's better for the output to include more [remainder of review comment truncated in page capture]
|
| + $COMMAND |
| if [ $? != 0 ]; then |
| echo "This command was supposed to pass, but failed: [$COMMAND]" |
| - echo $OUTPUT |
| ENCOUNTERED_ANY_ERRORS=1 |
| fi |
| } |
| @@ -58,10 +59,11 @@ function assert_passes { |
| # Run a command, and validate that it fails (returns nonzero). |
| function assert_fails { |
| COMMAND="$1" |
| - OUTPUT=$($COMMAND 2>&1) |
| + echo |
| + echo "assert_fails $COMMAND ..." |
| + $COMMAND |
| if [ $? == 0 ]; then |
| echo "This command was supposed to fail, but passed: [$COMMAND]" |
| - echo $OUTPUT |
| ENCOUNTERED_ANY_ERRORS=1 |
| fi |
| } |
| @@ -274,6 +276,9 @@ for CASE in $FAILING_CASES; do |
| assert_fails "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt" |
| done |
| +# Exercise all rebaseline_server unittests. |
| +assert_passes "python gm/rebaseline_server/test_all.py" |
| + |
| if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then |
| echo "All tests passed." |
| exit 0 |