#!/bin/bash
# Self-tests for gm, based on tools/tests/run.sh
#
# These tests are run by the Skia_PerCommit_House_Keeping bot at every commit,
# so make sure that they still pass when you make changes to gm!
#
# To generate new baselines when gm behavior changes, run gm/tests/rebaseline.sh
#
# TODO: because this is written as a shell script (instead of, say, Python)
# it only runs on Linux and Mac.
# See https://code.google.com/p/skia/issues/detail?id=677
# ('make tools/tests/run.sh work cross-platform')
# Ideally, these tests should pass on all development platforms; otherwise,
# developers cannot be expected to run them before committing a change.
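
# Example usage (this script cd's into the Skia trunk itself, so it can be
# invoked from any directory):
#   gm/tests/run.sh
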
# cd into .../trunk so all the paths will work.
cd "$(dirname "$0")/../.."

# TODO(epoger): make it look in Release and/or Debug
GM_BINARY=out/Debug/gm
OUTPUT_ACTUAL_SUBDIR=output-actual
OUTPUT_EXPECTED_SUBDIR=output-expected
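
# Configs rendered by every test run: '8888' is the 32-bit ARGB raster
# backend, and '565' is the 16-bit RGB raster backend.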
CONFIGS="--config 8888 565"
ENCOUNTERED_ANY_ERRORS=0
# Compare contents of all files within directories $1 and $2,
# EXCEPT for any dotfiles.
# If there are any differences, a description is written to stdout and
# we exit with a nonzero return value.
# Otherwise, we write nothing to stdout and return.
function compare_directories {
  if [ $# != 2 ]; then
    echo "compare_directories requires exactly 2 parameters, got $#"
    exit 1
  fi
  diff -r --exclude='.*' "$1" "$2"
  if [ $? != 0 ]; then
    echo "failed in: compare_directories $1 $2"
    ENCOUNTERED_ANY_ERRORS=1
  fi
}

# Run a command, and validate that it succeeds (returns 0).
function assert_passes {
  COMMAND="$1"
  echo
  echo "assert_passes $COMMAND ..."
  $COMMAND
  if [ $? != 0 ]; then
    echo "This command was supposed to pass, but failed: [$COMMAND]"
    ENCOUNTERED_ANY_ERRORS=1
  fi
}

# Run a command, and validate that it fails (returns nonzero).
function assert_fails {
  COMMAND="$1"
  echo
  echo "assert_fails $COMMAND ..."
  $COMMAND
  if [ $? == 0 ]; then
    echo "This command was supposed to fail, but passed: [$COMMAND]"
    ENCOUNTERED_ANY_ERRORS=1
  fi
}

# Run gm...
# - with the arguments in $1
# - writing json summary into $2/$OUTPUT_ACTUAL_SUBDIR/json-summary.txt
# - writing return value into $2/$OUTPUT_ACTUAL_SUBDIR/return_value
# Then compare all of those against $2/$OUTPUT_EXPECTED_SUBDIR.
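#
# (For reference: the layout produced under $2/$OUTPUT_ACTUAL_SUBDIR is
# roughly a json-summary.txt file, a return_value file, and writePath/,
# mismatchPath/, and missingExpectationsPath/ subdirectories, as needed.)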
function gm_test {
  if [ $# != 2 ]; then
    echo "gm_test requires exactly 2 parameters, got $#"
    exit 1
  fi
  GM_ARGS="$1"
  ACTUAL_OUTPUT_DIR="$2/$OUTPUT_ACTUAL_SUBDIR"
  EXPECTED_OUTPUT_DIR="$2/$OUTPUT_EXPECTED_SUBDIR"
  JSON_SUMMARY_FILE="$ACTUAL_OUTPUT_DIR/json-summary.txt"

  rm -rf "$ACTUAL_OUTPUT_DIR"
  mkdir -p "$ACTUAL_OUTPUT_DIR"
  COMMAND="$GM_BINARY $GM_ARGS --writeJsonSummaryPath $JSON_SUMMARY_FILE --writePath $ACTUAL_OUTPUT_DIR/writePath --mismatchPath $ACTUAL_OUTPUT_DIR/mismatchPath --missingExpectationsPath $ACTUAL_OUTPUT_DIR/missingExpectationsPath"
  $COMMAND
  echo $? >$ACTUAL_OUTPUT_DIR/return_value

  # Replace image file contents with just the filename, for two reasons:
  # 1. Image file encoding may vary by platform.
  # 2. https://code.google.com/p/chromium/issues/detail?id=169600
  #    ('gcl/upload.py fail to upload binary files to rietveld')
  for IMAGEFILE in $(find $ACTUAL_OUTPUT_DIR -name \*.png -or -name \*.pdf); do
    echo "[contents of $IMAGEFILE]" >$IMAGEFILE
  done

  # Add a file to any empty subdirectories, because the self-test cannot
  # handle empty directories.
  for DIR in $(find $ACTUAL_OUTPUT_DIR -mindepth 1 -type d); do
    echo "Created additional file to make sure directory isn't empty, because self-test cannot handle empty directories." >$DIR/bogusfile
  done

  compare_directories $EXPECTED_OUTPUT_DIR $ACTUAL_OUTPUT_DIR
}

# Swap contents of two files at paths $1 and $2.
function swap_files {
  if [ $# != 2 ]; then
    echo "swap_files requires exactly 2 parameters, got $#"
    exit 1
  fi
  mv "$1" "$1.tmp"
  mv "$2" "$1"
  mv "$1.tmp" "$2"
}

# Create input dir (at path $1) with expectations (both image and json)
# that gm will match or mismatch as appropriate.
#
# We used to check these files into SVN, but then we needed to rebaseline them
# when our drawing changed at all... so, as proposed in
# http://code.google.com/p/skia/issues/detail?id=1068 , we generate them
# new each time.
function create_inputs_dir {
  if [ $# != 1 ]; then
    echo "create_inputs_dir requires exactly 1 parameter, got $#"
    exit 1
  fi
  INPUTS_DIR="$1"
  IMAGES_DIR=$INPUTS_DIR/images
  JSON_DIR=$INPUTS_DIR/json
  mkdir -p $IMAGES_DIR $JSON_DIR

  THIS_IMAGE_DIR=$IMAGES_DIR/identical-bytes
  mkdir -p $THIS_IMAGE_DIR
  # Run GM to write out the images actually generated.
  $GM_BINARY --hierarchy --match selftest1 $CONFIGS -w $THIS_IMAGE_DIR
  # Run GM again to read in those images and write them out as a JSON summary.
  $GM_BINARY --hierarchy --match selftest1 $CONFIGS -r $THIS_IMAGE_DIR \
    --writeJsonSummaryPath $JSON_DIR/identical-bytes.json

  THIS_IMAGE_DIR=$IMAGES_DIR/identical-pixels
  mkdir -p $THIS_IMAGE_DIR
  $GM_BINARY --hierarchy --match selftest1 $CONFIGS -w $THIS_IMAGE_DIR
  echo "more bytes that do not change the image pixels" \
    >> $THIS_IMAGE_DIR/8888/selftest1.png
  echo "more bytes that do not change the image pixels" \
    >> $THIS_IMAGE_DIR/565/selftest1.png
  $GM_BINARY --hierarchy --match selftest1 $CONFIGS -r $THIS_IMAGE_DIR \
    --writeJsonSummaryPath $JSON_DIR/identical-pixels.json
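
  # For the different-pixels case, render both selftest1 and selftest2, then
  # swap their output files so that each test's stored "expectation" is
  # actually the other test's image.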
  THIS_IMAGE_DIR=$IMAGES_DIR/different-pixels
  mkdir -p $THIS_IMAGE_DIR
  $GM_BINARY --hierarchy --match selftest $CONFIGS -w $THIS_IMAGE_DIR
  swap_files $THIS_IMAGE_DIR/8888/selftest2.png $THIS_IMAGE_DIR/8888/selftest1.png
  swap_files $THIS_IMAGE_DIR/565/selftest2.png $THIS_IMAGE_DIR/565/selftest1.png
  $GM_BINARY --hierarchy --match selftest $CONFIGS -r $THIS_IMAGE_DIR \
    --writeJsonSummaryPath $JSON_DIR/different-pixels.json

  # Create another JSON expectations file which is identical to
  # different-pixels.json, but in which the *first* ignore-failure is changed
  # from false to true.
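  # (Note: the '0,/regexp/' address form below, which restricts the
  # substitution to the first match, is a GNU sed extension; stock BSD sed on
  # a Mac may reject it.)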
  OLD='"ignore-failure" : false'
  NEW='"ignore-failure" : true'
  sed -e "0,/$OLD/{s/$OLD/$NEW/}" $JSON_DIR/different-pixels.json \
    >$JSON_DIR/different-pixels-ignore-some-failures.json

  THIS_IMAGE_DIR=$IMAGES_DIR/different-pixels-no-hierarchy
  mkdir -p $THIS_IMAGE_DIR
  $GM_BINARY --match selftest2 $CONFIGS -w $THIS_IMAGE_DIR
  mv $THIS_IMAGE_DIR/selftest2_8888.png $THIS_IMAGE_DIR/selftest1_8888.png
  mv $THIS_IMAGE_DIR/selftest2_565.png $THIS_IMAGE_DIR/selftest1_565.png
  $GM_BINARY --match selftest1 $CONFIGS -r $THIS_IMAGE_DIR \
    --writeJsonSummaryPath $JSON_DIR/different-pixels-no-hierarchy.json

  mkdir -p $IMAGES_DIR/empty-dir
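
  # Create the ignoreFailuresFile used by the --ignoreFailuresFile test cases
  # below.  (GM_IGNORE_FAILURES_FILE is assigned later in this script, but
  # before create_inputs_dir is called, so it is set by the time this function
  # runs.)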
echo "# Comment line" >$GM_IGNORE_FAILURES_FILE
echo "" >>$GM_IGNORE_FAILURES_FILE
echo "# ignore any runs of the 'selftest1' test" >>$GM_IGNORE_FAILURES_FILE
echo "selftest1" >>$GM_IGNORE_FAILURES_FILE
echo "" >>$GM_IGNORE_FAILURES_FILE
echo "# make sure we don't do partial matches (should NOT ignore 'selftest2' runs)" >>$GM_IGNORE_FAILURES_FILE
echo "selftest" >>$GM_IGNORE_FAILURES_FILE
}
GM_TESTDIR=gm/tests
GM_INPUTS=$GM_TESTDIR/inputs
GM_OUTPUTS=$GM_TESTDIR/outputs
GM_TEMPFILES=$GM_TESTDIR/tempfiles
GM_IGNORE_FAILURES_FILE=$GM_INPUTS/ignored-tests.txt

create_inputs_dir $GM_INPUTS
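
# Each gm_test call below renders the self-test GMs, compares the results
# against one of the expectations sets created above, and then diffs all
# resulting output against the checked-in files under $GM_OUTPUTS.
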
# Compare generated images against expectations with identical bytes, supplied
# both as an image directory and as a JSON summary.
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/images/identical-bytes" "$GM_OUTPUTS/compared-against-identical-bytes-images"
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/identical-bytes.json" "$GM_OUTPUTS/compared-against-identical-bytes-json"

# Compare generated images against expectations with identical pixels but
# different PNG encoding.
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/images/identical-pixels" "$GM_OUTPUTS/compared-against-identical-pixels-images"
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/compared-against-identical-pixels-json"

# Compare generated images against expectations with different pixels.
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/images/different-pixels" "$GM_OUTPUTS/compared-against-different-pixels-images"
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OUTPUTS/compared-against-different-pixels-json"

# Exercise the --ignoreFailuresFile flag.
# This runs two GM tests, selftest1 and selftest2; failures in selftest1
# should be ignored, but failures in selftest2 should not.
gm_test "--verbose --hierarchy --match selftest --ignoreFailuresFile $GM_IGNORE_FAILURES_FILE $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OUTPUTS/ignoring-one-test"

# Compare different pixels, but with a SUBSET of the expectations marked as
# ignore-failure.
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels-ignore-some-failures.json" "$GM_OUTPUTS/ignoring-some-failures"

# Compare generated images against an empty "expected image" dir.
# Even tests that have been marked as ignore-failure (selftest1) should show
# up as no-comparison.
gm_test "--verbose --hierarchy --match selftest --ignoreFailuresFile $GM_IGNORE_FAILURES_FILE $CONFIGS -r $GM_INPUTS/images/empty-dir" "$GM_OUTPUTS/compared-against-empty-dir"

# Compare generated images against a nonexistent "expected image" dir.
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS -r ../path/to/nowhere" "$GM_OUTPUTS/compared-against-nonexistent-dir"

# Compare generated images against an empty "expected image" dir, but NOT in
# verbose mode.
gm_test "--hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/images/empty-dir" "$GM_OUTPUTS/nonverbose"

# Add pdf to the list of configs.
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS pdf -r $GM_INPUTS/json/identical-bytes.json" "$GM_OUTPUTS/add-config-pdf"

# Test what happens if run without -r (no expected-results.json to compare
# against).
gm_test "--verbose --hierarchy --match selftest1 $CONFIGS" "$GM_OUTPUTS/no-readpath"

# Test what happens if a subset of the renderModes fail (e.g. pipe).
gm_test "--pipe --simulatePipePlaybackFailure --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/identical-pixels.json" "$GM_OUTPUTS/pipe-playback-failure"

# Confirm that IntentionallySkipped tests are recorded as such.
gm_test "--verbose --hierarchy --match selftest1 selftest2 $CONFIGS" "$GM_OUTPUTS/intentionally-skipped-tests"

# Ignore some error types (including ExpectationsMismatch).
gm_test "--ignoreErrorTypes ExpectationsMismatch NoGpuContext --verbose --hierarchy --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels.json" "$GM_OUTPUTS/ignore-expectations-mismatch"

# Test non-hierarchical mode.
gm_test "--verbose --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels-no-hierarchy.json" "$GM_OUTPUTS/no-hierarchy"

# Write out actual images using checksum-based filenames, as we do when
# uploading to Google Storage.
gm_test "--verbose --writeChecksumBasedFilenames --match selftest1 $CONFIGS -r $GM_INPUTS/json/different-pixels-no-hierarchy.json" "$GM_OUTPUTS/checksum-based-filenames"

# Exercise display_json_results.py.
PASSING_CASES="compared-against-identical-bytes-json compared-against-identical-pixels-json"
FAILING_CASES="compared-against-different-pixels-json"

for CASE in $PASSING_CASES; do
  assert_passes "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt"
done
for CASE in $FAILING_CASES; do
  assert_fails "python gm/display_json_results.py $GM_OUTPUTS/$CASE/$OUTPUT_EXPECTED_SUBDIR/json-summary.txt"
done

# Exercise all Python unittests.
assert_passes "python gm/test_all.py"

echo
if [ $ENCOUNTERED_ANY_ERRORS == 0 ]; then
  echo "All tests passed."
  exit 0
else
  echo "Some tests failed."
  exit 1
fi