1 | #!/usr/bin/env bash
|
2 | #
|
3 | # Run tests against multiple shells with the sh_spec framework.
|
4 | #
|
5 | # Usage:
|
6 | # test/spec-runner.sh <function name>
|
7 |
|
set -o nounset
set -o pipefail
set -o errexit
shopt -s strict:all 2>/dev/null || true  # dogfood for OSH

# Fix: quote $0 so the computation survives a checkout path containing
# spaces (SC2086); behavior is unchanged for normal paths.
REPO_ROOT=$(cd "$(dirname "$0")/.."; pwd)

source build/dev-shell.sh
source test/common.sh
source test/spec-common.sh
source test/tsv-lib.sh  # $TAB

# Max number of spec files to run; override in the environment to limit a run.
NUM_SPEC_TASKS=${NUM_SPEC_TASKS:-400}
|
21 |
|
22 | # Option to use our xargs implementation.
|
23 | #xargs() {
|
24 | # echo "Using ~/git/oilshell/xargs.py/xargs.py"
|
25 | # ~/git/oilshell/xargs.py/xargs.py "$@"
|
26 | #}
|
27 |
|
28 | #
|
29 | # Test Runner
|
30 | #
|
31 |
|
write-suite-manifests() {
  ### Split the suite table from sh_spec.py into per-suite manifest files
  ### under _tmp/spec/ (one test file name per line).

  # Bash's {varname}>file redirection (below, after the brace group) opens
  # each file and stores the allocated file descriptor number in the named
  # variable; 'echo $name >& $osh' then duplicates stdout onto that fd, so
  # each name is routed to the manifest for its suite.
  #test/sh_spec.py --print-table spec/*.test.sh
  { test/sh_spec.py --print-table spec/*.test.sh | while read suite name; do
      case $suite in
        osh) echo $name >& $osh ;;
        ysh) echo $name >& $ysh ;;
        disabled) ;;  # ignore
        *) die "Invalid suite $suite" ;;
      esac
    done
  } {osh}>_tmp/spec/SUITE-osh.txt \
    {ysh}>_tmp/spec/SUITE-ysh.txt \
    {needs_terminal}>_tmp/spec/SUITE-needs-terminal.txt
  # NOTE(review): the needs_terminal fd is opened above but no case arm
  # writes to it, so SUITE-needs-terminal.txt is created empty — confirm
  # whether a 'needs-terminal' suite case is missing.

  # These are kind of pseudo-suites, not the main 3
  test/sh_spec.py --print-tagged interactive \
    spec/*.test.sh > _tmp/spec/SUITE-interactive.txt

  test/sh_spec.py --print-tagged dev-minimal \
    spec/*.test.sh > _tmp/spec/SUITE-osh-minimal.txt
}
|
53 |
|
54 |
|
diff-manifest() {
  ### Temporary test: diff freshly generated manifests against spec2/.

  write-suite-manifests
  #return

  # crazy sorting, affects glob
  # doesn't work
  #LANG=C
  #LC_COLLATE=C
  #LC_ALL=C
  #export LANG LC_COLLATE LC_ALL

  local suite
  for suite in osh ysh interactive osh-minimal; do
    echo
    # Fix: unquoted [$suite] is a glob character class — e.g. [osh] expands
    # to a file named 'o', 's', or 'h' if one exists in $PWD.  Quote it so
    # the literal brackets are printed.
    echo "[$suite]"
    echo

    diff -u -r <(sort "spec2/SUITE-$suite.txt") <(sort "_tmp/spec/SUITE-$suite.txt") #|| true
  done
}
|
76 |
|
dispatch-one() {
  ### Run ONE spec test file, recording its exit status, stats line, and
  ### HTML report under _tmp/spec/$spec_subdir/.  Invoked via xargs by
  ### _all-parallel.

  # Determines what binaries to compare against: compare-py | compare-cpp | release-alpine
  local compare_mode=${1:-compare-py}
  # Which subdir of _tmp/spec: osh-py ysh-py osh-cpp ysh-cpp smoosh
  local spec_subdir=${2:-osh-py}
  local spec_name=$3
  shift 3  # rest are more flags

  log "__ $spec_name"

  # 'prefix' selects the script (and optional subcommand) that knows how to
  # run a spec file for the requested comparison mode.
  local -a prefix
  case $compare_mode in

    compare-py)     prefix=(test/spec.sh) ;;

    compare-cpp)    prefix=(test/spec-cpp.sh run-file) ;;

    # For interactive comparison
    osh-only)       prefix=(test/spec-util.sh run-file-with-osh) ;;
    bash-only)      prefix=(test/spec-util.sh run-file-with-bash) ;;

    release-alpine) prefix=(test/spec-alpine.sh run-file) ;;

    *) die "Invalid compare mode $compare_mode" ;;
  esac

  local base_dir=_tmp/spec/$spec_subdir

  # Fix: quote all path/name expansions (SC2086) so spec names or base dirs
  # with glob characters or spaces can't be split or expanded.
  # TODO: Could --stats-{file,template} be a separate awk step on .tsv files?
  run-task-with-status \
    "$base_dir/${spec_name}.task.txt" \
    "${prefix[@]}" "$spec_name" \
    --format html \
    --stats-file "$base_dir/${spec_name}.stats.txt" \
    --stats-template \
    '%(num_cases)d %(oils_num_passed)d %(oils_num_failed)d %(oils_failures_allowed)d %(oils_ALT_delta)d' \
    "$@" \
    > "$base_dir/${spec_name}.html"
}
|
116 |
|
117 |
|
_html-summary() {
  ### Print an HTML summary to stdout and return whether all tests succeeded.
  #
  # Also writes the totals <tr> row to $totals, which html-summary splices
  # in at the <!-- TOTALS --> markers (top and bottom of the table).

  local sh_label=$1  # osh or ysh
  local base_dir=$2  # e.g. _tmp/spec/ysh-cpp
  local totals=$3  # path to print HTML to
  local manifest=$4

  local all_passed

  html-head --title "Spec Test Summary" \
    ../../../web/base.css ../../../web/spec-tests.css

  cat <<EOF
<body class="width50">

<p id="home-link">
  <!-- The release index is two dirs up -->
  <a href="../..">Up</a> |
  <a href="/">oilshell.org</a>
</p>

<h1>Spec Test Results Summary</h1>

<table>
  <thead>
  <tr>
    <td>name</td>
    <td># cases</td> <td>$sh_label # passed</td> <td>$sh_label # failed</td>
    <td>$sh_label failures allowed</td>
    <td>$sh_label ALT delta</td>
    <td>Elapsed Seconds</td>
  </tr>
  </thead>
<!-- TOTALS -->
EOF

  # Awk notes:
  # - "getline" is kind of like bash "read", but it doesn't allow you do
  #   specify variable names.  You have to destructure it yourself.
  # - Lack of string interpolation is very annoying

  head -n $NUM_SPEC_TASKS $manifest | sort | awk -v totals=$totals -v base_dir=$base_dir '
  # Awk problem: getline errors are ignored by default!
  function error(path) {
    print "Error reading line from file: " path > "/dev/stderr"
    exit(1)
  }

  {
    spec_name = $0

    # Read from the task files
    path = ( base_dir "/" spec_name ".task.txt" )
    n = getline < path
    if (n != 1) {
      error(path)
    }
    status = $1
    wall_secs = $2

    path = ( base_dir "/" spec_name ".stats.txt" )
    n = getline < path
    if (n != 1) {
      error(path)
    }
    num_cases = $1
    oils_num_passed = $2
    oils_num_failed = $3
    oils_failures_allowed = $4
    oils_ALT_delta = $5

    sum_status += status
    sum_wall_secs += wall_secs
    sum_num_cases += num_cases
    sum_oils_num_passed += oils_num_passed
    sum_oils_num_failed += oils_num_failed
    sum_oils_failures_allowed += oils_failures_allowed
    sum_oils_ALT_delta += oils_ALT_delta
    num_rows += 1

    # For the console
    if (status == 0) {
      num_passed += 1
    } else {
      num_failed += 1
      print spec_name " failed with status " status > "/dev/stderr"
    }

    if (status != 0) {
      css_class = "failed"
    } else if (oils_num_failed != 0) {
      css_class = "osh-allow-fail"
    } else if (oils_num_passed != 0) {
      css_class = "osh-pass"
    } else {
      css_class = ""
    }
    print "<tr class=" css_class ">"
    print "<td><a href=" spec_name ".html>" spec_name "</a></td>"
    print "<td>" num_cases "</td>"
    print "<td>" oils_num_passed "</td>"
    print "<td>" oils_num_failed "</td>"
    print "<td>" oils_failures_allowed "</td>"
    print "<td>" oils_ALT_delta "</td>"
    printf("<td>%.2f</td>\n", wall_secs);
    print "</tr>"
  }

  END {
    print "<tr class=totals>" >totals
    print "<td>TOTAL (" num_rows " rows) </td>" >totals
    print "<td>" sum_num_cases "</td>" >totals
    print "<td>" sum_oils_num_passed "</td>" >totals
    print "<td>" sum_oils_num_failed "</td>" >totals
    print "<td>" sum_oils_failures_allowed "</td>" >totals
    print "<td>" sum_oils_ALT_delta "</td>" >totals
    printf("<td>%.2f</td>\n", sum_wall_secs) > totals
    print "</tr>" >totals

    print "<tfoot>"
    print "<!-- TOTALS -->"
    print "</tfoot>"

    # For the console
    print "" > "/dev/stderr"
    if (num_failed == 0) {
      print "*** All " num_passed " tests PASSED" > "/dev/stderr"
    } else {
      print "*** " num_failed " tests FAILED" > "/dev/stderr"
      exit(1)  # failure
    }
  }
  '
  all_passed=$?

  cat <<EOF
</table>

<h3>Version Information</h3>
<pre>
EOF

  # TODO: can pass shells here, e.g. for test/spec-cpp.sh
  # Fix: use our own $sh_label parameter instead of reading $suite from the
  # caller's scope via dynamic scoping (the caller passes the same value as
  # $1, so behavior is unchanged — but now the function is self-contained).
  test/spec-version.sh ${sh_label}-version-text

  cat <<EOF
</pre>
</body>
</html>
EOF

  return $all_passed
}
|
270 |
|
html-summary() {
  ### Produce $base_dir/index.html from _html-summary output, duplicating
  ### the totals row at both the top and bottom of the results table.
  ### Returns whether all tests passed.

  local suite=$1
  local base_dir=$2

  local manifest="_tmp/spec/SUITE-$suite.txt"

  # Intermediate artifacts: the totals <tr> fragment and the raw page body.
  local totals=$base_dir/totals-$suite.html
  local tmp=$base_dir/tmp-$suite.html

  local out=$base_dir/index.html

  # TODO: Do we also need $base_dir/{osh,oil}-details-for-toil.json
  # osh failures, and all failures
  # When deploying, if they exist, them copy them outside?
  # I guess toil_web.py can use the zipfile module?
  # To get _tmp/spec/...
  # it can read JSON like:
  # { "task_tsv": "_tmp/toil/INDEX.tsv",
  #   "details_json": [ ... ],
  # }

  # Capture the exit status without letting errexit abort the script.
  set +o errexit
  _html-summary $suite $base_dir $totals $manifest > $tmp
  all_passed=$?
  set -o errexit

  # Total rows are displayed at both the top and bottom: replace each
  # <!-- TOTALS --> marker line with the captured totals fragment.
  awk -v totals="$(cat $totals)" '
  {
    if ($0 ~ /<!-- TOTALS -->/) {
      print totals
    } else {
      print
    }
  }
  ' < $tmp > $out

  printf '\nResults: file://%s/%s\n' "$PWD" "$out"

  return $all_passed
}
|
311 |
|
_all-parallel() {
  ### Run one spec suite in parallel with xargs -P, then aggregate results.

  local suite=${1:-osh}
  local compare_mode=${2:-compare-py}
  local spec_subdir=${3:-survey}

  # The rest are more flags.
  # Fix: a bare 'shift 3' fails under 'set -o errexit' when fewer than 3
  # args are passed, which made the ${1:-...} defaults above unusable.
  if test $# -ge 3; then
    shift 3
  else
    shift $#
  fi

  local manifest="_tmp/spec/SUITE-$suite.txt"
  local output_base_dir="_tmp/spec/$spec_subdir"
  mkdir -p "$output_base_dir"

  write-suite-manifests

  # The exit codes are recorded in files for html-summary to aggregate.
  set +o errexit
  head -n "$NUM_SPEC_TASKS" "$manifest" \
    | xargs -I {} -P "$MAX_PROCS" -- \
      $0 dispatch-one "$compare_mode" "$spec_subdir" {} "$@"
  set -o errexit

  all-tests-to-html "$manifest" "$output_base_dir"

  # note: the HTML links to ../../web/, which is in the repo.
  html-summary "$suite" "$output_base_dir"  # returns whether all passed
}
|
338 |
|
all-parallel() {
  ### Run spec tests in parallel.

  # Note that this function doesn't fail because 'run-file' saves the status
  # to a file.

  # Re-invoke this script ($0) so 'time' reports the wall time of the whole
  # parallel run.
  time $0 _all-parallel "$@"
}
|
347 |
|
all-tests-to-html() {
  ### Render the spec test source files listed in $manifest to HTML pages
  ### under $output_base_dir, in one batch.

  local manifest=$1
  local output_base_dir=$2

  # Fix: quote the expansions (SC2086) so a manifest path or output dir with
  # spaces/globs is passed through intact.
  # ignore attrs output
  head -n "$NUM_SPEC_TASKS" "$manifest" \
    | xargs --verbose -- doctools/src_tree.py spec-files "$output_base_dir" >/dev/null

  #| xargs -n 1 -P $MAX_PROCS -- $0 test-to-html $output_base_dir
  log "done: all-tests-to-html"
}
|
358 |
|
shell-sanity-check() {
  ### Check that every shell named in "$@" runs and can locate itself.
  ### Returns 1 on the first shell that fails.

  echo "PWD = $PWD"
  echo "PATH = $PATH"

  for sh in "$@"; do
    # note: shells are in $PATH, but not $OSH_LIST
    $sh -c 'echo -n "hello from $0: "; command -v $0 || true' || {
      echo "ERROR: $sh failed sanity check"
      return 1
    }
  done
}
|
371 |
|
# When executed directly (e.g. 'test/spec-runner.sh all-parallel ...'),
# dispatch to the function named by the first argument.  Skipped when this
# file is sourced under a different name.
# Fix: quote "$0" for basename so paths with spaces don't word-split (SC2086).
filename=$(basename "$0")
if test "$filename" = 'spec-runner.sh'; then
  "$@"
fi
|