#!/usr/bin/env bash
#
# Run unit tests. Sets PYTHONPATH.
#
# Usage:
#   test/unit.sh <function name>
#
# Examples:
#
#   test/unit.sh unit frontend/lexer_test.py
#   test/unit.sh all
#   test/unit.sh minimal

set -o nounset
set -o pipefail
set -o errexit
shopt -s strict:all 2>/dev/null || true  # dogfood for OSH

REPO_ROOT=$(cd "$(dirname $0)/.."; pwd)  # tsv-lib.sh uses this
readonly REPO_ROOT

source build/dev-shell.sh    # R_LIBS_USER, but also changes python3
source test/common.sh        # html-head
source devtools/run-task.sh  # run-task
source test/tsv-lib.sh

banner() {
  echo -----
  echo "$@"
  echo -----
}

unit() {
  ### Run a single test, autocompletes with devtools/completion.bash
  local test_path=$1

  # Duplicates logic in test-manifest
  read -r first_line < $test_path
  if [[ $first_line == *python3* ]]; then
    py_path_more=:  # no-op
  else
    py_path_more=:vendor/  # for vendor/typing.py
  fi
  PYTHONPATH=${PYTHONPATH}${py_path_more} "$@"
}
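
# Because unit() runs "$@", any arguments after the test path are passed
# through to the test script itself.  Hypothetical example, assuming the test
# uses the standard unittest main() (the class name here is invented):
#
#   test/unit.sh unit osh/split_test.py SplitTest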

test-files() {
  find . -name '_*' -a -prune -o -name '*_test.py' -a -printf '%P\n' | sort
}
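
# For orientation: this prints repo-relative paths (find's %P strips the
# leading starting point), one per line, e.g. frontend/lexer_test.py.
# Directories starting with _ are pruned.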

test-manifest() {
  test-files | while read test_path; do
    local minimal=-
    case $test_path in
      # For build/py.sh minimal: if we didn't build fastlex.so,
      # then skip a unit test that will fail.
      pyext/fastlex_test.py|doctools/cmark_test.py)
        minimal=exclude
        ;;

      # Skip obsolete tests
      demo/old/*)
        continue
        ;;

      # Skip OPy and pgen2 tests - they have some PYTHONPATH issues?
      # May want to restore pgen2
      opy/*|pgen2/*)
        continue
        ;;

    esac

    read -r first_line < $test_path
    #echo $first_line
    if [[ $first_line == *python3* ]]; then
      kind=py3
      py_path_more=:  # no-op
    else
      kind=py2
      py_path_more=:vendor/  # for vendor/typing.py
    fi

    echo "$minimal $kind $py_path_more $test_path"
  done
}
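
# Illustrative manifest rows (the fields are "$minimal $kind $py_path_more
# $test_path"; whether a given test is py2 or py3 depends on its shebang
# line, so these exact values are an assumption):
#
#   - py2 :vendor/ frontend/lexer_test.py
#   exclude py2 :vendor/ pyext/fastlex_test.py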

files-to-count() {
  ### Invoked by metrics/source-code.sh
  test-manifest | while read _ _ _ test_path; do
    echo $test_path
  done
}

run-unit-test() {
  local py_path_more=$1
  local test_path=$2

  PYTHONPATH=${PYTHONPATH}${py_path_more} run-test-bin $test_path '' _test/py-unit
}
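
# For a py2 test, the command above expands to roughly the following
# (run-test-bin comes from the sourced helper libraries; the exact layout it
# creates under _test/py-unit is up to that helper):
#
#   PYTHONPATH=${PYTHONPATH}:vendor/ run-test-bin frontend/lexer_test.py '' _test/py-unit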

all() {
  ### Run unit tests after build/py.sh all

  test-manifest | while read minimal kind py_path_more test_path; do
    run-unit-test $py_path_more $test_path
  done

  echo
  echo "All unit tests passed."
}

minimal() {
  ### Run unit tests after build/py.sh minimal

  test-manifest | while read minimal kind py_path_more test_path; do
    if test $minimal = exclude; then
      continue
    fi

    if test $kind = py3; then
      continue
    fi

    run-unit-test $py_path_more $test_path
  done

  echo
  echo "Minimal unit tests passed."
}

#
# Unlike soil-run, run-for-release makes an HTML page in _release/VERSION
# Could unify them.

run-test-and-log() {
  local tasks_tsv=$1
  local rel_path=$2

  local log=_tmp/unit/$rel_path.txt
  mkdir -p "$(dirname $log)"

  time-tsv --append --out $tasks_tsv \
    --field $rel_path --field "$rel_path.txt" -- \
    $rel_path >$log 2>&1
}

run-all-and-log() {
  local out_dir=_tmp/unit
  mkdir -p $out_dir
  rm -r -f $out_dir/*

  local tasks_tsv=$out_dir/tasks.tsv

  local status=0

  # Not writing a schema
  tsv-row 'status' 'elapsed_secs' 'test' 'test_HREF' > $tasks_tsv

  # There are no functions here, so disabling errexit is safe.
  # Note: In YSH, this could use shopt { }.
  test-manifest | while read _ kind py_path_more test_path; do

    local status=0
    set +o errexit
    PYTHONPATH=${PYTHONPATH}${py_path_more} run-test-and-log $tasks_tsv $test_path
    status=$?
    set -o errexit

    if test $status -ne 0; then
      echo "FAIL $status - $test_path"
    fi

  done
}
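
# After run-all-and-log, _tmp/unit/tasks.tsv has one tab-separated row per
# test, matching the header written above.  Values here are invented, just to
# show the shape:
#
#   status  elapsed_secs  test                    test_HREF
#   0       0.42          frontend/lexer_test.py  frontend/lexer_test.py.txt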

# TODO: It would be nice to have timestamps of the underlying TSV files and
# timestamp of running the report. This is useful for benchmarks too.

print-report() {
  local in_dir=${1:-_tmp/unit}
  local base_url='../../web'  # published at more_tests.wwz/unit/

  html-head --title 'Oils Unit Test Results' \
    "$base_url/table/table-sort.js" \
    "$base_url/table/table-sort.css" \
    "$base_url/base.css" \
    "$base_url/benchmarks.css"

  # NOTE: Using benchmarks for now.
  cat <<EOF
  <body class="width40">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
    <h2>Unit Test Results</h2>

EOF

  tsv2html $in_dir/report.tsv

  cat <<EOF
  </body>
</html>
EOF
}

write-report() {
  # Presentation:
  #
  # - elapsed seconds -> milliseconds
  # - Link to test log
  # - Right justify numbers

  local out=_tmp/unit/index.html
  test/report.R unit _tmp/unit _tmp/unit
  print-report > $out
  echo "Wrote $out"
}

run-for-release() {
  # Invoked by devtools/release.sh.

  run-all-and-log
  write-report
}

#
# Experimental tsv-stream
#

tsv-stream-one() {
  local rel_path=$1

  local log_file=_tmp/unit/$rel_path.txt
  mkdir -p "$(dirname $log_file)"

  echo
  echo "| ROW test=$rel_path test_HREF=$log_file"

  # TODO: Emit | ADD status=0 elapsed_secs=0.11

  time-tsv -o /dev/stdout -- $rel_path
  local status=$?

  if test $status -ne 0; then
    echo
    echo "*** $rel_path FAILED ***"
    echo
    return 255  # xargs aborts
  fi

}

tsv-stream-all() {
  echo '| HEADER status elapsed_secs test test_HREF'

  time py2-tests T | head -n 20 | xargs -n 1 -- $0 tsv-stream-one
}

# Experimental idea: Capture tsv-stream-all, and turn it into two things:
#
# - A TSV file, which can be turned into HTML, and summarized with counts
# - An HTML text string with <a name=""> anchors, which the table can link to
#
# At the terminal, the raw output is still readable, without a filter.
# Although we might want:
#
#   | OK
#   | FAIL
#
# Instead of:
#
#   | ADD status=0
#   | ADD status=1
#
# Also, we currently send output to /dev/null at the terminal, and we save it
# when doing a release.
#
# We might also do something like C++ unit tests:
#
#   RUN osh/split_test.py &> _test/osh/split_test
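
# A hypothetical raw stream (status and timing values invented) might look
# like:
#
#   | HEADER status elapsed_secs test test_HREF
#   | ROW test=osh/split_test.py test_HREF=_tmp/unit/osh/split_test.py.txt
#   | ADD status=0 elapsed_secs=0.11
#
# devtools/tsv_stream.py (used by all-2 below) would fold this back into a
# plain TSV file with those four columns.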

all-2() {
  ### New harness that uses tsv-stream

  # Use this at the command line, in the CI, and in the release.

  tsv-stream-all | devtools/tsv_stream.py
}

# NOTE: Show options like this:
# python -m unittest discover -h

run-task "$@"