#!/usr/bin/env bash
#
# Run unit tests.  Sets PYTHONPATH.
#
# Usage:
#   test/unit.sh <function name>
#
# Examples:
#
#   test/unit.sh unit frontend/lexer_test.py
#   test/unit.sh all
#   test/unit.sh minimal

: ${LIB_OSH=stdlib/osh}
source $LIB_OSH/bash-strict.sh
source $LIB_OSH/task-five.sh  # run-task

REPO_ROOT=$(cd "$(dirname $0)/.."; pwd)  # tsv-lib.sh uses this

source build/dev-shell.sh  # R_LIBS_USER, but also changes python3
source test/common.sh  # html-head
source test/tsv-lib.sh

banner() {
  echo -----
  echo "$@"
  echo -----
}

unit() {
  ### Run a single test, autocompletes with devtools/completion.bash
  local test_path=$1

  # Duplicates logic in test-manifest
  read -r first_line < $test_path
  if [[ $first_line == *python3* ]]; then
    py_path_more=:  # no-op
  else
    py_path_more=:vendor/  # for vendor/typing.py
  fi
  PYTHONPATH=${PYTHONPATH}${py_path_more} "$@"
}
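
# Illustrative example (assuming the test's shebang is not python3, which is
# the common case for these Python 2 unit tests):
#
#   test/unit.sh unit frontend/lexer_test.py
#
# runs roughly:
#
#   PYTHONPATH=$PYTHONPATH:vendor/ frontend/lexer_test.py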

test-files() {
  find . -name '_*' -a -prune -o -name '*_test.py' -a -printf '%P\n' | sort
}
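
# test-files prints repo-relative test paths, one per line (e.g.
# frontend/lexer_test.py), pruning anything whose name starts with '_',
# such as _tmp/ and _test/.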

test-manifest() {
  test-files | while read test_path; do
    local minimal=-
    case $test_path in
      # For build/py.sh minimal: if we didn't build fastlex.so,
      # then skip a unit test that will fail.
      pyext/fastlex_test.py|doctools/cmark_test.py)
        minimal=exclude
        ;;

      # Skip obsolete tests
      demo/old/*)
        continue
        ;;

      # Skip OPy and pgen2 tests - they have some PYTHONPATH issues?
      # May want to restore pgen2
      opy/*|pgen2/*)
        continue
        ;;

    esac

    read -r first_line < $test_path
    #echo $first_line
    if [[ $first_line == *python3* ]]; then
      kind=py3
      py_path_more=:  # no-op
    else
      kind=py2
      py_path_more=:vendor/  # for vendor/typing.py
    fi

    echo "$minimal $kind $py_path_more $test_path"
  done
}
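
# Each manifest line has 4 space-separated fields:
#
#   <minimal: - or exclude>  <kind: py2 or py3>  <py_path_more>  <test path>
#
# For example (illustrative):
#
#   - py2 :vendor/ frontend/lexer_test.py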

files-to-count() {
  ### Invoked by metrics/source-code.sh
  test-manifest | while read _ _ _ test_path; do
    echo $test_path
  done
}

run-unit-test() {
  local py_path_more=$1
  local test_path=$2

  PYTHONPATH=${PYTHONPATH}${py_path_more} run-test-bin $test_path '' _test/py-unit
}

all() {
  ### Run unit tests after build/py.sh all

  test-manifest | while read minimal kind py_path_more test_path; do
    run-unit-test $py_path_more $test_path
  done

  echo
  echo "All unit tests passed."
}

minimal() {
  ### Run unit tests after build/py.sh minimal

  test-manifest | while read minimal kind py_path_more test_path; do
    if test $minimal = exclude; then
      continue
    fi

    if test $kind = py3; then
      continue
    fi

    run-unit-test $py_path_more $test_path
  done

  echo
  echo "Minimal unit tests passed."
}

#
# Unlike soil-run, run-for-release makes an HTML page in _release/VERSION.
# Could unify them.

run-test-and-log() {
  local tasks_tsv=$1
  local rel_path=$2

  local log=_tmp/unit/$rel_path.txt
  mkdir -p "$(dirname $log)"

  time-tsv --append --out $tasks_tsv \
    --field $rel_path --field "$rel_path.txt" -- \
    $rel_path >$log 2>&1
}
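
# With the header written by run-all-and-log, each appended row looks roughly
# like this (tab-separated, illustrative values):
#
#   0    0.25    frontend/lexer_test.py    frontend/lexer_test.py.txt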

run-all-and-log() {
  local out_dir=_tmp/unit
  mkdir -p $out_dir
  rm -r -f $out_dir/*

  local tasks_tsv=$out_dir/tasks.tsv

  local status=0

  # Not writing a schema
  tsv-row 'status' 'elapsed_secs' 'test' 'test_HREF' > $tasks_tsv

  # There are no functions here, so disabling errexit is safe.
  # Note: In YSH, this could use shopt { }.
  test-manifest | while read _ kind py_path_more test_path; do

    local status=0
    set +o errexit
    PYTHONPATH=${PYTHONPATH}${py_path_more} run-test-and-log $tasks_tsv $test_path
    status=$?
    set -o errexit

    if test $status -ne 0; then
      echo "FAIL $status - $test_path"
    fi

  done
}
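
# A failing test prints a FAIL line but does not abort the loop; its nonzero
# status is still recorded in tasks.tsv, so it shows up in the report.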

# TODO: It would be nice to have timestamps of the underlying TSV files and a
# timestamp for when the report was generated.  This is useful for benchmarks
# too.

print-report() {
  local in_dir=${1:-_tmp/unit}
  local base_url='../../web'  # published at more_tests.wwz/unit/

  html-head --title 'Oils Unit Test Results' \
    "$base_url/table/table-sort.js" \
    "$base_url/table/table-sort.css" \
    "$base_url/base.css" \
    "$base_url/benchmarks.css"

  # NOTE: Using the benchmarks CSS for now.
  cat <<EOF
  <body class="width40">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
    <h2>Unit Test Results</h2>

EOF

  tsv2html $in_dir/report.tsv

  cat <<EOF
  </body>
</html>
EOF
}

write-report() {
  # Presentation:
  #
  # - elapsed seconds -> milliseconds
  # - Link to test log
  # - Right justify numbers

  local out=_tmp/unit/index.html
  test/report.R unit _tmp/unit _tmp/unit
  print-report > $out
  echo "Wrote $out"
}
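
# Data flow (roughly): run-all-and-log writes _tmp/unit/tasks.tsv,
# test/report.R turns that into _tmp/unit/report.tsv, and print-report renders
# it with tsv2html into _tmp/unit/index.html.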

run-for-release() {
  # Invoked by devtools/release.sh.

  run-all-and-log
  write-report
}

#
# Experimental tsv-stream
#

tsv-stream-one() {
  local rel_path=$1

  local log_file=_tmp/unit/$rel_path.txt
  mkdir -p "$(dirname $log_file)"

  echo
  echo "| ROW test=$rel_path test_HREF=$log_file"

  # TODO: Emit | ADD status=0 elapsed_secs=0.11

  # bash-strict.sh enables errexit; toggle it off so we can capture $status
  set +o errexit
  time-tsv -o /dev/stdout -- $rel_path
  local status=$?
  set -o errexit

  if test $status -ne 0; then
    echo
    echo "*** $rel_path FAILED ***"
    echo
    return 255  # xargs aborts
  fi

}

tsv-stream-all() {
  echo '| HEADER status elapsed_secs test test_HREF'

  time py2-tests T | head -n 20 | xargs -n 1 -- $0 tsv-stream-one
}
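
# Sketch of the resulting stream (hypothetical run, once the | ADD line from
# the TODO in tsv-stream-one is emitted):
#
#   | HEADER status elapsed_secs test test_HREF
#   | ROW test=osh/split_test.py test_HREF=_tmp/unit/osh/split_test.py.txt
#   | ADD status=0 elapsed_secs=0.11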

# Experimental idea: Capture tsv-stream-all, and turn it into two things:
#
# - A TSV file, which can be turned into HTML, and summarized with counts
# - An HTML text string with <a name=""> anchors, which the table can link to
#
# At the terminal, the raw output is still readable, without a filter.
# Although we might want:
#
#   | OK
#   | FAIL
#
# Instead of:
#
#   | ADD status=0
#   | ADD status=1
#
# Also, we currently send output to /dev/null at the terminal, and we save it
# when doing a release.
#
# We might also do something like C++ unit tests:
#
#   RUN osh/split_test.py &> _test/osh/split_test

all-2() {
  ### New harness that uses tsv-stream

  # Use this at the command line, in the CI, and in the release.

  tsv-stream-all | devtools/tsv_stream.py
}

# NOTE: Show options like this:
#   python -m unittest discover -h

task-five "$@"