#!/usr/bin/env bash
#
# Functions to invoke soil/web remotely.
#
# soil/web is deployed manually, and then this runs at HEAD in the repo. Every
# CI run has an up-to-date copy.
#
# Usage:
#   soil/web-worker.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

REPO_ROOT=$(cd "$(dirname "$0")/.." && pwd)

source soil/common.sh
source test/tsv-lib.sh    # tsv2html3
source web/table/html.sh  # table-sort-{begin,end}

# ~/
#   soil-web/                  # executable files
#     doctools/
#       html_head.py
#     soil/
#       web.py
#       web.sh
#   travis-ci.oilshell.org/    # served over HTTP
#     index.html
#     web/
#       base.css
#       soil.css
#     github-jobs/
#       index.html
#       3619/                  # $GITHUB_RUN_NUMBER
#         dev-minimal.wwz
#         cpp-small.wwz
#     srht-jobs/
#       index.html
#       22/                    # $JOB_ID
#         dev-minimal.wwz
#       23/                    # $JOB_ID
#         cpp-small.wwz

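# A minimal illustration of the layout above (not used by CI): print the URL
# where a given GitHub run's results are served.  The run number and job name
# defaults below are placeholders.
show-github-job-url() {
  local run_number=${1:-3619}
  local job_name=${2:-dev-minimal}

  echo "http://$SOIL_HOST/github-jobs/$run_number/$job_name.wwz/"
}
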
sshq() {
  # Don't need commands module as I said here!
  # http://www.oilshell.org/blog/2017/01/31.html
  #
  # This is Bernstein chaining through ssh.

  ssh $SOIL_USER@$SOIL_HOST "$(printf '%q ' "$@")"
}
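
# Local illustration of the %q quoting that sshq relies on; no ssh involved.
# Arguments with spaces survive the extra round of shell evaluation on the
# remote side because they arrive pre-escaped.
demo-sshq-quoting() {
  printf '%q ' soil-web/soil/web.sh event-job-done 'arg with spaces'
  echo
}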

remote-rewrite-jobs-index() {
  sshq soil-web/soil/web.sh rewrite-jobs-index "$@"
}

remote-cleanup-jobs-index() {
  local prefix=$1
  # clean it up for real!
  sshq soil-web/soil/web.sh cleanup-jobs-index "$prefix" false
}

remote-cleanup-status-api() {
  sshq soil-web/soil/web.sh cleanup-status-api false
}

my-scp() {
  scp -o StrictHostKeyChecking=no "$@"
}

my-ssh() {
  ssh -o StrictHostKeyChecking=no "$@"
}

scp-status-api() {
  local run_id=${1:-TEST2-github-run-id}
  local job_name=$2

  local status_file="_soil-jobs/$job_name.status.txt"
  local remote_path="$SOIL_REMOTE_DIR/status-api/github/$run_id/$job_name"

  # We could make this one invocation of something like:
  #   cat $status_file | sshq soil/web.sh PUT $remote_path

  my-ssh $SOIL_USER_HOST "mkdir -p $(dirname $remote_path)"

  # the consumer should check if these are all zero
  # note: the file gets RENAMED
  my-scp $status_file "$SOIL_USER_HOST:$remote_path"
}
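
# Sketch of the "one invocation" idea in the comment above: create the
# directory and write the file in a single ssh round trip, streaming the
# status file over stdin.  Not wired into CI; this function is an
# illustration, not the deployed method.
scp-status-api-single-hop() {
  local run_id=${1:-TEST2-github-run-id}
  local job_name=$2

  local status_file="_soil-jobs/$job_name.status.txt"
  local remote_path="$SOIL_REMOTE_DIR/status-api/github/$run_id/$job_name"

  # dirname runs locally; the remote shell does mkdir and then reads stdin
  my-ssh $SOIL_USER_HOST \
    "mkdir -p $(dirname $remote_path) && cat > $remote_path" < $status_file
}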

scp-results() {
  # could also use Travis known_hosts addon?
  local prefix=$1  # srht- or ''
  shift

  my-scp "$@" "$SOIL_USER_HOST:$SOIL_REMOTE_DIR/${prefix}jobs/"
}

# Dummy that doesn't depend on results
deploy-test-wwz() {
  set -x
  local out_name="$(date +%Y-%m-%d__%H-%M-%S)_test"

  local wwz=$out_name.wwz

  cat >index.html <<EOF
<a href="build/oil-manifest.txt">build/oil-manifest.txt</a> <br/>
<a href="build/opy-manifest.txt">build/opy-manifest.txt</a> <br/>
<a href="env.txt">env.txt</a> <br/>
EOF

  dump-env > env.txt

  zip -q $wwz env.txt index.html build/*.txt

  scp-results '' $wwz
}

format-wwz-index() {
  ### What's displayed in $ID.wwz/index.html

  local job_id=$1
  local tsv=${2:-_tmp/soil/INDEX.tsv}

  soil-html-head "$job_id.wwz"

  cat <<EOF
<body class="width40">
<p id="home-link">
  <a href="..">Up</a>
  | <a href="/">Home</a>
  | <a href="//oilshell.org/">oilshell.org</a>
</p>

<h1>$job_id.wwz</h1>
EOF

  echo '<ul>'
  cat <<EOF
<li>
  <a href="_tmp/soil/INDEX.tsv">_tmp/soil/INDEX.tsv</a>, also copied to
  <a href="../$job_id.tsv">../$job_id.tsv</a>.
</li>
<li>
  <a href="../$job_id.json">../$job_id.json</a>
</li>
EOF

  if test -f _tmp/soil/image.html; then
    echo '
    <li>
      <a href="_tmp/soil/image.html">Container Image Stats</a>
    </li>
    '
  fi

  echo '</ul>'
}
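
# Convenience wrapper to eyeball the index page locally; not part of the CI
# flow, and the 'test-job' id is a placeholder.
preview-wwz-index() {
  local out=_tmp/soil/preview-index.html
  mkdir -p _tmp/soil

  format-wwz-index test-job > $out
  echo "Wrote $out"
}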

format-image-stats() {
  local soil_dir=${1:-_tmp/soil}
  local web_base_url=${2:-'/web'}  # for production

  table-sort-html-head "Image Stats" $web_base_url

  # prints <body>; make it wide for the shell commands
  table-sort-begin "width60"

  # TODO:
  # - Format the TSV as an HTML table
  # - Save the name and tag and show it

  cat <<EOF
<p id="home-link">
  <a href="/">Home</a>
  | <a href="//oilshell.org/">oilshell.org</a>
</p>

<h1>Images Tagged</h1>

<a href="images-tagged.txt">images-tagged.txt</a> <br/>

<h1>Image Layers</h1>
EOF

  tsv2html3 $soil_dir/image-layers.tsv

  # Sum the first column (bytes) and convert to MB.  The non-numeric header
  # field adds 0, so the header is effectively skipped.
  local total_mb=$(awk '
    { sum += $1 }
    END { printf("%.1f", sum / 1000000) }
  ' $soil_dir/image-layers.tsv)

  echo "<p>Total Size: <b>$total_mb MB</b></p>"

  cat <<EOF
<h2>Raw Data</h2>

<a href="image-layers.txt">image-layers.txt</a> <br/>
<a href="image-layers.tsv">image-layers.tsv</a> <br/>
</body>
</html>
EOF

  table-sort-end image-layers
}
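
# Quick local check of the byte-summing awk above.  The two layer sizes are
# made-up sample values: 1500000 + 500000 bytes should print "2.0".
test-image-total-mb() {
  local dir=_tmp/soil-web-worker-test
  mkdir -p $dir

  printf '%s\t%s\n' num_bytes name \
    1500000 layer-a \
    500000 layer-b > $dir/image-layers.tsv

  awk '
    { sum += $1 }
    END { printf("%.1f\n", sum / 1000000) }
  ' $dir/image-layers.tsv
}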

make-job-wwz() {
  local job_id=${1:-test-job}

  local wwz=$job_id.wwz

  # Doesn't exist when we're not using a container
  if test -f _tmp/soil/image-layers.tsv; then
    format-image-stats _tmp/soil > _tmp/soil/image.html
  fi

  format-wwz-index $job_id > index.html

  # _tmp/soil: logs are in _tmp, see soil/worker.sh
  # web/     : spec test HTML references this.  Note that the index references
  #            /web/{base,soil}.css, outside the .wwz; osh-summary.html uses
  #            table-sort.js and ajax.js.
  #
  # TODO:
  # - Could move _tmp/{spec,stateful,syscall} etc. to _test
  # - Create _tmp/benchmarks/{compute,gc,gc-cachegrind,osh-parser,mycpp-examples,...}
  #   - would require release/$VERSION/pub/benchmarks.wwz, like we have
  #     pub/metrics.wwz, for consistent links

  zip -q -r $wwz \
    index.html \
    _build/wedge/logs \
    _test \
    _tmp/{soil,spec,src-tree-www,wild-www,stateful,process-table,syscall,benchmark-data,metrics,mycpp-examples,compute,gc,gc-cachegrind,perf,vm-baseline,osh-runtime,osh-parser,host-id,shell-id} \
    _tmp/uftrace/{index.html,stage2} \
    web/{base,src-tree,spec-tests,spec-cpp,line-counts,benchmarks,wild}.css web/ajax.js \
    web/table/table-sort.{css,js} \
    _release/oil*.tar _release/*.xshar _release/VERSION/
}
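
# Optional sanity check: list what actually ended up in the archive.  Assumes
# make-job-wwz was just run with the same (default) job id.
list-job-wwz() {
  local job_id=${1:-test-job}

  unzip -l $job_id.wwz
}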

test-collect-json() {
  soil/collect_json.py _tmp/soil PATH
}

deploy-job-results() {
  ### Copy .wwz, .tsv, and .json to a new dir

  local prefix=$1    # e.g. 'github-' in example.com/github-jobs/
  local subdir=$2    # e.g. 1234 in example.com/github-jobs/1234/ -- make this dir
  local job_name=$3  # e.g. foo in example.com/github-jobs/1234/foo.wwz
  shift 3
  # rest of args are env var names for collect_json.py

  # writes $job_name.wwz
  make-job-wwz $job_name

  # Debug permissions.  When using docker rather than podman, these dirs can
  # be owned by root and we can't write into them.
  ls -l -d _tmp/soil
  ls -l _tmp/soil

  date +%s > _tmp/soil/task-deploy-start-time.txt

  soil/collect_json.py _tmp/soil "$@" > $job_name.json

  # So we don't have to unzip it
  cp _tmp/soil/INDEX.tsv $job_name.tsv

  local remote_dest_dir="$SOIL_REMOTE_DIR/${prefix}jobs/$subdir"
  my-ssh $SOIL_USER_HOST "mkdir -p $remote_dest_dir"

  # Do JSON last because that's what 'list-json' looks for
  my-scp $job_name.{wwz,tsv,json} "$SOIL_USER_HOST:$remote_dest_dir"

  log ''
  log 'View CI results here:'
  log ''
  log "http://$SOIL_HOST/${prefix}jobs/$subdir/"
  log "http://$SOIL_HOST/${prefix}jobs/$subdir/$job_name.wwz/"
  log ''
}

publish-cpp-tarball() {
  local prefix=${1:-'github-'}  # e.g. example.com/github-jobs/

  # Example of the dir structure we need to clean up:
  #
  # srht-jobs/
  #   git-$hash/
  #     index.html
  #     oils-for-unix.tar
  # github-jobs/
  #   git-$hash/
  #     oils-for-unix.tar
  #
  # Cleanup algorithm (not done here; see the sketch below):
  # 1. List all job JSON, finding commit date and commit hash
  # 2. Get the OLDEST commit dates, e.g. all except the newest 50
  # 3. Delete the commit hash dirs associated with them

  # Subtle: without inherit_errexit, a failure inside the command substitution
  # below would not stop the script.
  shopt -s inherit_errexit

  local git_commit_dir
  git_commit_dir=$(git-commit-dir "$prefix")

  my-ssh $SOIL_USER_HOST "mkdir -p $git_commit_dir"

  local tar=_release/oils-for-unix.tar

  # Permission denied because of host/guest issue
  #local tar_gz=$tar.gz
  #gzip -c $tar > $tar_gz

  # Avoid a race between concurrent jobs: upload under a unique temp name,
  # then atomically rename.  Crappy UUID: seconds since epoch, plus PID.
  local timestamp
  timestamp=$(date +%s)

  local temp_name="tmp-$timestamp-$$.tar"

  my-scp $tar "$SOIL_USER_HOST:$git_commit_dir/$temp_name"

  my-ssh $SOIL_USER_HOST \
    "mv -v $git_commit_dir/$temp_name $git_commit_dir/oils-for-unix.tar"

  log 'Tarball:'
  log ''
  log "http://$git_commit_dir"
}
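
# Hedged sketch of the cleanup described in publish-cpp-tarball.  It
# approximates "oldest commit dates" with directory mtime instead of parsing
# the job JSON, and it only PRINTS candidates rather than deleting anything.
# The keep count and the git-*/ naming are assumptions.
sketch-cleanup-tarball-dirs() {
  local prefix=${1:-'github-'}
  local keep=${2:-50}

  # ls -t: newest first; tail -n +$((keep + 1)): everything after the newest $keep
  my-ssh $SOIL_USER_HOST \
    "cd $SOIL_REMOTE_DIR/${prefix}jobs && ls -t -d git-*/ | tail -n +$((keep + 1))"
}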

remote-event-job-done() {
  ### "Client side" handler: a job calls this when it's done

  log "remote-event-job-done"

  # Deployed code dir
  sshq soil-web/soil/web.sh event-job-done "$@"
}

filename=$(basename "$0")
if test "$filename" = 'web-worker.sh'; then
  "$@"
fi
|