#!/usr/bin/env bash
#
# Measure how fast the OSH parser is.
#
# Usage:
#   benchmarks/osh-parser.sh <function name>
#
# Examples:
#   benchmarks/osh-parser.sh soil-run
#   QUICKLY=1 benchmarks/osh-parser.sh soil-run

set -o nounset
set -o pipefail
set -o errexit

REPO_ROOT=$(cd "$(dirname "$0")/.."; pwd)  # tsv-lib.sh uses this
readonly REPO_ROOT

source benchmarks/common.sh  # die
source benchmarks/cachegrind.sh  # with-cachegrind
source test/tsv-lib.sh  # tsv2html
source test/common.sh  # die

# TODO: The raw files should be published, both in
# ~/git/oilshell/benchmark-data and in the /release/ hierarchy?
readonly BASE_DIR=_tmp/osh-parser
readonly SORTED=$BASE_DIR/tmp/sorted.txt

write-sorted-manifest() {
  local files=${1:-benchmarks/osh-parser-files.txt}
  local counts=$BASE_DIR/tmp/line-counts.txt
  local csv_out=$2
  local sep=${3:-','}  # CSV or TSV

  # Remove comments and sort by line count
  grep -v '^#' $files | xargs wc -l | sort -n > $counts

  # Raw list of paths
  cat $counts | awk '$2 != "total" { print $2 }' > $SORTED

  # Make a CSV file from wc output
  cat $counts | awk -v sep="$sep" '
      BEGIN { print "num_lines" sep "path" }
      $2 != "total" { print $1 sep $2 }' \
      > $csv_out
}
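
# For illustration: given a manifest with hypothetical files a.sh (10 lines)
# and b.sh (300 lines), $SORTED would list a.sh then b.sh, and $csv_out would
# contain:
#
#   num_lines,path
#   10,a.sh
#   300,b.sh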
 | 
# Called by xargs with a task row.
parser-task() {
  local out_dir=$1  # output
  local job_id=$2
  local host=$3
  local host_hash=$4
  local sh_path=$5
  local shell_hash=$6
  local script_path=$7

  echo "--- TIME $sh_path $script_path ---"

  local times_out="$out_dir/$host.$job_id.times.csv"

  local shell_name
  shell_name=$(basename $sh_path)

  # Can't use array because of set -u bug!!!  Only fixed in bash 4.4.
  extra_args=''
  case "$shell_name" in
    osh|oils-for-unix.*)
      extra_args='--ast-format none'
      ;;
  esac
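
  # For reference, a sketch of the bug mentioned above: under set -u, bash
  # before 4.4 treats expansion of an empty array as an unbound variable:
  #
  #   set -u; empty=(); echo "${empty[@]}"  # bash < 4.4: 'unbound variable'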
 | 
  # Each CSV row records the exit code, time in seconds, host_hash,
  # shell_hash, and path.  \0 separators would have been nice here!
  # TODO: TSV
  benchmarks/time_.py \
    --append \
    --output $times_out \
    --rusage \
    --field "$host" --field "$host_hash" \
    --field "$shell_name" --field "$shell_hash" \
    --field "$script_path" -- \
    "$sh_path" -n $extra_args "$script_path" || echo FAILED
}
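
# Note: each run appends one CSV row to $times_out, consisting of the exit
# status and timing/rusage measurements from time_.py, followed by the five
# --field values above (host, host_hash, shell_name, shell_hash, script_path).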
 | 
# Called by xargs with a task row.
# NOTE: This is very similar to the function above, except that we add
# cachegrind.  We could probably consolidate these.
cachegrind-task() {
  local out_dir=$1  # output
  local job_id=$2
  local host_name=$3
  local unused2=$4
  local sh_path=$5
  local shell_hash=$6
  local script_path=$7

  echo "--- CACHEGRIND $sh_path $script_path ---"

  local host_job_id="$host_name.$job_id"

  # NOTE: This has to match the path that the header was written to
  local times_out="$out_dir/$host_job_id.cachegrind.tsv"

  local cachegrind_out_dir="$host_job_id.cachegrind"
  mkdir -p $out_dir/$cachegrind_out_dir

  local shell_name
  shell_name=$(basename $sh_path)

  local script_name
  script_name=$(basename $script_path)

  # RELATIVE PATH
  local cachegrind_out_path="${cachegrind_out_dir}/${shell_name}-${shell_hash}__${script_name}.txt"

  # Can't use array because of set -u bug!!!  Only fixed in bash 4.4.
  extra_args=''
  case "$shell_name" in
    osh|oils-for-unix.*)
      extra_args="--ast-format none"
      ;;
  esac

  benchmarks/time_.py \
    --tsv \
    --append \
    --output $times_out \
    --rusage \
    --field "$shell_name" --field "$shell_hash" \
    --field "$script_path" \
    --field "$cachegrind_out_path" \
    -- \
    $0 with-cachegrind $out_dir/$cachegrind_out_path \
    "$sh_path" -n $extra_args "$script_path" || echo FAILED
}
 | 
# For each shell, print one task row per script path.
print-tasks() {
  local provenance=$1
  shift
  # rest are shells

  # Append 1 file field to each row of 5 provenance fields.
  cat $provenance | filter-provenance "$@" |
  while read fields; do
    if test -n "${QUICKLY:-}"; then
      # Quick test
      head -n 2 $SORTED | xargs -n 1 -- echo "$fields"
    else
      cat $SORTED | xargs -n 1 -- echo "$fields"
    fi
  done
}
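
# A task row printed above has 6 fields, e.g. (hypothetical values):
#
#   <job_id> lenny deadbeef /bin/bash cafef00d benchmarks/testdata/configure
#
# xargs -n $NUM_TASK_COLS later splits each row into one parser-task or
# cachegrind-task invocation.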
 | 
cachegrind-parse-configure-coreutils() {
  ### Similar to benchmarks/gc, benchmarks/uftrace

  local bin=_bin/cxx-opt/oils-for-unix
  ninja $bin
  local out=_tmp/parse.configure-coreutils.txt

  local -a cmd=(
    $bin --ast-format none -n
    benchmarks/testdata/configure-coreutils )

  time "${cmd[@]}"

  time cachegrind $out "${cmd[@]}"

  echo
  cat $out
}
 | 
cachegrind-demo() {
  #local sh=bash
  local sh=zsh

  local out_dir=_tmp/cachegrind

  mkdir -p $out_dir

  # Notes:
  # - not passing --trace-children (which follows execvpe)
  # - passing --xml=yes gives an error: cachegrind doesn't support XML
  # - there are two outputs: a log file and a details file

  valgrind --tool=cachegrind \
    --log-file=$out_dir/log.txt \
    --cachegrind-out-file=$out_dir/details.txt \
    -- $sh -c 'echo hi'

  echo
  head -n 20 $out_dir/*.txt
}
 | 
readonly NUM_TASK_COLS=6  # input columns: 5 from provenance, 1 for file

# Figure out all tasks to run, and run them.  When called from auto.sh, $2
# should be the ../benchmark-data repo.
measure() {
  local provenance=$1
  local host_job_id=$2
  local out_dir=${3:-$BASE_DIR/raw}
  local osh_cpp=${4:-$OSH_CPP_BENCHMARK_DATA}

  local times_out="$out_dir/$host_job_id.times.csv"
  local lines_out="$out_dir/$host_job_id.lines.csv"

  mkdir -p $BASE_DIR/{tmp,raw,stage1} $out_dir

  # Files that we should measure.  Exploded into tasks.
  write-sorted-manifest '' $lines_out

  # Write the header of the CSV file that is appended to.
  # TODO: TSV
  benchmarks/time_.py --print-header \
    --rusage \
    --field host_name --field host_hash \
    --field shell_name --field shell_hash \
    --field path \
    > $times_out

  local tasks=$BASE_DIR/tasks.txt
  print-tasks $provenance "${SHELLS[@]}" $osh_cpp > $tasks

  # Run them all
  cat $tasks | xargs -n $NUM_TASK_COLS -- $0 parser-task $out_dir
}
 | 
measure-cachegrind() {
  local provenance=$1
  local host_job_id=$2
  local out_dir=${3:-$BASE_DIR/raw}
  local osh_cpp=${4:-$OSH_CPP_BENCHMARK_DATA}

  local cachegrind_tsv="$out_dir/$host_job_id.cachegrind.tsv"
  local lines_out="$out_dir/$host_job_id.lines.tsv"

  mkdir -p $BASE_DIR/{tmp,raw,stage1} $out_dir

  write-sorted-manifest '' $lines_out $'\t'  # TSV

  # TODO: This header is fragile.  Every task should print its own file with a
  # header, and then we can run them in parallel, and join them with
  # devtools/csv_concat.py

  benchmarks/time_.py --tsv --print-header \
    --rusage \
    --field shell_name --field shell_hash \
    --field path \
    --field cachegrind_out_path \
    > $cachegrind_tsv

  local ctasks=$BASE_DIR/cachegrind-tasks.txt

  # zsh weirdly forks during zsh -n, which complicates our cachegrind
  # measurement.  So just ignore it.  (This can be seen with
  # strace -e fork -f -- zsh -n $file)
  print-tasks $provenance bash dash mksh $osh_cpp > $ctasks

  cat $ctasks | xargs -n $NUM_TASK_COLS -- $0 cachegrind-task $out_dir
}
 | 
#
# Data Preparation and Analysis
#

stage1-cachegrind() {
  local raw_dir=$1
  local single_machine=$2
  local out_dir=$3
  local raw_data_csv=$4

  local maybe_host
  if test -n "$single_machine"; then
    # CI: _tmp/osh-parser/raw.no-host.$job_id
    maybe_host='no-host'
  else
    # release: ../benchmark-data/osh-parser/raw.lenny.$job_id
    #maybe_host=$(hostname)
    maybe_host=$MACHINE1  # lenny
  fi

  # Only runs on one machine
  local -a sorted=( $raw_dir/$maybe_host.*.cachegrind.tsv )
  local tsv_in=${sorted[-1]}  # latest one

  devtools/tsv_column_from_files.py \
    --new-column irefs \
    --path-column cachegrind_out_path \
    --extract-group-1 'I[ ]*refs:[ ]*([\d,]+)' \
    --remove-commas \
    $tsv_in > $out_dir/cachegrind.tsv

  echo $tsv_in >> $raw_data_csv
}
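
# The --extract-group-1 regex above matches the 'I refs' summary line that
# cachegrind writes, something like (hypothetical count):
#
#   I   refs:      1,462,799
#
# and --remove-commas turns the captured group into a plain integer for the
# new irefs column.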
 | 
stage1() {
  local raw_dir=${1:-$BASE_DIR/raw}
  local single_machine=${2:-}

  local out=$BASE_DIR/stage1
  mkdir -p $out

  # Construct a one-column CSV file
  local raw_data_csv=$out/raw-data.csv
  echo 'path' > $raw_data_csv

  stage1-cachegrind $raw_dir "$single_machine" $out $raw_data_csv

  local lines_csv=$out/lines.csv

  local -a raw=()
  if test -n "$single_machine"; then
    local -a a=($raw_dir/$single_machine.*.times.csv)
    raw+=( ${a[-1]} )
    echo ${a[-1]} >> $raw_data_csv

    # They are the same, output one of them.
    cat $raw_dir/$single_machine.*.lines.csv > $lines_csv
  else
    # Globs are in lexicographical order, which works for our dates.
    local -a a=($raw_dir/$MACHINE1.*.times.csv)
    local -a b=($raw_dir/$MACHINE2.*.times.csv)

    raw+=( ${a[-1]} ${b[-1]} )
    {
      echo ${a[-1]}
      echo ${b[-1]}
    } >> $raw_data_csv

    # Verify that the files are equal, and pass one of them.
    local -a c=($raw_dir/$MACHINE1.*.lines.csv)
    local -a d=($raw_dir/$MACHINE2.*.lines.csv)

    local left=${c[-1]}
    local right=${d[-1]}

    if ! diff $left $right; then
      die "Benchmarks were run on different files ($left != $right)"
    fi

    # They are the same, output one of them.
    cat $left > $lines_csv
  fi

  local times_csv=$out/times.csv
  csv-concat "${raw[@]}" > $times_csv

  head $out/*
  wc -l $out/*
}
 | 
# TODO:
# - maybe rowspan for hosts: flanders/lenny
#   - does that interfere with sorting?
#
# NOTE: not bothering to make it sortable now.  Just using the CSS.

print-report() {
  local in_dir=$1

  benchmark-html-head 'OSH Parser Performance'

  cat <<EOF
  <body class="width60">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
EOF

  cmark <<'EOF'
## OSH Parser Performance

We time `$sh -n $file` for various files under various shells, then run the
same tasks under cachegrind for stable metrics.

Source code: [oil/benchmarks/osh-parser.sh](https://github.com/oilshell/oil/tree/master/benchmarks/osh-parser.sh)

[Raw files](-wwz-index)

### Summary

#### Instructions Per Line (via cachegrind)

Lower numbers are generally better, but each shell recognizes a different
language, and OSH uses a more thorough parsing algorithm.  In **thousands** of
"I refs".

EOF
  tsv2html $in_dir/cachegrind_summary.tsv

  cmark <<'EOF'

(zsh isn't measured because `zsh -n` unexpectedly forks.)

#### Average Parsing Rate, Measured on Two Machines (lines/ms)

Shell startup time is included in the elapsed time measurements, but long files
are chosen to minimize its effect.
EOF
  csv2html $in_dir/summary.csv

  cmark <<< '### Per-File Measurements'
  echo

  # Flat tables for CI
  if test -f $in_dir/times_flat.tsv; then
    cmark <<< '#### Time and Memory'
    echo

    tsv2html $in_dir/times_flat.tsv
  fi
  if test -f $in_dir/cachegrind_flat.tsv; then
    cmark <<< '#### Instruction Counts'
    echo

    tsv2html $in_dir/cachegrind_flat.tsv
  fi

  # Breakdowns for the release
  if test -f $in_dir/instructions.tsv; then
    cmark <<< '#### Instructions Per Line (in thousands)'
    echo
    tsv2html $in_dir/instructions.tsv
  fi

  if test -f $in_dir/elapsed.csv; then
    cmark <<< '#### Elapsed Time (milliseconds)'
    echo
    csv2html $in_dir/elapsed.csv
  fi

  if test -f $in_dir/rate.csv; then
    cmark <<< '#### Parsing Rate (lines/ms)'
    echo
    csv2html $in_dir/rate.csv
  fi

  if test -f $in_dir/max_rss.csv; then
    cmark <<'EOF'
### Memory Usage (Max Resident Set Size in MB)

Again, OSH uses a **different algorithm** (and language) than POSIX shells.  It
builds an AST in memory rather than just validating the code line-by-line.

EOF
    csv2html $in_dir/max_rss.csv
  fi

  cmark <<EOF
### Shell and Host Details
EOF
  csv2html $in_dir/shells.csv
  csv2html $in_dir/hosts.csv

  cmark <<EOF
### Raw Data
EOF
  csv2html $in_dir/raw-data.csv

  cmark << 'EOF'

  </body>
</html>
EOF
}
 | 
soil-run() {
  ### Run it on just this machine, and make a report

  rm -r -f $BASE_DIR
  mkdir -p $BASE_DIR

  local -a osh_bin=( $OSH_CPP_NINJA_BUILD )
  ninja "${osh_bin[@]}"

  local single_machine='no-host'

  local job_id
  job_id=$(benchmarks/id.sh print-job-id)

  benchmarks/id.sh shell-provenance-2 \
    $single_machine $job_id _tmp \
    bash dash bin/osh "${osh_bin[@]}"

  # TODO: measure* should use print-tasks | run-tasks
  local provenance=_tmp/provenance.txt
  local host_job_id="$single_machine.$job_id"

  measure $provenance $host_job_id '' $OSH_CPP_NINJA_BUILD

  measure-cachegrind $provenance $host_job_id '' $OSH_CPP_NINJA_BUILD

  # TODO: R can use this TSV file
  cp -v _tmp/provenance.tsv $BASE_DIR/stage1/provenance.tsv

  # Trivial concatenation for 1 machine
  stage1 '' $single_machine

  benchmarks/report.sh stage2 $BASE_DIR

  benchmarks/report.sh stage3 $BASE_DIR
}

"$@"