OILS / core / executor.py View on Github | oilshell.org

715 lines, 388 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from _devbuild.gen.value_asdl import value
18from builtin import hay_ysh
19from core import dev
20from core import error
21from core import process
22from core.error import e_die, e_die_status
23from core import pyos
24from core import pyutil
25from core import state
26from core import ui
27from core import vm
28from frontend import consts
29from frontend import lexer
30from mycpp.mylib import log
31
32import posix_ as posix
33
34from typing import cast, Dict, List, Optional, TYPE_CHECKING
35if TYPE_CHECKING:
36 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
37 StatusArray)
38 from _devbuild.gen.syntax_asdl import command_t
39 from builtin import trap_osh
40 from core import optview
41 from core import state
42 from core.vm import _Builtin
43
44_ = log
45
46
class _ProcessSubFrame(object):
    """Tracks the process subs of one word-evaluation scope.

    Example: diff <(cat 1) <(cat 2) > >(tac) has three entries in one frame.
    """

    def __init__(self):
        # type: () -> None

        # These objects appear unconditionally in the main loop, and aren't
        # commonly used, so we manually optimize [] into None.

        self._to_wait = []  # type: List[process.Process]
        self._to_close = []  # type: List[int]  # file descriptors
        self._locs = []  # type: List[loc_t]
        self._modified = False

    def WasModified(self):
        # type: () -> bool
        """True if Append() was ever called on this frame."""
        return self._modified

    def Append(self, p, fd, status_loc):
        # type: (process.Process, int, loc_t) -> None
        """Register a started process sub: wait on p, close fd later."""
        self._modified = True
        self._to_wait.append(p)
        self._to_close.append(fd)
        self._locs.append(status_loc)

    def MaybeWaitOnProcessSubs(self, waiter, status_array):
        # type: (process.Waiter, StatusArray) -> None
        """Close our pipe ends, then wait for every registered process.

        Statuses and blame locations are written into status_array.
        """
        # Close first so the children see EOF / EPIPE and can finish.
        for fd in self._to_close:
            posix.close(fd)

        # Wait in the same order that they were evaluated. That seems fine.
        statuses = []  # type: List[int]
        blame_locs = []  # type: List[loc_t]
        for i, proc in enumerate(self._to_wait):
            #log('waiting for %s', proc)
            statuses.append(proc.Wait(waiter))
            blame_locs.append(self._locs[i])

        status_array.codes = statuses
        status_array.locs = blame_locs
90
91
# Bit flags for RunSimpleCommand
DO_FORK = 1 << 1  # fork before exec; absent for 'exec'-style tail of a pipeline
NO_CALL_PROCS = 1 << 2  # 'command ls' suppresses function lookup
USE_DEFAULT_PATH = 1 << 3  # 'command -p ls' looks up ls in DEFAULT_PATH

# Fallback $PATH for 'command -p'.  Copied from var.c in dash.
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]
102
103
class ShellExecutor(vm._Executor):
    """An executor combined with the OSH language evaluators in osh/ to create
    a shell interpreter."""

    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: Dict[str, value.Proc]
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, _Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        """Wire up the executor with the shell's shared runtime state.

        Note: self.cmd_ev (the CommandEvaluator) is wired up after
        construction; see CheckCircularDeps().
        """
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins  # maps builtin_id -> _Builtin implementation
        self.search_path = search_path  # $PATH lookup with caching
        self.ext_prog = ext_prog  # fork/exec of external commands
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # sleep 5 & puts a (PID, job#) entry here. And then "jobs" displays it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        # One _ProcessSubFrame per word-evaluation scope; see PushProcessSub()
        # and PopProcessSub().
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        # Unmodified frames are recycled here to avoid allocations.
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle to it
        # through the evaluation of the last node back to ourselves for execution.
        # We use this handle to make sure any processes forked for the last part of
        # the pipeline are placed into the same process group as the rest of the
        # pipeline. Since there is, by design, only ever one foreground pipeline and
        # any pipelines started within subshells run in their parent's process
        # group, we only need one pointer here, not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]
156
    def CheckCircularDeps(self):
        # type: () -> None
        # The CommandEvaluator and this executor reference each other, so
        # cmd_ev is assigned after construction.  Verify the wiring happened.
        assert self.cmd_ev is not None
160
    def _MakeProcess(self, node, inherit_errexit, inherit_errtrace):
        # type: (command_t, bool, bool) -> process.Process
        """Assume we will run the node in another process.

        Returns a process.Process wrapping a SubProgramThunk; the caller is
        responsible for starting it (StartProcess / RunProcess / pipelines).

        Raises a fatal error for invalid control flow like 'break | less',
        since the child could never affect the parent's loop.
        """
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            # mycpp idiom: cast the general command_t to the specific variant.
            node = cast(command.ControlFlow, UP_node)
            # Pipeline or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            # 'exit' is the exception: it's meaningful in a child process.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors? This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose. Other notes:
        #
        # - We might want errors to fit on a single line so they don't get #
        # interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        # and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev,
                                        node,
                                        self.trap_state,
                                        self.multi_trace,
                                        inherit_errexit,
                                        inherit_errtrace)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p
197
    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin.

        Also called by the 'builtin' builtin.

        Returns the builtin's exit status, 1 on builtin I/O error (including
        errors flushing stdout afterward), or 2 on a usage error.
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        io_errors = []  # type: List[error.IOError_OSError]
        # ctx_FlushStdout collects flush errors in io_errors rather than
        # raising, so we can report them with the builtin's location below.
        with vm.ctx_FlushStdout(io_errors):
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except (IOError, OSError) as e:
                    self.errfmt.PrintMessage(
                        '%s builtin I/O error: %s' %
                        (cmd_val.argv[0], pyutil.strerror(e)),
                        cmd_val.arg_locs[0])
                    return 1
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    return 2  # consistent error code for usage error

        if len(io_errors):  # e.g. disk full, ulimit
            self.errfmt.PrintMessage(
                '%s builtin I/O error: %s' %
                (cmd_val.argv[0], pyutil.strerror(io_errors[0])),
                cmd_val.arg_locs[0])
            return 1

        return status
235
    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Resolution order for argv[0]: assignment builtins (rejected here),
        special builtins, procs (unless NO_CALL_PROCS), hay names, normal
        builtins, then external commands via $PATH (or DEFAULT_PATH with
        USE_DEFAULT_PATH).  With DO_FORK we fork and wait; without it we
        exec() in this process and never return.

        Possible variations:
        - YSH might have different, simpler rules. No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # command readonly is disallowed, for technical reasons. Could relax it
            # later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            # e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            # TODO: Look shell functions in self.sh_funcs, but procs are
            # value.Proc in the var namespace.
            # Pitfall: What happens if there are two of the same name? I guess
            # that's why you have = and 'type' inspect them

            proc_node = self.procs.get(arg0)
            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    # Hide the ERR trap while the proc runs, per errtrace.
                    with state.ctx_HideErrTrap(self.trap_state, self.exec_opts.errtrace()):
                        # NOTE: Functions could call 'exit 42' directly, etc.
                        status = self.cmd_ev.RunProc(proc_node, cmd_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins? Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.typed_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found (OILS-ERR-100)' % arg0, arg0_loc)
            return 127  # conventional "command not found" status

        # Normal case: ls /
        if run_flags & DO_FORK:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    # We're the last part of a foreground pipeline: join its
                    # process group rather than leading our own.
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')
382
    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For & etc.

        Starts the command (or each part of a pipeline) in the background,
        records it in the job list, and sets $! via mem.last_bg_pid.
        Always returns status 0.
        """
        # Special case for pipeline. There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be children
        # of the shell process, or you can make one process in group be the
        # ancestor of all the other processes in that group. The sample shell
        # program presented in this chapter uses the first approach because it
        # makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            # Unlike RunPipeline(), EVERY part runs in a child process, since
            # the shell must not block on any of them.
            for child in node.children:
                p = self._MakeProcess(child, True, self.exec_opts.errtrace())
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications, we
            # have to register SIGCHLD. But then that introduces race conditions.
            # If we haven't called Register yet, then we won't know who to notify.

            p = self._MakeProcess(node, True, self.exec_opts.errtrace())
            if self.job_control.Enabled():
                # Background jobs lead their own process group.
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            self.job_list.AddJob(p)  # show in 'jobs' list
        return 0
427
    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline like 'ls | wc -l'.

        The first n-1 parts run in child processes; the LAST part runs in
        this shell process (so 'echo foo | read line' can set a variable).
        Per-part statuses and blame locations are written into status_out.
        """
        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child, True, self.exec_opts.errtrace())
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS. 'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            # Hand the pipeline to RunSimpleCommand (via evaluation of the
            # last part) so any process it forks joins this process group.
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs
461
462 def RunSubshell(self, node):
463 # type: (command_t) -> int
464 p = self._MakeProcess(node, True, self.exec_opts.errtrace())
465 if self.job_control.Enabled():
466 p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))
467
468 return p.RunProcess(self.waiter, trace.ForkWait)
469
    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """Run $(...): fork a child with stdout on a pipe, read until EOF.

        Returns the captured output with trailing newlines stripped.  With
        command_sub_errexit, a nonzero child status raises ErrExit; otherwise
        the status is recorded for bash-compatible errexit checking.
        """
        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places. Only one of them turns off _allow_process_sub
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Redirect:
            redir_node = cast(command.Redirect, node)
            # Detect '< file'
            if (len(redir_node.redirects) == 1 and
                    redir_node.redirects[0].op.id == Id.Redir_Less and
                    redir_node.child.tag() == command_e.NoOp):

                # Change it to __cat < file.
                # TODO: could be 'internal cat' (issue #1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])

                # Blame < because __cat has no location
                blame_tok = redir_node.redirects[0].op
                simple = command.Simple(blame_tok, [], [cat_word], None, None,
                                        True)

                # MUTATE redir node so it's like $(<file _cat)
                redir_node.child = simple

        p = self._MakeProcess(node, self.exec_opts.inherit_errexit(),
                              self.exec_opts.errtrace())
        # Shell quirk: Command subs remain part of the shell's process group, so we
        # don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        # Read the child's entire output; pyos.Read appends to chunks.
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry on signal interruption
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'osh I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)

        # OSH has the concept of aborting in the middle of a WORD. We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash. Example:
            #
            # a=$(false)
            # echo foo # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return ''.join(chunks).rstrip('\n')
559
    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD path.

        Returns that /dev/fd path; the process and our pipe end are recorded
        in the current _ProcessSubFrame for cleanup in PopProcessSub().

        Life cycle of a process substitution:

        1. Start with this code

        diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word. The
        NormalWordEvaluator calls this method, RunProcessSub(), which does 3 things:

        a. Create a pipe(), getting r and w
        b. Starts the seq process, which inherits r and w
        It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
        and close(r)
        c. Close the w FD, because neither the shell or 'diff' will write to it.
        However we must retain 'r', because 'diff' hasn't opened /dev/fd yet!
        d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line of
        diff, which looks like

        diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes. But it actually
        calls open() on both files passed as argv. (I think this is fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes. Neither we nor
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
        shopt -s process_sub_fail
        _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child, True, self.exec_opts.errtrace())

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash and
            # zsh. Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        # The current word-evaluation scope; waits/closes happen when it pops.
        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does. The calling
        # program needs to read() before we can wait, e.g.
        # diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()
670
671 def PushRedirects(self, redirects, err_out):
672 # type: (List[RedirValue], List[error.IOError_OSError]) -> None
673 if len(redirects) == 0: # Optimized to avoid allocs
674 return
675 self.fd_state.Push(redirects, err_out)
676
677 def PopRedirects(self, num_redirects, err_out):
678 # type: (int, List[error.IOError_OSError]) -> None
679 if num_redirects == 0: # Optimized to avoid allocs
680 return
681 self.fd_state.Pop(err_out)
682
683 def PushProcessSub(self):
684 # type: () -> None
685 if len(self.clean_frame_pool):
686 # Optimized to avoid allocs
687 new_frame = self.clean_frame_pool.pop()
688 else:
689 new_frame = _ProcessSubFrame()
690 self.process_sub_stack.append(new_frame)
691
692 def PopProcessSub(self, compound_st):
693 # type: (StatusArray) -> None
694 """This method is called by a context manager, which means we always
695 wait() on the way out, which I think is the right thing.
696
697 We don't always set _process_sub_status, e.g. if some fatal
698 error occurs first, but we always wait.
699 """
700 frame = self.process_sub_stack.pop()
701 if frame.WasModified():
702 frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
703 else:
704 # Optimized to avoid allocs
705 self.clean_frame_pool.append(frame)
706
707 # Note: the 3 lists in _ProcessSubFrame are hot in our profiles. It would
708 # be nice to somehow "destroy" them here, rather than letting them become
709 # garbage that needs to be traced.
710
711 # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
712 # Pop(), and Top() of VALUES rather than GC objects?
713
714
715# vim: sw=4