OILS / core / executor.py View on Github | oilshell.org

721 lines, 391 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace, scope_e
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from _devbuild.gen.value_asdl import value, value_e
18from builtin import hay_ysh
19from core import dev
20from core import error
21from core import process
22from core.error import e_die, e_die_status
23from core import pyos
24from core import pyutil
25from core import state
26from core import ui
27from core import vm
28from frontend import consts
29from frontend import lexer
30from mycpp.mylib import log
31
32import posix_ as posix
33
34from typing import cast, Dict, List, Optional, TYPE_CHECKING
35if TYPE_CHECKING:
36 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
37 StatusArray)
38 from _devbuild.gen.syntax_asdl import command_t
39 from builtin import trap_osh
40 from core import optview
41 from core import state
42 from core.vm import _Builtin
43
44_ = log
45
46
47class _ProcessSubFrame(object):
48 """To keep track of diff <(cat 1) <(cat 2) > >(tac)"""
49
50 def __init__(self):
51 # type: () -> None
52
53 # These objects appear unconditionally in the main loop, and aren't
54 # commonly used, so we manually optimize [] into None.
55
56 self._to_wait = [] # type: List[process.Process]
57 self._to_close = [] # type: List[int] # file descriptors
58 self._locs = [] # type: List[loc_t]
59 self._modified = False
60
61 def WasModified(self):
62 # type: () -> bool
63 return self._modified
64
65 def Append(self, p, fd, status_loc):
66 # type: (process.Process, int, loc_t) -> None
67 self._modified = True
68
69 self._to_wait.append(p)
70 self._to_close.append(fd)
71 self._locs.append(status_loc)
72
73 def MaybeWaitOnProcessSubs(self, waiter, status_array):
74 # type: (process.Waiter, StatusArray) -> None
75
76 # Wait in the same order that they were evaluated. That seems fine.
77 for fd in self._to_close:
78 posix.close(fd)
79
80 codes = [] # type: List[int]
81 locs = [] # type: List[loc_t]
82 for i, p in enumerate(self._to_wait):
83 #log('waiting for %s', p)
84 st = p.Wait(waiter)
85 codes.append(st)
86 locs.append(self._locs[i])
87
88 status_array.codes = codes
89 status_array.locs = locs
90
91
# Bit flags for RunSimpleCommand (the run_flags parameter)
DO_FORK = 1 << 1
NO_CALL_PROCS = 1 << 2  # 'command ls' suppresses proc/function lookup
USE_DEFAULT_PATH = 1 << 3  # 'command -p ls' looks up ls in DEFAULT_PATH, not $PATH

# Fallback path for 'command -p'.  Copied from var.c in dash.
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]
102
103
104class ShellExecutor(vm._Executor):
105 """An executor combined with the OSH language evaluators in osh/ to create
106 a shell interpreter."""
107
    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: Dict[str, value.Proc]
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, _Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        """Store the evaluator's collaborators.

        Note: self.cmd_ev is NOT set here; it's wired up later (see
        CheckCircularDeps) because of the circular dependency with the
        command evaluator.
        """
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins
        self.search_path = search_path
        self.ext_prog = ext_prog
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # sleep 5 & puts a (PID, job#) entry here. And then "jobs" displays it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        # Stack of frames, one per nesting level; see Push/PopProcessSub.
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        # Pool of unmodified frames, reused to avoid allocations.
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle to it
        # through the evaluation of the last node back to ourselves for execution.
        # We use this handle to make sure any processes forked for the last part of
        # the pipeline are placed into the same process group as the rest of the
        # pipeline. Since there is, by design, only ever one foreground pipeline and
        # any pipelines started within subshells run in their parent's process
        # group, we only need one pointer here, not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]
156
    def CheckCircularDeps(self):
        # type: () -> None
        # cmd_ev is assigned after construction (circular dependency between
        # the executor and the command evaluator); verify the wiring happened.
        assert self.cmd_ev is not None
160
    def _MakeProcess(self, node, inherit_errexit=True):
        # type: (command_t, bool) -> process.Process
        """Assume we will run the node in another process.

        Returns a process.Process wrapping a SubProgramThunk; the caller
        decides when to start it and whether to wait.  Dies (e_die) if the
        node is a control flow statement other than 'exit', which is invalid
        in a pipeline / subshell / background job.
        """
        # mycpp downcast idiom: keep the un-narrowed reference in UP_node
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipeline or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors? This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose. Other notes:
        #
        # - We might want errors to fit on a single line so they don't get
        #   interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        #   and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev,
                                        node,
                                        self.trap_state,
                                        self.multi_trace,
                                        inherit_errexit=inherit_errexit)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p
196
    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin.

        Also called by the 'builtin' builtin.

        Returns the builtin's exit status; 1 on builtin I/O error, 2 on a
        usage error (bad flags/args).
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        # ctx_FlushStdout collects flush-time I/O errors (e.g. disk full,
        # ulimit) into io_errors rather than raising.
        io_errors = []  # type: List[error.IOError_OSError]
        with vm.ctx_FlushStdout(io_errors):
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except (IOError, OSError) as e:
                    self.errfmt.PrintMessage(
                        '%s builtin I/O error: %s' %
                        (cmd_val.argv[0], pyutil.strerror(e)),
                        cmd_val.arg_locs[0])
                    return 1
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    return 2  # consistent error code for usage error

        if len(io_errors):  # e.g. disk full, ulimit
            self.errfmt.PrintMessage(
                '%s builtin I/O error: %s' %
                (cmd_val.argv[0], pyutil.strerror(io_errors[0])),
                cmd_val.arg_locs[0])
            return 1

        return status
234
    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Lookup order: assignment builtins (rejected here), special builtins,
        procs/shell functions (unless NO_CALL_PROCS), hay nodes, normal
        builtins, then external commands resolved via $PATH (or DEFAULT_PATH
        with USE_DEFAULT_PATH).  With DO_FORK, external commands fork and
        wait; without it, we exec() in place and never return.

        Possible variations:
        - YSH might have different, simpler rules. No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # command readonly is disallowed, for technical reasons. Could
            # relax it later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #    e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            # TODO: Look shell functions in self.sh_funcs, but procs are
            # value.Proc in the var namespace.
            # Pitfall: What happens if there are two of the same name? I guess
            # that's why you have = and 'type' inspect them

            proc_node = self.procs.get(arg0)

            # Fall back to a value.Proc bound in the local var namespace.
            if proc_node is None:
                cell = self.mem.GetCell(arg0, scope_e.LocalOnly)
                if cell and cell.val.tag() == value_e.Proc:
                    proc_node = cast(value.Proc, cell.val)

            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    # NOTE: Functions could call 'exit 42' directly, etc.
                    status = self.cmd_ev.RunProc(proc_node, cmd_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins? Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.typed_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found (OILS-ERR-100)' % arg0, arg0_loc)
            return 127

        # Normal case: ls /
        if run_flags & DO_FORK:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    # Join the foreground pipeline's process group.
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')
387
    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For & etc.

        Starts the job without waiting, records the last PID in
        self.mem.last_bg_pid (for $!), and adds it to the job list.
        Always returns status 0.
        """
        # Special case for pipeline. There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be children
        # of the shell process, or you can make one process in group be the
        # ancestor of all the other processes in that group. The sample shell
        # program presented in this chapter uses the first approach because it
        # makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            # Every part of a background pipeline is forked, unlike
            # RunPipeline, where the last part runs in this process.
            for child in node.children:
                p = self._MakeProcess(child)
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications, we
            # have to register SIGCHLD. But then that introduces race conditions.
            # If we haven't called Register yet, then we won't know who to notify.

            p = self._MakeProcess(node)
            if self.job_control.Enabled():
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            self.job_list.AddJob(p)  # show in 'jobs' list
        return 0
432
    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline, e.g. ls | wc -l.

        The first n-1 parts are forked; the LAST part runs in this shell
        process (so 'echo foo | read line' can set a variable).  Statuses
        and blame locations are written into status_out, not returned.
        """
        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child)
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS. 'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            # Expose the pipeline so processes forked by the last part can
            # join its process group (see RunSimpleCommand).
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs
466
467 def RunSubshell(self, node):
468 # type: (command_t) -> int
469 p = self._MakeProcess(node)
470 if self.job_control.Enabled():
471 p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))
472
473 return p.RunProcess(self.waiter, trace.ForkWait)
474
    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """Run $(...) and return its captured stdout.

        Forks a child with stdout redirected to a pipe, reads until EOF,
        waits, and returns the output with trailing newlines stripped.
        Dies if command subs are disallowed here (strict_errexit /
        eval_unsafe_arith), or raises error.ErrExit on nonzero status when
        command_sub_errexit is on.
        """
        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places. Only one of them turns
            # off _allow_process_sub, which lets us distinguish the reason.
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Redirect:
            redir_node = cast(command.Redirect, node)
            # Detect '< file'
            if (len(redir_node.redirects) == 1 and
                    redir_node.redirects[0].op.id == Id.Redir_Less and
                    redir_node.child.tag() == command_e.NoOp):

                # Change it to __cat < file.
                # TODO: could be 'internal cat' (issue #1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])

                # Blame < because __cat has no location
                blame_tok = redir_node.redirects[0].op
                simple = command.Simple(blame_tok, [], [cat_word], None, None,
                                        True)

                # MUTATE redir node so it's like $(<file _cat)
                redir_node.child = simple

        p = self._MakeProcess(node,
                              inherit_errexit=self.exec_opts.inherit_errexit())
        # Shell quirk: Command subs remain part of the shell's process group, so we
        # don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        # Read the child's output until EOF, retrying on EINTR.
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'osh I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)

        # OSH has the concept of aborting in the middle of a WORD. We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash. Example:
            #
            # a=$(false)
            # echo foo  # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return ''.join(chunks).rstrip('\n')
565
    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD path.

        Returns that /dev/fd path; the process is waited on later, when the
        enclosing frame is popped (see PopProcessSub).

        Life cycle of a process substitution:

        1. Start with this code

          diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word. The
        NormalWordEvaluator calls this method, RunProcessSub(), which does 3
        things:

          a. Create a pipe(), getting r and w
          b. Starts the seq process, which inherits r and w
             It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
             and close(r)
          c. Close the w FD, because neither the shell or 'diff' will write to
             it.  However we must retain 'r', because 'diff' hasn't opened
             /dev/fd yet!
          d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line
        of diff, which looks like

          diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes. But it actually
        calls open() on both files passed as argv. (I think this is fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes. Neither us or
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child)

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash
            # and zsh. Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does. The calling
        # program needs to read() before we can wait, e.g.
        # diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()
676
677 def PushRedirects(self, redirects, err_out):
678 # type: (List[RedirValue], List[error.IOError_OSError]) -> None
679 if len(redirects) == 0: # Optimized to avoid allocs
680 return
681 self.fd_state.Push(redirects, err_out)
682
683 def PopRedirects(self, num_redirects, err_out):
684 # type: (int, List[error.IOError_OSError]) -> None
685 if num_redirects == 0: # Optimized to avoid allocs
686 return
687 self.fd_state.Pop(err_out)
688
689 def PushProcessSub(self):
690 # type: () -> None
691 if len(self.clean_frame_pool):
692 # Optimized to avoid allocs
693 new_frame = self.clean_frame_pool.pop()
694 else:
695 new_frame = _ProcessSubFrame()
696 self.process_sub_stack.append(new_frame)
697
698 def PopProcessSub(self, compound_st):
699 # type: (StatusArray) -> None
700 """This method is called by a context manager, which means we always
701 wait() on the way out, which I think is the right thing.
702
703 We don't always set _process_sub_status, e.g. if some fatal
704 error occurs first, but we always wait.
705 """
706 frame = self.process_sub_stack.pop()
707 if frame.WasModified():
708 frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
709 else:
710 # Optimized to avoid allocs
711 self.clean_frame_pool.append(frame)
712
713 # Note: the 3 lists in _ProcessSubFrame are hot in our profiles. It would
714 # be nice to somehow "destroy" them here, rather than letting them become
715 # garbage that needs to be traced.
716
717 # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
718 # Pop(), and Top() of VALUES rather than GC objects?
719
720
721# vim: sw=4