OILS / core / executor.py View on Github | oilshell.org

714 lines, 387 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from _devbuild.gen.value_asdl import value
18from builtin import hay_ysh
19from core import dev
20from core import error
21from core import process
22from core.error import e_die, e_die_status
23from core import pyos
24from core import pyutil
25from core import state
26from core import ui
27from core import vm
28from frontend import consts
29from frontend import lexer
30from mycpp.mylib import log
31
32import posix_ as posix
33
34from typing import cast, Dict, List, Optional, TYPE_CHECKING
35if TYPE_CHECKING:
36 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
37 StatusArray)
38 from _devbuild.gen.syntax_asdl import command_t
39 from builtin import trap_osh
40 from core import optview
41 from core import state
42 from core.vm import _Builtin
43
_ = log  # reference 'log' so the import isn't flagged unused; it appears only in commented-out debug calls below
45
46
47class _ProcessSubFrame(object):
48 """To keep track of diff <(cat 1) <(cat 2) > >(tac)"""
49
50 def __init__(self):
51 # type: () -> None
52
53 # These objects appear unconditionally in the main loop, and aren't
54 # commonly used, so we manually optimize [] into None.
55
56 self._to_wait = [] # type: List[process.Process]
57 self._to_close = [] # type: List[int] # file descriptors
58 self._locs = [] # type: List[loc_t]
59 self._modified = False
60
61 def WasModified(self):
62 # type: () -> bool
63 return self._modified
64
65 def Append(self, p, fd, status_loc):
66 # type: (process.Process, int, loc_t) -> None
67 self._modified = True
68
69 self._to_wait.append(p)
70 self._to_close.append(fd)
71 self._locs.append(status_loc)
72
73 def MaybeWaitOnProcessSubs(self, waiter, status_array):
74 # type: (process.Waiter, StatusArray) -> None
75
76 # Wait in the same order that they were evaluated. That seems fine.
77 for fd in self._to_close:
78 posix.close(fd)
79
80 codes = [] # type: List[int]
81 locs = [] # type: List[loc_t]
82 for i, p in enumerate(self._to_wait):
83 #log('waiting for %s', p)
84 st = p.Wait(waiter)
85 codes.append(st)
86 locs.append(self._locs[i])
87
88 status_array.codes = codes
89 status_array.locs = locs
90
91
# Bit flags for RunSimpleCommand
DO_FORK = 1 << 1
NO_CALL_PROCS = 1 << 2  # 'command ls' suppresses proc/function lookup
USE_DEFAULT_PATH = 1 << 3  # 'command -p ls' looks up ls in DEFAULT_PATH

# Copied from var.c in dash
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]
102
103
class ShellExecutor(vm._Executor):
    """An executor combined with the OSH language evaluators in osh/ to create
    a shell interpreter.

    Dispatches simple commands (assignment/special/normal builtins, procs,
    hay nodes, external programs) and implements the process-based
    constructs: pipelines, subshells, background jobs, command subs, and
    process subs.
    """

    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: Dict[str, value.Proc]
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, _Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins
        self.search_path = search_path
        self.ext_prog = ext_prog
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # sleep 5 & puts a (PID, job#) entry here. And then "jobs" displays it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        # Stack of frames tracking process subs; see PushProcessSub() /
        # PopProcessSub(). Unmodified frames are recycled via the pool.
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle to it
        # through the evaluation of the last node back to ourselves for execution.
        # We use this handle to make sure any processes forked for the last part of
        # the pipeline are placed into the same process group as the rest of the
        # pipeline. Since there is, by design, only ever one foreground pipeline and
        # any pipelines started within subshells run in their parent's process
        # group, we only need one pointer here, not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]

    def CheckCircularDeps(self):
        # type: () -> None
        """Assert that mutually-dependent references were wired up.

        self.cmd_ev is not set in __init__; presumably it's assigned after
        construction, so check it here rather than at every use.
        """
        assert self.cmd_ev is not None

    def _MakeProcess(self, node, inherit_errexit=True):
        # type: (command_t, bool) -> process.Process
        """Assume we will run the node in another process.

        Return a process (not yet started).
        """
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipeline or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors? This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose. Other notes:
        #
        # - We might want errors to fit on a single line so they don't get
        #   interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        #   and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev,
                                        node,
                                        self.trap_state,
                                        self.multi_trace,
                                        inherit_errexit=inherit_errexit)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p

    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin and return its exit status.

        Also called by the 'builtin' builtin.  I/O errors (during the
        builtin or when flushing stdout afterward) become status 1; usage
        errors become status 2.
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        io_errors = []  # type: List[error.IOError_OSError]
        with vm.ctx_FlushStdout(io_errors):
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except (IOError, OSError) as e:
                    self.errfmt.PrintMessage(
                        '%s builtin I/O error: %s' %
                        (cmd_val.argv[0], pyutil.strerror(e)),
                        cmd_val.arg_locs[0])
                    return 1
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    return 2  # consistent error code for usage error

        if len(io_errors):  # e.g. disk full, ulimit
            self.errfmt.PrintMessage(
                '%s builtin I/O error: %s' %
                (cmd_val.argv[0], pyutil.strerror(io_errors[0])),
                cmd_val.arg_locs[0])
            return 1

        return status

    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        run_flags is a bitmask of DO_FORK / NO_CALL_PROCS / USE_DEFAULT_PATH
        (defined at the top of this module).  Resolution order: assignment
        builtins (rejected here), special builtins, procs, hay names, normal
        builtins, then external commands.

        Possible variations:
        - YSH might have different, simpler rules. No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # 'command readonly' is disallowed, for technical reasons. Could
            # relax it later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #    e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            # TODO: Look up shell functions in self.sh_funcs, but procs are
            # value.Proc in the var namespace.
            # Pitfall: What happens if there are two of the same name? I guess
            # that's why you have = and 'type' inspect them

            proc_node = self.procs.get(arg0)
            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    # NOTE: Functions could call 'exit 42' directly, etc.
                    status = self.cmd_ev.RunProc(proc_node, cmd_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins? Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.typed_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found (OILS-ERR-100)' % arg0, arg0_loc)
            return 127

        # Normal case: ls /
        if run_flags & DO_FORK:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    # Join the process group of the foreground pipeline this
                    # command is the last part of (see RunPipeline).
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')

    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For cmd & and pipeline &.  Returns 0; records the PID for $!."""
        # Special case for pipeline. There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be children
        # of the shell process, or you can make one process in group be the
        # ancestor of all the other processes in that group. The sample shell
        # program presented in this chapter uses the first approach because it
        # makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            for child in node.children:
                p = self._MakeProcess(child)
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications, we
            # have to register SIGCHLD. But then that introduces race conditions.
            # If we haven't called Register yet, then we won't know who to notify.

            p = self._MakeProcess(node)
            if self.job_control.Enabled():
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            self.job_list.AddJob(p)  # show in 'jobs' list
        return 0

    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline; the LAST part runs in THIS process.

        Results are written into status_out.pipe_status and
        status_out.pipe_locs (status_out was initialized with
        CommandStatus.CreateNull()).
        """
        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # One blame location per pipeline part, parallel to pipe_status.
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child)
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS. 'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            # Expose the pipeline so RunSimpleCommand can put a forked last
            # part into the same process group (see self.fg_pipeline).
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs

    def RunSubshell(self, node):
        # type: (command_t) -> int
        """Run ( cmd ) in a forked child and return its exit status."""
        p = self._MakeProcess(node)
        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        return p.RunProcess(self.waiter, trace.ForkWait)

    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """Run $(cmd) in a child process and return its captured stdout.

        All trailing newlines are stripped, per shell convention.
        """
        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places. Only one of them turns
            # off _allow_process_sub, which distinguishes the two messages.
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Redirect:
            redir_node = cast(command.Redirect, node)
            # Detect '< file'
            if (len(redir_node.redirects) == 1 and
                    redir_node.redirects[0].op.id == Id.Redir_Less and
                    redir_node.child.tag() == command_e.NoOp):

                # Change it to __cat < file.
                # TODO: could be 'internal cat' (issue #1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])

                # Blame < because __cat has no location
                blame_tok = redir_node.redirects[0].op
                simple = command.Simple(blame_tok, [], [cat_word], None, None,
                                        True)

                # MUTATE redir node so it's like $(<file _cat)
                redir_node.child = simple

        p = self._MakeProcess(node,
                              inherit_errexit=self.exec_opts.inherit_errexit())
        # Shell quirk: Command subs remain part of the shell's process group, so we
        # don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        # Drain the pipe; retry on EINTR, die on other read errors.
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'osh I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)

        # OSH has the concept of aborting in the middle of a WORD. We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash. Example:
            #
            # a=$(false)
            # echo foo  # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return ''.join(chunks).rstrip('\n')

    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD
        path, which this method returns.

        Life cycle of a process substitution:

        1. Start with this code

          diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word. The
        NormalWordEvaluator calls this method, RunProcessSub(), which does
        the following:

          a. Create a pipe(), getting r and w
          b. Starts the seq process, which inherits r and w
             It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
             and close(r)
          c. Close the w FD, because neither the shell nor 'diff' will write
             to it. However we must retain 'r', because 'diff' hasn't opened
             /dev/fd yet!
          d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line
        of diff, which looks like

          diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes. But it
        actually calls open() on both files passed as argv. (I think this is
        fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes. Neither we nor
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child)

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in
            # bash and zsh. Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does. The calling
        # program needs to read() before we can wait, e.g.
        #   diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()

    def PushRedirects(self, redirects, err_out):
        # type: (List[RedirValue], List[error.IOError_OSError]) -> None
        """Apply redirects; I/O errors are appended to err_out, not raised."""
        if len(redirects) == 0:  # Optimized to avoid allocs
            return
        self.fd_state.Push(redirects, err_out)

    def PopRedirects(self, num_redirects, err_out):
        # type: (int, List[error.IOError_OSError]) -> None
        """Undo the redirects applied by PushRedirects()."""
        if num_redirects == 0:  # Optimized to avoid allocs
            return
        self.fd_state.Pop(err_out)

    def PushProcessSub(self):
        # type: () -> None
        """Push a frame that collects process subs for the current command."""
        if len(self.clean_frame_pool):
            # Optimized to avoid allocs
            new_frame = self.clean_frame_pool.pop()
        else:
            new_frame = _ProcessSubFrame()
        self.process_sub_stack.append(new_frame)

    def PopProcessSub(self, compound_st):
        # type: (StatusArray) -> None
        """This method is called by a context manager, which means we always
        wait() on the way out, which I think is the right thing.

        We don't always set _process_sub_status, e.g. if some fatal
        error occurs first, but we always wait.
        """
        frame = self.process_sub_stack.pop()
        if frame.WasModified():
            frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
        else:
            # Optimized to avoid allocs
            self.clean_frame_pool.append(frame)

        # Note: the 3 lists in _ProcessSubFrame are hot in our profiles. It would
        # be nice to somehow "destroy" them here, rather than letting them become
        # garbage that needs to be traced.

        # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
        # Pop(), and Top() of VALUES rather than GC objects?
712
713
714# vim: sw=4