1 # -*- test-case-name: twisted.test.test_process -*-
2 # Copyright (c) Twisted Matrix Laboratories.
3 # See LICENSE for details.
6 UNIX Process management.
8 Do NOT use this module directly - use reactor.spawnProcess() instead.
10 Maintainer: Itamar Shtull-Trauring
14 import gc, os, sys, stat, traceback, select, signal, errno
26 from zope.interface import implements
28 from twisted.python import log, failure
29 from twisted.python.util import switchUID
30 from twisted.internet import fdesc, abstract, error
31 from twisted.internet.main import CONNECTION_LOST, CONNECTION_DONE
32 from twisted.internet._baseprocess import BaseProcess
33 from twisted.internet.interfaces import IProcessTransport
# Some people were importing this, which is incorrect, just keeping it
# here for backwards compatibility:
# (the canonical location is twisted.internet.error.ProcessExitedAlready)
ProcessExitedAlready = error.ProcessExitedAlready
# Maps pid -> process handler for every child we still have to reap.
reapProcessHandlers = {}


def reapAllProcesses():
    """
    Reap all registered processes.

    Calls C{reapProcess()} on every registered handler; a handler that
    successfully reaps its child will unregister itself as a side effect.
    """
    # Iterate over a snapshot: reaping a process may call
    # unregisterReapProcessHandler(), which mutates reapProcessHandlers
    # while we are looping over it.
    for process in list(reapProcessHandlers.values()):
        process.reapProcess()
def registerReapProcessHandler(pid, process):
    """
    Register a process handler for the given pid, in case L{reapAllProcesses}
    is called.

    @param pid: the pid of the process.
    @param process: a process handler.

    @raise RuntimeError: if a handler is already registered for C{pid}.
    """
    if pid in reapProcessHandlers:
        raise RuntimeError("Try to register an already registered process.")
    try:
        # Poll once immediately: the child may already have exited before
        # we got here, in which case no SIGCHLD will ever fire for it.
        auxPID, status = os.waitpid(pid, os.WNOHANG)
    except OSError:
        # waitpid raises OSError (e.g. ECHILD) if the child has already
        # been reaped or the pid is invalid; log it and fall through so a
        # later reap attempt can still clean up.
        log.msg('Failed to reap %d:' % pid)
        log.err()
        auxPID = None
    if auxPID:
        # The child has already exited; report its status right away.
        process.processEnded(status)
    else:
        # if auxPID is 0, there are children but none have exited
        reapProcessHandlers[pid] = process
def unregisterReapProcessHandler(pid, process):
    """
    Unregister a process handler previously registered with
    L{registerReapProcessHandler}.

    @raise RuntimeError: if C{process} is not the handler registered for
        C{pid}.
    """
    # Guard clause: the (pid, process) pair must match an existing entry.
    registered = (pid in reapProcessHandlers
                  and reapProcessHandlers[pid] == process)
    if not registered:
        raise RuntimeError("Try to unregister a process not registered.")
    del reapProcessHandlers[pid]
def detectLinuxBrokenPipeBehavior():
    """
    On some Linux version, write-only pipe are detected as readable. This
    function is here to check if this bug is present or not.

    See L{ProcessWriter.doRead} for a more detailed explanation.

    Sets the module-level flag C{brokenLinuxPipeBehavior} as a side effect.
    """
    global brokenLinuxPipeBehavior
    r, w = os.pipe()
    try:
        # Probe: ask select() whether the *write* end looks readable.
        reads, writes, exes = select.select([w], [], [], 0)
        if reads:
            # Linux < 2.6.11 says a write-only pipe is readable.
            brokenLinuxPipeBehavior = True
        else:
            brokenLinuxPipeBehavior = False
    finally:
        # Always release the probe pipe, even if select() raises.
        os.close(r)
        os.close(w)

# Call at import time
detectLinuxBrokenPipeBehavior()
class ProcessWriter(abstract.FileDescriptor):
    """
    (Internal) Helper class to write into a Process's input pipe.

    I am a helper which describes a selectable asynchronous writer to a
    process's input pipe, including stdin.

    @ivar enableReadHack: A flag which determines how readability on this
        write descriptor will be handled.  If C{True}, then readability may
        indicate the reader for this write descriptor has been closed (ie,
        the connection has been lost).  If C{False}, then readability events
    """
    # Default: do not monitor this write descriptor for readability.
    enableReadHack = False

    def __init__(self, reactor, proc, name, fileno, forceReadHack=False):
        """
        Initialize, specifying a Process instance to connect to.
        """
        abstract.FileDescriptor.__init__(self, reactor)
        fdesc.setNonBlocking(fileno)
        # NOTE(review): the assignments of self.proc / self.name / self.fd
        # appear to be elided from this excerpt -- confirm against the full
        # source before relying on this constructor.
        if not stat.S_ISFIFO(os.fstat(self.fileno()).st_mode):
            # If the fd is not a pipe, then the read hack is never
            # applicable.  This case arises when ProcessWriter is used by
            # StandardIO and stdout is redirected to a normal file.
            self.enableReadHack = False
        # NOTE(review): the following assignment appears to belong to an
        # elided "elif forceReadHack:" branch.
            self.enableReadHack = True
            # Detect if this fd is actually a write-only fd.  If it's
            # valid to read, don't try to detect closing via read.
            # This really only means that we cannot detect a TTY's write
            os.read(self.fileno(), 0)
            # It's a write-only pipe end, enable hack
            # NOTE(review): elided except-branch context.
            self.enableReadHack = True

        if self.enableReadHack:
            # NOTE(review): body (presumably self.startReading()) elided.
            pass

    # Return the fileno() of my process's stdin.
    # NOTE(review): the "def fileno(self):" line is elided here.

    def writeSomeData(self, data):
        """
        Write some data to the open process.
        """
        rv = fdesc.writeToFD(self.fd, data)
        if rv == len(data) and self.enableReadHack:
            # If the send buffer is now empty and it is necessary to monitor
            # this descriptor for readability to detect close, try detecting
            # NOTE(review): remainder of this branch elided.
            pass

    def write(self, data):
        # Delegate buffering/writing to the base FileDescriptor.
        abstract.FileDescriptor.write(self, data)

    # NOTE(review): "def doRead(self):" and its docstring delimiters are
    # elided; the following text is the doRead docstring/commentary.
    #
    # The only way a write pipe can become "readable" is at EOF, because the
    # child has closed it, and we're using a reactor which doesn't
    # distinguish between readable and closed (such as the select reactor).
    #
    # Except that's not true on linux < 2.6.11. It has the following
    # characteristics: write pipe is completely empty => POLLOUT (writable in
    # select), write pipe is not completely empty => POLLIN (readable in
    # select), write pipe's reader closed => POLLIN|POLLERR (readable and
    #
    # That's what this funky code is for. If linux was not broken, this
    # function could be simply "return CONNECTION_LOST".
    #
    # BUG: We call select no matter what the reactor.
    # If the reactor is pollreactor, and the fd is > 1024, this will fail.
    # (only occurs on broken versions of linux, though).
        if self.enableReadHack:
            if brokenLinuxPipeBehavior:
                # NOTE(review): "fd = self.fd" appears elided before this.
                r, w, x = select.select([fd], [fd], [], 0)
                # Readable AND writable simultaneously => reader is gone.
                return CONNECTION_LOST
            else:
                return CONNECTION_LOST

    def connectionLost(self, reason):
        """
        See abstract.FileDescriptor.connectionLost.
        """
        # At least on OS X 10.4, exiting while stdout is non-blocking can
        # result in data loss.  For some reason putting the file descriptor
        # back into blocking mode seems to resolve this issue.
        fdesc.setBlocking(self.fd)

        abstract.FileDescriptor.connectionLost(self, reason)
        self.proc.childConnectionLost(self.name, reason)
class ProcessReader(abstract.FileDescriptor):
    """
    I am a selectable representation of a process's output pipe, such as
    """

    def __init__(self, reactor, proc, name, fileno):
        """
        Initialize, specifying a process to connect to.
        """
        abstract.FileDescriptor.__init__(self, reactor)
        fdesc.setNonBlocking(fileno)
        # NOTE(review): assignments of self.proc / self.name / self.fd
        # appear elided from this excerpt.

    # Return the fileno() of my process's stderr.
    # NOTE(review): the "def fileno(self):" line is elided here.

    def writeSomeData(self, data):
        # the only time this is actually called is after .loseConnection Any
        # actual write attempt would fail, so we must avoid that. This hack
        # allows us to use .loseConnection on both readers and writers.
        return CONNECTION_LOST

    # This is called when the pipe becomes readable.
    # NOTE(review): the "def doRead(self):" line is elided here.
        return fdesc.readFromFD(self.fd, self.dataReceived)

    def dataReceived(self, data):
        # Forward each chunk to the owning process object.
        self.proc.childDataReceived(self.name, data)

    def loseConnection(self):
        # Idempotent: schedule the close only on the first call while
        # still connected.
        if self.connected and not self.disconnecting:
            self.disconnecting = 1
            # NOTE(review): a stopReading() call appears elided here.
            self.reactor.callLater(0, self.connectionLost,
                                   failure.Failure(CONNECTION_DONE))

    def connectionLost(self, reason):
        """
        Close my end of the pipe, signal the Process (which signals the
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self.proc.childConnectionLost(self.name, reason)
class _BaseProcess(BaseProcess, object):
    """
    Base class for Process and PTYProcess.
    """

    def reapProcess(self):
        """
        Try to reap a process (without blocking) via waitpid.

        This is called when sigchild is caught or a Process object loses its
        "connection" (stdout is closed) This ought to result in reaping all
        zombie processes, since it will be called twice as often as it needs

        (Unfortunately, this is a slightly experimental approach, since
        UNIX has no way to be really sure that your process is going to
        go away w/o blocking. I don't want to block.)
        """
        # NOTE(review): the try/except structure around waitpid is elided
        # from this excerpt; lines below are the surviving fragments.
        pid, status = os.waitpid(self.pid, os.WNOHANG)
        # Fragment of an elided except-OSError branch:
        if e.errno == errno.ECHILD:
        log.msg('Failed to reap %d:' % self.pid)
        # On successful reap, report the exit status and unregister.
        self.processEnded(status)
        unregisterReapProcessHandler(pid, self)

    def _getReason(self, status):
        # Translate a waitpid() status word into the exception object that
        # will be wrapped in the Failure passed to processEnded.
        exitCode = sig = None
        if os.WIFEXITED(status):
            exitCode = os.WEXITSTATUS(status)
        # NOTE(review): the following line appears to belong to an elided
        # "elif os.WIFSIGNALED(status):" branch.
            sig = os.WTERMSIG(status)
        # NOTE(review): the condition selecting between the two returns is
        # elided from this excerpt.
        return error.ProcessTerminated(exitCode, sig, status)
        return error.ProcessDone(status)

    def signalProcess(self, signalID):
        """
        Send the given signal C{signalID} to the process. It'll translate a
        few signals ('HUP', 'STOP', 'INT', 'KILL', 'TERM') from a string
        representation to its int value, otherwise it'll pass directly the

        @type signalID: C{str} or C{int}
        """
        if signalID in ('HUP', 'STOP', 'INT', 'KILL', 'TERM'):
            signalID = getattr(signal, 'SIG%s' % (signalID,))
        # NOTE(review): the guard condition (process already exited) for
        # this raise is elided from this excerpt.
        raise ProcessExitedAlready()
        os.kill(self.pid, signalID)

    def _resetSignalDisposition(self):
        # The Python interpreter ignores some signals, and our child
        # process will inherit that behaviour. To have a child process
        # that responds to signals normally, we need to reset our
        # child process's signal handling (just) after we fork and
        for signalnum in range(1, signal.NSIG):
            if signal.getsignal(signalnum) == signal.SIG_IGN:
                # Reset signal handling to the default
                signal.signal(signalnum, signal.SIG_DFL)

    def _fork(self, path, uid, gid, executable, args, environment, **kwargs):
        """
        Fork and then exec sub-process.

        @param path: the path where to run the new process.

        @param uid: if defined, the uid used to run the new process.

        @param gid: if defined, the gid used to run the new process.

        @param executable: the executable to run in a new process.
        @type executable: C{str}

        @param args: arguments used to create the new process.

        @param environment: environment used for the new process.
        @type environment: C{dict}.

        @param kwargs: keyword arguments to L{_setupChild} method.
        """
        settingUID = (uid is not None) or (gid is not None)
        # NOTE(review): an "if settingUID:" guard appears elided before the
        # following saved-credential assignments.
        curegid = os.getegid()
        currgid = os.getgid()
        cureuid = os.geteuid()
        curruid = os.getuid()
        # prepare to change UID in subprocess
        collectorEnabled = gc.isenabled()
        # NOTE(review): the fork() call and its error handling are elided;
        # the next two lines restore credentials if fork failed.
        # Still in the parent process
        os.setregid(currgid, curegid)
        os.setreuid(curruid, cureuid)
        if self.pid == 0:  # pid is 0 in the child process
            # do not put *ANY* code outside the try block. The child process
            # must either exec or _exit. If it gets outside this block (due
            # to an exception that is not handled here, but which might be
            # handled higher up), there will be two copies of the parent
            # running in parallel, doing all kinds of damage.

            # After each change to this code, review it to make sure there
            # Stop debugging. If I am, I don't care anymore.
            self._setupChild(**kwargs)
            self._execChild(path, settingUID, uid, gid,
                            executable, args, environment)
            # If there are errors, bail and try to write something
            # descriptive to stderr.
            # XXX: The parent's stderr isn't necessarily fd 2 anymore, or
            # even still available
            # XXXX: however even libc assumes write(2, err) is a useful
            stderr = os.fdopen(2, 'w')
            stderr.write("Upon execvpe %s %s in environment %s\n:" %
                         (executable, str(args),
                          "id %s" % id(environment)))
            traceback.print_exc(file=stderr)
            # NOTE(review): the os._exit() call this protects is elided.
            pass  # make *sure* the child terminates
            # Did you read the comment about not adding code here?
        # we are now in parent process
        # NOTE(review): an "if settingUID:" guard appears elided here.
        os.setregid(currgid, curegid)
        os.setreuid(curruid, cureuid)
        self.status = -1  # this records the exit status of the child

    def _setupChild(self, *args, **kwargs):
        """
        Setup the child process. Override in subclasses.
        """
        raise NotImplementedError()

    def _execChild(self, path, settingUID, uid, gid,
                   executable, args, environment):
        """
        The exec() which is done in the forked child.
        """
        # NOTE(review): chdir(path) and the switchUID call are elided from
        # this excerpt.
        # set the UID before I actually exec the process
        os.execvpe(executable, args, environment)

    # String representation of a process.
    # NOTE(review): the "def __repr__(self):" line is elided here.
        return "<%s pid=%s status=%s>" % (self.__class__.__name__,
                                          self.pid, self.status)
class _FDDetector(object):
    """
    This class contains the logic necessary to decide which of the available
    system techniques should be used to detect the open file descriptors for
    the current process. The chosen technique gets monkey-patched into the
    _listOpenFDs method of this class so that the detection only needs to occur

    @ivars listdir: The implementation of listdir to use. This gets overwritten
    @ivars getpid: The implementation of getpid to use, returns the PID of the
    @ivars openfile: The implementation of open() to use, by default the Python
    """
    # So that we can unit test this
    # NOTE(review): the default listdir/getpid/openfile attributes and the
    # "def __init__(self):" line are elided from this excerpt.
        self._implementations = [
            self._procFDImplementation, self._devFDImplementation,
            self._fallbackFDImplementation]

    def _listOpenFDs(self):
        """
        Return an iterable of file descriptors which I{may} be open in this

        This will try to return the fewest possible descriptors without missing
        """
        # Memoize: replace this bound method with the chosen implementation
        # so detection happens only once per instance.
        self._listOpenFDs = self._getImplementation()
        return self._listOpenFDs()

    def _getImplementation(self):
        """
        Pick a method which gives correct results for C{_listOpenFDs} in this

        This involves a lot of very platform-specific checks, some of which may
        be relatively expensive. Therefore the returned method should be saved
        and re-used, rather than always calling this method to determine what it

        See the implementation for the details of how a method is selected.
        """
        for impl in self._implementations:
            # NOTE(review): the try/except probing logic around this open
            # is elided from this excerpt.
            fp = self.openfile("/dev/null", "r")
        # If no implementation can detect the newly opened file above, then just
        # return the last one. The last one should therefore always be one
        # which makes a simple static guess which includes all possible open
        # file descriptors, but perhaps also many other values which do not
        # correspond to file descriptors. For example, the scheme implemented
        # by _fallbackFDImplementation is suitable to be the last entry.

    def _devFDImplementation(self):
        """
        Simple implementation for systems where /dev/fd actually works.
        See: http://www.freebsd.org/cgi/man.cgi?fdescfs
        """
        # NOTE(review): the 'dname = "/dev/fd"' assignment and the return
        # statement are elided from this excerpt.
        result = [int(fd) for fd in self.listdir(dname)]

    def _procFDImplementation(self):
        """
        Simple implementation for systems where /proc/pid/fd exists (we assume
        """
        dname = "/proc/%d/fd" % (self.getpid(),)
        return [int(fd) for fd in self.listdir(dname)]

    def _fallbackFDImplementation(self):
        """
        Fallback implementation where either the resource module can inform us
        about the upper bound of how many FDs to expect, or where we just guess
        a constant maximum if there is no resource module.

        All possible file descriptors from 0 to that upper bound are returned
        with no attempt to exclude invalid file descriptor values.
        """
        # NOTE(review): the "import resource" try/except and the final
        # "return range(maxfds)" are elided from this excerpt.
        # OS-X reports 9223372036854775808. That's a lot of fds to close.
        # OS-X should get the /dev/fd implementation instead, so mostly
        # this check probably isn't necessary.
        maxfds = min(1024, resource.getrlimit(resource.RLIMIT_NOFILE)[1])
# Shared detector instance; it memoizes the chosen FD-listing technique.
detector = _FDDetector()


def _listOpenFDs():
    """
    Use the global detector object to figure out which FD implementation to
    use and return an iterable of file descriptors which may be open in this
    process.
    """
    return detector._listOpenFDs()
class Process(_BaseProcess):
    """
    An operating-system Process.

    This represents an operating-system process with arbitrary input/output
    pipes connected to it. Those pipes may represent standard input,
    standard output, and standard error, or any other file descriptor.

    On UNIX, this is implemented using fork(), exec(), pipe()
    and fcntl(). These calls may not exist elsewhere so this
    code is not cross-platform. (also, windows can only select
    """
    implements(IProcessTransport)

    # Factories used to build the per-pipe helper objects; overridable
    # for testing.
    processWriterFactory = ProcessWriter
    processReaderFactory = ProcessReader

    # NOTE(review): the "def __init__(self," line is elided from this
    # excerpt; the next two lines are the rest of its signature.
                 reactor, executable, args, environment, path, proto,
                 uid=None, gid=None, childFDs=None):
        """
        Spawn an operating-system process.

        This is where the hard work of disconnecting all currently open
        files / forking / executing the new process happens. (This is
        executed automatically when a Process is instantiated.)

        This will also run the subprocess as a given user ID and group ID, if
        specified. (Implementation Note: this doesn't support all the arcane
        nuances of setXXuid on UNIX: it will assume that either your effective
        """
        # 'r'/'w' are only valid as *values* of childFDs, never as keys.
        assert 'r' not in childFDs.values()
        assert 'w' not in childFDs.values()
        _BaseProcess.__init__(self, proto)

        # keys are childFDs, we can sense them closing
        # values are ProcessReader/ProcessWriters
        # values are parentFDs
        # NOTE(review): several assignments and the "if childFDs is None:"
        # guard appear elided around the default map below.
        childFDs = {0: "w", # we write to the child's stdin
                    1: "r", # we read from their stdout
                    2: "r", # and we read from their stderr
        if debug: print "childFDs", childFDs
        # NOTE(review): the pipe()-wrapper definition feeding _openedPipes
        # is elided from this excerpt.
        _openedPipes.extend([r, w])

        # fdmap.keys() are filenos of pipes that are used by the child.
        fdmap = {} # maps childFD to parentFD
        for childFD, target in childFDs.items():
            if debug: print "[%d]" % childFD, target
            # NOTE(review): the if/elif branches on target ('r'/'w'/int)
            # are elided; fragments of each branch follow.
            # we need a pipe that the parent can read from
            readFD, writeFD = pipe()
            if debug: print "readFD=%d, writeFD=%d" % (readFD, writeFD)
            fdmap[childFD] = writeFD     # child writes to this
            helpers[childFD] = readFD    # parent reads from this
            # we need a pipe that the parent can write to
            readFD, writeFD = pipe()
            if debug: print "readFD=%d, writeFD=%d" % (readFD, writeFD)
            fdmap[childFD] = readFD      # child reads from this
            helpers[childFD] = writeFD   # parent writes to this
            assert type(target) == int, '%r should be an int' % (target,)
            fdmap[childFD] = target      # parent ignores this
        if debug: print "fdmap", fdmap
        if debug: print "helpers", helpers
        # the child only cares about fdmap.values()

        self._fork(path, uid, gid, executable, args, environment, fdmap=fdmap)
        # Parent side: the child's copies of the pipe fds are closed here.
        map(os.close, _openedPipes)

        # we are the parent process:

        # arrange for the parent-side pipes to be read and written
        for childFD, parentFD in helpers.items():
            os.close(fdmap[childFD])
            if childFDs[childFD] == "r":
                reader = self.processReaderFactory(reactor, self, childFD,
                self.pipes[childFD] = reader
            if childFDs[childFD] == "w":
                writer = self.processWriterFactory(reactor, self, childFD,
                                                   parentFD, forceReadHack=True)
                self.pipes[childFD] = writer

        # the 'transport' is used for some compatibility methods
        if self.proto is not None:
            self.proto.makeConnection(self)
        # The reactor might not be running yet. This might call back into
        # processEnded synchronously, triggering an application-visible
        # callback. That's probably not ideal. The replacement API for
        # spawnProcess should improve upon this situation.
        registerReapProcessHandler(self.pid, self)

    def _setupChild(self, fdmap):
        """
        fdmap[childFD] = parentFD

        The child wants to end up with 'childFD' attached to what used to be
        the parent's parentFD. As an example, a bash command run like
        'command 2>&1' would correspond to an fdmap of {0:0, 1:1, 2:1}.
        'command >foo.txt' would be {0:0, 1:os.open('foo.txt'), 2:2}.

        This is accomplished in two steps::

            1. close all file descriptors that aren't values of fdmap. This
               means 0 .. maxfds (or just the open fds within that range, if
               the platform supports '/proc/<pid>/fd').

            2. for each childFD::

                - if fdmap[childFD] == childFD, the descriptor is already in
                  place. Make sure the CLOEXEC flag is not set, then delete
                  the entry from fdmap.

                - if childFD is in fdmap.values(), then the target descriptor
                  is busy. Use os.dup() to move it elsewhere, update all
                  fdmap[childFD] items that point to it, then close the
                  original. Then fall through to the next case.

                - now fdmap[childFD] is not in fdmap.values(), and is free.
                  Use os.dup2() to move it to the right place, then close the
        """
        debug = self.debug_child
        # NOTE(review): the debug errfd setup is elided from this excerpt.
        errfd.write("starting _setupChild\n")

        destList = fdmap.values()
        for fd in _listOpenFDs():
            # NOTE(review): the close-unneeded-fds branch bodies are elided.
            if debug and fd == errfd.fileno():

        # at this point, the only fds still open are the ones that need to
        # be moved to their appropriate positions in the child (the targets
        # of fdmap, i.e. fdmap.values() )

        if debug: print >>errfd, "fdmap", fdmap
        childlist = fdmap.keys()
        for child in childlist:
            target = fdmap[child]
            # NOTE(review): the "if target == child:" guard is elided.
            # fd is already in place
            if debug: print >>errfd, "%d already in place" % target
            fdesc._unsetCloseOnExec(child)
            if child in fdmap.values():
                # we can't replace child-fd yet, as some other mapping
                # still needs the fd it wants to target. We must preserve
                # that old fd by duping it to a new home.
                newtarget = os.dup(child) # give it a safe home
                if debug: print >>errfd, "os.dup(%d) -> %d" % (child,
                os.close(child) # close the original
                for c, p in fdmap.items():
                    # NOTE(review): the "if p == child:" guard is elided.
                    fdmap[c] = newtarget # update all pointers
                # now it should be available
                if debug: print >>errfd, "os.dup2(%d,%d)" % (target, child)
                os.dup2(target, child)

        # At this point, the child has everything it needs. We want to close
        # everything that isn't going to be used by the child, i.e.
        # everything not in fdmap.keys(). The only remaining fds open are
        # those in fdmap.values().

        # Any given fd may appear in fdmap.values() multiple times, so we
        # need to remove duplicates first.

        for fd in fdmap.values():
            if not fd in fdmap.keys():
        if debug: print >>errfd, "old", old
        self._resetSignalDisposition()

    def writeToChild(self, childFD, data):
        # Write to an arbitrary child pipe (must be one of our writers).
        self.pipes[childFD].write(data)

    def closeChildFD(self, childFD):
        # for writer pipes, loseConnection tries to write the remaining data
        # out to the pipe before closing it
        # if childFD is not in the list of pipes, assume that it is already
        if childFD in self.pipes:
            self.pipes[childFD].loseConnection()

    def pauseProducing(self):
        # NOTE(review): the per-reader stopReading() call is elided.
        for p in self.pipes.itervalues():
            if isinstance(p, ProcessReader):

    def resumeProducing(self):
        # NOTE(review): the per-reader startReading() call is elided.
        for p in self.pipes.itervalues():
            if isinstance(p, ProcessReader):

    def closeStdin(self):
        """
        Call this to close standard input on this process.
        """
        # NOTE(review): body (closeChildFD(0)) is elided from this excerpt.

    def closeStdout(self):
        # NOTE(review): body elided from this excerpt.

    def closeStderr(self):
        # NOTE(review): body elided from this excerpt.

    def loseConnection(self):
        # NOTE(review): body elided from this excerpt.

    def write(self, data):
        """
        Call this to write to standard input on this process.

        NOTE: This will silently lose data if there is no standard input.
        """
        # NOTE(review): an "if 0 in self.pipes:" guard appears elided here.
        self.pipes[0].write(data)

    def registerProducer(self, producer, streaming):
        """
        Call this to register producer for standard input.

        If there is no standard input producer.stopProducing() will
        be called immediately.
        """
        # NOTE(review): the if/else distinguishing these two calls is
        # elided; only one of them runs per invocation.
        self.pipes[0].registerProducer(producer, streaming)
        producer.stopProducing()

    def unregisterProducer(self):
        """
        Call this to unregister producer for standard input."""
        # NOTE(review): an "if 0 in self.pipes:" guard appears elided here.
        self.pipes[0].unregisterProducer()

    def writeSequence(self, seq):
        """
        Call this to write to standard input on this process.

        NOTE: This will silently lose data if there is no standard input.
        """
        # NOTE(review): an "if 0 in self.pipes:" guard appears elided here.
        self.pipes[0].writeSequence(seq)

    def childDataReceived(self, name, data):
        # Relay output from a child pipe to the protocol.
        self.proto.childDataReceived(name, data)

    def childConnectionLost(self, childFD, reason):
        # this is called when one of the helpers (ProcessReader or
        # ProcessWriter) notices their pipe has been closed
        os.close(self.pipes[childFD].fileno())
        del self.pipes[childFD]
        # NOTE(review): a try/except around the notification is elided.
        self.proto.childConnectionLost(childFD)
        self.maybeCallProcessEnded()

    def maybeCallProcessEnded(self):
        # we don't call ProcessProtocol.processEnded until:
        #  the child has terminated, AND
        #  all writers have indicated an error status, AND
        #  all readers have indicated EOF
        # This insures that we've gathered all output from the process.
        # NOTE(review): the self.pipes emptiness check is elided here.
        if not self.lostProcess:
        _BaseProcess.maybeCallProcessEnded(self)
class PTYProcess(abstract.FileDescriptor, _BaseProcess):
    """
    An operating-system Process that uses PTY support.
    """
    implements(IProcessTransport)

    def __init__(self, reactor, executable, args, environment, path, proto,
                 uid=None, gid=None, usePTY=None):
        """
        Spawn an operating-system process.

        This is where the hard work of disconnecting all currently open
        files / forking / executing the new process happens. (This is
        executed automatically when a Process is instantiated.)

        This will also run the subprocess as a given user ID and group ID, if
        specified. (Implementation Note: this doesn't support all the arcane
        nuances of setXXuid on UNIX: it will assume that either your effective
        """
        if pty is None and not isinstance(usePTY, (tuple, list)):
            # no pty module and we didn't get a pty to use
            raise NotImplementedError(
                "cannot use PTYProcess on platforms without the pty module.")
        abstract.FileDescriptor.__init__(self, reactor)
        _BaseProcess.__init__(self, proto)

        if isinstance(usePTY, (tuple, list)):
            # Caller supplied an already-opened pty triple.
            masterfd, slavefd, ttyname = usePTY
        # NOTE(review): the "else:" line is elided before the next two
        # lines, which allocate a fresh pty.
            masterfd, slavefd = pty.openpty()
            ttyname = os.ttyname(slavefd)

        self._fork(path, uid, gid, executable, args, environment,
                   masterfd=masterfd, slavefd=slavefd)

        # NOTE(review): the body of this branch (closing slavefd we opened
        # ourselves) is elided from this excerpt.
        if not isinstance(usePTY, (tuple, list)):

        # we are now in parent process:
        fdesc.setNonBlocking(masterfd)
        # NOTE(review): self.fd assignment and startReading() appear
        # elided here.
        self.proto.makeConnection(self)
        registerReapProcessHandler(self.pid, self)

    def _setupChild(self, masterfd, slavefd):
        """
        Setup child process after fork() but before exec().
        """
        # Detach from the controlling terminal so the slave pty can become
        # our controlling terminal.
        if hasattr(termios, 'TIOCNOTTY'):
            # NOTE(review): the try/except around opening /dev/tty is
            # elided from this excerpt.
            fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
            fcntl.ioctl(fd, termios.TIOCNOTTY, '')

        if hasattr(termios, 'TIOCSCTTY'):
            # Make the slave pty our controlling terminal.
            fcntl.ioctl(slavefd, termios.TIOCSCTTY, '')

        # Wire the slave pty up as the child's standard streams.
        os.dup2(slavefd, 0)  # stdin
        os.dup2(slavefd, 1)  # stdout
        os.dup2(slavefd, 2)  # stderr

        # NOTE(review): the loop body closing unneeded fds is elided.
        for fd in _listOpenFDs():

        self._resetSignalDisposition()

    # PTYs do not have stdin/stdout/stderr. They only have in and out, just
    # like sockets. You cannot close one without closing off the entire PTY.
    def closeStdin(self):
        # NOTE(review): body elided from this excerpt.

    def closeStdout(self):
        # NOTE(review): body elided from this excerpt.

    def closeStderr(self):
        # NOTE(review): body elided from this excerpt.

    # Called when my standard output stream is ready for reading.
    # NOTE(review): the "def doRead(self):" line and the "self.fd,"
    # argument line are elided here.
        return fdesc.readFromFD(
            lambda data: self.proto.childDataReceived(1, data))

    # This returns the file number of standard output on this process.
    # NOTE(review): the "def fileno(self):" line is elided here.

    def maybeCallProcessEnded(self):
        # two things must happen before we call the ProcessProtocol's
        # processEnded method. 1: the child process must die and be reaped
        # (which calls our own processEnded method). 2: the child must close
        # their stdin/stdout/stderr fds, causing the pty to close, causing
        # our connectionLost method to be called. #2 can also be triggered
        # by calling .loseConnection().
        if self.lostProcess == 2:
            _BaseProcess.maybeCallProcessEnded(self)

    def connectionLost(self, reason):
        """
        I call this to clean up when one or all of my connections has died.
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        # NOTE(review): an os.close(self.fd) appears elided here.
        self.lostProcess += 1
        self.maybeCallProcessEnded()

    def writeSomeData(self, data):
        """
        Write some data to the open process.
        """
        return fdesc.writeToFD(self.fd, data)