Diffstat (limited to 'testing/mozbase/mozprocess')
-rw-r--r--  testing/mozbase/mozprocess/mozprocess/__init__.py | 8
-rw-r--r--  testing/mozbase/mozprocess/mozprocess/processhandler.py | 1403
-rw-r--r--  testing/mozbase/mozprocess/mozprocess/qijo.py | 177
-rw-r--r--  testing/mozbase/mozprocess/mozprocess/winprocess.py | 566
-rw-r--r--  testing/mozbase/mozprocess/setup.cfg | 2
-rw-r--r--  testing/mozbase/mozprocess/setup.py | 38
-rw-r--r--  testing/mozbase/mozprocess/tests/manifest.ini | 12
-rw-r--r--  testing/mozbase/mozprocess/tests/process_normal_broad.ini | 30
-rw-r--r--  testing/mozbase/mozprocess/tests/process_normal_deep.ini | 65
-rw-r--r--  testing/mozbase/mozprocess/tests/process_normal_finish.ini | 17
-rw-r--r--  testing/mozbase/mozprocess/tests/process_normal_finish_no_process_group.ini | 2
-rw-r--r--  testing/mozbase/mozprocess/tests/process_waittimeout.ini | 16
-rw-r--r--  testing/mozbase/mozprocess/tests/process_waittimeout_10s.ini | 16
-rw-r--r--  testing/mozbase/mozprocess/tests/proclaunch.py | 211
-rw-r--r--  testing/mozbase/mozprocess/tests/proctest.py | 65
-rw-r--r--  testing/mozbase/mozprocess/tests/scripts/infinite_loop.py | 20
-rw-r--r--  testing/mozbase/mozprocess/tests/scripts/proccountfive.py | 4
-rw-r--r--  testing/mozbase/mozprocess/tests/scripts/procnonewline.py | 6
-rw-r--r--  testing/mozbase/mozprocess/tests/test_detached.py | 67
-rw-r--r--  testing/mozbase/mozprocess/tests/test_kill.py | 132
-rw-r--r--  testing/mozbase/mozprocess/tests/test_misc.py | 66
-rw-r--r--  testing/mozbase/mozprocess/tests/test_output.py | 78
-rw-r--r--  testing/mozbase/mozprocess/tests/test_params.py | 97
-rw-r--r--  testing/mozbase/mozprocess/tests/test_pid.py | 51
-rw-r--r--  testing/mozbase/mozprocess/tests/test_poll.py | 152
-rw-r--r--  testing/mozbase/mozprocess/tests/test_process_reader.py | 116
-rw-r--r--  testing/mozbase/mozprocess/tests/test_wait.py | 146
27 files changed, 3563 insertions, 0 deletions
diff --git a/testing/mozbase/mozprocess/mozprocess/__init__.py b/testing/mozbase/mozprocess/mozprocess/__init__.py
new file mode 100644
index 0000000000..0b238c2b25
--- /dev/null
+++ b/testing/mozbase/mozprocess/mozprocess/__init__.py
@@ -0,0 +1,8 @@
+# flake8: noqa
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import
+
+from .processhandler import *
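Because of the star import, everything listed in processhandler's __all__ is importable directly from the package. A tiny illustrative sketch (not part of the patch itself):

    from mozprocess import ProcessHandler, ProcessHandlerMixin, StoreOutput

    # Equivalent to importing from the submodule directly:
    # from mozprocess.processhandler import ProcessHandler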
diff --git a/testing/mozbase/mozprocess/mozprocess/processhandler.py b/testing/mozbase/mozprocess/mozprocess/processhandler.py
new file mode 100644
index 0000000000..464b1ee3a0
--- /dev/null
+++ b/testing/mozbase/mozprocess/mozprocess/processhandler.py
@@ -0,0 +1,1403 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import codecs
+import errno
+import io
+import os
+import signal
+import subprocess
+import sys
+import threading
+import traceback
+from datetime import datetime
+
+import six
+import time
+
+if six.PY2:
+ from Queue import Queue, Empty # Python 2
+else:
+ from queue import Queue, Empty # Python 3
+
+__all__ = [
+ "ProcessHandlerMixin",
+ "ProcessHandler",
+ "LogOutput",
+ "StoreOutput",
+ "StreamOutput",
+]
+
+# Set the MOZPROCESS_DEBUG environment variable to 1 to see some debugging output
+MOZPROCESS_DEBUG = os.getenv("MOZPROCESS_DEBUG")
+
+INTERVAL_PROCESS_ALIVE_CHECK = 0.02
+
+# We don't use mozinfo because it is expensive to import, see bug 933558.
+isWin = os.name == "nt"
+isPosix = os.name == "posix" # includes MacOS X
+
+if isWin:
+ from ctypes import sizeof, addressof, c_ulong, byref, WinError, c_longlong
+ from . import winprocess
+ from .qijo import (
+ JobObjectAssociateCompletionPortInformation,
+ JOBOBJECT_ASSOCIATE_COMPLETION_PORT,
+ JobObjectExtendedLimitInformation,
+ JOBOBJECT_BASIC_LIMIT_INFORMATION,
+ JOBOBJECT_EXTENDED_LIMIT_INFORMATION,
+ IO_COUNTERS,
+ )
+
+
+class ProcessHandlerMixin(object):
+ """
+ A class for launching and manipulating local processes.
+
+ :param cmd: command to run. May be a string or a list. If specified as a list, the first
+ element will be interpreted as the command, and all additional elements will be interpreted
+ as arguments to that command.
+ :param args: list of arguments to pass to the command (defaults to None). Must not be set when
+ `cmd` is specified as a list.
+ :param cwd: working directory for command (defaults to None).
+    :param env: the environment to use for the process (defaults to os.environ).
+    :param ignore_children: causes the system to ignore child processes when True,
+        defaults to False (which tracks child processes).
+ :param kill_on_timeout: when True, the process will be killed when a timeout is reached.
+ When False, the caller is responsible for killing the process.
+ Failure to do so could cause a call to wait() to hang indefinitely. (Defaults to True.)
+ :param processOutputLine: function or list of functions to be called for
+ each line of output produced by the process (defaults to an empty
+ list).
+ :param processStderrLine: function or list of functions to be called
+ for each line of error output - stderr - produced by the process
+ (defaults to an empty list). If this is not specified, stderr lines
+ will be sent to the *processOutputLine* callbacks.
+ :param onTimeout: function or list of functions to be called when the process times out.
+ :param onFinish: function or list of functions to be called when the process terminates
+ normally without timing out.
+ :param kwargs: additional keyword args to pass directly into Popen.
+
+ NOTE: Child processes will be tracked by default. If for any reason
+ we are unable to track child processes and ignore_children is set to False,
+ then we will fall back to only tracking the root process. The fallback
+ will be logged.
+ """
+
+ class Process(subprocess.Popen):
+ """
+ Represents our view of a subprocess.
+ It adds a kill() method which allows it to be stopped explicitly.
+ """
+
+ MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY = 180
+ MAX_PROCESS_KILL_DELAY = 30
+ TIMEOUT_BEFORE_SIGKILL = 1.0
+
+ def __init__(
+ self,
+ args,
+ bufsize=0,
+ executable=None,
+ stdin=None,
+ stdout=None,
+ stderr=None,
+ preexec_fn=None,
+ close_fds=False,
+ shell=False,
+ cwd=None,
+ env=None,
+ universal_newlines=False,
+ startupinfo=None,
+ creationflags=0,
+ ignore_children=False,
+ encoding="utf-8",
+ ):
+
+ # Parameter for whether or not we should attempt to track child processes
+ self._ignore_children = ignore_children
+
+ if not self._ignore_children and not isWin:
+                # Set the process group id for POSIX systems:
+                # the new process becomes the leader of its own process group
+                # (pgid == its own pid). NOTE: This overrides any user-supplied
+                # preexec_fn, TODO: Ideally, find a way around this
+ def setpgidfn():
+ os.setpgid(0, 0)
+
+ preexec_fn = setpgidfn
+
+ kwargs = {
+ "bufsize": bufsize,
+ "executable": executable,
+ "stdin": stdin,
+ "stdout": stdout,
+ "stderr": stderr,
+ "preexec_fn": preexec_fn,
+ "close_fds": close_fds,
+ "shell": shell,
+ "cwd": cwd,
+ "env": env,
+ "startupinfo": startupinfo,
+ "creationflags": creationflags,
+ }
+ if six.PY2:
+ kwargs["universal_newlines"] = universal_newlines
+ if six.PY3 and sys.version_info.minor >= 6 and universal_newlines:
+ kwargs["universal_newlines"] = universal_newlines
+ kwargs["encoding"] = encoding
+ try:
+ subprocess.Popen.__init__(self, args, **kwargs)
+ except OSError:
+ print(args, file=sys.stderr)
+ raise
+ # We need to support Python 3.5 for now, which doesn't support the
+ # "encoding" argument to the Popen constructor. For now, emulate it
+ # by patching the streams so that they return consistent values.
+ # This can be removed once we remove support for Python 3.5.
+ if six.PY3 and sys.version_info.minor == 5 and universal_newlines:
+ if self.stdin is not None:
+ self.stdin = io.TextIOWrapper(self.stdin, encoding=encoding)
+ if self.stdout is not None:
+ self.stdout = io.TextIOWrapper(self.stdout, encoding=encoding)
+ if self.stderr is not None:
+ self.stderr = io.TextIOWrapper(self.stderr, encoding=encoding)
+
+ def debug(self, msg):
+ if not MOZPROCESS_DEBUG:
+ return
+ thread = threading.current_thread().name
+ print("DBG::MOZPROC PID:{} ({}) | {}".format(self.pid, thread, msg))
+
+ def __del__(self):
+ if isWin:
+ if six.PY2:
+ _maxint = sys.maxint
+ else:
+ _maxint = sys.maxsize
+ handle = getattr(self, "_handle", None)
+ if handle:
+ self._internal_poll(_deadstate=_maxint)
+ if handle or self._job or self._io_port:
+ self._cleanup()
+ else:
+ subprocess.Popen.__del__(self)
+
+ def kill(self, sig=None):
+ if isWin:
+ try:
+ if not self._ignore_children and self._handle and self._job:
+ self.debug("calling TerminateJobObject")
+ winprocess.TerminateJobObject(
+ self._job, winprocess.ERROR_CONTROL_C_EXIT
+ )
+ elif self._handle:
+ self.debug("calling TerminateProcess")
+ winprocess.TerminateProcess(
+ self._handle, winprocess.ERROR_CONTROL_C_EXIT
+ )
+ except WindowsError:
+ self._cleanup()
+
+ traceback.print_exc()
+ raise OSError("Could not terminate process")
+
+ else:
+
+ def send_sig(sig, retries=0):
+ pid = self.detached_pid or self.pid
+ if not self._ignore_children:
+ try:
+ os.killpg(pid, sig)
+ except BaseException as e:
+ # On Mac OSX if the process group contains zombie
+ # processes, killpg results in an EPERM.
+ # In this case, zombie processes need to be reaped
+ # before continuing
+ # Note: A negative pid refers to the entire process
+ # group
+ if retries < 1 and getattr(e, "errno", None) == errno.EPERM:
+ try:
+ os.waitpid(-pid, 0)
+ finally:
+ return send_sig(sig, retries + 1)
+
+ # ESRCH is a "no such process" failure, which is fine because the
+ # application might already have been terminated itself. Any other
+ # error would indicate a problem in killing the process.
+ if getattr(e, "errno", None) != errno.ESRCH:
+ print(
+ "Could not terminate process: %s" % self.pid,
+ file=sys.stderr,
+ )
+ raise
+ else:
+ os.kill(pid, sig)
+
+ if sig is None and isPosix:
+ # ask the process for termination and wait a bit
+ send_sig(signal.SIGTERM)
+ limit = time.time() + self.TIMEOUT_BEFORE_SIGKILL
+ while time.time() <= limit:
+ if self.poll() is not None:
+ # process terminated nicely
+ break
+ time.sleep(INTERVAL_PROCESS_ALIVE_CHECK)
+ else:
+ # process did not terminate - send SIGKILL to force
+ send_sig(signal.SIGKILL)
+ else:
+ # a signal was explicitly set or not posix
+ send_sig(sig or signal.SIGKILL)
+
+ self.returncode = self.wait()
+ self._cleanup()
+ return self.returncode
+
+ def poll(self):
+ """Popen.poll
+ Check if child process has terminated. Set and return returncode attribute.
+ """
+ # If we have a handle, the process is alive
+ if isWin and getattr(self, "_handle", None):
+ return None
+
+ return subprocess.Popen.poll(self)
+
+ def wait(self, timeout=None):
+ """Popen.wait
+ Called to wait for a running process to shut down and return
+ its exit code
+ Returns the main process's exit code
+ """
+ # This call will be different for each OS
+ self.returncode = self._custom_wait(timeout=timeout)
+ self._cleanup()
+ return self.returncode
+
+ """ Private Members of Process class """
+
+ if isWin:
+ # Redefine the execute child so that we can track process groups
+ def _execute_child(self, *args_tuple):
+ # workaround for bug 1670130
+                if sys.hexversion >= 0x03090000:  # 3.9.0 and later
+ (
+ args,
+ executable,
+ preexec_fn,
+ close_fds,
+ pass_fds,
+ cwd,
+ env,
+ startupinfo,
+ creationflags,
+ shell,
+ p2cread,
+ p2cwrite,
+ c2pread,
+ c2pwrite,
+ errread,
+ errwrite,
+ restore_signals,
+ gid,
+ gids,
+ uid,
+ umask,
+ start_new_session,
+ ) = args_tuple
+ elif six.PY3:
+ (
+ args,
+ executable,
+ preexec_fn,
+ close_fds,
+ pass_fds,
+ cwd,
+ env,
+ startupinfo,
+ creationflags,
+ shell,
+ p2cread,
+ p2cwrite,
+ c2pread,
+ c2pwrite,
+ errread,
+ errwrite,
+ restore_signals,
+ start_new_session,
+ ) = args_tuple
+ # workaround for bug 950894
+ elif sys.hexversion < 0x02070600: # prior to 2.7.6
+ (
+ args,
+ executable,
+ preexec_fn,
+ close_fds,
+ cwd,
+ env,
+ universal_newlines,
+ startupinfo,
+ creationflags,
+ shell,
+ p2cread,
+ p2cwrite,
+ c2pread,
+ c2pwrite,
+ errread,
+ errwrite,
+ ) = args_tuple
+ to_close = set()
+ else: # 2.7.6 and later
+ (
+ args,
+ executable,
+ preexec_fn,
+ close_fds,
+ cwd,
+ env,
+ universal_newlines,
+ startupinfo,
+ creationflags,
+ shell,
+ to_close,
+ p2cread,
+ p2cwrite,
+ c2pread,
+ c2pwrite,
+ errread,
+ errwrite,
+ ) = args_tuple
+ if not isinstance(args, six.string_types):
+ args = subprocess.list2cmdline(args)
+
+            # Always OR in the CREATE_NEW_PROCESS_GROUP flag
+ creationflags |= winprocess.CREATE_NEW_PROCESS_GROUP
+
+ if startupinfo is None:
+ startupinfo = winprocess.STARTUPINFO()
+
+ if None not in (p2cread, c2pwrite, errwrite):
+ startupinfo.dwFlags |= winprocess.STARTF_USESTDHANDLES
+ startupinfo.hStdInput = int(p2cread)
+ startupinfo.hStdOutput = int(c2pwrite)
+ startupinfo.hStdError = int(errwrite)
+ if shell:
+ startupinfo.dwFlags |= winprocess.STARTF_USESHOWWINDOW
+ startupinfo.wShowWindow = winprocess.SW_HIDE
+ comspec = os.environ.get("COMSPEC", "cmd.exe")
+ args = comspec + " /c " + args
+
+ # Determine if we can create a job or create nested jobs.
+ can_create_job = winprocess.CanCreateJobObject()
+ can_nest_jobs = self._can_nest_jobs()
+
+ # Ensure we write a warning message if we are falling back
+ if not (can_create_job or can_nest_jobs) and not self._ignore_children:
+ # We can't create job objects AND the user wanted us to
+ # Warn the user about this.
+ print(
+ "ProcessManager UNABLE to use job objects to manage "
+ "child processes",
+ file=sys.stderr,
+ )
+
+ # set process creation flags
+ creationflags |= winprocess.CREATE_SUSPENDED
+ creationflags |= winprocess.CREATE_UNICODE_ENVIRONMENT
+ if can_create_job:
+ creationflags |= winprocess.CREATE_BREAKAWAY_FROM_JOB
+ if not (can_create_job or can_nest_jobs):
+ # Since we've warned, we just log info here to inform you
+ # of the consequence of setting ignore_children = True
+ print("ProcessManager NOT managing child processes")
+
+ # create the process
+ hp, ht, pid, tid = winprocess.CreateProcess(
+ executable,
+ args,
+ None,
+ None, # No special security
+ 1, # Must inherit handles!
+ creationflags,
+ winprocess.EnvironmentBlock(env),
+ cwd,
+ startupinfo,
+ )
+ self._child_created = True
+ self._handle = hp
+ self._thread = ht
+ self.pid = pid
+ self.tid = tid
+
+ if not self._ignore_children and (can_create_job or can_nest_jobs):
+ try:
+ # We create a new job for this process, so that we can kill
+ # the process and any sub-processes
+ # Create the IO Completion Port
+ self._io_port = winprocess.CreateIoCompletionPort()
+ self._job = winprocess.CreateJobObject()
+
+ # Now associate the io comp port and the job object
+ joacp = JOBOBJECT_ASSOCIATE_COMPLETION_PORT(
+ winprocess.COMPKEY_JOBOBJECT, self._io_port
+ )
+ winprocess.SetInformationJobObject(
+ self._job,
+ JobObjectAssociateCompletionPortInformation,
+ addressof(joacp),
+ sizeof(joacp),
+ )
+
+ # Allow subprocesses to break away from us - necessary when
+ # Firefox restarts, or flash with protected mode
+ limit_flags = winprocess.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ if not can_nest_jobs:
+ # This allows sandbox processes to create their own job,
+ # and is necessary to set for older versions of Windows
+ # without nested job support.
+ limit_flags |= winprocess.JOB_OBJECT_LIMIT_BREAKAWAY_OK
+
+ jbli = JOBOBJECT_BASIC_LIMIT_INFORMATION(
+ c_longlong(0), # per process time limit (ignored)
+ c_longlong(0), # per job user time limit (ignored)
+ limit_flags,
+ 0, # min working set (ignored)
+ 0, # max working set (ignored)
+ 0, # active process limit (ignored)
+ None, # affinity (ignored)
+ 0, # Priority class (ignored)
+ 0, # Scheduling class (ignored)
+ )
+
+ iocntr = IO_COUNTERS()
+ jeli = JOBOBJECT_EXTENDED_LIMIT_INFORMATION(
+ jbli, # basic limit info struct
+ iocntr, # io_counters (ignored)
+ 0, # process mem limit (ignored)
+ 0, # job mem limit (ignored)
+ 0, # peak process limit (ignored)
+ 0,
+ ) # peak job limit (ignored)
+
+ winprocess.SetInformationJobObject(
+ self._job,
+ JobObjectExtendedLimitInformation,
+ addressof(jeli),
+ sizeof(jeli),
+ )
+
+ # Assign the job object to the process
+ winprocess.AssignProcessToJobObject(self._job, int(hp))
+
+ # It's overkill, but we use Queue to signal between threads
+ # because it handles errors more gracefully than event or condition.
+ self._process_events = Queue()
+
+ # Spin up our thread for managing the IO Completion Port
+ self._procmgrthread = threading.Thread(target=self._procmgr)
+ except Exception:
+ print(
+ """Exception trying to use job objects;
+falling back to not using job objects for managing child processes""",
+ file=sys.stderr,
+ )
+ tb = traceback.format_exc()
+ print(tb, file=sys.stderr)
+ # Ensure no dangling handles left behind
+ self._cleanup_job_io_port()
+ else:
+ self._job = None
+
+ winprocess.ResumeThread(int(ht))
+ if getattr(self, "_procmgrthread", None):
+ self._procmgrthread.start()
+ ht.Close()
+
+ for i in (p2cread, c2pwrite, errwrite):
+ if i is not None:
+ i.Close()
+
+ # Per:
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/hh448388%28v=vs.85%29.aspx
+ # Nesting jobs came in with windows versions starting with 6.2 according to the table
+ # on this page:
+ # https://msdn.microsoft.com/en-us/library/ms724834%28v=vs.85%29.aspx
+ def _can_nest_jobs(self):
+ winver = sys.getwindowsversion()
+ return winver.major > 6 or winver.major == 6 and winver.minor >= 2
+
+ # Windows Process Manager - watches the IO Completion Port and
+ # keeps track of child processes
+ def _procmgr(self):
+ if not (self._io_port) or not (self._job):
+ return
+
+ try:
+ self._poll_iocompletion_port()
+ except KeyboardInterrupt:
+ raise KeyboardInterrupt
+
+ def _poll_iocompletion_port(self):
+ # Watch the IO Completion port for status
+ self._spawned_procs = {}
+ countdowntokill = 0
+
+ self.debug("start polling IO completion port")
+
+ while True:
+ msgid = c_ulong(0)
+ compkey = c_ulong(0)
+ pid = c_ulong(0)
+ portstatus = winprocess.GetQueuedCompletionStatus(
+ self._io_port, byref(msgid), byref(compkey), byref(pid), 5000
+ )
+
+ # If the countdowntokill has been activated, we need to check
+ # if we should start killing the children or not.
+ if countdowntokill != 0:
+ diff = datetime.now() - countdowntokill
+ # Arbitrarily wait 3 minutes for windows to get its act together
+ # Windows sometimes takes a small nap between notifying the
+ # IO Completion port and actually killing the children, and we
+ # don't want to mistake that situation for the situation of an unexpected
+ # parent abort (which is what we're looking for here).
+ if diff.seconds > self.MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY:
+ print(
+ "WARNING | IO Completion Port failed to signal "
+ "process shutdown",
+ file=sys.stderr,
+ )
+ print(
+ "Parent process %s exited with children alive:"
+ % self.pid,
+ file=sys.stderr,
+ )
+ print(
+ "PIDS: %s"
+ % ", ".join([str(i) for i in self._spawned_procs]),
+ file=sys.stderr,
+ )
+ print(
+ "Attempting to kill them, but no guarantee of success",
+ file=sys.stderr,
+ )
+
+ self.kill()
+ self._process_events.put({self.pid: "FINISHED"})
+ break
+
+ if not portstatus:
+ # Check to see what happened
+ errcode = winprocess.GetLastError()
+ if errcode == winprocess.ERROR_ABANDONED_WAIT_0:
+ # Then something has killed the port, break the loop
+ print(
+ "IO Completion Port unexpectedly closed",
+ file=sys.stderr,
+ )
+ self._process_events.put({self.pid: "FINISHED"})
+ break
+ elif errcode == winprocess.WAIT_TIMEOUT:
+ # Timeouts are expected, just keep on polling
+ continue
+ else:
+ print(
+ "Error Code %s trying to query IO Completion Port, "
+ "exiting" % errcode,
+ file=sys.stderr,
+ )
+ raise WinError(errcode)
+ break
+
+ if compkey.value == winprocess.COMPKEY_TERMINATE.value:
+ self.debug("compkeyterminate detected")
+ # Then we're done
+ break
+
+ # Check the status of the IO Port and do things based on it
+ if compkey.value == winprocess.COMPKEY_JOBOBJECT.value:
+ if msgid.value == winprocess.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
+ # No processes left, time to shut down
+ # Signal anyone waiting on us that it is safe to shut down
+ self.debug("job object msg active processes zero")
+ self._process_events.put({self.pid: "FINISHED"})
+ break
+ elif msgid.value == winprocess.JOB_OBJECT_MSG_NEW_PROCESS:
+ # New Process started
+ # Add the child proc to our list in case our parent flakes out on us
+ # without killing everything.
+ if pid.value != self.pid:
+ self._spawned_procs[pid.value] = 1
+ self.debug(
+ "new process detected with pid value: %s"
+ % pid.value
+ )
+ elif msgid.value == winprocess.JOB_OBJECT_MSG_EXIT_PROCESS:
+ self.debug("process id %s exited normally" % pid.value)
+ # One process exited normally
+ if pid.value == self.pid and len(self._spawned_procs) > 0:
+ # Parent process dying, start countdown timer
+ countdowntokill = datetime.now()
+ elif pid.value in self._spawned_procs:
+ # Child Process died remove from list
+ del self._spawned_procs[pid.value]
+ elif (
+ msgid.value
+ == winprocess.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS
+ ):
+                        # One process exited abnormally
+ self.debug("process id %s exited abnormally" % pid.value)
+ if pid.value == self.pid and len(self._spawned_procs) > 0:
+ # Parent process dying, start countdown timer
+ countdowntokill = datetime.now()
+ elif pid.value in self._spawned_procs:
+ # Child Process died remove from list
+ del self._spawned_procs[pid.value]
+ else:
+ # We don't care about anything else
+ self.debug("We got a message %s" % msgid.value)
+ pass
+
+ def _custom_wait(self, timeout=None):
+ """Custom implementation of wait.
+
+ - timeout: number of seconds before timing out. If None,
+ will wait indefinitely.
+ """
+ # First, check to see if the process is still running
+ if self._handle:
+ self.returncode = winprocess.GetExitCodeProcess(self._handle)
+ else:
+ # Dude, the process is like totally dead!
+ return self.returncode
+
+ threadalive = False
+ if hasattr(self, "_procmgrthread"):
+ threadalive = self._procmgrthread.is_alive()
+ if (
+ self._job
+ and threadalive
+ and threading.current_thread() != self._procmgrthread
+ ):
+ self.debug("waiting with IO completion port")
+ if timeout is None:
+ timeout = (
+ self.MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY
+ + self.MAX_PROCESS_KILL_DELAY
+ )
+ # Then we are managing with IO Completion Ports
+ # wait on a signal so we know when we have seen the last
+ # process come through.
+ # We use queues to synchronize between the thread and this
+ # function because events just didn't have robust enough error
+ # handling on pre-2.7 versions
+ try:
+ # timeout is the max amount of time the procmgr thread will wait for
+ # child processes to shutdown before killing them with extreme prejudice.
+ item = self._process_events.get(timeout=timeout)
+ if item[self.pid] == "FINISHED":
+ self.debug("received 'FINISHED' from _procmgrthread")
+ self._process_events.task_done()
+ except Exception:
+ traceback.print_exc()
+ raise OSError(
+ "IO Completion Port failed to signal process shutdown"
+ )
+ finally:
+ if self._handle:
+ self.returncode = winprocess.GetExitCodeProcess(
+ self._handle
+ )
+ self._cleanup()
+
+ else:
+ # Not managing with job objects, so all we can reasonably do
+ # is call waitforsingleobject and hope for the best
+ self.debug("waiting without IO completion port")
+
+ if not self._ignore_children:
+ self.debug("NOT USING JOB OBJECTS!!!")
+ # First, make sure we have not already ended
+ if self.returncode != winprocess.STILL_ACTIVE:
+ self._cleanup()
+ return self.returncode
+
+ rc = None
+ if self._handle:
+ if timeout is None:
+ timeout = -1
+ else:
+ # timeout for WaitForSingleObject is in ms
+ timeout = timeout * 1000
+
+ rc = winprocess.WaitForSingleObject(self._handle, timeout)
+
+ if rc == winprocess.WAIT_TIMEOUT:
+ # The process isn't dead, so kill it
+ print(
+ "Timed out waiting for process to close, "
+ "attempting TerminateProcess"
+ )
+ self.kill()
+ elif rc == winprocess.WAIT_OBJECT_0:
+ # We caught WAIT_OBJECT_0, which indicates all is well
+ print("Single process terminated successfully")
+ self.returncode = winprocess.GetExitCodeProcess(self._handle)
+ else:
+                        # An error occurred; we should probably throw
+ rc = winprocess.GetLastError()
+ if rc:
+ raise WinError(rc)
+
+ self._cleanup()
+
+ return self.returncode
+
+ def _cleanup_job_io_port(self):
+ """Do the job and IO port cleanup separately because there are
+ cases where we want to clean these without killing _handle
+ (i.e. if we fail to create the job object in the first place)
+ """
+ if (
+ getattr(self, "_job")
+ and self._job != winprocess.INVALID_HANDLE_VALUE
+ ):
+ self._job.Close()
+ self._job = None
+ else:
+ # If windows already freed our handle just set it to none
+ # (saw this intermittently while testing)
+ self._job = None
+
+ if (
+ getattr(self, "_io_port", None)
+ and self._io_port != winprocess.INVALID_HANDLE_VALUE
+ ):
+ self._io_port.Close()
+ self._io_port = None
+ else:
+ self._io_port = None
+
+ if getattr(self, "_procmgrthread", None):
+ self._procmgrthread = None
+
+ def _cleanup(self):
+ self._cleanup_job_io_port()
+ if self._thread and self._thread != winprocess.INVALID_HANDLE_VALUE:
+ self._thread.Close()
+ self._thread = None
+ else:
+ self._thread = None
+
+ if self._handle and self._handle != winprocess.INVALID_HANDLE_VALUE:
+ self._handle.Close()
+ self._handle = None
+ else:
+ self._handle = None
+
+ elif isPosix:
+
+ def _custom_wait(self, timeout=None):
+ """Haven't found any reason to differentiate between these platforms
+ so they all use the same wait callback. If it is necessary to
+ craft different styles of wait, then a new _custom_wait method
+ could be easily implemented.
+ """
+
+ if not self._ignore_children:
+ try:
+ # os.waitpid return value:
+ # > [...] a tuple containing its pid and exit status
+ # > indication: a 16-bit number, whose low byte is the
+ # > signal number that killed the process, and whose
+ # > high byte is the exit status (if the signal number
+ # > is zero)
+ # - http://docs.python.org/2/library/os.html#os.wait
+ status = os.waitpid(self.pid, 0)[1]
+
+ # For consistency, format status the same as subprocess'
+ # returncode attribute
+ if status > 255:
+ return status >> 8
+ return -status
+ except OSError as e:
+ if getattr(e, "errno", None) != 10:
+ # Error 10 is "no child process", which could indicate normal
+ # close
+ print(
+ "Encountered error waiting for pid to close: %s" % e,
+ file=sys.stderr,
+ )
+ raise
+
+ return self.returncode
+
+ else:
+ # For non-group wait, call base class
+ if six.PY2:
+ subprocess.Popen.wait(self)
+ else:
+ # timeout was introduced in Python 3.3
+ subprocess.Popen.wait(self, timeout=timeout)
+ return self.returncode
+
+ def _cleanup(self):
+ pass
+
+ else:
+ # An unrecognized platform, we will call the base class for everything
+ print(
+ "Unrecognized platform, process groups may not " "be managed properly",
+ file=sys.stderr,
+ )
+
+ def _custom_wait(self, timeout=None):
+ if six.PY2:
+ self.returncode = subprocess.Popen.wait(self)
+ else:
+ # timeout was introduced in Python 3.3
+ self.returncode = subprocess.Popen.wait(self, timeout=timeout)
+ return self.returncode
+
+ def _cleanup(self):
+ pass
+
+ def __init__(
+ self,
+ cmd,
+ args=None,
+ cwd=None,
+ env=None,
+ ignore_children=False,
+ kill_on_timeout=True,
+ processOutputLine=(),
+ processStderrLine=(),
+ onTimeout=(),
+ onFinish=(),
+ **kwargs
+ ):
+ self.cmd = cmd
+ self.args = args
+ self.cwd = cwd
+ self.didTimeout = False
+ self.didOutputTimeout = False
+ self._ignore_children = ignore_children
+ self.keywordargs = kwargs
+ self.read_buffer = ""
+
+ if env is None:
+ env = os.environ.copy()
+ self.env = env
+
+ # handlers
+ def to_callable_list(arg):
+ if callable(arg):
+ arg = [arg]
+ return CallableList(arg)
+
+ processOutputLine = to_callable_list(processOutputLine)
+ processStderrLine = to_callable_list(processStderrLine)
+ onTimeout = to_callable_list(onTimeout)
+ onFinish = to_callable_list(onFinish)
+
+ def on_timeout():
+ self.didTimeout = True
+ self.didOutputTimeout = self.reader.didOutputTimeout
+ if kill_on_timeout:
+ self.kill()
+
+ onTimeout.insert(0, on_timeout)
+
+ self._stderr = subprocess.STDOUT
+ if processStderrLine:
+ self._stderr = subprocess.PIPE
+ self.reader = ProcessReader(
+ stdout_callback=processOutputLine,
+ stderr_callback=processStderrLine,
+ finished_callback=onFinish,
+ timeout_callback=onTimeout,
+ )
+
+ # It is common for people to pass in the entire array with the cmd and
+ # the args together since this is how Popen uses it. Allow for that.
+ if isinstance(self.cmd, list):
+ if self.args is not None:
+ raise TypeError("cmd and args must not both be lists")
+ (self.cmd, self.args) = (self.cmd[0], self.cmd[1:])
+ elif self.args is None:
+ self.args = []
+
+ def debug(self, msg):
+ if not MOZPROCESS_DEBUG:
+ return
+ cmd = self.cmd.split(os.sep)[-1:]
+ print("DBG::MOZPROC ProcessHandlerMixin {} | {}".format(cmd, msg))
+
+ @property
+ def timedOut(self):
+ """True if the process has timed out for any reason."""
+ return self.didTimeout
+
+ @property
+ def outputTimedOut(self):
+ """True if the process has timed out for no output."""
+ return self.didOutputTimeout
+
+ @property
+ def commandline(self):
+ """the string value of the command line (command + args)"""
+ return subprocess.list2cmdline([self.cmd] + self.args)
+
+ def run(self, timeout=None, outputTimeout=None):
+ """
+ Starts the process.
+
+ If timeout is not None, the process will be allowed to continue for
+ that number of seconds before being killed. If the process is killed
+ due to a timeout, the onTimeout handler will be called.
+
+ If outputTimeout is not None, the process will be allowed to continue
+ for that number of seconds without producing any output before
+ being killed.
+ """
+ self.didTimeout = False
+ self.didOutputTimeout = False
+
+ # default arguments
+ args = dict(
+ stdout=subprocess.PIPE,
+ stderr=self._stderr,
+ cwd=self.cwd,
+ env=self.env,
+ ignore_children=self._ignore_children,
+ )
+
+ # build process arguments
+ args.update(self.keywordargs)
+
+ # launch the process
+ self.proc = self.Process([self.cmd] + self.args, **args)
+
+ if isPosix:
+ # Keep track of the initial process group in case the process detaches itself
+ self.proc.pgid = self._getpgid(self.proc.pid)
+ self.proc.detached_pid = None
+
+ self.processOutput(timeout=timeout, outputTimeout=outputTimeout)
+
+ def kill(self, sig=None):
+ """
+ Kills the managed process.
+
+ If you created the process with 'ignore_children=False' (the
+        default) then it will also kill all child processes spawned by
+ it. If you specified 'ignore_children=True' when creating the
+ process, only the root process will be killed.
+
+ Note that this does not manage any state, save any output etc,
+ it immediately kills the process.
+
+ :param sig: Signal used to kill the process, defaults to SIGKILL
+ (has no effect on Windows)
+ """
+ if not hasattr(self, "proc"):
+ raise RuntimeError("Process hasn't been started yet")
+
+ self.proc.kill(sig=sig)
+
+        # When we kill the managed process we also have to wait for the
+        # reader thread to be finished. Otherwise consumers would have to assume
+        # that it still has not completely shut down.
+ rc = self.wait()
+ if rc is None:
+ self.debug("kill: wait failed -- process is still alive")
+ return rc
+
+ def poll(self):
+ """Check if child process has terminated
+
+ Returns the current returncode value:
+ - None if the process hasn't terminated yet
+ - A negative number if the process was killed by signal N (Unix only)
+ - '0' if the process ended without failures
+
+ """
+ if not hasattr(self, "proc"):
+ raise RuntimeError("Process hasn't been started yet")
+
+ # Ensure that we first check for the reader status. Otherwise
+ # we might mark the process as finished while output is still getting
+ # processed.
+ elif self.reader.is_alive():
+ return None
+ elif hasattr(self, "returncode"):
+ return self.returncode
+ else:
+ return self.proc.poll()
+
+ def processOutput(self, timeout=None, outputTimeout=None):
+ """
+ Handle process output until the process terminates or times out.
+
+ If timeout is not None, the process will be allowed to continue for
+ that number of seconds before being killed.
+
+ If outputTimeout is not None, the process will be allowed to continue
+ for that number of seconds without producing any output before
+ being killed.
+ """
+ # this method is kept for backward compatibility
+ if not hasattr(self, "proc"):
+ self.run(timeout=timeout, outputTimeout=outputTimeout)
+ # self.run will call this again
+ return
+ if not self.reader.is_alive():
+ self.reader.timeout = timeout
+ self.reader.output_timeout = outputTimeout
+ self.reader.start(self.proc)
+
+ def wait(self, timeout=None):
+ """
+ Waits until all output has been read and the process is
+ terminated.
+
+ If timeout is not None, will return after timeout seconds.
+ This timeout only causes the wait function to return and
+ does not kill the process.
+
+ Returns the process exit code value:
+ - None if the process hasn't terminated yet
+ - A negative number if the process was killed by signal N (Unix only)
+ - '0' if the process ended without failures
+
+ """
+ # Thread.join() blocks the main thread until the reader thread is finished
+ # wake up once a second in case a keyboard interrupt is sent
+ if self.reader.thread and self.reader.thread is not threading.current_thread():
+ count = 0
+ while self.reader.is_alive():
+ self.reader.join(timeout=1)
+ count += 1
+ if timeout is not None and count > timeout:
+ self.debug("wait timeout for reader thread")
+ return None
+
+ self.returncode = self.proc.wait()
+ return self.returncode
+
+ @property
+ def pid(self):
+ if not hasattr(self, "proc"):
+ raise RuntimeError("Process hasn't been started yet")
+
+ return self.proc.pid
+
+ @staticmethod
+ def pid_exists(pid):
+ if pid < 0:
+ return False
+
+ if isWin:
+ try:
+ process = winprocess.OpenProcess(
+ winprocess.PROCESS_QUERY_INFORMATION | winprocess.PROCESS_VM_READ,
+ False,
+ pid,
+ )
+ return winprocess.GetExitCodeProcess(process) == winprocess.STILL_ACTIVE
+
+ except WindowsError as e:
+ # no such process
+ if e.winerror == winprocess.ERROR_INVALID_PARAMETER:
+ return False
+
+ # access denied
+ if e.winerror == winprocess.ERROR_ACCESS_DENIED:
+ return True
+
+ # re-raise for any other type of exception
+ raise
+
+ elif isPosix:
+ try:
+ os.kill(pid, 0)
+ except OSError as e:
+ return e.errno == errno.EPERM
+ else:
+ return True
+
+ @classmethod
+ def _getpgid(cls, pid):
+ try:
+ return os.getpgid(pid)
+ except OSError as e:
+ # Do not raise for "No such process"
+ if e.errno != errno.ESRCH:
+ raise
+
+ def check_for_detached(self, new_pid):
+ """Check if the current process has been detached and mark it appropriately.
+
+ In case of application restarts the process can spawn itself into a new process group.
+        From then on the process can no longer be tracked by mozprocess and has to be
+ marked as detached. If the consumer of mozprocess still knows the new process id it could
+ check for the detached state.
+
+ new_pid is the new process id of the child process.
+ """
+ if not hasattr(self, "proc"):
+ raise RuntimeError("Process hasn't been started yet")
+
+ if isPosix:
+ new_pgid = self._getpgid(new_pid)
+
+ if new_pgid and new_pgid != self.proc.pgid:
+ self.proc.detached_pid = new_pid
+ print(
+ 'Child process with id "%s" has been marked as detached because it is no '
+ "longer in the managed process group. Keeping reference to the process id "
+ '"%s" which is the new child process.' % (self.pid, new_pid),
+ file=sys.stdout,
+ )
+
+
+class CallableList(list):
+ def __call__(self, *args, **kwargs):
+ for e in self:
+ e(*args, **kwargs)
+
+ def __add__(self, lst):
+ return CallableList(list.__add__(self, lst))
+
+
+class ProcessReader(object):
+ def __init__(
+ self,
+ stdout_callback=None,
+ stderr_callback=None,
+ finished_callback=None,
+ timeout_callback=None,
+ timeout=None,
+ output_timeout=None,
+ ):
+ self.stdout_callback = stdout_callback or (lambda line: True)
+ self.stderr_callback = stderr_callback or (lambda line: True)
+ self.finished_callback = finished_callback or (lambda: True)
+ self.timeout_callback = timeout_callback or (lambda: True)
+ self.timeout = timeout
+ self.output_timeout = output_timeout
+ self.thread = None
+ self.didOutputTimeout = False
+
+ def debug(self, msg):
+ if not MOZPROCESS_DEBUG:
+ return
+ print("DBG::MOZPROC ProcessReader | {}".format(msg))
+
+ def _create_stream_reader(self, name, stream, queue, callback):
+ thread = threading.Thread(
+ name=name, target=self._read_stream, args=(stream, queue, callback)
+ )
+ thread.daemon = True
+ thread.start()
+ return thread
+
+ def _read_stream(self, stream, queue, callback):
+ while True:
+ line = stream.readline()
+ if not line:
+ break
+ queue.put((line, callback))
+ stream.close()
+
+ def start(self, proc):
+ queue = Queue()
+ stdout_reader = None
+ if proc.stdout:
+ stdout_reader = self._create_stream_reader(
+ "ProcessReaderStdout", proc.stdout, queue, self.stdout_callback
+ )
+ stderr_reader = None
+ if proc.stderr and proc.stderr != proc.stdout:
+ stderr_reader = self._create_stream_reader(
+ "ProcessReaderStderr", proc.stderr, queue, self.stderr_callback
+ )
+ self.thread = threading.Thread(
+ name="ProcessReader",
+ target=self._read,
+ args=(stdout_reader, stderr_reader, queue),
+ )
+ self.thread.daemon = True
+ self.thread.start()
+ self.debug("ProcessReader started")
+
+ def _read(self, stdout_reader, stderr_reader, queue):
+ start_time = time.time()
+ timed_out = False
+ timeout = self.timeout
+ if timeout is not None:
+ timeout += start_time
+ output_timeout = self.output_timeout
+ if output_timeout is not None:
+ output_timeout += start_time
+
+ while (stdout_reader and stdout_reader.is_alive()) or (
+ stderr_reader and stderr_reader.is_alive()
+ ):
+ has_line = True
+ try:
+ line, callback = queue.get(True, INTERVAL_PROCESS_ALIVE_CHECK)
+ except Empty:
+ has_line = False
+ now = time.time()
+ if not has_line:
+ if output_timeout is not None and now > output_timeout:
+ timed_out = True
+ self.didOutputTimeout = True
+ break
+ else:
+ if output_timeout is not None:
+ output_timeout = now + self.output_timeout
+ callback(line.rstrip())
+ if timeout is not None and now > timeout:
+ timed_out = True
+ break
+ self.debug("_read loop exited")
+ # process remaining lines to read
+ while not queue.empty():
+ line, callback = queue.get(False)
+ callback(line.rstrip())
+ if timed_out:
+ self.timeout_callback()
+ if stdout_reader:
+ stdout_reader.join()
+ if stderr_reader:
+ stderr_reader.join()
+ if not timed_out:
+ self.finished_callback()
+ self.debug("_read exited")
+
+ def is_alive(self):
+ if self.thread:
+ return self.thread.is_alive()
+ return False
+
+ def join(self, timeout=None):
+ if self.thread:
+ self.thread.join(timeout=timeout)
+
+
+# default output handlers
+# these should be callables that take the output line
+
+
+class StoreOutput(object):
+ """accumulate stdout"""
+
+ def __init__(self):
+ self.output = []
+
+ def __call__(self, line):
+ self.output.append(line)
+
+
+class StreamOutput(object):
+ """pass output to a stream and flush"""
+
+ def __init__(self, stream, text=True):
+ self.stream = stream
+ self.text = text
+
+ def __call__(self, line):
+ ensure = six.ensure_text if self.text else six.ensure_binary
+ try:
+ self.stream.write(ensure(line) + ensure("\n"))
+ except TypeError:
+ print(
+ "HEY! If you're reading this, you're about to encounter a "
+ "type error, probably as a result of a conversion from "
+ "Python 2 to Python 3. This is almost definitely because "
+ "you're trying to write binary data to a text-encoded "
+ "stream, or text data to a binary-encoded stream. Check how "
+ "you're instantiating your ProcessHandler and if the output "
+ "should be text-encoded, make sure you pass "
+ "universal_newlines=True.",
+ file=sys.stderr,
+ )
+ raise
+ self.stream.flush()
+
+
+class LogOutput(StreamOutput):
+ """pass output to a file"""
+
+ def __init__(self, filename):
+ self.file_obj = open(filename, "a")
+ StreamOutput.__init__(self, self.file_obj, True)
+
+ def __del__(self):
+ if self.file_obj is not None:
+ self.file_obj.close()
+
+
+# front end class with the default handlers
+
+
+class ProcessHandler(ProcessHandlerMixin):
+ """
+ Convenience class for handling processes with default output handlers.
+
+ By default, all output is sent to stdout. This can be disabled by setting
+ the *stream* argument to None.
+
+    If the processOutputLine keyword argument is specified, the function or the
+    list of functions specified by this argument will be called for each line
+    of output; in that case the output will not be written to stdout
+    automatically, even if stream is True (the default).
+
+ If storeOutput==True, the output produced by the process will be saved
+ as self.output.
+
+ If logfile is not None, the output produced by the process will be
+ appended to the given file.
+ """
+
+ def __init__(self, cmd, logfile=None, stream=True, storeOutput=True, **kwargs):
+ kwargs.setdefault("processOutputLine", [])
+ if callable(kwargs["processOutputLine"]):
+ kwargs["processOutputLine"] = [kwargs["processOutputLine"]]
+
+ if logfile:
+ logoutput = LogOutput(logfile)
+ kwargs["processOutputLine"].append(logoutput)
+
+ text = kwargs.get("universal_newlines", False) or kwargs.get("text", False)
+
+ if stream is True:
+ # Print to standard output only if no outputline provided
+ stdout = sys.stdout
+ if six.PY2 and text:
+ stdout = codecs.getwriter("utf-8")(sys.stdout)
+ elif six.PY3 and text:
+ # The encoding of stdout isn't guaranteed to be utf-8. Fix that.
+ stdout = codecs.getwriter("utf-8")(sys.stdout.buffer)
+ elif six.PY3 and not text:
+ stdout = sys.stdout.buffer
+
+ if not kwargs["processOutputLine"]:
+ kwargs["processOutputLine"].append(StreamOutput(stdout, text))
+ elif stream:
+ streamoutput = StreamOutput(stream, text)
+ kwargs["processOutputLine"].append(streamoutput)
+
+ self.output = None
+ if storeOutput:
+ storeoutput = StoreOutput()
+ self.output = storeoutput.output
+ kwargs["processOutputLine"].append(storeoutput)
+
+ ProcessHandlerMixin.__init__(self, cmd, **kwargs)
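Taken together, the docstrings above describe a small lifecycle: construct a handler, call run(), then wait() (or kill()). The sketch below illustrates that flow under stated assumptions; the command line, callback name, and timeout value are made up for illustration and are not prescribed by this patch:

    from mozprocess import ProcessHandler

    lines = []

    def collect(line):
        # Each processOutputLine callback receives one line, newline stripped.
        lines.append(line)

    # universal_newlines=True makes the callbacks (and proc.output) receive text
    # rather than bytes; storeOutput defaults to True, so proc.output fills up too.
    proc = ProcessHandler(
        ["python", "-c", "print('hello')"],
        processOutputLine=[collect],
        universal_newlines=True,
    )
    proc.run(timeout=60)   # kill_on_timeout defaults to True, so a hang gets killed
    status = proc.wait()   # 0 on a clean exit; None would mean it is still running
    assert status == 0 and lines == ["hello"] and proc.output == ["hello"]

If the process has to be stopped early, proc.kill() tears down the process (and, unless ignore_children=True was passed, its whole process group or job object) and then waits for the reader thread, as the kill() docstring above notes.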
diff --git a/testing/mozbase/mozprocess/mozprocess/qijo.py b/testing/mozbase/mozprocess/mozprocess/qijo.py
new file mode 100644
index 0000000000..e6d5b7c718
--- /dev/null
+++ b/testing/mozbase/mozprocess/mozprocess/qijo.py
@@ -0,0 +1,177 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import
+
+from ctypes import (
+ c_void_p,
+ POINTER,
+ sizeof,
+ Structure,
+ windll,
+ WinError,
+ WINFUNCTYPE,
+ addressof,
+ c_size_t,
+ c_ulong,
+)
+from ctypes.wintypes import BOOL, BYTE, DWORD, HANDLE, LARGE_INTEGER
+
+import six
+
+LPVOID = c_void_p
+LPDWORD = POINTER(DWORD)
+SIZE_T = c_size_t
+ULONG_PTR = POINTER(c_ulong)
+
+# A ULONGLONG is a 64-bit unsigned integer.
+# Thus there are 8 bytes in a ULONGLONG.
+# XXX why not import c_ulonglong ?
+ULONGLONG = BYTE * 8
+
+
+class IO_COUNTERS(Structure):
+ # The IO_COUNTERS struct is 6 ULONGLONGs.
+ # TODO: Replace with non-dummy fields.
+ _fields_ = [("dummy", ULONGLONG * 6)]
+
+
+class JOBOBJECT_BASIC_ACCOUNTING_INFORMATION(Structure):
+ _fields_ = [
+ ("TotalUserTime", LARGE_INTEGER),
+ ("TotalKernelTime", LARGE_INTEGER),
+ ("ThisPeriodTotalUserTime", LARGE_INTEGER),
+ ("ThisPeriodTotalKernelTime", LARGE_INTEGER),
+ ("TotalPageFaultCount", DWORD),
+ ("TotalProcesses", DWORD),
+ ("ActiveProcesses", DWORD),
+ ("TotalTerminatedProcesses", DWORD),
+ ]
+
+
+class JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION(Structure):
+ _fields_ = [
+ ("BasicInfo", JOBOBJECT_BASIC_ACCOUNTING_INFORMATION),
+ ("IoInfo", IO_COUNTERS),
+ ]
+
+
+# see http://msdn.microsoft.com/en-us/library/ms684147%28VS.85%29.aspx
+class JOBOBJECT_BASIC_LIMIT_INFORMATION(Structure):
+ _fields_ = [
+ ("PerProcessUserTimeLimit", LARGE_INTEGER),
+ ("PerJobUserTimeLimit", LARGE_INTEGER),
+ ("LimitFlags", DWORD),
+ ("MinimumWorkingSetSize", SIZE_T),
+ ("MaximumWorkingSetSize", SIZE_T),
+ ("ActiveProcessLimit", DWORD),
+ ("Affinity", ULONG_PTR),
+ ("PriorityClass", DWORD),
+ ("SchedulingClass", DWORD),
+ ]
+
+
+class JOBOBJECT_ASSOCIATE_COMPLETION_PORT(Structure):
+ _fields_ = [("CompletionKey", c_ulong), ("CompletionPort", HANDLE)]
+
+
+# see http://msdn.microsoft.com/en-us/library/ms684156%28VS.85%29.aspx
+class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(Structure):
+ _fields_ = [
+ ("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
+ ("IoInfo", IO_COUNTERS),
+ ("ProcessMemoryLimit", SIZE_T),
+ ("JobMemoryLimit", SIZE_T),
+ ("PeakProcessMemoryUsed", SIZE_T),
+ ("PeakJobMemoryUsed", SIZE_T),
+ ]
+
+
+# These numbers below come from:
+# http://msdn.microsoft.com/en-us/library/ms686216%28v=vs.85%29.aspx
+JobObjectAssociateCompletionPortInformation = 7
+JobObjectBasicAndIoAccountingInformation = 8
+JobObjectExtendedLimitInformation = 9
+
+
+class JobObjectInfo(object):
+ mapping = {
+ "JobObjectBasicAndIoAccountingInformation": 8,
+ "JobObjectExtendedLimitInformation": 9,
+ "JobObjectAssociateCompletionPortInformation": 7,
+ }
+ structures = {
+ 7: JOBOBJECT_ASSOCIATE_COMPLETION_PORT,
+ 8: JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION,
+ 9: JOBOBJECT_EXTENDED_LIMIT_INFORMATION,
+ }
+
+ def __init__(self, _class):
+ if isinstance(_class, six.string_types):
+ assert _class in self.mapping, "Class should be one of %s; you gave %s" % (
+ self.mapping,
+ _class,
+ )
+ _class = self.mapping[_class]
+ assert _class in self.structures, "Class should be one of %s; you gave %s" % (
+ self.structures,
+ _class,
+ )
+ self.code = _class
+ self.info = self.structures[_class]()
+
+
+QueryInformationJobObjectProto = WINFUNCTYPE(
+ BOOL, # Return type
+ HANDLE, # hJob
+ DWORD, # JobObjectInfoClass
+ LPVOID, # lpJobObjectInfo
+ DWORD, # cbJobObjectInfoLength
+ LPDWORD, # lpReturnLength
+)
+
+QueryInformationJobObjectFlags = (
+ (1, "hJob"),
+ (1, "JobObjectInfoClass"),
+ (1, "lpJobObjectInfo"),
+ (1, "cbJobObjectInfoLength"),
+ (1, "lpReturnLength", None),
+)
+
+_QueryInformationJobObject = QueryInformationJobObjectProto(
+ ("QueryInformationJobObject", windll.kernel32), QueryInformationJobObjectFlags
+)
+
+
+class SubscriptableReadOnlyStruct(object):
+ def __init__(self, struct):
+ self._struct = struct
+
+ def _delegate(self, name):
+ result = getattr(self._struct, name)
+ if isinstance(result, Structure):
+ return SubscriptableReadOnlyStruct(result)
+ return result
+
+ def __getitem__(self, name):
+ match = [fname for fname, ftype in self._struct._fields_ if fname == name]
+ if match:
+ return self._delegate(name)
+ raise KeyError(name)
+
+ def __getattr__(self, name):
+ return self._delegate(name)
+
+
+def QueryInformationJobObject(hJob, JobObjectInfoClass):
+ jobinfo = JobObjectInfo(JobObjectInfoClass)
+ result = _QueryInformationJobObject(
+ hJob=hJob,
+ JobObjectInfoClass=jobinfo.code,
+ lpJobObjectInfo=addressof(jobinfo.info),
+ cbJobObjectInfoLength=sizeof(jobinfo.info),
+ )
+ if not result:
+ raise WinError()
+ return SubscriptableReadOnlyStruct(jobinfo.info)
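As a usage note for the module above: JobObjectInfo maps an info-class name (or number) to the matching ctypes structure, and QueryInformationJobObject returns the filled structure wrapped in SubscriptableReadOnlyStruct, so fields can be read by attribute or by subscript. A minimal, Windows-only sketch, assuming the job handle comes from winprocess.CreateJobObject (the same helper processhandler.py uses):

    from mozprocess import winprocess
    from mozprocess.qijo import QueryInformationJobObject

    job = winprocess.CreateJobObject()  # unnamed job object, as in processhandler.py

    info = QueryInformationJobObject(job, "JobObjectBasicAndIoAccountingInformation")
    # Nested structures come back wrapped as well, so both spellings work:
    active = info["BasicInfo"]["ActiveProcesses"]
    total = info.BasicInfo.TotalProcesses

    job.Close()  # AutoHANDLE from winprocess; closes the underlying handle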
diff --git a/testing/mozbase/mozprocess/mozprocess/winprocess.py b/testing/mozbase/mozprocess/mozprocess/winprocess.py
new file mode 100644
index 0000000000..2e89de0a98
--- /dev/null
+++ b/testing/mozbase/mozprocess/mozprocess/winprocess.py
@@ -0,0 +1,566 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# A module to expose various thread/process/job related structures and
+# methods from kernel32
+#
+# The MIT License
+#
+# Copyright (c) 2003-2004 by Peter Astrand <astrand@lysator.liu.se>
+#
+# Additions and modifications written by Benjamin Smedberg
+# <benjamin@smedbergs.us> are Copyright (c) 2006 by the Mozilla Foundation
+# <http://www.mozilla.org/>
+#
+# More Modifications
+# Copyright (c) 2006-2007 by Mike Taylor <bear@code-bear.com>
+# Copyright (c) 2007-2008 by Mikeal Rogers <mikeal@mozilla.com>
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of the
+# author not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import absolute_import, unicode_literals, print_function
+
+import subprocess
+import sys
+from ctypes import (
+ cast,
+ create_unicode_buffer,
+ c_ulong,
+ c_void_p,
+ POINTER,
+ sizeof,
+ Structure,
+ windll,
+ WinError,
+ WINFUNCTYPE,
+)
+from ctypes.wintypes import BOOL, BYTE, DWORD, HANDLE, LPCWSTR, LPWSTR, UINT, WORD
+
+from .qijo import QueryInformationJobObject
+
+LPVOID = c_void_p
+LPBYTE = POINTER(BYTE)
+LPDWORD = POINTER(DWORD)
+LPBOOL = POINTER(BOOL)
+LPULONG = POINTER(c_ulong)
+
+
+def ErrCheckBool(result, func, args):
+ """errcheck function for Windows functions that return a BOOL True
+ on success"""
+ if not result:
+ raise WinError()
+ return args
+
+
+# AutoHANDLE
+
+
+class AutoHANDLE(HANDLE):
+ """Subclass of HANDLE which will call CloseHandle() on deletion."""
+
+ CloseHandleProto = WINFUNCTYPE(BOOL, HANDLE)
+ CloseHandle = CloseHandleProto(("CloseHandle", windll.kernel32))
+ CloseHandle.errcheck = ErrCheckBool
+
+ def Close(self):
+ if self.value and self.value != HANDLE(-1).value:
+ self.CloseHandle(self)
+ self.value = 0
+
+ def __del__(self):
+ self.Close()
+
+ def __int__(self):
+ return self.value
+
+
+def ErrCheckHandle(result, func, args):
+ """errcheck function for Windows functions that return a HANDLE."""
+ if not result:
+ raise WinError()
+ return AutoHANDLE(result)
+
+
+# PROCESS_INFORMATION structure
+
+
+class PROCESS_INFORMATION(Structure):
+ _fields_ = [
+ ("hProcess", HANDLE),
+ ("hThread", HANDLE),
+ ("dwProcessID", DWORD),
+ ("dwThreadID", DWORD),
+ ]
+
+ def __init__(self):
+ Structure.__init__(self)
+
+ self.cb = sizeof(self)
+
+
+LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
+
+
+# STARTUPINFO structure
+
+
+class STARTUPINFO(Structure):
+ _fields_ = [
+ ("cb", DWORD),
+ ("lpReserved", LPWSTR),
+ ("lpDesktop", LPWSTR),
+ ("lpTitle", LPWSTR),
+ ("dwX", DWORD),
+ ("dwY", DWORD),
+ ("dwXSize", DWORD),
+ ("dwYSize", DWORD),
+ ("dwXCountChars", DWORD),
+ ("dwYCountChars", DWORD),
+ ("dwFillAttribute", DWORD),
+ ("dwFlags", DWORD),
+ ("wShowWindow", WORD),
+ ("cbReserved2", WORD),
+ ("lpReserved2", LPBYTE),
+ ("hStdInput", HANDLE),
+ ("hStdOutput", HANDLE),
+ ("hStdError", HANDLE),
+ ]
+
+
+LPSTARTUPINFO = POINTER(STARTUPINFO)
+
+SW_HIDE = 0
+
+STARTF_USESHOWWINDOW = 0x01
+STARTF_USESIZE = 0x02
+STARTF_USEPOSITION = 0x04
+STARTF_USECOUNTCHARS = 0x08
+STARTF_USEFILLATTRIBUTE = 0x10
+STARTF_RUNFULLSCREEN = 0x20
+STARTF_FORCEONFEEDBACK = 0x40
+STARTF_FORCEOFFFEEDBACK = 0x80
+STARTF_USESTDHANDLES = 0x100
+
+
+# EnvironmentBlock
+
+
+class EnvironmentBlock:
+ """An object which can be passed as the lpEnv parameter of CreateProcess.
+ It is initialized with a dictionary."""
+
+ def __init__(self, env):
+ if not env:
+ self._as_parameter_ = None
+ else:
+ values = []
+ fs_encoding = sys.getfilesystemencoding() or "mbcs"
+ for k, v in env.items():
+ if isinstance(k, bytes):
+ k = k.decode(fs_encoding, "replace")
+ if isinstance(v, bytes):
+ v = v.decode(fs_encoding, "replace")
+ values.append("{}={}".format(k, v))
+
+ # The lpEnvironment parameter of the 'CreateProcess' function expects a series
+ # of null terminated strings followed by a final null terminator. We write this
+ # value to a buffer and then cast it to LPCWSTR to avoid a Python ctypes bug
+            # that prohibits embedded null characters (https://bugs.python.org/issue32745).
+ values = create_unicode_buffer("\0".join(values) + "\0")
+ self._as_parameter_ = cast(values, LPCWSTR)
+
+
+# Error Messages we need to watch for go here
+
+# https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx (0 - 499)
+ERROR_ACCESS_DENIED = 5
+ERROR_INVALID_PARAMETER = 87
+
+# http://msdn.microsoft.com/en-us/library/ms681388%28v=vs.85%29.aspx (500 - 999)
+ERROR_ABANDONED_WAIT_0 = 735
+
+# GetLastError()
+GetLastErrorProto = WINFUNCTYPE(DWORD) # Return Type
+GetLastErrorFlags = ()
+GetLastError = GetLastErrorProto(("GetLastError", windll.kernel32), GetLastErrorFlags)
+
+# CreateProcess()
+
+CreateProcessProto = WINFUNCTYPE(
+ BOOL, # Return type
+ LPCWSTR, # lpApplicationName
+ LPWSTR, # lpCommandLine
+ LPVOID, # lpProcessAttributes
+ LPVOID, # lpThreadAttributes
+ BOOL, # bInheritHandles
+ DWORD, # dwCreationFlags
+ LPVOID, # lpEnvironment
+ LPCWSTR, # lpCurrentDirectory
+ LPSTARTUPINFO, # lpStartupInfo
+ LPPROCESS_INFORMATION, # lpProcessInformation
+)
+
+CreateProcessFlags = (
+ (1, "lpApplicationName", None),
+ (1, "lpCommandLine"),
+ (1, "lpProcessAttributes", None),
+ (1, "lpThreadAttributes", None),
+ (1, "bInheritHandles", True),
+ (1, "dwCreationFlags", 0),
+ (1, "lpEnvironment", None),
+ (1, "lpCurrentDirectory", None),
+ (1, "lpStartupInfo"),
+ (2, "lpProcessInformation"),
+)
+
+
+def ErrCheckCreateProcess(result, func, args):
+ ErrCheckBool(result, func, args)
+ # return a tuple (hProcess, hThread, dwProcessID, dwThreadID)
+ pi = args[9]
+ return (
+ AutoHANDLE(pi.hProcess),
+ AutoHANDLE(pi.hThread),
+ pi.dwProcessID,
+ pi.dwThreadID,
+ )
+
+
+CreateProcess = CreateProcessProto(
+ ("CreateProcessW", windll.kernel32), CreateProcessFlags
+)
+CreateProcess.errcheck = ErrCheckCreateProcess
+
+# flags for CreateProcess
+CREATE_BREAKAWAY_FROM_JOB = 0x01000000
+CREATE_DEFAULT_ERROR_MODE = 0x04000000
+CREATE_NEW_CONSOLE = 0x00000010
+CREATE_NEW_PROCESS_GROUP = 0x00000200
+CREATE_NO_WINDOW = 0x08000000
+CREATE_SUSPENDED = 0x00000004
+CREATE_UNICODE_ENVIRONMENT = 0x00000400
+
+# Flags for IOCompletion ports (some of these would probably be defined if
+# we used the win32 extensions for python, but we don't want to do that if we
+# can help it.)
+INVALID_HANDLE_VALUE = HANDLE(-1) # From winbase.h
+
+# Self Defined Constants for IOPort <--> Job Object communication
+COMPKEY_TERMINATE = c_ulong(0)
+COMPKEY_JOBOBJECT = c_ulong(1)
+
+# flags for job limit information
+# see http://msdn.microsoft.com/en-us/library/ms684147%28VS.85%29.aspx
+JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
+JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000
+JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
+
+# Flags for Job Object Completion Port Message IDs from winnt.h
+# See also: http://msdn.microsoft.com/en-us/library/ms684141%28v=vs.85%29.aspx
+JOB_OBJECT_MSG_END_OF_JOB_TIME = 1
+JOB_OBJECT_MSG_END_OF_PROCESS_TIME = 2
+JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT = 3
+JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO = 4
+JOB_OBJECT_MSG_NEW_PROCESS = 6
+JOB_OBJECT_MSG_EXIT_PROCESS = 7
+JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS = 8
+JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT = 9
+JOB_OBJECT_MSG_JOB_MEMORY_LIMIT = 10
+
+# See winbase.h
+DEBUG_ONLY_THIS_PROCESS = 0x00000002
+DEBUG_PROCESS = 0x00000001
+DETACHED_PROCESS = 0x00000008
+
+# OpenProcess -
+# https://msdn.microsoft.com/en-us/library/windows/desktop/ms684320(v=vs.85).aspx
+PROCESS_QUERY_INFORMATION = 0x0400
+PROCESS_VM_READ = 0x0010
+
+OpenProcessProto = WINFUNCTYPE(
+ HANDLE, # Return type
+ DWORD, # dwDesiredAccess
+ BOOL, # bInheritHandle
+ DWORD, # dwProcessId
+)
+
+OpenProcessFlags = (
+ (1, "dwDesiredAccess", 0),
+ (1, "bInheritHandle", False),
+ (1, "dwProcessId", 0),
+)
+
+
+def ErrCheckOpenProcess(result, func, args):
+ ErrCheckBool(result, func, args)
+
+ return AutoHANDLE(result)
+
+
+OpenProcess = OpenProcessProto(("OpenProcess", windll.kernel32), OpenProcessFlags)
+OpenProcess.errcheck = ErrCheckOpenProcess
+
+# GetQueuedCompletionStatus -
+# http://msdn.microsoft.com/en-us/library/aa364986%28v=vs.85%29.aspx
+GetQueuedCompletionStatusProto = WINFUNCTYPE(
+    BOOL, # Return Type
+    HANDLE, # Completion Port
+    LPDWORD, # Msg ID
+    LPULONG, # Completion Key
+    LPULONG, # PID returned from the call (may be null)
+    DWORD, # Milliseconds to wait
+)
+GetQueuedCompletionStatusFlags = (
+ (1, "CompletionPort", INVALID_HANDLE_VALUE),
+ (1, "lpNumberOfBytes", None),
+ (1, "lpCompletionKey", None),
+ (1, "lpPID", None),
+ (1, "dwMilliseconds", 0),
+)
+GetQueuedCompletionStatus = GetQueuedCompletionStatusProto(
+ ("GetQueuedCompletionStatus", windll.kernel32), GetQueuedCompletionStatusFlags
+)
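+
+# For illustration only, a rough sketch of how the process handler polls this
+# port for job object notifications (see processhandler.py; byref is
+# ctypes.byref). The completion key tells a shutdown request
+# (COMPKEY_TERMINATE) apart from job object messages (COMPKEY_JOBOBJECT):
+#
+#     msgid, compkey, pid = c_ulong(0), c_ulong(0), c_ulong(0)
+#     ok = GetQueuedCompletionStatus(io_port, byref(msgid), byref(compkey),
+#                                    byref(pid), 5000)
+#     if ok and compkey.value == COMPKEY_JOBOBJECT.value:
+#         if msgid.value == JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
+#             pass  # every process in the job has exited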
+
+# CreateIOCompletionPort
+# Note that the completion key is just a number, not a pointer.
+CreateIoCompletionPortProto = WINFUNCTYPE(
+    HANDLE, # Return Type
+    HANDLE, # File Handle
+    HANDLE, # Existing Completion Port
+    c_ulong, # Completion Key
+    DWORD, # Number of Threads
+)
+
+CreateIoCompletionPortFlags = (
+ (1, "FileHandle", INVALID_HANDLE_VALUE),
+ (1, "ExistingCompletionPort", 0),
+ (1, "CompletionKey", c_ulong(0)),
+ (1, "NumberOfConcurrentThreads", 0),
+)
+CreateIoCompletionPort = CreateIoCompletionPortProto(
+ ("CreateIoCompletionPort", windll.kernel32), CreateIoCompletionPortFlags
+)
+CreateIoCompletionPort.errcheck = ErrCheckHandle
+
+# SetInformationJobObject
+SetInformationJobObjectProto = WINFUNCTYPE(
+    BOOL, # Return Type
+    HANDLE, # Job Handle
+    DWORD, # Info class of the next parameter (JobObjectInfoClass)
+    LPVOID, # Job Object Info
+    DWORD, # Job Object Info Length
+)
+
+SetInformationJobObjectProtoFlags = (
+ (1, "hJob", None),
+ (1, "JobObjectInfoClass", None),
+ (1, "lpJobObjectInfo", None),
+ (1, "cbJobObjectInfoLength", 0),
+)
+SetInformationJobObject = SetInformationJobObjectProto(
+ ("SetInformationJobObject", windll.kernel32), SetInformationJobObjectProtoFlags
+)
+SetInformationJobObject.errcheck = ErrCheckBool
+
+# CreateJobObject()
+CreateJobObjectProto = WINFUNCTYPE(
+    HANDLE, # Return type
+    LPVOID, # lpJobAttributes
+    LPCWSTR, # lpName
+)
+
+CreateJobObjectFlags = ((1, "lpJobAttributes", None), (1, "lpName", None))
+
+CreateJobObject = CreateJobObjectProto(
+ ("CreateJobObjectW", windll.kernel32), CreateJobObjectFlags
+)
+CreateJobObject.errcheck = ErrCheckHandle
+
+# AssignProcessToJobObject()
+
+AssignProcessToJobObjectProto = WINFUNCTYPE(
+    BOOL, # Return type
+    HANDLE, # hJob
+    HANDLE, # hProcess
+)
+AssignProcessToJobObjectFlags = ((1, "hJob"), (1, "hProcess"))
+AssignProcessToJobObject = AssignProcessToJobObjectProto(
+ ("AssignProcessToJobObject", windll.kernel32), AssignProcessToJobObjectFlags
+)
+AssignProcessToJobObject.errcheck = ErrCheckBool
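+
+# For illustration only, the rough order in which the wrappers above are used
+# when a child process is placed under a job (a sketch of what
+# processhandler.py does; the JOBOBJECT_* structures and info-class constants
+# come from qijo.py):
+#
+#     job = CreateJobObject()                  # anonymous job object
+#     SetInformationJobObject(job, ...)        # associate the IO completion port
+#                                              # and set limit flags such as
+#                                              # JOB_OBJECT_LIMIT_BREAKAWAY_OK
+#     AssignProcessToJobObject(job, hProcess)  # put the child in the job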
+
+# GetCurrentProcess()
+# because os.getpid() is way too easy
+GetCurrentProcessProto = WINFUNCTYPE(HANDLE) # Return type
+GetCurrentProcessFlags = ()
+GetCurrentProcess = GetCurrentProcessProto(
+ ("GetCurrentProcess", windll.kernel32), GetCurrentProcessFlags
+)
+GetCurrentProcess.errcheck = ErrCheckHandle
+
+# IsProcessInJob()
+try:
+ IsProcessInJobProto = WINFUNCTYPE(
+ BOOL, # Return type
+ HANDLE, # Process Handle
+ HANDLE, # Job Handle
+ LPBOOL, # Result
+ )
+ IsProcessInJobFlags = (
+ (1, "ProcessHandle"),
+ (1, "JobHandle", HANDLE(0)),
+ (2, "Result"),
+ )
+ IsProcessInJob = IsProcessInJobProto(
+ ("IsProcessInJob", windll.kernel32), IsProcessInJobFlags
+ )
+ IsProcessInJob.errcheck = ErrCheckBool
+except AttributeError:
+ # windows 2k doesn't have this API
+ def IsProcessInJob(process):
+ return False
+
+
+# ResumeThread()
+
+
+def ErrCheckResumeThread(result, func, args):
+ if result == -1:
+ raise WinError()
+
+ return args
+
+
+ResumeThreadProto = WINFUNCTYPE(
+    DWORD, # Return type
+    HANDLE, # hThread
+)
+ResumeThreadFlags = ((1, "hThread"),)
+ResumeThread = ResumeThreadProto(("ResumeThread", windll.kernel32), ResumeThreadFlags)
+ResumeThread.errcheck = ErrCheckResumeThread
+
+# TerminateProcess()
+
+TerminateProcessProto = WINFUNCTYPE(
+    BOOL, # Return type
+    HANDLE, # hProcess
+    UINT, # uExitCode
+)
+TerminateProcessFlags = ((1, "hProcess"), (1, "uExitCode", 127))
+TerminateProcess = TerminateProcessProto(
+ ("TerminateProcess", windll.kernel32), TerminateProcessFlags
+)
+TerminateProcess.errcheck = ErrCheckBool
+
+# TerminateJobObject()
+
+TerminateJobObjectProto = WINFUNCTYPE(
+    BOOL, # Return type
+    HANDLE, # hJob
+    UINT, # uExitCode
+)
+TerminateJobObjectFlags = ((1, "hJob"), (1, "uExitCode", 127))
+TerminateJobObject = TerminateJobObjectProto(
+ ("TerminateJobObject", windll.kernel32), TerminateJobObjectFlags
+)
+TerminateJobObject.errcheck = ErrCheckBool
+
+# WaitForSingleObject()
+
+WaitForSingleObjectProto = WINFUNCTYPE(
+    DWORD, # Return type
+    HANDLE, # hHandle
+    DWORD, # dwMilliseconds
+)
+WaitForSingleObjectFlags = ((1, "hHandle"), (1, "dwMilliseconds", -1))
+WaitForSingleObject = WaitForSingleObjectProto(
+ ("WaitForSingleObject", windll.kernel32), WaitForSingleObjectFlags
+)
+
+# http://msdn.microsoft.com/en-us/library/ms681381%28v=vs.85%29.aspx
+INFINITE = -1
+WAIT_TIMEOUT = 0x0102
+WAIT_OBJECT_0 = 0x0
+WAIT_ABANDONED = 0x0080
+
+# http://msdn.microsoft.com/en-us/library/ms683189%28VS.85%29.aspx
+STILL_ACTIVE = 259
+
+# Used when we terminate a process.
+ERROR_CONTROL_C_EXIT = 0x23C
+
+# GetExitCodeProcess()
+
+GetExitCodeProcessProto = WINFUNCTYPE(
+    BOOL, # Return type
+    HANDLE, # hProcess
+    LPDWORD, # lpExitCode
+)
+GetExitCodeProcessFlags = ((1, "hProcess"), (2, "lpExitCode"))
+GetExitCodeProcess = GetExitCodeProcessProto(
+ ("GetExitCodeProcess", windll.kernel32), GetExitCodeProcessFlags
+)
+GetExitCodeProcess.errcheck = ErrCheckBool
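+
+# For illustration only (assumes a Windows host and a process handle, e.g. one
+# returned by the wrapped OpenProcess above): lpExitCode is an output parameter
+# (direction 2), so the wrapped call returns the exit code directly:
+#
+#     code = GetExitCodeProcess(hProcess)
+#     if code == STILL_ACTIVE:
+#         pass  # the process has not exited yet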
+
+
+def CanCreateJobObject():
+ currentProc = GetCurrentProcess()
+ if IsProcessInJob(currentProc):
+ jobinfo = QueryInformationJobObject(
+ HANDLE(0), "JobObjectExtendedLimitInformation"
+ )
+ limitflags = jobinfo["BasicLimitInformation"]["LimitFlags"]
+ return bool(limitflags & JOB_OBJECT_LIMIT_BREAKAWAY_OK) or bool(
+ limitflags & JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK
+ )
+ else:
+ return True
+
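+# For illustration only, a sketch of how callers typically gate job usage on
+# this check (not the definitive implementation used by the process handler):
+#
+#     if CanCreateJobObject():
+#         creationflags |= CREATE_BREAKAWAY_FROM_JOB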
+
+# testing functions
+
+
+def parent():
+ print("Starting parent")
+ currentProc = GetCurrentProcess()
+ if IsProcessInJob(currentProc):
+ print("You should not be in a job object to test", file=sys.stderr)
+ sys.exit(1)
+ assert CanCreateJobObject()
+ print("File: %s" % __file__)
+ command = [sys.executable, __file__, "-child"]
+ print("Running command: %s" % command)
+ process = subprocess.Popen(command)
+ process.kill()
+ code = process.returncode
+ print("Child code: %s" % code)
+ assert code == 127
+
+
+def child():
+ print("Starting child")
+ currentProc = GetCurrentProcess()
+ injob = IsProcessInJob(currentProc)
+ print("Is in a job?: %s" % injob)
+ can_create = CanCreateJobObject()
+ print("Can create job?: %s" % can_create)
+ process = subprocess.Popen("c:\\windows\\notepad.exe")
+ assert process._job
+ jobinfo = QueryInformationJobObject(
+ process._job, "JobObjectExtendedLimitInformation"
+ )
+ print("Job info: %s" % jobinfo)
+ limitflags = jobinfo["BasicLimitInformation"]["LimitFlags"]
+ print("LimitFlags: %s" % limitflags)
+ process.kill()
diff --git a/testing/mozbase/mozprocess/setup.cfg b/testing/mozbase/mozprocess/setup.cfg
new file mode 100644
index 0000000000..2a9acf13da
--- /dev/null
+++ b/testing/mozbase/mozprocess/setup.cfg
@@ -0,0 +1,2 @@
+[bdist_wheel]
+universal = 1
diff --git a/testing/mozbase/mozprocess/setup.py b/testing/mozbase/mozprocess/setup.py
new file mode 100644
index 0000000000..b4ad2a8e18
--- /dev/null
+++ b/testing/mozbase/mozprocess/setup.py
@@ -0,0 +1,38 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import
+
+from setuptools import setup
+
+PACKAGE_VERSION = "1.2.1"
+
+setup(
+ name="mozprocess",
+ version=PACKAGE_VERSION,
+ description="Mozilla-authored process handling",
+ long_description="see https://firefox-source-docs.mozilla.org/mozbase/index.html",
+ classifiers=[
+ "Environment :: Console",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
+ "Natural Language :: English",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3.5",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ keywords="mozilla",
+ author="Mozilla Automation and Tools team",
+ author_email="tools@lists.mozilla.org",
+ url="https://wiki.mozilla.org/Auto-tools/Projects/Mozbase",
+ license="MPL 2.0",
+ packages=["mozprocess"],
+ include_package_data=True,
+ zip_safe=False,
+ install_requires=["mozinfo"],
+ entry_points="""
+ # -*- Entry points: -*-
+ """,
+)
diff --git a/testing/mozbase/mozprocess/tests/manifest.ini b/testing/mozbase/mozprocess/tests/manifest.ini
new file mode 100644
index 0000000000..ec1dcf2cff
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/manifest.ini
@@ -0,0 +1,12 @@
+[DEFAULT]
+subsuite = mozbase
+[test_detached.py]
+skip-if = os == "win" # Bug 1493796
+[test_kill.py]
+[test_misc.py]
+[test_pid.py]
+[test_poll.py]
+[test_wait.py]
+[test_output.py]
+[test_params.py]
+[test_process_reader.py]
diff --git a/testing/mozbase/mozprocess/tests/process_normal_broad.ini b/testing/mozbase/mozprocess/tests/process_normal_broad.ini
new file mode 100644
index 0000000000..28109cb31e
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/process_normal_broad.ini
@@ -0,0 +1,30 @@
+; Generate a Broad Process Tree
+; This generates a Tree of the form:
+;
+; main
+; \_ c1
+; | \_ c2
+; | \_ c2
+; | \_ c2
+; | \_ c2
+; | \_ c2
+; |
+; \_ c1
+; | \_ c2
+; | \_ c2
+; | \_ c2
+; | \_ c2
+; | \_ c2
+; |
+; \_ ... 23 more times
+
+[main]
+children=25*c1
+maxtime=10
+
+[c1]
+children=5*c2
+maxtime=10
+
+[c2]
+maxtime=5
diff --git a/testing/mozbase/mozprocess/tests/process_normal_deep.ini b/testing/mozbase/mozprocess/tests/process_normal_deep.ini
new file mode 100644
index 0000000000..ef9809f6ab
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/process_normal_deep.ini
@@ -0,0 +1,65 @@
+; Deep Process Tree
+; Should generate a process tree of the form:
+;
+; main
+; \_ c2
+; | \_ c5
+; | | \_ c6
+; | | \_ c7
+; | | \_ c8
+; | | \_ c1
+; | | \_ c4
+; | \_ c5
+; | \_ c6
+; | \_ c7
+; | \_ c8
+; | \_ c1
+; | \_ c4
+; \_ c2
+; | \_ c5
+; | | \_ c6
+; | | \_ c7
+; | | \_ c8
+; | | \_ c1
+; | | \_ c4
+; | \_ c5
+; | \_ c6
+; | \_ c7
+; | \_ c8
+; | \_ c1
+; | \_ c4
+; \_ c1
+; | \_ c4
+; \_ c1
+; \_ c4
+
+[main]
+children=2*c1, 2*c2
+maxtime=20
+
+[c1]
+children=c4
+maxtime=20
+
+[c2]
+children=2*c5
+maxtime=20
+
+[c4]
+maxtime=20
+
+[c5]
+children=c6
+maxtime=20
+
+[c6]
+children=c7
+maxtime=20
+
+[c7]
+children=c8
+maxtime=20
+
+[c8]
+children=c1
+maxtime=20
diff --git a/testing/mozbase/mozprocess/tests/process_normal_finish.ini b/testing/mozbase/mozprocess/tests/process_normal_finish.ini
new file mode 100644
index 0000000000..4519c70830
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/process_normal_finish.ini
@@ -0,0 +1,17 @@
+; Generates a normal process tree
+; Tree is of the form:
+; main
+; \_ c1
+; \_ c2
+
+[main]
+children=c1,c2
+maxtime=10
+
+[c1]
+children=c2
+maxtime=5
+
+[c2]
+maxtime=5
+
diff --git a/testing/mozbase/mozprocess/tests/process_normal_finish_no_process_group.ini b/testing/mozbase/mozprocess/tests/process_normal_finish_no_process_group.ini
new file mode 100644
index 0000000000..2b0f1f9a4f
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/process_normal_finish_no_process_group.ini
@@ -0,0 +1,2 @@
+[main]
+maxtime=10
diff --git a/testing/mozbase/mozprocess/tests/process_waittimeout.ini b/testing/mozbase/mozprocess/tests/process_waittimeout.ini
new file mode 100644
index 0000000000..5800267d18
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/process_waittimeout.ini
@@ -0,0 +1,16 @@
+; Generates a normal process tree
+; Tree is of the form:
+; main
+; \_ c1
+; \_ c2
+
+[main]
+children=2*c1
+maxtime=300
+
+[c1]
+children=2*c2
+maxtime=300
+
+[c2]
+maxtime=300
diff --git a/testing/mozbase/mozprocess/tests/process_waittimeout_10s.ini b/testing/mozbase/mozprocess/tests/process_waittimeout_10s.ini
new file mode 100644
index 0000000000..abf8d6a4ef
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/process_waittimeout_10s.ini
@@ -0,0 +1,16 @@
+; Generate a normal process tree
+; Tree is of the form:
+; main
+; \_ c1
+; \_ c2
+
+[main]
+children=c1
+maxtime=10
+
+[c1]
+children=2*c2
+maxtime=5
+
+[c2]
+maxtime=5
diff --git a/testing/mozbase/mozprocess/tests/proclaunch.py b/testing/mozbase/mozprocess/tests/proclaunch.py
new file mode 100644
index 0000000000..3f7a1b6936
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/proclaunch.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import argparse
+import collections
+import multiprocessing
+
+import time
+from six.moves import configparser
+
+ProcessNode = collections.namedtuple("ProcessNode", ["maxtime", "children"])
+
+
+class ProcessLauncher(object):
+ """Create and Launch process trees specified by a '.ini' file
+
+ Typical .ini file accepted by this class :
+
+ [main]
+ children=c1, 1*c2, 4*c3
+ maxtime=10
+
+ [c1]
+ children= 2*c2, c3
+ maxtime=20
+
+ [c2]
+ children=3*c3
+ maxtime=5
+
+ [c3]
+ maxtime=3
+
+ This generates a process tree of the form:
+ [main]
+ |---[c1]
+ | |---[c2]
+ | | |---[c3]
+ | | |---[c3]
+ | | |---[c3]
+ | |
+ | |---[c2]
+ | | |---[c3]
+ | | |---[c3]
+ | | |---[c3]
+ | |
+ | |---[c3]
+ |
+ |---[c2]
+ | |---[c3]
+ | |---[c3]
+ | |---[c3]
+ |
+ |---[c3]
+ |---[c3]
+ |---[c3]
+
+    Caveat: The section names cannot contain a '*' (asterisk) or a ',' (comma)
+ character as these are used as delimiters for parsing.
+ """
+
+ # Unit time for processes in seconds
+ UNIT_TIME = 1
+
+ def __init__(self, manifest, verbose=False):
+ """
+ Parses the manifest and stores the information about the process tree
+ in a format usable by the class.
+
+        Raises IOError if:
+            - The path does not exist
+            - The file cannot be read
+        Raises ConfigParser.*Error if:
+            - File does not contain section headers
+            - File cannot be parsed because of incorrect specification
+
+ :param manifest: Path to the manifest file that contains the
+ configuration for the process tree to be launched
+        :param verbose: Print the process start and end information.
+                        Generates a lot of output. Disabled by default.
+ """
+
+ self.verbose = verbose
+
+        # children is a dictionary used to store information from the
+        # configuration file in a more usable format.
+        # Key: a string containing the name of the child process
+        # Value: a named tuple of the form (max_time, (list of child processes of Key))
+        # where each child process is a list of the form [count to run, name of child]
+ self.children = {}
+
+ cfgparser = configparser.ConfigParser()
+
+ if not cfgparser.read(manifest):
+ raise IOError("The manifest %s could not be found/opened", manifest)
+
+ sections = cfgparser.sections()
+ for section in sections:
+ # Maxtime is a mandatory option
+ # ConfigParser.NoOptionError is raised if maxtime does not exist
+ if "*" in section or "," in section:
+ raise configparser.ParsingError(
+ "%s is not a valid section name. "
+ "Section names cannot contain a '*' or ','." % section
+ )
+ m_time = cfgparser.get(section, "maxtime")
+ try:
+ m_time = int(m_time)
+ except ValueError:
+ raise ValueError(
+ "Expected maxtime to be an integer, specified %s" % m_time
+ )
+
+            # A missing children option implies there are no further children;
+            # an empty children option is treated the same way.
+ try:
+ c = cfgparser.get(section, "children")
+ if not c:
+ # If children is an empty field, assume no children
+ children = None
+
+ else:
+                    # Tokenize the children field, ignoring empty strings
+ children = [
+ [y.strip() for y in x.strip().split("*", 1)]
+ for x in c.split(",")
+ if x
+ ]
+ try:
+ for i, child in enumerate(children):
+                            # No multiplicative factor in front of a process implies a count of 1
+ if len(child) == 1:
+ children[i] = [1, child[0]]
+ else:
+ children[i][0] = int(child[0])
+
+ if children[i][1] not in sections:
+ raise configparser.ParsingError(
+ "No section corresponding to child %s" % child[1]
+ )
+ except ValueError:
+ raise ValueError(
+ "Expected process count to be an integer, specified %s"
+ % child[0]
+ )
+
+ except configparser.NoOptionError:
+ children = None
+ pn = ProcessNode(maxtime=m_time, children=children)
+ self.children[section] = pn
+
+ def run(self):
+ """
+ This function launches the process tree.
+ """
+ self._run("main", 0)
+
+ def _run(self, proc_name, level):
+ """
+ Runs the process specified by the section-name `proc_name` in the manifest file.
+ Then makes calls to launch the child processes of `proc_name`
+
+        :param proc_name: Name of the process section in the manifest, as a string.
+ :param level: Depth of the current process in the tree.
+ """
+ if proc_name not in self.children:
+ raise IOError("%s is not a valid process" % proc_name)
+
+ maxtime = self.children[proc_name].maxtime
+ if self.verbose:
+ print(
+ "%sLaunching %s for %d*%d seconds"
+ % (" " * level, proc_name, maxtime, self.UNIT_TIME)
+ )
+
+ while self.children[proc_name].children:
+ child = self.children[proc_name].children.pop()
+
+ count, child_proc = child
+ for i in range(count):
+ p = multiprocessing.Process(
+ target=self._run, args=(child[1], level + 1)
+ )
+ p.start()
+
+ self._launch(maxtime)
+ if self.verbose:
+ print("%sFinished %s" % (" " * level, proc_name))
+
+ def _launch(self, running_time):
+ """
+        Idle for the time specified by `running_time`, sleeping in increments
+        of UNIT_TIME.
+
+ :param running_time: Running time of the process in seconds.
+ """
+ elapsed_time = 0
+
+ while elapsed_time < running_time:
+ time.sleep(self.UNIT_TIME)
+ elapsed_time += self.UNIT_TIME
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("manifest", help="Specify the configuration .ini file")
+ args = parser.parse_args()
+
+ proclaunch = ProcessLauncher(args.manifest)
+ proclaunch.run()
diff --git a/testing/mozbase/mozprocess/tests/proctest.py b/testing/mozbase/mozprocess/tests/proctest.py
new file mode 100644
index 0000000000..c1e70388c5
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/proctest.py
@@ -0,0 +1,65 @@
+from __future__ import absolute_import
+
+import os
+import sys
+import unittest
+
+from mozprocess import ProcessHandler
+
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class ProcTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.proclaunch = os.path.join(here, "proclaunch.py")
+ cls.python = sys.executable
+
+ def determine_status(self, proc, isalive=False, expectedfail=()):
+ """
+ Use to determine if the situation has failed.
+ Parameters:
+ proc -- the processhandler instance
+        isalive -- Pass True if the process is expected to still be running;
+                   by default the test passes only if the process no longer
+                   exists (isalive == False)
+        expectedfail -- Defaults to (), used to indicate the fields that are
+                        expected to fail
+ """
+ returncode = proc.proc.returncode
+ didtimeout = proc.didTimeout
+ detected = ProcessHandler.pid_exists(proc.pid)
+ output = ""
+ # ProcessHandler has output when store_output is set to True in the constructor
+ # (this is the default)
+ if getattr(proc, "output"):
+ output = proc.output
+
+ if "returncode" in expectedfail:
+ self.assertTrue(
+ returncode, "Detected an unexpected return code of: %s" % returncode
+ )
+ elif isalive:
+ self.assertEqual(
+ returncode, None, "Detected not None return code of: %s" % returncode
+ )
+ else:
+ self.assertNotEqual(
+                returncode, None, "Detected an unexpected None return code"
+ )
+
+ if "didtimeout" in expectedfail:
+ self.assertTrue(didtimeout, "Detected that process didn't time out")
+ else:
+ self.assertTrue(not didtimeout, "Detected that process timed out")
+
+ if isalive:
+ self.assertTrue(
+ detected,
+ "Detected process is not running, " "process output: %s" % output,
+ )
+ else:
+ self.assertTrue(
+ not detected,
+ "Detected process is still running, " "process output: %s" % output,
+ )
diff --git a/testing/mozbase/mozprocess/tests/scripts/infinite_loop.py b/testing/mozbase/mozprocess/tests/scripts/infinite_loop.py
new file mode 100644
index 0000000000..74c2b630a4
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/scripts/infinite_loop.py
@@ -0,0 +1,20 @@
+from __future__ import absolute_import
+
+import threading
+import time
+import sys
+import signal
+
+if "deadlock" in sys.argv:
+ lock = threading.Lock()
+
+ def trap(sig, frame):
+ lock.acquire()
+
+ # get the lock once
+ lock.acquire()
+ # and take it again on SIGTERM signal: deadlock.
+ signal.signal(signal.SIGTERM, trap)
+
+while 1:
+ time.sleep(1)
diff --git a/testing/mozbase/mozprocess/tests/scripts/proccountfive.py b/testing/mozbase/mozprocess/tests/scripts/proccountfive.py
new file mode 100644
index 0000000000..c79faab6c2
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/scripts/proccountfive.py
@@ -0,0 +1,4 @@
+from __future__ import absolute_import, print_function
+
+for i in range(0, 5):
+ print(i)
diff --git a/testing/mozbase/mozprocess/tests/scripts/procnonewline.py b/testing/mozbase/mozprocess/tests/scripts/procnonewline.py
new file mode 100644
index 0000000000..170cc1b19d
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/scripts/procnonewline.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import, print_function
+
+import sys
+
+print("this is a newline")
+sys.stdout.write("this has NO newline")
diff --git a/testing/mozbase/mozprocess/tests/test_detached.py b/testing/mozbase/mozprocess/tests/test_detached.py
new file mode 100644
index 0000000000..10f754ebca
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/test_detached.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+
+import os
+
+import mozunit
+
+from mozprocess import processhandler
+
+import proctest
+
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class ProcTestDetached(proctest.ProcTest):
+ """Class to test for detached processes."""
+
+ def test_check_for_detached_before_run(self):
+ """Process is not started yet when checked for detached."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+
+ with self.assertRaises(RuntimeError):
+ p.check_for_detached(1234)
+
+ def test_check_for_detached_while_running_with_current_pid(self):
+ """Process is started, and check for detached with original pid."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+
+ orig_pid = p.pid
+ p.check_for_detached(p.pid)
+
+ self.assertEqual(p.pid, orig_pid)
+ self.assertIsNone(p.proc.detached_pid)
+
+ self.determine_status(p, True)
+ p.kill()
+
+ def test_check_for_detached_after_fork(self):
+ """Process is started, and check for detached with new pid."""
+ pass
+
+ def test_check_for_detached_after_kill(self):
+ """Process is killed before checking for detached pid."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+ p.kill()
+
+ orig_pid = p.pid
+ p.check_for_detached(p.pid)
+
+ self.assertEqual(p.pid, orig_pid)
+ self.assertIsNone(p.proc.detached_pid)
+
+ self.determine_status(p)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozprocess/tests/test_kill.py b/testing/mozbase/mozprocess/tests/test_kill.py
new file mode 100644
index 0000000000..2abfe8e033
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/test_kill.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+
+import os
+import time
+import unittest
+import proctest
+import signal
+import sys
+
+import mozunit
+
+from mozprocess import processhandler
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class ProcTestKill(proctest.ProcTest):
+ """ Class to test various process tree killing scenatios """
+
+ def test_kill_before_run(self):
+ """Process is not started, and kill() is called"""
+
+ p = processhandler.ProcessHandler([self.python, "-V"])
+ self.assertRaises(RuntimeError, p.kill)
+
+ def test_process_kill(self):
+ """Process is started, we kill it"""
+
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+ p.kill()
+
+ self.determine_status(p, expectedfail=("returncode",))
+
+ def test_process_kill_deep(self):
+ """Process is started, we kill it, we use a deep process tree"""
+
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_deep.ini"], cwd=here
+ )
+ p.run()
+ p.kill()
+
+ self.determine_status(p, expectedfail=("returncode",))
+
+ def test_process_kill_deep_wait(self):
+ """Process is started, we use a deep process tree, we let it spawn
+ for a bit, we kill it"""
+
+ myenv = None
+ # On macosx1014, subprocess fails to find `six` when run with python3.
+ # This ensures that subprocess first looks to sys.path to find `six`.
+ # See https://bugzilla.mozilla.org/show_bug.cgi?id=1562083
+ if sys.platform == "darwin" and sys.version_info[0] > 2:
+ myenv = os.environ.copy()
+ myenv["PYTHONPATH"] = ":".join(sys.path)
+
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_deep.ini"],
+ cwd=here,
+ env=myenv,
+ )
+ p.run()
+ # Let the tree spawn a bit, before attempting to kill
+ time.sleep(3)
+ p.kill()
+
+ self.determine_status(p, expectedfail=("returncode",))
+
+ def test_process_kill_broad(self):
+ """Process is started, we kill it, we use a broad process tree"""
+
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_broad.ini"], cwd=here
+ )
+ p.run()
+ p.kill()
+
+ self.determine_status(p, expectedfail=("returncode",))
+
+ def test_process_kill_broad_delayed(self):
+ """Process is started, we use a broad process tree, we let it spawn
+ for a bit, we kill it"""
+
+ myenv = None
+ # On macosx1014, subprocess fails to find `six` when run with python3.
+ # This ensures that subprocess first looks to sys.path to find `six`.
+ # See https://bugzilla.mozilla.org/show_bug.cgi?id=1562083
+ if sys.platform == "darwin" and sys.version_info[0] > 2:
+ myenv = os.environ.copy()
+ myenv["PYTHONPATH"] = ":".join(sys.path)
+
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_broad.ini"],
+ cwd=here,
+ env=myenv,
+ )
+ p.run()
+ # Let the tree spawn a bit, before attempting to kill
+ time.sleep(3)
+ p.kill()
+
+ self.determine_status(p, expectedfail=("returncode",))
+
+ @unittest.skipUnless(processhandler.isPosix, "posix only")
+ def test_process_kill_with_sigterm(self):
+ script = os.path.join(here, "scripts", "infinite_loop.py")
+ p = processhandler.ProcessHandler([self.python, script])
+
+ p.run()
+ p.kill()
+
+ self.assertEquals(p.proc.returncode, -signal.SIGTERM)
+
+ @unittest.skipUnless(processhandler.isPosix, "posix only")
+ def test_process_kill_with_sigint_if_needed(self):
+ script = os.path.join(here, "scripts", "infinite_loop.py")
+ p = processhandler.ProcessHandler([self.python, script, "deadlock"])
+
+ p.run()
+ time.sleep(1)
+ p.kill()
+
+ self.assertEquals(p.proc.returncode, -signal.SIGKILL)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozprocess/tests/test_misc.py b/testing/mozbase/mozprocess/tests/test_misc.py
new file mode 100644
index 0000000000..ef678501be
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/test_misc.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import os
+import sys
+
+import mozunit
+
+import proctest
+from mozprocess import processhandler
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class ProcTestMisc(proctest.ProcTest):
+ """ Class to test misc operations """
+
+ def test_process_timeout_no_kill(self):
+ """Process is started, runs but we time out waiting on it
+ to complete. Process should not be killed.
+ """
+ p = None
+
+ def timeout_handler():
+ self.assertEqual(p.proc.poll(), None)
+ p.kill()
+
+ myenv = None
+ # On macosx1014, subprocess fails to find `six` when run with python3.
+ # This ensures that subprocess first looks to sys.path to find `six`.
+ # See https://bugzilla.mozilla.org/show_bug.cgi?id=1562083
+ if sys.platform == "darwin" and sys.version_info[0] > 2:
+ myenv = os.environ.copy()
+ myenv["PYTHONPATH"] = ":".join(sys.path)
+
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_waittimeout.ini"],
+ cwd=here,
+ env=myenv,
+ onTimeout=(timeout_handler,),
+ kill_on_timeout=False,
+ )
+ p.run(timeout=1)
+ p.wait()
+ self.assertTrue(p.didTimeout)
+
+ self.determine_status(p, False, ["returncode", "didtimeout"])
+
+ def test_unicode_in_environment(self):
+ env = {
+ "FOOBAR": "ʘ",
+ }
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"],
+ cwd=here,
+ env=env,
+ )
+ # passes if no exceptions are raised
+ p.run()
+ p.wait()
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozprocess/tests/test_output.py b/testing/mozbase/mozprocess/tests/test_output.py
new file mode 100644
index 0000000000..d93c1a254b
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/test_output.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+
+import io
+import os
+
+import mozunit
+import proctest
+from mozprocess import processhandler
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class ProcTestOutput(proctest.ProcTest):
+ """ Class to test operations related to output handling """
+
+ def test_process_output_twice(self):
+ """
+ Process is started, then processOutput is called a second time explicitly
+ """
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_waittimeout_10s.ini"], cwd=here
+ )
+
+ p.run()
+ p.processOutput(timeout=5)
+ p.wait()
+
+ self.determine_status(p, False, ())
+
+ def test_process_output_nonewline(self):
+ """
+ Process is started, outputs data with no newline
+ """
+ p = processhandler.ProcessHandler(
+ [self.python, os.path.join("scripts", "procnonewline.py")], cwd=here
+ )
+
+ p.run()
+ p.processOutput(timeout=5)
+ p.wait()
+
+ self.determine_status(p, False, ())
+
+ def test_stream_process_output(self):
+ """
+ Process output stream does not buffer
+ """
+ expected = "\n".join([str(n) for n in range(0, 10)])
+
+ stream = io.BytesIO()
+ buf = io.BufferedRandom(stream)
+
+ p = processhandler.ProcessHandler(
+ [self.python, os.path.join("scripts", "proccountfive.py")],
+ cwd=here,
+ stream=buf,
+ )
+
+ p.run()
+ p.wait()
+ for i in range(5, 10):
+ stream.write(str(i).encode("utf8") + "\n".encode("utf8"))
+
+ buf.flush()
+ self.assertEquals(stream.getvalue().strip().decode("utf8"), expected)
+
+ # make sure mozprocess doesn't close the stream
+ # since mozprocess didn't create it
+ self.assertFalse(buf.closed)
+ buf.close()
+
+ self.determine_status(p, False, ())
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozprocess/tests/test_params.py b/testing/mozbase/mozprocess/tests/test_params.py
new file mode 100644
index 0000000000..3160496898
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/test_params.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function
+
+import unittest
+
+import mozunit
+
+from mozprocess import processhandler
+
+
+class ParamTests(unittest.TestCase):
+ def test_process_outputline_handler(self):
+ """Parameter processOutputLine is accepted with a single function"""
+
+ def output(line):
+ print("output " + str(line))
+
+ err = None
+ try:
+ processhandler.ProcessHandler(["ls", "-l"], processOutputLine=output)
+ except (TypeError, AttributeError) as e:
+ err = e
+ self.assertFalse(err)
+
+ def test_process_outputline_handler_list(self):
+ """Parameter processOutputLine is accepted with a list of functions"""
+
+ def output(line):
+ print("output " + str(line))
+
+ err = None
+ try:
+ processhandler.ProcessHandler(["ls", "-l"], processOutputLine=[output])
+ except (TypeError, AttributeError) as e:
+ err = e
+ self.assertFalse(err)
+
+ def test_process_ontimeout_handler(self):
+ """Parameter onTimeout is accepted with a single function"""
+
+ def timeout():
+ print("timeout!")
+
+ err = None
+ try:
+ processhandler.ProcessHandler(["sleep", "2"], onTimeout=timeout)
+ except (TypeError, AttributeError) as e:
+ err = e
+ self.assertFalse(err)
+
+ def test_process_ontimeout_handler_list(self):
+ """Parameter onTimeout is accepted with a list of functions"""
+
+ def timeout():
+ print("timeout!")
+
+ err = None
+ try:
+ processhandler.ProcessHandler(["sleep", "2"], onTimeout=[timeout])
+ except (TypeError, AttributeError) as e:
+ err = e
+ self.assertFalse(err)
+
+ def test_process_onfinish_handler(self):
+ """Parameter onFinish is accepted with a single function"""
+
+ def finish():
+ print("finished!")
+
+ err = None
+ try:
+ processhandler.ProcessHandler(["sleep", "1"], onFinish=finish)
+ except (TypeError, AttributeError) as e:
+ err = e
+ self.assertFalse(err)
+
+ def test_process_onfinish_handler_list(self):
+ """Parameter onFinish is accepted with a list of functions"""
+
+ def finish():
+ print("finished!")
+
+ err = None
+ try:
+ processhandler.ProcessHandler(["sleep", "1"], onFinish=[finish])
+ except (TypeError, AttributeError) as e:
+ err = e
+ self.assertFalse(err)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozprocess/tests/test_pid.py b/testing/mozbase/mozprocess/tests/test_pid.py
new file mode 100644
index 0000000000..44ddfafaa3
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/test_pid.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+
+import os
+
+import mozunit
+
+from mozprocess import processhandler
+
+import proctest
+
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class ProcTestPid(proctest.ProcTest):
+ """Class to test process pid."""
+
+ def test_pid_before_run(self):
+ """Process is not started, and pid is checked."""
+ p = processhandler.ProcessHandler([self.python])
+ with self.assertRaises(RuntimeError):
+ p.pid
+
+ def test_pid_while_running(self):
+ """Process is started, and pid is checked."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+
+ self.assertIsNotNone(p.pid)
+
+ self.determine_status(p, True)
+ p.kill()
+
+ def test_pid_after_kill(self):
+ """Process is killed, and pid is checked."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+ p.kill()
+
+ self.assertIsNotNone(p.pid)
+ self.determine_status(p)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozprocess/tests/test_poll.py b/testing/mozbase/mozprocess/tests/test_poll.py
new file mode 100644
index 0000000000..ae6784f502
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/test_poll.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+
+import os
+import signal
+import sys
+import unittest
+
+import mozinfo
+import mozunit
+import proctest
+import time
+from mozprocess import processhandler
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class ProcTestPoll(proctest.ProcTest):
+ """Class to test process poll."""
+
+ def test_poll_before_run(self):
+ """Process is not started, and poll() is called."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ self.assertRaises(RuntimeError, p.poll)
+
+ def test_poll_while_running(self):
+ """Process is started, and poll() is called."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+ returncode = p.poll()
+
+ self.assertEqual(returncode, None)
+
+ self.determine_status(p, True)
+ p.kill()
+
+ def test_poll_after_kill(self):
+ """Process is killed, and poll() is called."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+ returncode = p.kill()
+
+ # We killed the process, so the returncode should be non-zero
+ if mozinfo.isWin:
+ self.assertGreater(
+ returncode, 0, 'Positive returncode expected, got "%s"' % returncode
+ )
+ else:
+ self.assertLess(
+ returncode, 0, 'Negative returncode expected, got "%s"' % returncode
+ )
+
+ self.assertEqual(returncode, p.poll())
+
+ self.determine_status(p)
+
+ def test_poll_after_kill_no_process_group(self):
+ """Process (no group) is killed, and poll() is called."""
+ p = processhandler.ProcessHandler(
+ [
+ self.python,
+ self.proclaunch,
+ "process_normal_finish_no_process_group.ini",
+ ],
+ cwd=here,
+ ignore_children=True,
+ )
+ p.run()
+ returncode = p.kill()
+
+ # We killed the process, so the returncode should be non-zero
+ if mozinfo.isWin:
+ self.assertGreater(
+ returncode, 0, 'Positive returncode expected, got "%s"' % returncode
+ )
+ else:
+ self.assertLess(
+ returncode, 0, 'Negative returncode expected, got "%s"' % returncode
+ )
+
+ self.assertEqual(returncode, p.poll())
+
+ self.determine_status(p)
+
+ def test_poll_after_double_kill(self):
+ """Process is killed twice, and poll() is called."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+ p.kill()
+ returncode = p.kill()
+
+ # We killed the process, so the returncode should be non-zero
+ if mozinfo.isWin:
+ self.assertGreater(
+ returncode, 0, 'Positive returncode expected, got "%s"' % returncode
+ )
+ else:
+ self.assertLess(
+ returncode, 0, 'Negative returncode expected, got "%s"' % returncode
+ )
+
+ self.assertEqual(returncode, p.poll())
+
+ self.determine_status(p)
+
+ @unittest.skipIf(sys.platform.startswith("win"), "Bug 1493796")
+ def test_poll_after_external_kill(self):
+ """Process is killed externally, and poll() is called."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+
+ os.kill(p.pid, signal.SIGTERM)
+
+ # Allow the output reader thread to finish processing remaining data
+ for i in range(0, 100):
+ time.sleep(processhandler.INTERVAL_PROCESS_ALIVE_CHECK)
+ returncode = p.poll()
+ if returncode is not None:
+ break
+
+ # We killed the process, so the returncode should be non-zero
+ if mozinfo.isWin:
+ self.assertEqual(
+ returncode,
+ signal.SIGTERM,
+ 'Positive returncode expected, got "%s"' % returncode,
+ )
+ else:
+ self.assertEqual(
+ returncode,
+ -signal.SIGTERM,
+ '%s expected, got "%s"' % (-signal.SIGTERM, returncode),
+ )
+
+ self.assertEqual(returncode, p.wait())
+
+ self.determine_status(p)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozprocess/tests/test_process_reader.py b/testing/mozbase/mozprocess/tests/test_process_reader.py
new file mode 100644
index 0000000000..77e37ef4fa
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/test_process_reader.py
@@ -0,0 +1,116 @@
+from __future__ import absolute_import
+
+import subprocess
+import sys
+import unittest
+
+import mozunit
+from mozprocess.processhandler import ProcessReader, StoreOutput
+
+
+def run_python(str_code, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
+ cmd = [sys.executable, "-c", str_code]
+ return subprocess.Popen(cmd, stdout=stdout, stderr=stderr)
+
+
+class TestProcessReader(unittest.TestCase):
+ def setUp(self):
+ self.out = StoreOutput()
+ self.err = StoreOutput()
+ self.finished = False
+
+ def on_finished():
+ self.finished = True
+
+ self.timeout = False
+
+ def on_timeout():
+ self.timeout = True
+
+ self.reader = ProcessReader(
+ stdout_callback=self.out,
+ stderr_callback=self.err,
+ finished_callback=on_finished,
+ timeout_callback=on_timeout,
+ )
+
+ def test_stdout_callback(self):
+ proc = run_python("print(1); print(2)")
+ self.reader.start(proc)
+ self.reader.thread.join()
+
+ self.assertEqual([x.decode("utf8") for x in self.out.output], ["1", "2"])
+ self.assertEqual(self.err.output, [])
+
+ def test_stderr_callback(self):
+ proc = run_python('import sys; sys.stderr.write("hello world\\n")')
+ self.reader.start(proc)
+ self.reader.thread.join()
+
+ self.assertEqual(self.out.output, [])
+ self.assertEqual([x.decode("utf8") for x in self.err.output], ["hello world"])
+
+ def test_stdout_and_stderr_callbacks(self):
+ proc = run_python(
+ 'import sys; sys.stderr.write("hello world\\n"); print(1); print(2)'
+ )
+ self.reader.start(proc)
+ self.reader.thread.join()
+
+ self.assertEqual([x.decode("utf8") for x in self.out.output], ["1", "2"])
+ self.assertEqual([x.decode("utf8") for x in self.err.output], ["hello world"])
+
+ def test_finished_callback(self):
+ self.assertFalse(self.finished)
+ proc = run_python("")
+ self.reader.start(proc)
+ self.reader.thread.join()
+ self.assertTrue(self.finished)
+
+ def test_timeout(self):
+ self.reader.timeout = 0.05
+ self.assertFalse(self.timeout)
+ proc = run_python("import time; time.sleep(0.1)")
+ self.reader.start(proc)
+ self.reader.thread.join()
+ self.assertTrue(self.timeout)
+ self.assertFalse(self.finished)
+
+ def test_output_timeout(self):
+ self.reader.output_timeout = 0.05
+ self.assertFalse(self.timeout)
+ proc = run_python("import time; time.sleep(0.1)")
+ self.reader.start(proc)
+ self.reader.thread.join()
+ self.assertTrue(self.timeout)
+ self.assertFalse(self.finished)
+
+ def test_read_without_eol(self):
+ proc = run_python('import sys; sys.stdout.write("1")')
+ self.reader.start(proc)
+ self.reader.thread.join()
+ self.assertEqual([x.decode("utf8") for x in self.out.output], ["1"])
+
+ def test_read_with_strange_eol(self):
+ proc = run_python('import sys; sys.stdout.write("1\\r\\r\\r\\n")')
+ self.reader.start(proc)
+ self.reader.thread.join()
+ self.assertEqual([x.decode("utf8") for x in self.out.output], ["1"])
+
+ def test_mixed_stdout_stderr(self):
+ proc = run_python(
+ 'import sys; sys.stderr.write("hello world\\n"); print(1); print(2)',
+ stderr=subprocess.STDOUT,
+ )
+ self.reader.start(proc)
+ self.reader.thread.join()
+
+ self.assertEqual(
+ sorted([x.decode("utf8") for x in self.out.output]),
+ sorted(["1", "2", "hello world"]),
+ )
+ self.assertEqual(self.err.output, [])
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozprocess/tests/test_wait.py b/testing/mozbase/mozprocess/tests/test_wait.py
new file mode 100644
index 0000000000..64026029a6
--- /dev/null
+++ b/testing/mozbase/mozprocess/tests/test_wait.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+
+import os
+import signal
+import sys
+
+import mozinfo
+import mozunit
+import proctest
+from mozprocess import processhandler
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class ProcTestWait(proctest.ProcTest):
+ """ Class to test process waits and timeouts """
+
+ def test_normal_finish(self):
+ """Process is started, runs to completion while we wait for it"""
+
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+ p.wait()
+
+ self.determine_status(p)
+
+ def test_wait(self):
+ """Process is started runs to completion while we wait indefinitely"""
+
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_waittimeout_10s.ini"], cwd=here
+ )
+ p.run()
+ p.wait()
+
+ self.determine_status(p)
+
+ def test_timeout(self):
+ """Process is started, runs but we time out waiting on it
+ to complete
+ """
+ myenv = None
+ # On macosx1014, subprocess fails to find `six` when run with python3.
+ # This ensures that subprocess first looks to sys.path to find `six`.
+ # See https://bugzilla.mozilla.org/show_bug.cgi?id=1562083
+ if sys.platform == "darwin" and sys.version_info[0] > 2:
+ myenv = os.environ.copy()
+ myenv["PYTHONPATH"] = ":".join(sys.path)
+
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_waittimeout.ini"],
+ cwd=here,
+ env=myenv,
+ )
+ p.run(timeout=10)
+ p.wait()
+
+ if mozinfo.isUnix:
+ # process was killed, so returncode should be negative
+ self.assertLess(p.proc.returncode, 0)
+
+ self.determine_status(p, False, ["returncode", "didtimeout"])
+
+ def test_waittimeout(self):
+ """
+ Process is started, then wait is called and times out.
+ Process is still running and didn't timeout
+ """
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_waittimeout_10s.ini"], cwd=here
+ )
+
+ p.run()
+ p.wait(timeout=0)
+
+ self.determine_status(p, True, ())
+
+ def test_waitnotimeout(self):
+ """Process is started, runs to completion before our wait times out"""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_waittimeout_10s.ini"], cwd=here
+ )
+ p.run(timeout=30)
+ p.wait()
+
+ self.determine_status(p)
+
+ def test_wait_twice_after_kill(self):
+ """Bug 968718: Process is started and stopped. wait() twice afterward."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_waittimeout.ini"], cwd=here
+ )
+ p.run()
+ p.kill()
+ returncode1 = p.wait()
+ returncode2 = p.wait()
+
+ self.determine_status(p)
+
+ # We killed the process, so the returncode should be non-zero
+ if mozinfo.isWin:
+ self.assertGreater(
+ returncode2, 0, 'Positive returncode expected, got "%s"' % returncode2
+ )
+ else:
+ self.assertLess(
+ returncode2, 0, 'Negative returncode expected, got "%s"' % returncode2
+ )
+ self.assertEqual(
+ returncode1, returncode2, "Expected both returncodes of wait() to be equal"
+ )
+
+ def test_wait_after_external_kill(self):
+ """Process is killed externally, and poll() is called."""
+ p = processhandler.ProcessHandler(
+ [self.python, self.proclaunch, "process_normal_finish.ini"], cwd=here
+ )
+ p.run()
+ os.kill(p.pid, signal.SIGTERM)
+ returncode = p.wait()
+
+ # We killed the process, so the returncode should be non-zero
+ if mozinfo.isWin:
+ self.assertEqual(
+ returncode,
+ signal.SIGTERM,
+ 'Positive returncode expected, got "%s"' % returncode,
+ )
+ else:
+ self.assertEqual(
+ returncode,
+ -signal.SIGTERM,
+ '%s expected, got "%s"' % (-signal.SIGTERM, returncode),
+ )
+
+ self.assertEqual(returncode, p.poll())
+
+ self.determine_status(p)
+
+
+if __name__ == "__main__":
+ mozunit.main()