#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import os
import itertools
import sys
import weakref
import atexit
import threading        # we want threading to install its
                        # cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags

from . import process

__all__ = [
    'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
    'log_to_stderr', 'get_temp_dir', 'register_after_fork',
    'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
    'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
    ]

#
# Logging
#

NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25

LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'

_logger = None
_log_to_stderr = False

def sub_debug(msg, *args):
    # Log at SUBDEBUG level, but only if logging has been enabled.
    if _logger:
        _logger.log(SUBDEBUG, msg, *args)

def debug(msg, *args):
    # Log at DEBUG level, but only if logging has been enabled.
    if _logger:
        _logger.log(DEBUG, msg, *args)

def info(msg, *args):
    # Log at INFO level, but only if logging has been enabled.
    if _logger:
        _logger.log(INFO, msg, *args)

def sub_warning(msg, *args):
    # Log at SUBWARNING level, but only if logging has been enabled.
    if _logger:
        _logger.log(SUBWARNING, msg, *args)

def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging

    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0

            # XXX multiprocessing should cleanup before logging
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger

def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    global _log_to_stderr
    import logging

    logger = get_logger()
    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if level:
        logger.setLevel(level)
    _log_to_stderr = True
    return _logger

#
# Function returning a temp directory which will be removed on exit
#

def _remove_temp_dir(rmtree, tempdir):
    # Finalizer which removes the process's temp directory; *rmtree* is
    # passed in explicitly so it survives Python shutdown (module globals
    # may already have been cleared by the time this runs).
    rmtree(tempdir)

    current_process = process.current_process()
    # current_process() can be None if the finalizer is called
    # late during Python finalization
    if current_process is not None:
        current_process._config['tempdir'] = None

def get_temp_dir():
    # get name of a temp directory which will be automatically cleaned up
    tempdir = process.current_process()._config.get('tempdir')
    if tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        # keep a strong reference to shutil.rmtree(), since the finalizer
        # can be called late during Python shutdown
        Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
                 exitpriority=-100)
        process.current_process()._config['tempdir'] = tempdir
    return tempdir

#
# Support for reinitialization of objects when bootstrapping a child process
#

_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()

def _run_after_forkers():
    # Run every registered after-fork callback in registration order;
    # exceptions are logged and swallowed so one bad callback cannot
    # break child-process bootstrap.
    items = list(_afterfork_registry.items())
    items.sort()
    for (index, ident, func), obj in items:
        try:
            func(obj)
        except Exception as e:
            info('after forker raised exception %s', e)

def register_after_fork(obj, func):
    # Arrange for func(obj) to be called in the child after a fork; the
    # registry holds obj only weakly, so registration does not keep it alive.
    _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj

#
# Finalization using weakrefs
#

_finalizer_registry = {}
_finalizer_counter = itertools.count()


class Finalize(object):
    '''
    Class which supports object finalization using weakrefs
    '''
    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
        if (exitpriority is not None) and not isinstance(exitpriority,int):
            raise TypeError(
                "Exitpriority ({0!r}) must be None or int, not {1!s}".format(
                    exitpriority, type(exitpriority)))

        if obj is not None:
            self._weakref = weakref.ref(obj, self)
        elif exitpriority is None:
            raise ValueError("Without object, exitpriority cannot be None")

        self._callback = callback
        self._args = args
        self._kwargs = kwargs or {}
        self._key = (exitpriority, next(_finalizer_counter))
        self._pid = os.getpid()

        _finalizer_registry[self._key] = self

    def __call__(self, wr=None,
                 # Need to bind these locally because the globals can have
                 # been cleared at shutdown
                 _finalizer_registry=_finalizer_registry,
                 sub_debug=sub_debug, getpid=os.getpid):
        '''
        Run the callback unless it has already been called or cancelled
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            sub_debug('finalizer no longer registered')
        else:
            if self._pid != getpid():
                sub_debug('finalizer ignored because different process')
                res = None
            else:
                sub_debug('finalizer calling %s with args %s and kwargs %s',
                          self._callback, self._args, self._kwargs)
                res = self._callback(*self._args, **self._kwargs)
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None
            return res

    def cancel(self):
        '''
        Cancel finalization of the object
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            pass
        else:
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None

    def still_active(self):
        '''
        Return whether this finalizer is still waiting to invoke callback
        '''
        return self._key in _finalizer_registry

    def __repr__(self):
        try:
            obj = self._weakref()
        except (AttributeError, TypeError):
            obj = None

        if obj is None:
            return '<%s object, dead>' % self.__class__.__name__

        x = '<%s object, callback=%s' % (
                self.__class__.__name__,
                getattr(self._callback, '__name__', self._callback))
        if self._args:
            x += ', args=' + str(self._args)
        if self._kwargs:
            x += ', kwargs=' + str(self._kwargs)
        if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
        return x + '>'


def _run_finalizers(minpriority=None):
    '''
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    '''
    if _finalizer_registry is None:
        # This function may be called after this module's globals are
        # destroyed.  See the _exit_function function in this module for more
        # notes.
        return

    if minpriority is None:
        f = lambda p : p[0] is not None
    else:
        f = lambda p : p[0] is not None and p[0] >= minpriority

    # Careful: _finalizer_registry may be mutated while this function
    # is running (either by a GC run or by another thread).

    # list(_finalizer_registry) should be atomic, while
    # list(_finalizer_registry.items()) is not.
    keys = [key for key in list(_finalizer_registry) if f(key)]
    keys.sort(reverse=True)

    for key in keys:
        finalizer = _finalizer_registry.get(key)
        # key may have been removed from the registry
        if finalizer is not None:
            sub_debug('calling %s', finalizer)
            try:
                finalizer()
            except Exception:
                import traceback
                traceback.print_exc()

    if minpriority is None:
        _finalizer_registry.clear()

#
# Clean up on exit
#

def is_exiting():
    '''
    Returns true if the process is shutting down
    '''
    return _exiting or _exiting is None

_exiting = False

def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
                   active_children=process.active_children,
                   current_process=process.current_process):
    # We hold on to references to functions in the arglist due to the
    # situation described below, where this function is called after this
    # module's globals are destroyed.

    global _exiting

    if not _exiting:
        _exiting = True

        info('process shutting down')
        debug('running all "atexit" finalizers with priority >= 0')
        _run_finalizers(0)

        if current_process() is not None:
            # We check if the current process is None here because if
            # it's None, any call to ``active_children()`` will raise
            # an AttributeError (active_children winds up trying to
            # get attributes from util._current_process).  One
            # situation where this can happen is if someone has
            # manipulated sys.modules, causing this module to be
            # garbage collected.  The destructor for the module type
            # then replaces all values in the module dict with None.
            # For instance, after setuptools runs a test it replaces
            # sys.modules with a copy created earlier.  See issues
            # #9775 and #15881.  Also related: #4106, #9205, and
            # #9207.

            for p in active_children():
                if p.daemon:
                    info('calling terminate() for daemon %s', p.name)
                    p._popen.terminate()

            for p in active_children():
                info('calling join() for process %s', p.name)
                p.join()

        debug('running the remaining "atexit" finalizers')
        _run_finalizers()

atexit.register(_exit_function)

#
# Some fork aware types
#

class ForkAwareThreadLock(object):
    # A lock that is replaced by a fresh (unlocked) threading.Lock in the
    # child after a fork, so a lock held at fork time cannot deadlock it.
    def __init__(self):
        self._reset()
        register_after_fork(self, ForkAwareThreadLock._reset)

    def _reset(self):
        self._lock = threading.Lock()
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)


class ForkAwareLocal(threading.local):
    # Thread-local storage whose contents are cleared in the child after
    # a fork; pickles to a fresh, empty instance.
    def __init__(self):
        register_after_fork(self, lambda obj : obj.__dict__.clear())
    def __reduce__(self):
        return type(self), ()

#
# Close fds except those specified
#

try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
    MAXFD = 256

def close_all_fds_except(fds):
    # Close every file descriptor below MAXFD except those listed in fds.
    # The sentinels -1 and MAXFD bracket the sorted list so that each
    # closerange() call covers the gap between two kept descriptors.
    fds = list(fds) + [-1, MAXFD]
    fds.sort()
    assert fds[-1] == MAXFD, 'fd too large'
    for i in range(len(fds) - 1):
        os.closerange(fds[i]+1, fds[i+1])

#
# Close sys.stdin and replace stdin with os.devnull
#

def _close_stdin():
    if sys.stdin is None:
        return

    try:
        sys.stdin.close()
    except (OSError, ValueError):
        pass

    try:
        fd = os.open(os.devnull, os.O_RDONLY)
        try:
            sys.stdin = open(fd, closefd=False)
        except:
            os.close(fd)
            raise
    except (OSError, ValueError):
        pass

#
# Flush standard streams, if any
#

def _flush_std_streams():
    try:
        sys.stdout.flush()
    except (AttributeError, ValueError):
        pass
    try:
        sys.stderr.flush()
    except (AttributeError, ValueError):
        pass

#
# Start a program with only specified fds kept open
#

def spawnv_passfds(path, args, passfds):
    # Fork and exec *path*, keeping only the descriptors in *passfds*
    # open in the child (plus the error pipe used internally).
    import _posixsubprocess
    passfds = tuple(sorted(map(int, passfds)))
    errpipe_read, errpipe_write = os.pipe()
    try:
        return _posixsubprocess.fork_exec(
            args, [os.fsencode(path)], True, passfds, None, None,
            -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
            False, False, None)
    finally:
        os.close(errpipe_read)
        os.close(errpipe_write)


def close_fds(*fds):
    """Close each file descriptor given as an argument"""
    for fd in fds:
        os.close(fd)


def _cleanup_tests():
    """Cleanup multiprocessing resources when multiprocessing tests
    completed."""

    from test import support

    # cleanup multiprocessing
    process._cleanup()

    # Stop the ForkServer process if it's running
    from multiprocessing import forkserver
    forkserver._forkserver._stop()

    # Stop the ResourceTracker process if it's running
    from multiprocessing import resource_tracker
    resource_tracker._resource_tracker._stop()

    # bpo-37421: Explicitly call _run_finalizers() to remove immediately
    # temporary directories created by multiprocessing.util.get_temp_dir().
    _run_finalizers()
    support.gc_collect()

    support.reap_children()