Diffstat (limited to 'third_party/python/futures')
-rw-r--r--  third_party/python/futures/CHANGES                                | 107
-rw-r--r--  third_party/python/futures/LICENSE                                |  21
-rw-r--r--  third_party/python/futures/MANIFEST.in                            |   5
-rw-r--r--  third_party/python/futures/PKG-INFO                               |  16
-rw-r--r--  third_party/python/futures/concurrent/__init__.py                 |   3
-rw-r--r--  third_party/python/futures/concurrent/futures/__init__.py         |  23
-rw-r--r--  third_party/python/futures/concurrent/futures/_base.py            | 607
-rw-r--r--  third_party/python/futures/concurrent/futures/process.py          | 359
-rw-r--r--  third_party/python/futures/concurrent/futures/thread.py           | 134
-rw-r--r--  third_party/python/futures/crawl.py                               |  74
-rw-r--r--  third_party/python/futures/docs/Makefile                          |  88
-rw-r--r--  third_party/python/futures/docs/conf.py                           | 194
-rw-r--r--  third_party/python/futures/docs/index.rst                         | 347
-rw-r--r--  third_party/python/futures/docs/make.bat                          | 112
-rw-r--r--  third_party/python/futures/futures.egg-info/PKG-INFO              |  16
-rw-r--r--  third_party/python/futures/futures.egg-info/SOURCES.txt           |  24
-rw-r--r--  third_party/python/futures/futures.egg-info/dependency_links.txt  |   1
-rw-r--r--  third_party/python/futures/futures.egg-info/not-zip-safe          |   1
-rw-r--r--  third_party/python/futures/futures.egg-info/pbr.json              |   1
-rw-r--r--  third_party/python/futures/futures.egg-info/top_level.txt         |   1
-rw-r--r--  third_party/python/futures/primes.py                              |  50
-rw-r--r--  third_party/python/futures/setup.cfg                              |  12
-rwxr-xr-x  third_party/python/futures/setup.py                               |  34
-rw-r--r--  third_party/python/futures/test_futures.py                        | 727
-rw-r--r--  third_party/python/futures/tox.ini                                |   8
25 files changed, 2965 insertions, 0 deletions
diff --git a/third_party/python/futures/CHANGES b/third_party/python/futures/CHANGES
new file mode 100644
index 0000000000..7dd051447b
--- /dev/null
+++ b/third_party/python/futures/CHANGES
@@ -0,0 +1,107 @@
+3.0.5
+=====
+
+- Fixed OverflowError with ProcessPoolExecutor on Windows (regression introduced in 3.0.4)
+
+
+3.0.4
+=====
+
+- Fixed inability to forcibly terminate the process if there are pending workers
+
+
+3.0.3
+=====
+
+- Fixed AttributeErrors on exit on Python 2.x
+
+
+3.0.2
+=====
+
+- Made multiprocessing optional again on implementations other than just Jython
+
+
+3.0.1
+=====
+
+- Made Executor.map() non-greedy
+
+
+3.0.0
+=====
+
+- Dropped Python 2.5 and 3.1 support
+- Removed the deprecated "futures" top level package
+- Applied patch for issue 11777 (Executor.map does not submit futures until
+ iter.next() is called)
+- Applied patch for issue 15015 (accessing a non-existing attribute)
+- Applied patch for issue 16284 (memory leak)
+- Applied patch for issue 20367 (behavior of concurrent.futures.as_completed()
+ for duplicate arguments)
+
+2.2.0
+=====
+
+- Added the set_exception_info() and exception_info() methods to Future
+ to enable extraction of tracebacks on Python 2.x
+- Added support for Future.set_exception_info() to ThreadPoolExecutor
+
+
+2.1.6
+=====
+
+- Fixed a problem with files missing from the source distribution
+
+
+2.1.5
+=====
+
+- Fixed Jython compatibility
+- Added metadata for wheel support
+
+
+2.1.4
+=====
+
+- Ported the library again from Python 3.2.5 to get the latest bug fixes
+
+
+2.1.3
+=====
+
+- Fixed race condition in wait(return_when=ALL_COMPLETED)
+ (http://bugs.python.org/issue14406) -- thanks Ralf Schmitt
+- Added missing setUp() methods to several test classes
+
+
+2.1.2
+=====
+
+- Fixed installation problem on Python 3.1
+
+
+2.1.1
+=====
+
+- Fixed missing 'concurrent' package declaration in setup.py
+
+
+2.1
+===
+
+- Moved the code from the 'futures' package to 'concurrent.futures' to provide
+  a drop-in backport that matches the code in the Python 3.2 standard library
+- Deprecated the old 'futures' package
+
+
+2.0
+===
+
+- Changed implementation to match PEP 3148
+
+
+1.0
+===
+
+Initial release.
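
The 2.2.0 entries above describe exception_info()/set_exception_info(), which
expose the traceback of a failed call on Python 2. A minimal sketch of that
API from the caller's side; the failing flaky() worker is illustrative, not
part of this distribution:

    import traceback
    from concurrent.futures import ThreadPoolExecutor

    def flaky():  # hypothetical worker that always fails
        raise ValueError('boom')

    executor = ThreadPoolExecutor(max_workers=1)
    future = executor.submit(flaky)
    exc, tb = future.exception_info()  # blocks until done; (None, None) on success
    if exc is not None:
        # The traceback object lets callers format or re-raise the original
        # stack, which plain exception() cannot provide on Python 2.
        print(''.join(traceback.format_exception(type(exc), exc, tb)))
    executor.shutdown()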
diff --git a/third_party/python/futures/LICENSE b/third_party/python/futures/LICENSE
new file mode 100644
index 0000000000..c430db0f17
--- /dev/null
+++ b/third_party/python/futures/LICENSE
@@ -0,0 +1,21 @@
+Copyright 2009 Brian Quinlan. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY BRIAN QUINLAN "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+SHALL THE FREEBSD PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file
diff --git a/third_party/python/futures/MANIFEST.in b/third_party/python/futures/MANIFEST.in
new file mode 100644
index 0000000000..52860d0435
--- /dev/null
+++ b/third_party/python/futures/MANIFEST.in
@@ -0,0 +1,5 @@
+recursive-include docs *
+include *.py
+include tox.ini
+include CHANGES
+include LICENSE
diff --git a/third_party/python/futures/PKG-INFO b/third_party/python/futures/PKG-INFO
new file mode 100644
index 0000000000..00e3a99767
--- /dev/null
+++ b/third_party/python/futures/PKG-INFO
@@ -0,0 +1,16 @@
+Metadata-Version: 1.0
+Name: futures
+Version: 3.0.5
+Summary: Backport of the concurrent.futures package from Python 3.2
+Home-page: https://github.com/agronholm/pythonfutures
+Author: Alex Gronholm
+Author-email: alex.gronholm+pypi@nextday.fi
+License: BSD
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 2 :: Only
diff --git a/third_party/python/futures/concurrent/__init__.py b/third_party/python/futures/concurrent/__init__.py
new file mode 100644
index 0000000000..b36383a610
--- /dev/null
+++ b/third_party/python/futures/concurrent/__init__.py
@@ -0,0 +1,3 @@
+from pkgutil import extend_path
+
+__path__ = extend_path(__path__, __name__)
diff --git a/third_party/python/futures/concurrent/futures/__init__.py b/third_party/python/futures/concurrent/futures/__init__.py
new file mode 100644
index 0000000000..428b14bdfe
--- /dev/null
+++ b/third_party/python/futures/concurrent/futures/__init__.py
@@ -0,0 +1,23 @@
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Execute computations asynchronously using threads or processes."""
+
+__author__ = 'Brian Quinlan (brian@sweetapp.com)'
+
+from concurrent.futures._base import (FIRST_COMPLETED,
+ FIRST_EXCEPTION,
+ ALL_COMPLETED,
+ CancelledError,
+ TimeoutError,
+ Future,
+ Executor,
+ wait,
+ as_completed)
+from concurrent.futures.thread import ThreadPoolExecutor
+
+try:
+ from concurrent.futures.process import ProcessPoolExecutor
+except ImportError:
+ # some platforms don't have multiprocessing
+ pass
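
The ImportError guard above means ProcessPoolExecutor can be absent on
platforms without multiprocessing (e.g. Jython). A minimal sketch of how
client code might cope with that; the square() workload is illustrative:

    import concurrent.futures

    try:
        Pool = concurrent.futures.ProcessPoolExecutor
    except AttributeError:
        # The attribute is missing when the import above failed, so fall
        # back to threads (at the cost of the GIL for CPU-bound work).
        Pool = concurrent.futures.ThreadPoolExecutor

    def square(x):  # stand-in workload, not part of this package
        return x * x

    with Pool(4) as pool:
        print(list(pool.map(square, range(10))))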
diff --git a/third_party/python/futures/concurrent/futures/_base.py b/third_party/python/futures/concurrent/futures/_base.py
new file mode 100644
index 0000000000..2936c46b16
--- /dev/null
+++ b/third_party/python/futures/concurrent/futures/_base.py
@@ -0,0 +1,607 @@
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+import collections
+import logging
+import threading
+import itertools
+import time
+
+__author__ = 'Brian Quinlan (brian@sweetapp.com)'
+
+FIRST_COMPLETED = 'FIRST_COMPLETED'
+FIRST_EXCEPTION = 'FIRST_EXCEPTION'
+ALL_COMPLETED = 'ALL_COMPLETED'
+_AS_COMPLETED = '_AS_COMPLETED'
+
+# Possible future states (for internal use by the futures package).
+PENDING = 'PENDING'
+RUNNING = 'RUNNING'
+# The future was cancelled by the user...
+CANCELLED = 'CANCELLED'
+# ...and _Waiter.add_cancelled() was called by a worker.
+CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
+FINISHED = 'FINISHED'
+
+_FUTURE_STATES = [
+ PENDING,
+ RUNNING,
+ CANCELLED,
+ CANCELLED_AND_NOTIFIED,
+ FINISHED
+]
+
+_STATE_TO_DESCRIPTION_MAP = {
+ PENDING: "pending",
+ RUNNING: "running",
+ CANCELLED: "cancelled",
+ CANCELLED_AND_NOTIFIED: "cancelled",
+ FINISHED: "finished"
+}
+
+# Logger for internal use by the futures package.
+LOGGER = logging.getLogger("concurrent.futures")
+
+class Error(Exception):
+ """Base class for all future-related exceptions."""
+ pass
+
+class CancelledError(Error):
+ """The Future was cancelled."""
+ pass
+
+class TimeoutError(Error):
+ """The operation exceeded the given deadline."""
+ pass
+
+class _Waiter(object):
+ """Provides the event that wait() and as_completed() block on."""
+ def __init__(self):
+ self.event = threading.Event()
+ self.finished_futures = []
+
+ def add_result(self, future):
+ self.finished_futures.append(future)
+
+ def add_exception(self, future):
+ self.finished_futures.append(future)
+
+ def add_cancelled(self, future):
+ self.finished_futures.append(future)
+
+class _AsCompletedWaiter(_Waiter):
+ """Used by as_completed()."""
+
+ def __init__(self):
+ super(_AsCompletedWaiter, self).__init__()
+ self.lock = threading.Lock()
+
+ def add_result(self, future):
+ with self.lock:
+ super(_AsCompletedWaiter, self).add_result(future)
+ self.event.set()
+
+ def add_exception(self, future):
+ with self.lock:
+ super(_AsCompletedWaiter, self).add_exception(future)
+ self.event.set()
+
+ def add_cancelled(self, future):
+ with self.lock:
+ super(_AsCompletedWaiter, self).add_cancelled(future)
+ self.event.set()
+
+class _FirstCompletedWaiter(_Waiter):
+ """Used by wait(return_when=FIRST_COMPLETED)."""
+
+ def add_result(self, future):
+ super(_FirstCompletedWaiter, self).add_result(future)
+ self.event.set()
+
+ def add_exception(self, future):
+ super(_FirstCompletedWaiter, self).add_exception(future)
+ self.event.set()
+
+ def add_cancelled(self, future):
+ super(_FirstCompletedWaiter, self).add_cancelled(future)
+ self.event.set()
+
+class _AllCompletedWaiter(_Waiter):
+ """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
+
+ def __init__(self, num_pending_calls, stop_on_exception):
+ self.num_pending_calls = num_pending_calls
+ self.stop_on_exception = stop_on_exception
+ self.lock = threading.Lock()
+ super(_AllCompletedWaiter, self).__init__()
+
+ def _decrement_pending_calls(self):
+ with self.lock:
+ self.num_pending_calls -= 1
+ if not self.num_pending_calls:
+ self.event.set()
+
+ def add_result(self, future):
+ super(_AllCompletedWaiter, self).add_result(future)
+ self._decrement_pending_calls()
+
+ def add_exception(self, future):
+ super(_AllCompletedWaiter, self).add_exception(future)
+ if self.stop_on_exception:
+ self.event.set()
+ else:
+ self._decrement_pending_calls()
+
+ def add_cancelled(self, future):
+ super(_AllCompletedWaiter, self).add_cancelled(future)
+ self._decrement_pending_calls()
+
+class _AcquireFutures(object):
+ """A context manager that does an ordered acquire of Future conditions."""
+
+ def __init__(self, futures):
+ self.futures = sorted(futures, key=id)
+
+ def __enter__(self):
+ for future in self.futures:
+ future._condition.acquire()
+
+ def __exit__(self, *args):
+ for future in self.futures:
+ future._condition.release()
+
+def _create_and_install_waiters(fs, return_when):
+ if return_when == _AS_COMPLETED:
+ waiter = _AsCompletedWaiter()
+ elif return_when == FIRST_COMPLETED:
+ waiter = _FirstCompletedWaiter()
+ else:
+ pending_count = sum(
+ f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
+
+ if return_when == FIRST_EXCEPTION:
+ waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
+ elif return_when == ALL_COMPLETED:
+ waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
+ else:
+ raise ValueError("Invalid return condition: %r" % return_when)
+
+ for f in fs:
+ f._waiters.append(waiter)
+
+ return waiter
+
+def as_completed(fs, timeout=None):
+ """An iterator over the given futures that yields each as it completes.
+
+ Args:
+ fs: The sequence of Futures (possibly created by different Executors) to
+ iterate over.
+ timeout: The maximum number of seconds to wait. If None, then there
+ is no limit on the wait time.
+
+ Returns:
+ An iterator that yields the given Futures as they complete (finished or
+ cancelled). If any given Futures are duplicated, they will be returned
+ once.
+
+ Raises:
+ TimeoutError: If the entire result iterator could not be generated
+ before the given timeout.
+ """
+ if timeout is not None:
+ end_time = timeout + time.time()
+
+ fs = set(fs)
+ with _AcquireFutures(fs):
+ finished = set(
+ f for f in fs
+ if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
+ pending = fs - finished
+ waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
+
+ try:
+ for future in finished:
+ yield future
+
+ while pending:
+ if timeout is None:
+ wait_timeout = None
+ else:
+ wait_timeout = end_time - time.time()
+ if wait_timeout < 0:
+ raise TimeoutError(
+ '%d (of %d) futures unfinished' % (
+ len(pending), len(fs)))
+
+ waiter.event.wait(wait_timeout)
+
+ with waiter.lock:
+ finished = waiter.finished_futures
+ waiter.finished_futures = []
+ waiter.event.clear()
+
+ for future in finished:
+ yield future
+ pending.remove(future)
+
+ finally:
+ for f in fs:
+ with f._condition:
+ f._waiters.remove(waiter)
+
+DoneAndNotDoneFutures = collections.namedtuple(
+ 'DoneAndNotDoneFutures', 'done not_done')
+def wait(fs, timeout=None, return_when=ALL_COMPLETED):
+ """Wait for the futures in the given sequence to complete.
+
+ Args:
+ fs: The sequence of Futures (possibly created by different Executors) to
+ wait upon.
+ timeout: The maximum number of seconds to wait. If None, then there
+ is no limit on the wait time.
+ return_when: Indicates when this function should return. The options
+ are:
+
+ FIRST_COMPLETED - Return when any future finishes or is
+ cancelled.
+ FIRST_EXCEPTION - Return when any future finishes by raising an
+ exception. If no future raises an exception
+ then it is equivalent to ALL_COMPLETED.
+ ALL_COMPLETED - Return when all futures finish or are cancelled.
+
+ Returns:
+ A named 2-tuple of sets. The first set, named 'done', contains the
+        futures that completed (finished or were cancelled) before the wait
+ completed. The second set, named 'not_done', contains uncompleted
+ futures.
+ """
+ with _AcquireFutures(fs):
+ done = set(f for f in fs
+ if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
+ not_done = set(fs) - done
+
+ if (return_when == FIRST_COMPLETED) and done:
+ return DoneAndNotDoneFutures(done, not_done)
+ elif (return_when == FIRST_EXCEPTION) and done:
+ if any(f for f in done
+ if not f.cancelled() and f.exception() is not None):
+ return DoneAndNotDoneFutures(done, not_done)
+
+ if len(done) == len(fs):
+ return DoneAndNotDoneFutures(done, not_done)
+
+ waiter = _create_and_install_waiters(fs, return_when)
+
+ waiter.event.wait(timeout)
+ for f in fs:
+ with f._condition:
+ f._waiters.remove(waiter)
+
+ done.update(waiter.finished_futures)
+ return DoneAndNotDoneFutures(done, set(fs) - done)
+
+class Future(object):
+ """Represents the result of an asynchronous computation."""
+
+ def __init__(self):
+ """Initializes the future. Should not be called by clients."""
+ self._condition = threading.Condition()
+ self._state = PENDING
+ self._result = None
+ self._exception = None
+ self._traceback = None
+ self._waiters = []
+ self._done_callbacks = []
+
+ def _invoke_callbacks(self):
+ for callback in self._done_callbacks:
+ try:
+ callback(self)
+ except Exception:
+ LOGGER.exception('exception calling callback for %r', self)
+
+ def __repr__(self):
+ with self._condition:
+ if self._state == FINISHED:
+ if self._exception:
+ return '<Future at %s state=%s raised %s>' % (
+ hex(id(self)),
+ _STATE_TO_DESCRIPTION_MAP[self._state],
+ self._exception.__class__.__name__)
+ else:
+ return '<Future at %s state=%s returned %s>' % (
+ hex(id(self)),
+ _STATE_TO_DESCRIPTION_MAP[self._state],
+ self._result.__class__.__name__)
+ return '<Future at %s state=%s>' % (
+ hex(id(self)),
+ _STATE_TO_DESCRIPTION_MAP[self._state])
+
+ def cancel(self):
+ """Cancel the future if possible.
+
+ Returns True if the future was cancelled, False otherwise. A future
+ cannot be cancelled if it is running or has already completed.
+ """
+ with self._condition:
+ if self._state in [RUNNING, FINISHED]:
+ return False
+
+ if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+ return True
+
+ self._state = CANCELLED
+ self._condition.notify_all()
+
+ self._invoke_callbacks()
+ return True
+
+ def cancelled(self):
+ """Return True if the future has cancelled."""
+ with self._condition:
+ return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
+
+ def running(self):
+ """Return True if the future is currently executing."""
+ with self._condition:
+ return self._state == RUNNING
+
+ def done(self):
+ """Return True of the future was cancelled or finished executing."""
+ with self._condition:
+ return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
+
+ def __get_result(self):
+ if self._exception:
+ raise type(self._exception), self._exception, self._traceback
+ else:
+ return self._result
+
+ def add_done_callback(self, fn):
+ """Attaches a callable that will be called when the future finishes.
+
+ Args:
+ fn: A callable that will be called with this future as its only
+ argument when the future completes or is cancelled. The callable
+ will always be called by a thread in the same process in which
+ it was added. If the future has already completed or been
+ cancelled then the callable will be called immediately. These
+ callables are called in the order that they were added.
+ """
+ with self._condition:
+ if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
+ self._done_callbacks.append(fn)
+ return
+ fn(self)
+
+ def result(self, timeout=None):
+ """Return the result of the call that the future represents.
+
+ Args:
+ timeout: The number of seconds to wait for the result if the future
+ isn't done. If None, then there is no limit on the wait time.
+
+ Returns:
+ The result of the call that the future represents.
+
+ Raises:
+ CancelledError: If the future was cancelled.
+ TimeoutError: If the future didn't finish executing before the given
+ timeout.
+ Exception: If the call raised then that exception will be raised.
+ """
+ with self._condition:
+ if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+ raise CancelledError()
+ elif self._state == FINISHED:
+ return self.__get_result()
+
+ self._condition.wait(timeout)
+
+ if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+ raise CancelledError()
+ elif self._state == FINISHED:
+ return self.__get_result()
+ else:
+ raise TimeoutError()
+
+ def exception_info(self, timeout=None):
+ """Return a tuple of (exception, traceback) raised by the call that the
+ future represents.
+
+ Args:
+ timeout: The number of seconds to wait for the exception if the
+ future isn't done. If None, then there is no limit on the wait
+ time.
+
+ Returns:
+            A tuple of (exception, traceback) raised by the call that the
+            future represents, or (None, None) if the call completed
+            without raising.
+
+ Raises:
+ CancelledError: If the future was cancelled.
+ TimeoutError: If the future didn't finish executing before the given
+ timeout.
+ """
+ with self._condition:
+ if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+ raise CancelledError()
+ elif self._state == FINISHED:
+ return self._exception, self._traceback
+
+ self._condition.wait(timeout)
+
+ if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+ raise CancelledError()
+ elif self._state == FINISHED:
+ return self._exception, self._traceback
+ else:
+ raise TimeoutError()
+
+ def exception(self, timeout=None):
+ """Return the exception raised by the call that the future represents.
+
+ Args:
+ timeout: The number of seconds to wait for the exception if the
+ future isn't done. If None, then there is no limit on the wait
+ time.
+
+ Returns:
+ The exception raised by the call that the future represents or None
+ if the call completed without raising.
+
+ Raises:
+ CancelledError: If the future was cancelled.
+ TimeoutError: If the future didn't finish executing before the given
+ timeout.
+ """
+ return self.exception_info(timeout)[0]
+
+ # The following methods should only be used by Executors and in tests.
+ def set_running_or_notify_cancel(self):
+ """Mark the future as running or process any cancel notifications.
+
+ Should only be used by Executor implementations and unit tests.
+
+ If the future has been cancelled (cancel() was called and returned
+        True) then any threads waiting on the future completing (through calls
+ to as_completed() or wait()) are notified and False is returned.
+
+ If the future was not cancelled then it is put in the running state
+ (future calls to running() will return True) and True is returned.
+
+ This method should be called by Executor implementations before
+ executing the work associated with this future. If this method returns
+ False then the work should not be executed.
+
+ Returns:
+ False if the Future was cancelled, True otherwise.
+
+ Raises:
+ RuntimeError: if this method was already called or if set_result()
+ or set_exception() was called.
+ """
+ with self._condition:
+ if self._state == CANCELLED:
+ self._state = CANCELLED_AND_NOTIFIED
+ for waiter in self._waiters:
+ waiter.add_cancelled(self)
+ # self._condition.notify_all() is not necessary because
+ # self.cancel() triggers a notification.
+ return False
+ elif self._state == PENDING:
+ self._state = RUNNING
+ return True
+ else:
+ LOGGER.critical('Future %s in unexpected state: %s',
+ id(self),
+ self._state)
+ raise RuntimeError('Future in unexpected state')
+
+ def set_result(self, result):
+ """Sets the return value of work associated with the future.
+
+ Should only be used by Executor implementations and unit tests.
+ """
+ with self._condition:
+ self._result = result
+ self._state = FINISHED
+ for waiter in self._waiters:
+ waiter.add_result(self)
+ self._condition.notify_all()
+ self._invoke_callbacks()
+
+ def set_exception_info(self, exception, traceback):
+ """Sets the result of the future as being the given exception
+ and traceback.
+
+ Should only be used by Executor implementations and unit tests.
+ """
+ with self._condition:
+ self._exception = exception
+ self._traceback = traceback
+ self._state = FINISHED
+ for waiter in self._waiters:
+ waiter.add_exception(self)
+ self._condition.notify_all()
+ self._invoke_callbacks()
+
+ def set_exception(self, exception):
+ """Sets the result of the future as being the given exception.
+
+ Should only be used by Executor implementations and unit tests.
+ """
+ self.set_exception_info(exception, None)
+
+class Executor(object):
+ """This is an abstract base class for concrete asynchronous executors."""
+
+ def submit(self, fn, *args, **kwargs):
+ """Submits a callable to be executed with the given arguments.
+
+ Schedules the callable to be executed as fn(*args, **kwargs) and returns
+ a Future instance representing the execution of the callable.
+
+ Returns:
+ A Future representing the given call.
+ """
+ raise NotImplementedError()
+
+ def map(self, fn, *iterables, **kwargs):
+ """Returns a iterator equivalent to map(fn, iter).
+
+ Args:
+ fn: A callable that will take as many arguments as there are
+ passed iterables.
+ timeout: The maximum number of seconds to wait. If None, then there
+ is no limit on the wait time.
+
+ Returns:
+ An iterator equivalent to: map(func, *iterables) but the calls may
+ be evaluated out-of-order.
+
+ Raises:
+ TimeoutError: If the entire result iterator could not be generated
+ before the given timeout.
+ Exception: If fn(*args) raises for any values.
+ """
+ timeout = kwargs.get('timeout')
+ if timeout is not None:
+ end_time = timeout + time.time()
+
+ fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)]
+
+ # Yield must be hidden in closure so that the futures are submitted
+ # before the first iterator value is required.
+ def result_iterator():
+ try:
+ for future in fs:
+ if timeout is None:
+ yield future.result()
+ else:
+ yield future.result(end_time - time.time())
+ finally:
+ for future in fs:
+ future.cancel()
+ return result_iterator()
+
+ def shutdown(self, wait=True):
+ """Clean-up the resources associated with the Executor.
+
+ It is safe to call this method several times. Otherwise, no other
+ methods can be called after this one.
+
+ Args:
+ wait: If True then shutdown will not return until all running
+ futures have finished executing and the resources used by the
+ executor have been reclaimed.
+ """
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.shutdown(wait=True)
+ return False
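
The waiter machinery above is easiest to see from the caller's side. A
minimal sketch exercising wait() and as_completed() as defined in this
module; the slow() and broken() workers are illustrative only:

    import time
    from concurrent.futures import (ThreadPoolExecutor, wait, as_completed,
                                    FIRST_EXCEPTION)

    def slow(n):  # hypothetical workload
        time.sleep(n)
        return n

    def broken():  # hypothetical failing workload
        raise RuntimeError('failed')

    executor = ThreadPoolExecutor(max_workers=2)
    futures = [executor.submit(slow, 0.1), executor.submit(broken)]

    # wait() returns the DoneAndNotDoneFutures named 2-tuple; with
    # FIRST_EXCEPTION it returns as soon as any future raises.
    done, not_done = wait(futures, return_when=FIRST_EXCEPTION)

    # as_completed() yields each future as it finishes or is cancelled.
    for future in as_completed(futures):
        if future.exception() is not None:
            print('raised: %r' % future.exception())
        else:
            print('returned: %r' % future.result())
    executor.shutdown()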
diff --git a/third_party/python/futures/concurrent/futures/process.py b/third_party/python/futures/concurrent/futures/process.py
new file mode 100644
index 0000000000..72528410c1
--- /dev/null
+++ b/third_party/python/futures/concurrent/futures/process.py
@@ -0,0 +1,359 @@
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Implements ProcessPoolExecutor.
+
+The following diagram and text describe the data-flow through the system:
+
+|======================= In-process =====================|== Out-of-process ==|
+
++----------+     +----------+       +--------+     +-----------+    +---------+
+|          |  => | Work Ids |    => |        |  => | Call Q    | => |         |
+|          |     +----------+       |        |     +-----------+    |         |
+|          |     | ...      |       |        |     | ...       |    |         |
+|          |     | 6        |       |        |     | 5, call() |    |         |
+|          |     | 7        |       |        |     | ...       |    |         |
+| Process  |     | ...      |       | Local  |     +-----------+    | Process |
+|  Pool    |     +----------+       | Worker |                      |  #1..n  |
+| Executor |                        | Thread |                      |         |
+|          |     +----------- +     |        |     +-----------+    |         |
+|          | <=> | Work Items | <=> |        | <=  | Result Q  | <= |         |
+|          |     +------------+     |        |     +-----------+    |         |
+|          |     | 6: call()  |     |        |     | ...       |    |         |
+|          |     |    future  |     |        |     | 4, result |    |         |
+|          |     | ...        |     |        |     | 3, except |    |         |
++----------+     +------------+     +--------+     +-----------+    +---------+
+
+Executor.submit() called:
+- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
+- adds the id of the _WorkItem to the "Work Ids" queue
+
+Local worker thread:
+- reads work ids from the "Work Ids" queue and looks up the corresponding
+ WorkItem from the "Work Items" dict: if the work item has been cancelled then
+ it is simply removed from the dict, otherwise it is repackaged as a
+ _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
+ until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
+ calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
+- reads _ResultItems from "Result Q", updates the future stored in the
+ "Work Items" dict and deletes the dict entry
+
+Process #1..n:
+- reads _CallItems from "Call Q", executes the calls, and puts the resulting
+  _ResultItems in "Result Q"
+"""
+
+import atexit
+from concurrent.futures import _base
+import Queue as queue
+import multiprocessing
+import threading
+import weakref
+import sys
+
+__author__ = 'Brian Quinlan (brian@sweetapp.com)'
+
+# Workers are created as daemon threads and processes. This is done to allow the
+# interpreter to exit when there are still idle processes in a
+# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
+# allowing workers to die with the interpreter has two undesirable properties:
+#   - The workers would still be running during interpreter shutdown,
+# meaning that they would fail in unpredictable ways.
+# - The workers could be killed while evaluating a work item, which could
+# be bad if the callable being evaluated has external side-effects e.g.
+# writing to a file.
+#
+# To work around this problem, an exit handler is installed which tells the
+# workers to exit when their work queues are empty and then waits until the
+# threads/processes finish.
+
+_threads_queues = weakref.WeakKeyDictionary()
+_shutdown = False
+
+def _python_exit():
+ global _shutdown
+ _shutdown = True
+ items = list(_threads_queues.items()) if _threads_queues else ()
+ for t, q in items:
+ q.put(None)
+ for t, q in items:
+ t.join(sys.maxint)
+
+# Controls how many more calls than processes will be queued in the call queue.
+# A smaller number will mean that processes spend more time idle waiting for
+# work while a larger number will make Future.cancel() succeed less frequently
+# (Futures in the call queue cannot be cancelled).
+EXTRA_QUEUED_CALLS = 1
+
+class _WorkItem(object):
+ def __init__(self, future, fn, args, kwargs):
+ self.future = future
+ self.fn = fn
+ self.args = args
+ self.kwargs = kwargs
+
+class _ResultItem(object):
+ def __init__(self, work_id, exception=None, result=None):
+ self.work_id = work_id
+ self.exception = exception
+ self.result = result
+
+class _CallItem(object):
+ def __init__(self, work_id, fn, args, kwargs):
+ self.work_id = work_id
+ self.fn = fn
+ self.args = args
+ self.kwargs = kwargs
+
+def _process_worker(call_queue, result_queue):
+ """Evaluates calls from call_queue and places the results in result_queue.
+
+ This worker is run in a separate process.
+
+ Args:
+ call_queue: A multiprocessing.Queue of _CallItems that will be read and
+ evaluated by the worker.
+        result_queue: A multiprocessing.Queue of _ResultItems that will be
+            written to by the worker.
+ """
+ while True:
+ call_item = call_queue.get(block=True)
+ if call_item is None:
+ # Wake up queue management thread
+ result_queue.put(None)
+ return
+ try:
+ r = call_item.fn(*call_item.args, **call_item.kwargs)
+ except BaseException:
+ e = sys.exc_info()[1]
+ result_queue.put(_ResultItem(call_item.work_id,
+ exception=e))
+ else:
+ result_queue.put(_ResultItem(call_item.work_id,
+ result=r))
+
+def _add_call_item_to_queue(pending_work_items,
+ work_ids,
+ call_queue):
+ """Fills call_queue with _WorkItems from pending_work_items.
+
+ This function never blocks.
+
+ Args:
+ pending_work_items: A dict mapping work ids to _WorkItems e.g.
+ {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
+ work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
+ are consumed and the corresponding _WorkItems from
+ pending_work_items are transformed into _CallItems and put in
+ call_queue.
+ call_queue: A multiprocessing.Queue that will be filled with _CallItems
+ derived from _WorkItems.
+ """
+ while True:
+ if call_queue.full():
+ return
+ try:
+ work_id = work_ids.get(block=False)
+ except queue.Empty:
+ return
+ else:
+ work_item = pending_work_items[work_id]
+
+ if work_item.future.set_running_or_notify_cancel():
+ call_queue.put(_CallItem(work_id,
+ work_item.fn,
+ work_item.args,
+ work_item.kwargs),
+ block=True)
+ else:
+ del pending_work_items[work_id]
+ continue
+
+def _queue_management_worker(executor_reference,
+ processes,
+ pending_work_items,
+ work_ids_queue,
+ call_queue,
+ result_queue):
+ """Manages the communication between this process and the worker processes.
+
+ This function is run in a local thread.
+
+ Args:
+ executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
+ this thread. Used to determine if the ProcessPoolExecutor has been
+ garbage collected and that this function can exit.
+        processes: A list of the multiprocessing.Process instances used as
+ workers.
+ pending_work_items: A dict mapping work ids to _WorkItems e.g.
+ {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
+ work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
+ call_queue: A multiprocessing.Queue that will be filled with _CallItems
+ derived from _WorkItems for processing by the process workers.
+ result_queue: A multiprocessing.Queue of _ResultItems generated by the
+ process workers.
+ """
+ nb_shutdown_processes = [0]
+ def shutdown_one_process():
+ """Tell a worker to terminate, which will in turn wake us again"""
+ call_queue.put(None)
+ nb_shutdown_processes[0] += 1
+ while True:
+ _add_call_item_to_queue(pending_work_items,
+ work_ids_queue,
+ call_queue)
+
+ result_item = result_queue.get(block=True)
+ if result_item is not None:
+ work_item = pending_work_items[result_item.work_id]
+ del pending_work_items[result_item.work_id]
+
+ if result_item.exception:
+ work_item.future.set_exception(result_item.exception)
+ else:
+ work_item.future.set_result(result_item.result)
+ # Delete references to object. See issue16284
+ del work_item
+ # Check whether we should start shutting down.
+ executor = executor_reference()
+ # No more work items can be added if:
+ # - The interpreter is shutting down OR
+ # - The executor that owns this worker has been collected OR
+ # - The executor that owns this worker has been shutdown.
+ if _shutdown or executor is None or executor._shutdown_thread:
+ # Since no new work items can be added, it is safe to shutdown
+ # this thread if there are no pending work items.
+ if not pending_work_items:
+ while nb_shutdown_processes[0] < len(processes):
+ shutdown_one_process()
+ # If .join() is not called on the created processes then
+ # some multiprocessing.Queue methods may deadlock on Mac OS
+ # X.
+ for p in processes:
+ p.join()
+ call_queue.close()
+ return
+ del executor
+
+_system_limits_checked = False
+_system_limited = None
+def _check_system_limits():
+ global _system_limits_checked, _system_limited
+ if _system_limits_checked:
+ if _system_limited:
+ raise NotImplementedError(_system_limited)
+ _system_limits_checked = True
+ try:
+ import os
+ nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
+ except (AttributeError, ValueError):
+ # sysconf not available or setting not available
+ return
+ if nsems_max == -1:
+        # indeterminate limit; assume the limit is determined
+ # by available memory only
+ return
+ if nsems_max >= 256:
+ # minimum number of semaphores available
+ # according to POSIX
+ return
+ _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
+ raise NotImplementedError(_system_limited)
+
+class ProcessPoolExecutor(_base.Executor):
+ def __init__(self, max_workers=None):
+ """Initializes a new ProcessPoolExecutor instance.
+
+ Args:
+ max_workers: The maximum number of processes that can be used to
+ execute the given calls. If None or not given then as many
+ worker processes will be created as the machine has processors.
+ """
+ _check_system_limits()
+
+ if max_workers is None:
+ self._max_workers = multiprocessing.cpu_count()
+ else:
+ self._max_workers = max_workers
+
+ # Make the call queue slightly larger than the number of processes to
+ # prevent the worker processes from idling. But don't make it too big
+ # because futures in the call queue cannot be cancelled.
+ self._call_queue = multiprocessing.Queue(self._max_workers +
+ EXTRA_QUEUED_CALLS)
+ self._result_queue = multiprocessing.Queue()
+ self._work_ids = queue.Queue()
+ self._queue_management_thread = None
+ self._processes = set()
+
+ # Shutdown is a two-step process.
+ self._shutdown_thread = False
+ self._shutdown_lock = threading.Lock()
+ self._queue_count = 0
+ self._pending_work_items = {}
+
+ def _start_queue_management_thread(self):
+ # When the executor gets lost, the weakref callback will wake up
+ # the queue management thread.
+ def weakref_cb(_, q=self._result_queue):
+ q.put(None)
+ if self._queue_management_thread is None:
+ self._queue_management_thread = threading.Thread(
+ target=_queue_management_worker,
+ args=(weakref.ref(self, weakref_cb),
+ self._processes,
+ self._pending_work_items,
+ self._work_ids,
+ self._call_queue,
+ self._result_queue))
+ self._queue_management_thread.daemon = True
+ self._queue_management_thread.start()
+ _threads_queues[self._queue_management_thread] = self._result_queue
+
+ def _adjust_process_count(self):
+ for _ in range(len(self._processes), self._max_workers):
+ p = multiprocessing.Process(
+ target=_process_worker,
+ args=(self._call_queue,
+ self._result_queue))
+ p.start()
+ self._processes.add(p)
+
+ def submit(self, fn, *args, **kwargs):
+ with self._shutdown_lock:
+ if self._shutdown_thread:
+ raise RuntimeError('cannot schedule new futures after shutdown')
+
+ f = _base.Future()
+ w = _WorkItem(f, fn, args, kwargs)
+
+ self._pending_work_items[self._queue_count] = w
+ self._work_ids.put(self._queue_count)
+ self._queue_count += 1
+ # Wake up queue management thread
+ self._result_queue.put(None)
+
+ self._start_queue_management_thread()
+ self._adjust_process_count()
+ return f
+ submit.__doc__ = _base.Executor.submit.__doc__
+
+ def shutdown(self, wait=True):
+ with self._shutdown_lock:
+ self._shutdown_thread = True
+ if self._queue_management_thread:
+ # Wake up queue management thread
+ self._result_queue.put(None)
+ if wait:
+ self._queue_management_thread.join(sys.maxint)
+        # To reduce the risk of opening too many files, remove references to
+ # objects that use file descriptors.
+ self._queue_management_thread = None
+ self._call_queue = None
+ self._result_queue = None
+ self._processes = None
+ shutdown.__doc__ = _base.Executor.shutdown.__doc__
+
+atexit.register(_python_exit)
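
A minimal sketch of driving ProcessPoolExecutor as implemented above; note
that submitted callables and their arguments must be picklable, and the
entry point should be guarded with __main__ so worker processes can
re-import the module safely (the cube() workload is illustrative):

    from concurrent.futures import ProcessPoolExecutor

    def cube(x):  # must be a module-level, picklable function
        return x ** 3

    if __name__ == '__main__':
        with ProcessPoolExecutor(max_workers=2) as executor:
            # map() submits every call up front and yields results in
            # submission order, even though they complete out of order.
            for value in executor.map(cube, range(5)):
                print(value)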
diff --git a/third_party/python/futures/concurrent/futures/thread.py b/third_party/python/futures/concurrent/futures/thread.py
new file mode 100644
index 0000000000..85ab4b7432
--- /dev/null
+++ b/third_party/python/futures/concurrent/futures/thread.py
@@ -0,0 +1,134 @@
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Implements ThreadPoolExecutor."""
+
+import atexit
+from concurrent.futures import _base
+import Queue as queue
+import threading
+import weakref
+import sys
+
+__author__ = 'Brian Quinlan (brian@sweetapp.com)'
+
+# Workers are created as daemon threads. This is done to allow the interpreter
+# to exit when there are still idle threads in a ThreadPoolExecutor's thread
+# pool (i.e. shutdown() was not called). However, allowing workers to die with
+# the interpreter has two undesirable properties:
+#   - The workers would still be running during interpreter shutdown,
+# meaning that they would fail in unpredictable ways.
+# - The workers could be killed while evaluating a work item, which could
+# be bad if the callable being evaluated has external side-effects e.g.
+# writing to a file.
+#
+# To work around this problem, an exit handler is installed which tells the
+# workers to exit when their work queues are empty and then waits until the
+# threads finish.
+
+_threads_queues = weakref.WeakKeyDictionary()
+_shutdown = False
+
+def _python_exit():
+ global _shutdown
+ _shutdown = True
+ items = list(_threads_queues.items()) if _threads_queues else ()
+ for t, q in items:
+ q.put(None)
+ for t, q in items:
+ t.join(sys.maxint)
+
+atexit.register(_python_exit)
+
+class _WorkItem(object):
+ def __init__(self, future, fn, args, kwargs):
+ self.future = future
+ self.fn = fn
+ self.args = args
+ self.kwargs = kwargs
+
+ def run(self):
+ if not self.future.set_running_or_notify_cancel():
+ return
+
+ try:
+ result = self.fn(*self.args, **self.kwargs)
+ except BaseException:
+ e, tb = sys.exc_info()[1:]
+ self.future.set_exception_info(e, tb)
+ else:
+ self.future.set_result(result)
+
+def _worker(executor_reference, work_queue):
+ try:
+ while True:
+ work_item = work_queue.get(block=True)
+ if work_item is not None:
+ work_item.run()
+ # Delete references to object. See issue16284
+ del work_item
+ continue
+ executor = executor_reference()
+ # Exit if:
+ # - The interpreter is shutting down OR
+ # - The executor that owns the worker has been collected OR
+ # - The executor that owns the worker has been shutdown.
+ if _shutdown or executor is None or executor._shutdown:
+                # Notify other workers
+ work_queue.put(None)
+ return
+ del executor
+ except BaseException:
+ _base.LOGGER.critical('Exception in worker', exc_info=True)
+
+class ThreadPoolExecutor(_base.Executor):
+ def __init__(self, max_workers):
+ """Initializes a new ThreadPoolExecutor instance.
+
+ Args:
+ max_workers: The maximum number of threads that can be used to
+ execute the given calls.
+ """
+ self._max_workers = max_workers
+ self._work_queue = queue.Queue()
+ self._threads = set()
+ self._shutdown = False
+ self._shutdown_lock = threading.Lock()
+
+ def submit(self, fn, *args, **kwargs):
+ with self._shutdown_lock:
+ if self._shutdown:
+ raise RuntimeError('cannot schedule new futures after shutdown')
+
+ f = _base.Future()
+ w = _WorkItem(f, fn, args, kwargs)
+
+ self._work_queue.put(w)
+ self._adjust_thread_count()
+ return f
+ submit.__doc__ = _base.Executor.submit.__doc__
+
+ def _adjust_thread_count(self):
+ # When the executor gets lost, the weakref callback will wake up
+ # the worker threads.
+ def weakref_cb(_, q=self._work_queue):
+ q.put(None)
+ # TODO(bquinlan): Should avoid creating new threads if there are more
+ # idle threads than items in the work queue.
+ if len(self._threads) < self._max_workers:
+ t = threading.Thread(target=_worker,
+ args=(weakref.ref(self, weakref_cb),
+ self._work_queue))
+ t.daemon = True
+ t.start()
+ self._threads.add(t)
+ _threads_queues[t] = self._work_queue
+
+ def shutdown(self, wait=True):
+ with self._shutdown_lock:
+ self._shutdown = True
+ self._work_queue.put(None)
+ if wait:
+ for t in self._threads:
+ t.join(sys.maxint)
+ shutdown.__doc__ = _base.Executor.shutdown.__doc__
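
A minimal sketch of the thread pool above, including a done-callback, which
is invoked after the worker thread sets the result; fetch_length() and
report() are illustrative:

    from concurrent.futures import ThreadPoolExecutor

    def fetch_length(text):  # stand-in for real I/O-bound work
        return len(text)

    def report(future):  # runs in whichever thread completes the call
        print('done: %r' % future.result())

    executor = ThreadPoolExecutor(max_workers=2)
    future = executor.submit(fetch_length, 'hello')
    future.add_done_callback(report)
    executor.shutdown(wait=True)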
diff --git a/third_party/python/futures/crawl.py b/third_party/python/futures/crawl.py
new file mode 100644
index 0000000000..86e0af7fe6
--- /dev/null
+++ b/third_party/python/futures/crawl.py
@@ -0,0 +1,74 @@
+"""Compare the speed of downloading URLs sequentially vs. using futures."""
+
+import functools
+import time
+import timeit
+import sys
+
+try:
+ from urllib2 import urlopen
+except ImportError:
+ from urllib.request import urlopen
+
+from concurrent.futures import (as_completed, ThreadPoolExecutor,
+ ProcessPoolExecutor)
+
+URLS = ['http://www.google.com/',
+ 'http://www.apple.com/',
+ 'http://www.ibm.com',
+ 'http://www.thisurlprobablydoesnotexist.com',
+ 'http://www.slashdot.org/',
+ 'http://www.python.org/',
+ 'http://www.bing.com/',
+ 'http://www.facebook.com/',
+ 'http://www.yahoo.com/',
+ 'http://www.youtube.com/',
+ 'http://www.blogger.com/']
+
+def load_url(url, timeout):
+ kwargs = {'timeout': timeout} if sys.version_info >= (2, 6) else {}
+ return urlopen(url, **kwargs).read()
+
+def download_urls_sequential(urls, timeout=60):
+ url_to_content = {}
+ for url in urls:
+ try:
+ url_to_content[url] = load_url(url, timeout=timeout)
+ except:
+ pass
+ return url_to_content
+
+def download_urls_with_executor(urls, executor, timeout=60):
+ try:
+ url_to_content = {}
+ future_to_url = dict((executor.submit(load_url, url, timeout), url)
+ for url in urls)
+
+ for future in as_completed(future_to_url):
+ try:
+ url_to_content[future_to_url[future]] = future.result()
+ except:
+ pass
+ return url_to_content
+ finally:
+ executor.shutdown()
+
+def main():
+ for name, fn in [('sequential',
+ functools.partial(download_urls_sequential, URLS)),
+ ('processes',
+ functools.partial(download_urls_with_executor,
+ URLS,
+ ProcessPoolExecutor(10))),
+ ('threads',
+ functools.partial(download_urls_with_executor,
+ URLS,
+ ThreadPoolExecutor(10)))]:
+ sys.stdout.write('%s: ' % name.ljust(12))
+ start = time.time()
+ url_map = fn()
+ sys.stdout.write('%.2f seconds (%d of %d downloaded)\n' %
+ (time.time() - start, len(url_map), len(URLS)))
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/python/futures/docs/Makefile b/third_party/python/futures/docs/Makefile
new file mode 100644
index 0000000000..f69d840353
--- /dev/null
+++ b/third_party/python/futures/docs/Makefile
@@ -0,0 +1,88 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf _build/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html
+ @echo
+ @echo "Build finished. The HTML pages are in _build/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in _build/dirhtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in _build/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in _build/qthelp, like this:"
+ @echo "# qcollectiongenerator _build/qthelp/futures.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile _build/qthelp/futures.qhc"
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in _build/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes
+ @echo
+ @echo "The overview file is in _build/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in _build/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in _build/doctest/output.txt."
diff --git a/third_party/python/futures/docs/conf.py b/third_party/python/futures/docs/conf.py
new file mode 100644
index 0000000000..5cea44c881
--- /dev/null
+++ b/third_party/python/futures/docs/conf.py
@@ -0,0 +1,194 @@
+# -*- coding: utf-8 -*-
+#
+# futures documentation build configuration file, created by
+# sphinx-quickstart on Wed Jun 3 19:35:34 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'futures'
+copyright = u'2009-2011, Brian Quinlan'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '2.1.6'
+# The full version, including alpha/beta/rc tags.
+release = '2.1.6'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'futuresdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'futures.tex', u'futures Documentation',
+ u'Brian Quinlan', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
diff --git a/third_party/python/futures/docs/index.rst b/third_party/python/futures/docs/index.rst
new file mode 100644
index 0000000000..f53bc04cf5
--- /dev/null
+++ b/third_party/python/futures/docs/index.rst
@@ -0,0 +1,347 @@
+:mod:`concurrent.futures` --- Asynchronous computation
+======================================================
+
+.. module:: concurrent.futures
+ :synopsis: Execute computations asynchronously using threads or processes.
+
+The :mod:`concurrent.futures` module provides a high-level interface for
+asynchronously executing callables.
+
+The asynchronous execution can be performed by threads using
+:class:`ThreadPoolExecutor` or separate processes using
+:class:`ProcessPoolExecutor`. Both implement the same interface, which is
+defined by the abstract :class:`Executor` class.
+
+Executor Objects
+----------------
+
+:class:`Executor` is an abstract class that provides methods to execute calls
+asynchronously. It should not be used directly, but through its two
+subclasses: :class:`ThreadPoolExecutor` and :class:`ProcessPoolExecutor`.
+
+.. method:: Executor.submit(fn, *args, **kwargs)
+
+ Schedules the callable to be executed as *fn*(*\*args*, *\*\*kwargs*) and
+ returns a :class:`Future` representing the execution of the callable.
+
+::
+
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(pow, 323, 1235)
+ print(future.result())
+
+.. method:: Executor.map(func, *iterables, timeout=None)
+
+ Equivalent to map(*func*, *\*iterables*) but func is executed asynchronously
+ and several calls to *func* may be made concurrently. The returned iterator
+ raises a :exc:`TimeoutError` if :meth:`__next__()` is called and the result
+ isn't available after *timeout* seconds from the original call to
+ :meth:`map()`. *timeout* can be an int or float. If *timeout* is not
+ specified or ``None`` then there is no limit to the wait time. If a call
+ raises an exception then that exception will be raised when its value is
+ retrieved from the iterator.
+
+.. method:: Executor.shutdown(wait=True)
+
+ Signal the executor that it should free any resources that it is using when
+ the currently pending futures are done executing. Calls to
+ :meth:`Executor.submit` and :meth:`Executor.map` made after shutdown will
+ raise :exc:`RuntimeError`.
+
+ If *wait* is `True` then this method will not return until all the pending
+ futures are done executing and the resources associated with the executor
+ have been freed. If *wait* is `False` then this method will return
+ immediately and the resources associated with the executor will be freed
+ when all pending futures are done executing. Regardless of the value of
+ *wait*, the entire Python program will not exit until all pending futures
+ are done executing.
+
+ You can avoid having to call this method explicitly if you use the `with`
+   statement, which will shut down the `Executor` (waiting as if
+ `Executor.shutdown` were called with *wait* set to `True`):
+
+::
+
+ import shutil
+ with ThreadPoolExecutor(max_workers=4) as e:
+ e.submit(shutil.copy, 'src1.txt', 'dest1.txt')
+ e.submit(shutil.copy, 'src2.txt', 'dest2.txt')
+ e.submit(shutil.copy, 'src3.txt', 'dest3.txt')
+       e.submit(shutil.copy, 'src4.txt', 'dest4.txt')
+
+
+ThreadPoolExecutor Objects
+--------------------------
+
+The :class:`ThreadPoolExecutor` class is an :class:`Executor` subclass that uses
+a pool of threads to execute calls asynchronously.
+
+Deadlock can occur when the callable associated with a :class:`Future` waits on
+the results of another :class:`Future`. For example:
+
+::
+
+ import time
+ def wait_on_b():
+ time.sleep(5)
+ print(b.result()) # b will never complete because it is waiting on a.
+ return 5
+
+ def wait_on_a():
+ time.sleep(5)
+ print(a.result()) # a will never complete because it is waiting on b.
+ return 6
+
+
+ executor = ThreadPoolExecutor(max_workers=2)
+ a = executor.submit(wait_on_b)
+ b = executor.submit(wait_on_a)
+
+And:
+
+::
+
+ def wait_on_future():
+ f = executor.submit(pow, 5, 2)
+ # This will never complete because there is only one worker thread and
+ # it is executing this function.
+ print(f.result())
+
+ executor = ThreadPoolExecutor(max_workers=1)
+ executor.submit(wait_on_future)
+
+.. class:: ThreadPoolExecutor(max_workers)
+
+ Executes calls asynchronously using a pool of at most *max_workers* threads.
+
+.. _threadpoolexecutor-example:
+
+ThreadPoolExecutor Example
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+::
+
+ from concurrent import futures
+ import urllib.request
+
+ URLS = ['http://www.foxnews.com/',
+ 'http://www.cnn.com/',
+ 'http://europe.wsj.com/',
+ 'http://www.bbc.co.uk/',
+ 'http://some-made-up-domain.com/']
+
+ def load_url(url, timeout):
+ return urllib.request.urlopen(url, timeout=timeout).read()
+
+ with futures.ThreadPoolExecutor(max_workers=5) as executor:
+ future_to_url = dict((executor.submit(load_url, url, 60), url)
+ for url in URLS)
+
+ for future in futures.as_completed(future_to_url):
+ url = future_to_url[future]
+ if future.exception() is not None:
+ print('%r generated an exception: %s' % (url,
+ future.exception()))
+ else:
+ print('%r page is %d bytes' % (url, len(future.result())))
+
+ProcessPoolExecutor Objects
+---------------------------
+
+The :class:`ProcessPoolExecutor` class is an :class:`Executor` subclass that
+uses a pool of processes to execute calls asynchronously.
+:class:`ProcessPoolExecutor` uses the :mod:`multiprocessing` module, which
+allows it to side-step the :term:`Global Interpreter Lock` but also means that
+only picklable objects can be executed and returned.
+
+Calling :class:`Executor` or :class:`Future` methods from a callable submitted
+to a :class:`ProcessPoolExecutor` will result in deadlock.
+
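+As a sketch of the picklability requirement (``cube`` is an illustrative
+name, not part of the API): the callable and its arguments are pickled and
+sent to the worker processes, so module-level functions work while lambdas
+and nested functions do not:
+
+::
+
+    # cube is picklable because it is defined at module level;
+    # a lambda or a nested function could not be sent to a worker.
+    def cube(x):
+        return x * x * x
+
+    if __name__ == '__main__':
+        with ProcessPoolExecutor(max_workers=2) as executor:
+            print(list(executor.map(cube, range(5))))  # [0, 1, 8, 27, 64]
+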
+.. class:: ProcessPoolExecutor(max_workers=None)
+
+ Executes calls asynchronously using a pool of at most *max_workers*
+ processes. If *max_workers* is ``None`` or not given then as many worker
+ processes will be created as the machine has processors.
+
+.. _processpoolexecutor-example:
+
+ProcessPoolExecutor Example
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+::
+
+ import math
+
+ PRIMES = [
+ 112272535095293,
+ 112582705942171,
+ 112272535095293,
+ 115280095190773,
+ 115797848077099,
+ 1099726899285419]
+
+ def is_prime(n):
+ if n % 2 == 0:
+ return False
+
+ sqrt_n = int(math.floor(math.sqrt(n)))
+ for i in range(3, sqrt_n + 1, 2):
+ if n % i == 0:
+ return False
+ return True
+
+ def main():
+ with futures.ProcessPoolExecutor() as executor:
+ for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
+ print('%d is prime: %s' % (number, prime))
+
+ if __name__ == '__main__':
+ main()
+
+Future Objects
+--------------
+
+The :class:`Future` class encapsulates the asynchronous execution of a callable.
+:class:`Future` instances are created by :meth:`Executor.submit`.
+
+.. method:: Future.cancel()
+
+ Attempt to cancel the call. If the call is currently being executed then
+ it cannot be cancelled and the method will return `False`, otherwise the call
+ will be cancelled and the method will return `True`.
+
+.. method:: Future.cancelled()
+
+ Return `True` if the call was successfully cancelled.
+
+.. method:: Future.running()
+
+ Return `True` if the call is currently being executed and cannot be
+ cancelled.
+
+.. method:: Future.done()
+
+ Return `True` if the call was successfully cancelled or finished running.
+
+.. method:: Future.result(timeout=None)
+
+ Return the value returned by the call. If the call hasn't yet completed then
+ this method will wait up to *timeout* seconds. If the call hasn't completed
+ in *timeout* seconds then a :exc:`TimeoutError` will be raised. *timeout* can
+   be an int or float. If *timeout* is not specified or ``None`` then there is no
+ limit to the wait time.
+
+ If the future is cancelled before completing then :exc:`CancelledError` will
+ be raised.
+
+ If the call raised then this method will raise the same exception.
+
+.. method:: Future.exception(timeout=None)
+
+ Return the exception raised by the call. If the call hasn't yet completed
+ then this method will wait up to *timeout* seconds. If the call hasn't
+ completed in *timeout* seconds then a :exc:`TimeoutError` will be raised.
+ *timeout* can be an int or float. If *timeout* is not specified or ``None``
+ then there is no limit to the wait time.
+
+ If the future is cancelled before completing then :exc:`CancelledError` will
+ be raised.
+
+ If the call completed without raising then ``None`` is returned.
+
+.. method:: Future.add_done_callback(fn)
+
+ Attaches the callable *fn* to the future. *fn* will be called, with the
+ future as its only argument, when the future is cancelled or finishes
+ running.
+
+ Added callables are called in the order that they were added and are always
+ called in a thread belonging to the process that added them. If the callable
+ raises an :exc:`Exception` then it will be logged and ignored. If the
+ callable raises another :exc:`BaseException` then the behavior is not
+ defined.
+
+ If the future has already completed or been cancelled then *fn* will be
+ called immediately.
+
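+For example, a small sketch (``report`` is an illustrative name, not part
+of the API):
+
+::
+
+    def report(future):
+        # Called with the future as its only argument once it is done.
+        print('done: %r' % future.result())
+
+    with ThreadPoolExecutor(max_workers=1) as executor:
+        executor.submit(pow, 2, 10).add_done_callback(report)
+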
+Internal Future Methods
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The following :class:`Future` methods are meant for use in unit tests and
+:class:`Executor` implementations.
+
+.. method:: Future.set_running_or_notify_cancel()
+
+ This method should only be called by :class:`Executor` implementations before
+ executing the work associated with the :class:`Future` and by unit tests.
+
+   If the method returns `False` then the :class:`Future` was cancelled, i.e.
+ :meth:`Future.cancel` was called and returned `True`. Any threads waiting
+ on the :class:`Future` completing (i.e. through :func:`as_completed` or
+ :func:`wait`) will be woken up.
+
+ If the method returns `True` then the :class:`Future` was not cancelled
+   and has been put in the running state, i.e. calls to
+ :meth:`Future.running` will return `True`.
+
+ This method can only be called once and cannot be called after
+ :meth:`Future.set_result` or :meth:`Future.set_exception` have been
+ called.
+
+.. method:: Future.set_result(result)
+
+ Sets the result of the work associated with the :class:`Future` to *result*.
+
+ This method should only be used by Executor implementations and unit tests.
+
+.. method:: Future.set_exception(exception)
+
+ Sets the result of the work associated with the :class:`Future` to the
+ :class:`Exception` *exception*.
+
+ This method should only be used by Executor implementations and unit tests.
+
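+As a sketch of how an :class:`Executor` implementation might drive these
+methods (simplified; ``run_work_item`` is an illustrative helper, and real
+executors also handle locking and cancellation bookkeeping):
+
+::
+
+    def run_work_item(future, fn, args, kwargs):
+        if not future.set_running_or_notify_cancel():
+            return  # cancelled before it could run; waiters were woken
+        try:
+            result = fn(*args, **kwargs)
+        except BaseException as e:
+            future.set_exception(e)
+        else:
+            future.set_result(result)
+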
+Module Functions
+----------------
+
+.. function:: wait(fs, timeout=None, return_when=ALL_COMPLETED)
+
+ Wait for the :class:`Future` instances (possibly created by different
+ :class:`Executor` instances) given by *fs* to complete. Returns a named
+ 2-tuple of sets. The first set, named "done", contains the futures that
+ completed (finished or were cancelled) before the wait completed. The second
+ set, named "not_done", contains uncompleted futures.
+
+ *timeout* can be used to control the maximum number of seconds to wait before
+ returning. *timeout* can be an int or float. If *timeout* is not specified or
+ ``None`` then there is no limit to the wait time.
+
+ *return_when* indicates when this function should return. It must be one of
+ the following constants:
+
+ +-----------------------------+----------------------------------------+
+ | Constant | Description |
+ +=============================+========================================+
+ | :const:`FIRST_COMPLETED` | The function will return when any |
+ | | future finishes or is cancelled. |
+ +-----------------------------+----------------------------------------+
+ | :const:`FIRST_EXCEPTION` | The function will return when any |
+ | | future finishes by raising an |
+ | | exception. If no future raises an |
+ | | exception then it is equivalent to |
+ | | `ALL_COMPLETED`. |
+ +-----------------------------+----------------------------------------+
+ | :const:`ALL_COMPLETED` | The function will return when all |
+ | | futures finish or are cancelled. |
+ +-----------------------------+----------------------------------------+
+
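+For example, a minimal sketch of waiting for whichever future finishes
+first (the sleep durations are illustrative only):
+
+::
+
+    from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED
+    import time
+
+    with ThreadPoolExecutor(max_workers=2) as executor:
+        fast = executor.submit(time.sleep, 0.5)
+        slow = executor.submit(time.sleep, 2)
+        done, not_done = wait([fast, slow], return_when=FIRST_COMPLETED)
+        # 'done' should contain 'fast'; 'slow' usually remains in 'not_done'.
+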
+.. function:: as_completed(fs, timeout=None)
+
+ Returns an iterator over the :class:`Future` instances (possibly created by
+ different :class:`Executor` instances) given by *fs* that yields futures as
+   they complete (finish or are cancelled). Any futures given by *fs* that
+   are duplicated will be yielded only once. Any futures that completed
+ before :func:`as_completed` is called will be yielded first. The returned
+ iterator raises a :exc:`TimeoutError` if :meth:`~iterator.__next__` is
+ called and the result isn't available after *timeout* seconds from the
+ original call to :func:`as_completed`. *timeout* can be an int or float.
+ If *timeout* is not specified or ``None``, there is no limit to the wait
+ time.
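+For example, a sketch that handles results as they arrive and gives up on
+stragglers (the durations are illustrative only):
+
+::
+
+    from concurrent.futures import (ThreadPoolExecutor, TimeoutError,
+                                    as_completed)
+    import time
+
+    with ThreadPoolExecutor(max_workers=2) as executor:
+        fs = [executor.submit(time.sleep, t) for t in (0.1, 0.2, 5)]
+        try:
+            for future in as_completed(fs, timeout=1):
+                print('completed: %r' % future)
+        except TimeoutError:
+            print('gave up waiting for the slowest future')
+        # Leaving the with-block still waits for the slow future to finish.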
diff --git a/third_party/python/futures/docs/make.bat b/third_party/python/futures/docs/make.bat
new file mode 100644
index 0000000000..3e8021b56e
--- /dev/null
+++ b/third_party/python/futures/docs/make.bat
@@ -0,0 +1,112 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+set SPHINXBUILD=sphinx-build
+set ALLSPHINXOPTS=-d _build/doctrees %SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+	echo.  htmlhelp  to make HTML files and an HTML Help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (_build\*) do rmdir /q /s %%i
+ del /q /s _build\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% _build/html
+ echo.
+ echo.Build finished. The HTML pages are in _build/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% _build/dirhtml
+ echo.
+ echo.Build finished. The HTML pages are in _build/dirhtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% _build/pickle
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% _build/json
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% _build/htmlhelp
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in _build/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% _build/qthelp
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in _build/qthelp, like this:
+ echo.^> qcollectiongenerator _build\qthelp\futures.qhcp
+ echo.To view the help file:
+echo.^> assistant -collectionFile _build\qthelp\futures.qhc
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% _build/latex
+ echo.
+ echo.Build finished; the LaTeX files are in _build/latex.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% _build/changes
+ echo.
+ echo.The overview file is in _build/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% _build/linkcheck
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in _build/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% _build/doctest
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in _build/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/third_party/python/futures/futures.egg-info/PKG-INFO b/third_party/python/futures/futures.egg-info/PKG-INFO
new file mode 100644
index 0000000000..00e3a99767
--- /dev/null
+++ b/third_party/python/futures/futures.egg-info/PKG-INFO
@@ -0,0 +1,16 @@
+Metadata-Version: 1.0
+Name: futures
+Version: 3.0.5
+Summary: Backport of the concurrent.futures package from Python 3.2
+Home-page: https://github.com/agronholm/pythonfutures
+Author: Alex Gronholm
+Author-email: alex.gronholm+pypi@nextday.fi
+License: BSD
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 2 :: Only
diff --git a/third_party/python/futures/futures.egg-info/SOURCES.txt b/third_party/python/futures/futures.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..31d473be10
--- /dev/null
+++ b/third_party/python/futures/futures.egg-info/SOURCES.txt
@@ -0,0 +1,24 @@
+CHANGES
+LICENSE
+MANIFEST.in
+crawl.py
+primes.py
+setup.cfg
+setup.py
+test_futures.py
+tox.ini
+concurrent/__init__.py
+concurrent/futures/__init__.py
+concurrent/futures/_base.py
+concurrent/futures/process.py
+concurrent/futures/thread.py
+docs/Makefile
+docs/conf.py
+docs/index.rst
+docs/make.bat
+futures.egg-info/PKG-INFO
+futures.egg-info/SOURCES.txt
+futures.egg-info/dependency_links.txt
+futures.egg-info/not-zip-safe
+futures.egg-info/pbr.json
+futures.egg-info/top_level.txt \ No newline at end of file
diff --git a/third_party/python/futures/futures.egg-info/dependency_links.txt b/third_party/python/futures/futures.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/futures/futures.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/futures/futures.egg-info/not-zip-safe b/third_party/python/futures/futures.egg-info/not-zip-safe
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/futures/futures.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/third_party/python/futures/futures.egg-info/pbr.json b/third_party/python/futures/futures.egg-info/pbr.json
new file mode 100644
index 0000000000..6937c11cb5
--- /dev/null
+++ b/third_party/python/futures/futures.egg-info/pbr.json
@@ -0,0 +1 @@
+{"is_release": false, "git_version": "6532a74"} \ No newline at end of file
diff --git a/third_party/python/futures/futures.egg-info/top_level.txt b/third_party/python/futures/futures.egg-info/top_level.txt
new file mode 100644
index 0000000000..e4d7bdcbdb
--- /dev/null
+++ b/third_party/python/futures/futures.egg-info/top_level.txt
@@ -0,0 +1 @@
+concurrent
diff --git a/third_party/python/futures/primes.py b/third_party/python/futures/primes.py
new file mode 100644
index 0000000000..0da2b3e64c
--- /dev/null
+++ b/third_party/python/futures/primes.py
@@ -0,0 +1,50 @@
+from __future__ import with_statement
+import math
+import time
+import sys
+
+from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
+
+PRIMES = [
+ 112272535095293,
+ 112582705942171,
+ 112272535095293,
+ 115280095190773,
+ 115797848077099,
+ 117450548693743,
+ 993960000099397]
+
+def is_prime(n):
+ if n % 2 == 0:
+ return False
+
+ sqrt_n = int(math.floor(math.sqrt(n)))
+ for i in range(3, sqrt_n + 1, 2):
+ if n % i == 0:
+ return False
+ return True
+
+def sequential():
+ return list(map(is_prime, PRIMES))
+
+def with_process_pool_executor():
+ with ProcessPoolExecutor(10) as executor:
+ return list(executor.map(is_prime, PRIMES))
+
+def with_thread_pool_executor():
+ with ThreadPoolExecutor(10) as executor:
+ return list(executor.map(is_prime, PRIMES))
+
+def main():
+ for name, fn in [('sequential', sequential),
+ ('processes', with_process_pool_executor),
+ ('threads', with_thread_pool_executor)]:
+ sys.stdout.write('%s: ' % name.ljust(12))
+ start = time.time()
+ if fn() != [True] * len(PRIMES):
+ sys.stdout.write('failed\n')
+ else:
+ sys.stdout.write('%.2f seconds\n' % (time.time() - start))
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/python/futures/setup.cfg b/third_party/python/futures/setup.cfg
new file mode 100644
index 0000000000..e04dbabe38
--- /dev/null
+++ b/third_party/python/futures/setup.cfg
@@ -0,0 +1,12 @@
+[build_sphinx]
+build-dir = build/sphinx
+source-dir = docs
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
+[upload_docs]
+upload-dir = build/sphinx/html
+
diff --git a/third_party/python/futures/setup.py b/third_party/python/futures/setup.py
new file mode 100755
index 0000000000..6961f35306
--- /dev/null
+++ b/third_party/python/futures/setup.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+from warnings import warn
+import sys
+
+if sys.version_info[0] > 2:
+ warn('This backport is meant only for Python 2.\n'
+ 'Python 3 users do not need it, as the concurrent.futures '
+ 'package is available in the standard library.')
+
+extras = {}
+try:
+ from setuptools import setup
+ extras['zip_safe'] = False
+except ImportError:
+ from distutils.core import setup
+
+setup(name='futures',
+ version='3.0.5',
+ description='Backport of the concurrent.futures package from Python 3.2',
+ author='Brian Quinlan',
+ author_email='brian@sweetapp.com',
+ maintainer='Alex Gronholm',
+ maintainer_email='alex.gronholm+pypi@nextday.fi',
+ url='https://github.com/agronholm/pythonfutures',
+ packages=['concurrent', 'concurrent.futures'],
+ license='BSD',
+ classifiers=['License :: OSI Approved :: BSD License',
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 2 :: Only'],
+ **extras
+ )
diff --git a/third_party/python/futures/test_futures.py b/third_party/python/futures/test_futures.py
new file mode 100644
index 0000000000..301cd0a934
--- /dev/null
+++ b/third_party/python/futures/test_futures.py
@@ -0,0 +1,727 @@
+import os
+import subprocess
+import sys
+import threading
+import functools
+import contextlib
+import logging
+import re
+import time
+import gc
+from StringIO import StringIO
+from test import test_support
+
+from concurrent import futures
+from concurrent.futures._base import (
+ PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
+
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+
+
+def reap_threads(func):
+ """Use this function when threads are being used. This will
+ ensure that the threads are cleaned up even when the test fails.
+ If threading is unavailable this function does nothing.
+ """
+ @functools.wraps(func)
+ def decorator(*args):
+ key = test_support.threading_setup()
+ try:
+ return func(*args)
+ finally:
+ test_support.threading_cleanup(*key)
+ return decorator
+
+
+# Executing the interpreter in a subprocess
+def _assert_python(expected_success, *args, **env_vars):
+ cmd_line = [sys.executable]
+ if not env_vars:
+ cmd_line.append('-E')
+ # Need to preserve the original environment, for in-place testing of
+ # shared library builds.
+ env = os.environ.copy()
+    # But a special flag can be set to override this -- in that case, the
+    # caller is responsible for passing the full environment.
+ if env_vars.pop('__cleanenv', None):
+ env = {}
+ env.update(env_vars)
+ cmd_line.extend(args)
+ p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ env=env)
+ try:
+ out, err = p.communicate()
+ finally:
+ subprocess._cleanup()
+ p.stdout.close()
+ p.stderr.close()
+ rc = p.returncode
+ err = strip_python_stderr(err)
+ if (rc and expected_success) or (not rc and not expected_success):
+ raise AssertionError(
+ "Process return code is %d, "
+ "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
+ return rc, out, err
+
+
+def assert_python_ok(*args, **env_vars):
+ """
+ Assert that running the interpreter with `args` and optional environment
+ variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
+ """
+ return _assert_python(True, *args, **env_vars)
+
+
+def strip_python_stderr(stderr):
+ """Strip the stderr of a Python process from potential debug output
+ emitted by the interpreter.
+
+ This will typically be run on the result of the communicate() method
+ of a subprocess.Popen object.
+ """
+ stderr = re.sub(r"\[\d+ refs\]\r?\n?$".encode(), "".encode(), stderr).strip()
+ return stderr
+
+
+@contextlib.contextmanager
+def captured_stderr():
+ """Return a context manager used by captured_stdout/stdin/stderr
+ that temporarily replaces the sys stream *stream_name* with a StringIO."""
+ logging_stream = StringIO()
+ handler = logging.StreamHandler(logging_stream)
+ logging.root.addHandler(handler)
+
+ try:
+ yield logging_stream
+ finally:
+ logging.root.removeHandler(handler)
+
+
+def create_future(state=PENDING, exception=None, result=None):
+ f = Future()
+ f._state = state
+ f._exception = exception
+ f._result = result
+ return f
+
+
+PENDING_FUTURE = create_future(state=PENDING)
+RUNNING_FUTURE = create_future(state=RUNNING)
+CANCELLED_FUTURE = create_future(state=CANCELLED)
+CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
+EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError())
+SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
+
+
+def mul(x, y):
+ return x * y
+
+
+def sleep_and_raise(t):
+ time.sleep(t)
+ raise Exception('this is an exception')
+
+def sleep_and_print(t, msg):
+ time.sleep(t)
+ print(msg)
+ sys.stdout.flush()
+
+
+class ExecutorMixin:
+ worker_count = 5
+
+ def setUp(self):
+ self.t1 = time.time()
+ try:
+ self.executor = self.executor_type(max_workers=self.worker_count)
+ except NotImplementedError:
+ e = sys.exc_info()[1]
+ self.skipTest(str(e))
+ self._prime_executor()
+
+ def tearDown(self):
+ self.executor.shutdown(wait=True)
+ dt = time.time() - self.t1
+ if test_support.verbose:
+ print("%.2fs" % dt)
+ self.assertLess(dt, 60, "synchronization issue: test lasted too long")
+
+ def _prime_executor(self):
+ # Make sure that the executor is ready to do work before running the
+ # tests. This should reduce the probability of timeouts in the tests.
+ futures = [self.executor.submit(time.sleep, 0.1)
+ for _ in range(self.worker_count)]
+
+ for f in futures:
+ f.result()
+
+
+class ThreadPoolMixin(ExecutorMixin):
+ executor_type = futures.ThreadPoolExecutor
+
+
+class ProcessPoolMixin(ExecutorMixin):
+ executor_type = futures.ProcessPoolExecutor
+
+
+class ExecutorShutdownTest(unittest.TestCase):
+ def test_run_after_shutdown(self):
+ self.executor.shutdown()
+ self.assertRaises(RuntimeError,
+ self.executor.submit,
+ pow, 2, 5)
+
+ def test_interpreter_shutdown(self):
+ # Test the atexit hook for shutdown of worker threads and processes
+ rc, out, err = assert_python_ok('-c', """if 1:
+ from concurrent.futures import %s
+ from time import sleep
+ from test_futures import sleep_and_print
+ t = %s(5)
+ t.submit(sleep_and_print, 1.0, "apple")
+ """ % (self.executor_type.__name__, self.executor_type.__name__))
+        # Errors in atexit hooks don't change the process exit code; check
+        # stderr manually.
+ self.assertFalse(err)
+ self.assertEqual(out.strip(), "apple".encode())
+
+ def test_hang_issue12364(self):
+ fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
+ self.executor.shutdown()
+ for f in fs:
+ f.result()
+
+
+class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest):
+ def _prime_executor(self):
+ pass
+
+ def test_threads_terminate(self):
+ self.executor.submit(mul, 21, 2)
+ self.executor.submit(mul, 6, 7)
+ self.executor.submit(mul, 3, 14)
+ self.assertEqual(len(self.executor._threads), 3)
+ self.executor.shutdown()
+ for t in self.executor._threads:
+ t.join()
+
+ def test_context_manager_shutdown(self):
+ with futures.ThreadPoolExecutor(max_workers=5) as e:
+ executor = e
+ self.assertEqual(list(e.map(abs, range(-5, 5))),
+ [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
+
+ for t in executor._threads:
+ t.join()
+
+ def test_del_shutdown(self):
+ executor = futures.ThreadPoolExecutor(max_workers=5)
+ executor.map(abs, range(-5, 5))
+ threads = executor._threads
+ del executor
+ gc.collect()
+
+ for t in threads:
+ t.join()
+
+
+class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
+ def _prime_executor(self):
+ pass
+
+ def test_processes_terminate(self):
+ self.executor.submit(mul, 21, 2)
+ self.executor.submit(mul, 6, 7)
+ self.executor.submit(mul, 3, 14)
+ self.assertEqual(len(self.executor._processes), 5)
+ processes = self.executor._processes
+ self.executor.shutdown()
+
+ for p in processes:
+ p.join()
+
+ def test_context_manager_shutdown(self):
+ with futures.ProcessPoolExecutor(max_workers=5) as e:
+ processes = e._processes
+ self.assertEqual(list(e.map(abs, range(-5, 5))),
+ [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
+
+ for p in processes:
+ p.join()
+
+ def test_del_shutdown(self):
+ executor = futures.ProcessPoolExecutor(max_workers=5)
+ list(executor.map(abs, range(-5, 5)))
+ queue_management_thread = executor._queue_management_thread
+ processes = executor._processes
+ del executor
+ gc.collect()
+
+ queue_management_thread.join()
+ for p in processes:
+ p.join()
+
+
+class WaitTests(unittest.TestCase):
+
+ def test_first_completed(self):
+ future1 = self.executor.submit(mul, 21, 2)
+ future2 = self.executor.submit(time.sleep, 1.5)
+
+ done, not_done = futures.wait(
+ [CANCELLED_FUTURE, future1, future2],
+ return_when=futures.FIRST_COMPLETED)
+
+ self.assertEqual(set([future1]), done)
+ self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
+
+ def test_first_completed_some_already_completed(self):
+ future1 = self.executor.submit(time.sleep, 1.5)
+
+ finished, pending = futures.wait(
+ [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
+ return_when=futures.FIRST_COMPLETED)
+
+ self.assertEqual(
+ set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
+ finished)
+ self.assertEqual(set([future1]), pending)
+
+ def test_first_exception(self):
+ future1 = self.executor.submit(mul, 2, 21)
+ future2 = self.executor.submit(sleep_and_raise, 1.5)
+ future3 = self.executor.submit(time.sleep, 3)
+
+ finished, pending = futures.wait(
+ [future1, future2, future3],
+ return_when=futures.FIRST_EXCEPTION)
+
+ self.assertEqual(set([future1, future2]), finished)
+ self.assertEqual(set([future3]), pending)
+
+ def test_first_exception_some_already_complete(self):
+ future1 = self.executor.submit(divmod, 21, 0)
+ future2 = self.executor.submit(time.sleep, 1.5)
+
+ finished, pending = futures.wait(
+ [SUCCESSFUL_FUTURE,
+ CANCELLED_FUTURE,
+ CANCELLED_AND_NOTIFIED_FUTURE,
+ future1, future2],
+ return_when=futures.FIRST_EXCEPTION)
+
+ self.assertEqual(set([SUCCESSFUL_FUTURE,
+ CANCELLED_AND_NOTIFIED_FUTURE,
+ future1]), finished)
+ self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
+
+ def test_first_exception_one_already_failed(self):
+ future1 = self.executor.submit(time.sleep, 2)
+
+ finished, pending = futures.wait(
+ [EXCEPTION_FUTURE, future1],
+ return_when=futures.FIRST_EXCEPTION)
+
+ self.assertEqual(set([EXCEPTION_FUTURE]), finished)
+ self.assertEqual(set([future1]), pending)
+
+ def test_all_completed(self):
+ future1 = self.executor.submit(divmod, 2, 0)
+ future2 = self.executor.submit(mul, 2, 21)
+
+ finished, pending = futures.wait(
+ [SUCCESSFUL_FUTURE,
+ CANCELLED_AND_NOTIFIED_FUTURE,
+ EXCEPTION_FUTURE,
+ future1,
+ future2],
+ return_when=futures.ALL_COMPLETED)
+
+ self.assertEqual(set([SUCCESSFUL_FUTURE,
+ CANCELLED_AND_NOTIFIED_FUTURE,
+ EXCEPTION_FUTURE,
+ future1,
+ future2]), finished)
+ self.assertEqual(set(), pending)
+
+ def test_timeout(self):
+ future1 = self.executor.submit(mul, 6, 7)
+ future2 = self.executor.submit(time.sleep, 3)
+
+ finished, pending = futures.wait(
+ [CANCELLED_AND_NOTIFIED_FUTURE,
+ EXCEPTION_FUTURE,
+ SUCCESSFUL_FUTURE,
+ future1, future2],
+ timeout=1.5,
+ return_when=futures.ALL_COMPLETED)
+
+ self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
+ EXCEPTION_FUTURE,
+ SUCCESSFUL_FUTURE,
+ future1]), finished)
+ self.assertEqual(set([future2]), pending)
+
+
+class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests):
+
+ def test_pending_calls_race(self):
+ # Issue #14406: multi-threaded race condition when waiting on all
+ # futures.
+ event = threading.Event()
+ def future_func():
+ event.wait()
+        oldcheckinterval = sys.getcheckinterval()
+ sys.setcheckinterval(1)
+ try:
+ fs = set(self.executor.submit(future_func) for i in range(100))
+ event.set()
+ futures.wait(fs, return_when=futures.ALL_COMPLETED)
+ finally:
+            sys.setcheckinterval(oldcheckinterval)
+
+
+class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests):
+ pass
+
+
+class AsCompletedTests(unittest.TestCase):
+ # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
+ def test_no_timeout(self):
+ future1 = self.executor.submit(mul, 2, 21)
+ future2 = self.executor.submit(mul, 7, 6)
+
+ completed = set(futures.as_completed(
+ [CANCELLED_AND_NOTIFIED_FUTURE,
+ EXCEPTION_FUTURE,
+ SUCCESSFUL_FUTURE,
+ future1, future2]))
+ self.assertEqual(set(
+ [CANCELLED_AND_NOTIFIED_FUTURE,
+ EXCEPTION_FUTURE,
+ SUCCESSFUL_FUTURE,
+ future1, future2]),
+ completed)
+
+ def test_zero_timeout(self):
+ future1 = self.executor.submit(time.sleep, 2)
+ completed_futures = set()
+ try:
+ for future in futures.as_completed(
+ [CANCELLED_AND_NOTIFIED_FUTURE,
+ EXCEPTION_FUTURE,
+ SUCCESSFUL_FUTURE,
+ future1],
+ timeout=0):
+ completed_futures.add(future)
+ except futures.TimeoutError:
+ pass
+
+ self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
+ EXCEPTION_FUTURE,
+ SUCCESSFUL_FUTURE]),
+ completed_futures)
+
+ def test_duplicate_futures(self):
+ # Issue 20367. Duplicate futures should not raise exceptions or give
+ # duplicate responses.
+ future1 = self.executor.submit(time.sleep, 2)
+        completed = list(futures.as_completed([future1, future1]))
+ self.assertEqual(len(completed), 1)
+
+
+class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests):
+ pass
+
+
+class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests):
+ pass
+
+
+class ExecutorTest(unittest.TestCase):
+ # Executor.shutdown() and context manager usage is tested by
+ # ExecutorShutdownTest.
+ def test_submit(self):
+ future = self.executor.submit(pow, 2, 8)
+ self.assertEqual(256, future.result())
+
+ def test_submit_keyword(self):
+ future = self.executor.submit(mul, 2, y=8)
+ self.assertEqual(16, future.result())
+
+ def test_map(self):
+ self.assertEqual(
+ list(self.executor.map(pow, range(10), range(10))),
+ list(map(pow, range(10), range(10))))
+
+ def test_map_exception(self):
+ i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
+ self.assertEqual(next(i), (0, 1))
+ self.assertEqual(next(i), (0, 1))
+ self.assertRaises(ZeroDivisionError, next, i)
+
+ def test_map_timeout(self):
+ results = []
+ try:
+ for i in self.executor.map(time.sleep,
+ [0, 0, 3],
+ timeout=1.5):
+ results.append(i)
+ except futures.TimeoutError:
+ pass
+ else:
+ self.fail('expected TimeoutError')
+
+ self.assertEqual([None, None], results)
+
+
+class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest):
+ def test_map_submits_without_iteration(self):
+ """Tests verifying issue 11777."""
+ finished = []
+ def record_finished(n):
+ finished.append(n)
+
+ self.executor.map(record_finished, range(10))
+ self.executor.shutdown(wait=True)
+ self.assertEqual(len(finished), 10)
+
+
+class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest):
+ pass
+
+
+class FutureTests(unittest.TestCase):
+ def test_done_callback_with_result(self):
+ callback_result = [None]
+ def fn(callback_future):
+ callback_result[0] = callback_future.result()
+
+ f = Future()
+ f.add_done_callback(fn)
+ f.set_result(5)
+ self.assertEqual(5, callback_result[0])
+
+ def test_done_callback_with_exception(self):
+ callback_exception = [None]
+ def fn(callback_future):
+ callback_exception[0] = callback_future.exception()
+
+ f = Future()
+ f.add_done_callback(fn)
+ f.set_exception(Exception('test'))
+ self.assertEqual(('test',), callback_exception[0].args)
+
+ def test_done_callback_with_cancel(self):
+ was_cancelled = [None]
+ def fn(callback_future):
+ was_cancelled[0] = callback_future.cancelled()
+
+ f = Future()
+ f.add_done_callback(fn)
+ self.assertTrue(f.cancel())
+ self.assertTrue(was_cancelled[0])
+
+ def test_done_callback_raises(self):
+ with captured_stderr() as stderr:
+ raising_was_called = [False]
+ fn_was_called = [False]
+
+ def raising_fn(callback_future):
+ raising_was_called[0] = True
+ raise Exception('doh!')
+
+ def fn(callback_future):
+ fn_was_called[0] = True
+
+ f = Future()
+ f.add_done_callback(raising_fn)
+ f.add_done_callback(fn)
+ f.set_result(5)
+            self.assertTrue(raising_was_called[0])
+            self.assertTrue(fn_was_called[0])
+ self.assertIn('Exception: doh!', stderr.getvalue())
+
+ def test_done_callback_already_successful(self):
+ callback_result = [None]
+ def fn(callback_future):
+ callback_result[0] = callback_future.result()
+
+ f = Future()
+ f.set_result(5)
+ f.add_done_callback(fn)
+ self.assertEqual(5, callback_result[0])
+
+ def test_done_callback_already_failed(self):
+ callback_exception = [None]
+ def fn(callback_future):
+ callback_exception[0] = callback_future.exception()
+
+ f = Future()
+ f.set_exception(Exception('test'))
+ f.add_done_callback(fn)
+ self.assertEqual(('test',), callback_exception[0].args)
+
+ def test_done_callback_already_cancelled(self):
+ was_cancelled = [None]
+ def fn(callback_future):
+ was_cancelled[0] = callback_future.cancelled()
+
+ f = Future()
+ self.assertTrue(f.cancel())
+ f.add_done_callback(fn)
+ self.assertTrue(was_cancelled[0])
+
+ def test_repr(self):
+ self.assertRegexpMatches(repr(PENDING_FUTURE),
+ '<Future at 0x[0-9a-f]+L? state=pending>')
+ self.assertRegexpMatches(repr(RUNNING_FUTURE),
+ '<Future at 0x[0-9a-f]+L? state=running>')
+ self.assertRegexpMatches(repr(CANCELLED_FUTURE),
+ '<Future at 0x[0-9a-f]+L? state=cancelled>')
+ self.assertRegexpMatches(repr(CANCELLED_AND_NOTIFIED_FUTURE),
+ '<Future at 0x[0-9a-f]+L? state=cancelled>')
+ self.assertRegexpMatches(
+ repr(EXCEPTION_FUTURE),
+ '<Future at 0x[0-9a-f]+L? state=finished raised IOError>')
+ self.assertRegexpMatches(
+ repr(SUCCESSFUL_FUTURE),
+ '<Future at 0x[0-9a-f]+L? state=finished returned int>')
+
+ def test_cancel(self):
+ f1 = create_future(state=PENDING)
+ f2 = create_future(state=RUNNING)
+ f3 = create_future(state=CANCELLED)
+ f4 = create_future(state=CANCELLED_AND_NOTIFIED)
+ f5 = create_future(state=FINISHED, exception=IOError())
+ f6 = create_future(state=FINISHED, result=5)
+
+ self.assertTrue(f1.cancel())
+ self.assertEqual(f1._state, CANCELLED)
+
+ self.assertFalse(f2.cancel())
+ self.assertEqual(f2._state, RUNNING)
+
+ self.assertTrue(f3.cancel())
+ self.assertEqual(f3._state, CANCELLED)
+
+ self.assertTrue(f4.cancel())
+ self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
+
+ self.assertFalse(f5.cancel())
+ self.assertEqual(f5._state, FINISHED)
+
+ self.assertFalse(f6.cancel())
+ self.assertEqual(f6._state, FINISHED)
+
+ def test_cancelled(self):
+ self.assertFalse(PENDING_FUTURE.cancelled())
+ self.assertFalse(RUNNING_FUTURE.cancelled())
+ self.assertTrue(CANCELLED_FUTURE.cancelled())
+ self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
+ self.assertFalse(EXCEPTION_FUTURE.cancelled())
+ self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
+
+ def test_done(self):
+ self.assertFalse(PENDING_FUTURE.done())
+ self.assertFalse(RUNNING_FUTURE.done())
+ self.assertTrue(CANCELLED_FUTURE.done())
+ self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
+ self.assertTrue(EXCEPTION_FUTURE.done())
+ self.assertTrue(SUCCESSFUL_FUTURE.done())
+
+ def test_running(self):
+ self.assertFalse(PENDING_FUTURE.running())
+ self.assertTrue(RUNNING_FUTURE.running())
+ self.assertFalse(CANCELLED_FUTURE.running())
+ self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
+ self.assertFalse(EXCEPTION_FUTURE.running())
+ self.assertFalse(SUCCESSFUL_FUTURE.running())
+
+ def test_result_with_timeout(self):
+ self.assertRaises(futures.TimeoutError,
+ PENDING_FUTURE.result, timeout=0)
+ self.assertRaises(futures.TimeoutError,
+ RUNNING_FUTURE.result, timeout=0)
+ self.assertRaises(futures.CancelledError,
+ CANCELLED_FUTURE.result, timeout=0)
+ self.assertRaises(futures.CancelledError,
+ CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
+ self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0)
+ self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
+
+ def test_result_with_success(self):
+        # TODO(brian@sweetapp.com): This test is timing dependent.
+ def notification():
+ # Wait until the main thread is waiting for the result.
+ time.sleep(1)
+ f1.set_result(42)
+
+ f1 = create_future(state=PENDING)
+ t = threading.Thread(target=notification)
+ t.start()
+
+ self.assertEqual(f1.result(timeout=5), 42)
+
+ def test_result_with_cancel(self):
+        # TODO(brian@sweetapp.com): This test is timing dependent.
+ def notification():
+ # Wait until the main thread is waiting for the result.
+ time.sleep(1)
+ f1.cancel()
+
+ f1 = create_future(state=PENDING)
+ t = threading.Thread(target=notification)
+ t.start()
+
+ self.assertRaises(futures.CancelledError, f1.result, timeout=5)
+
+ def test_exception_with_timeout(self):
+ self.assertRaises(futures.TimeoutError,
+ PENDING_FUTURE.exception, timeout=0)
+ self.assertRaises(futures.TimeoutError,
+ RUNNING_FUTURE.exception, timeout=0)
+ self.assertRaises(futures.CancelledError,
+ CANCELLED_FUTURE.exception, timeout=0)
+ self.assertRaises(futures.CancelledError,
+ CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
+ self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
+ IOError))
+ self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
+
+ def test_exception_with_success(self):
+ def notification():
+ # Wait until the main thread is waiting for the exception.
+ time.sleep(1)
+ with f1._condition:
+ f1._state = FINISHED
+ f1._exception = IOError()
+ f1._condition.notify_all()
+
+ f1 = create_future(state=PENDING)
+ t = threading.Thread(target=notification)
+ t.start()
+
+ self.assertTrue(isinstance(f1.exception(timeout=5), IOError))
+
+@reap_threads
+def test_main():
+ try:
+ test_support.run_unittest(ProcessPoolExecutorTest,
+ ThreadPoolExecutorTest,
+ ProcessPoolWaitTests,
+ ThreadPoolWaitTests,
+ ProcessPoolAsCompletedTests,
+ ThreadPoolAsCompletedTests,
+ FutureTests,
+ ProcessPoolShutdownTest,
+ ThreadPoolShutdownTest)
+ finally:
+ test_support.reap_children()
+
+if __name__ == "__main__":
+ test_main()
diff --git a/third_party/python/futures/tox.ini b/third_party/python/futures/tox.ini
new file mode 100644
index 0000000000..2b35e7ddb6
--- /dev/null
+++ b/third_party/python/futures/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+envlist = py26,py27,pypy,jython
+
+[testenv]
+commands={envpython} test_futures.py []
+
+[testenv:py26]
+deps=unittest2