path: root/third_party/libwebrtc/build/android/pylib/local
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 17:32:43 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 17:32:43 +0000
commit    6bf0a5cb5034a7e684dcc3500e841785237ce2dd (patch)
tree      a68f146d7fa01f0134297619fbe7e33db084e0aa /third_party/libwebrtc/build/android/pylib/local
parent    Initial commit. (diff)
Adding upstream version 1:115.7.0. (upstream/1%115.7.0, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/build/android/pylib/local')
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/__init__.py  3
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/device/__init__.py  3
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py  328
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py  896
-rwxr-xr-x  third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py  79
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py  1512
-rwxr-xr-x  third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py  169
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py  128
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py  395
-rwxr-xr-x  third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py  171
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/emulator/OWNERS  4
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/emulator/__init__.py  3
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/emulator/avd.py  629
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/emulator/ini.py  58
-rwxr-xr-x  third_party/libwebrtc/build/android/pylib/local/emulator/ini_test.py  69
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/emulator/local_emulator_environment.py  102
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/emulator/proto/__init__.py  3
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd.proto  75
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd_pb2.py  362
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/local_test_server_spawner.py  101
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/machine/__init__.py  3
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/machine/local_machine_environment.py  25
-rw-r--r--  third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run.py  309
-rwxr-xr-x  third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run_test.py  89
24 files changed, 5516 insertions, 0 deletions
diff --git a/third_party/libwebrtc/build/android/pylib/local/__init__.py b/third_party/libwebrtc/build/android/pylib/local/__init__.py
new file mode 100644
index 0000000000..4d6aabb953
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/__init__.py b/third_party/libwebrtc/build/android/pylib/local/device/__init__.py
new file mode 100644
index 0000000000..4d6aabb953
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py
new file mode 100644
index 0000000000..c254d2e8ca
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py
@@ -0,0 +1,328 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import datetime
+import functools
+import logging
+import os
+import shutil
+import tempfile
+import threading
+
+import devil_chromium
+from devil import base_error
+from devil.android import device_denylist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android import logcat_monitor
+from devil.android.sdk import adb_wrapper
+from devil.utils import file_utils
+from devil.utils import parallelizer
+from pylib import constants
+from pylib.constants import host_paths
+from pylib.base import environment
+from pylib.utils import instrumentation_tracing
+from py_trace_event import trace_event
+
+
+LOGCAT_FILTERS = [
+ 'chromium:v',
+ 'cr_*:v',
+ 'DEBUG:I',
+ 'StrictMode:D',
+]
+
+
+def _DeviceCachePath(device):
+ file_name = 'device_cache_%s.json' % device.adb.GetDeviceSerial()
+ return os.path.join(constants.GetOutDirectory(), file_name)
+
+
+def handle_shard_failures(f):
+ """A decorator that handles device failures for per-device functions.
+
+ Args:
+ f: the function being decorated. The function must take at least one
+ argument, and that argument must be the device.
+ """
+ return handle_shard_failures_with(None)(f)
+
+
+# TODO(jbudorick): Refactor this to work as a decorator or context manager.
+def handle_shard_failures_with(on_failure):
+ """A decorator that handles device failures for per-device functions.
+
+ This calls on_failure in the event of a failure.
+
+ Args:
+ on_failure: A binary function called on failure with the device and the
+ name of the decorated function. The decorated function itself must take
+ at least one argument, and that argument must be the device.
+ """
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(dev, *args, **kwargs):
+ try:
+ return f(dev, *args, **kwargs)
+ except device_errors.CommandTimeoutError:
+ logging.exception('Shard timed out: %s(%s)', f.__name__, str(dev))
+ except device_errors.DeviceUnreachableError:
+ logging.exception('Shard died: %s(%s)', f.__name__, str(dev))
+ except base_error.BaseError:
+ logging.exception('Shard failed: %s(%s)', f.__name__, str(dev))
+ except SystemExit:
+ logging.exception('Shard killed: %s(%s)', f.__name__, str(dev))
+ raise
+ if on_failure:
+ on_failure(dev, f.__name__)
+ return None
+
+ return wrapper
+
+ return decorator
+
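
A minimal usage sketch of the decorator factory above; the helper function and the failure callback here are hypothetical, and the only contract is that the wrapped function takes the device as its first argument (the real runs below pass LocalDeviceEnvironment.DenylistDevice as on_failure):

def _log_denylist_candidate(dev, func_name):
  # Hypothetical failure callback; invoked with the device and the name of
  # the decorated function when it raises a devil BaseError.
  logging.warning('%s failed on %s; device is a denylist candidate',
                  func_name, str(dev))


@handle_shard_failures_with(on_failure=_log_denylist_candidate)
def query_sdk_level(device):
  # The first positional argument must be the device.
  device.WaitUntilFullyBooted()
  return device.build_version_sdk


# Running this across all attached devices returns None for any device that
# failed, instead of aborting the whole shard:
#   sdk_levels = env.parallel_devices.pMap(query_sdk_level).pGet(None)
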
+
+def place_nomedia_on_device(dev, device_root):
+ """Places .nomedia file in test data root.
+
+ This helps prevent the system from scanning media files inside the test data.
+
+ Args:
+ dev: Device on which to place the .nomedia file.
+ device_root: Base path on the device at which to place the .nomedia file.
+ """
+
+ dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)
+ dev.WriteFile('%s/.nomedia' % device_root, 'https://crbug.com/796640')
+
+
+class LocalDeviceEnvironment(environment.Environment):
+
+ def __init__(self, args, output_manager, _error_func):
+ super(LocalDeviceEnvironment, self).__init__(output_manager)
+ self._current_try = 0
+ self._denylist = (device_denylist.Denylist(args.denylist_file)
+ if args.denylist_file else None)
+ self._device_serials = args.test_devices
+ self._devices_lock = threading.Lock()
+ self._devices = None
+ self._concurrent_adb = args.enable_concurrent_adb
+ self._enable_device_cache = args.enable_device_cache
+ self._logcat_monitors = []
+ self._logcat_output_dir = args.logcat_output_dir
+ self._logcat_output_file = args.logcat_output_file
+ self._max_tries = 1 + args.num_retries
+ self._preferred_abis = None
+ self._recover_devices = args.recover_devices
+ self._skip_clear_data = args.skip_clear_data
+ self._tool_name = args.tool
+ self._trace_output = None
+ if hasattr(args, 'trace_output'):
+ self._trace_output = args.trace_output
+ self._trace_all = None
+ if hasattr(args, 'trace_all'):
+ self._trace_all = args.trace_all
+
+ devil_chromium.Initialize(
+ output_directory=constants.GetOutDirectory(),
+ adb_path=args.adb_path)
+
+ # Some things such as Forwarder require ADB to be in the environment path,
+ # while others like Devil's bundletool.py require Java on the path.
+ adb_dir = os.path.dirname(adb_wrapper.AdbWrapper.GetAdbPath())
+ if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
+ os.environ['PATH'] = os.pathsep.join(
+ [adb_dir, host_paths.JAVA_PATH, os.environ['PATH']])
+
+ #override
+ def SetUp(self):
+ if self.trace_output and self._trace_all:
+ to_include = [r"pylib\..*", r"devil\..*", "__main__"]
+ to_exclude = ["logging"]
+ instrumentation_tracing.start_instrumenting(self.trace_output, to_include,
+ to_exclude)
+ elif self.trace_output:
+ self.EnableTracing()
+
+ # Must be called before accessing |devices|.
+ def SetPreferredAbis(self, abis):
+ assert self._devices is None
+ self._preferred_abis = abis
+
+ def _InitDevices(self):
+ device_arg = []
+ if self._device_serials:
+ device_arg = self._device_serials
+
+ self._devices = device_utils.DeviceUtils.HealthyDevices(
+ self._denylist,
+ retries=5,
+ enable_usb_resets=True,
+ enable_device_files_cache=self._enable_device_cache,
+ default_retries=self._max_tries - 1,
+ device_arg=device_arg,
+ abis=self._preferred_abis)
+
+ if self._logcat_output_file:
+ self._logcat_output_dir = tempfile.mkdtemp()
+
+ @handle_shard_failures_with(on_failure=self.DenylistDevice)
+ def prepare_device(d):
+ d.WaitUntilFullyBooted()
+
+ if self._enable_device_cache:
+ cache_path = _DeviceCachePath(d)
+ if os.path.exists(cache_path):
+ logging.info('Using device cache: %s', cache_path)
+ with open(cache_path) as f:
+ d.LoadCacheData(f.read())
+ # Delete cached file so that any exceptions cause it to be cleared.
+ os.unlink(cache_path)
+
+ if self._logcat_output_dir:
+ logcat_file = os.path.join(
+ self._logcat_output_dir,
+ '%s_%s' % (d.adb.GetDeviceSerial(),
+ datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S')))
+ monitor = logcat_monitor.LogcatMonitor(
+ d.adb, clear=True, output_file=logcat_file)
+ self._logcat_monitors.append(monitor)
+ monitor.Start()
+
+ self.parallel_devices.pMap(prepare_device)
+
+ @property
+ def current_try(self):
+ return self._current_try
+
+ def IncrementCurrentTry(self):
+ self._current_try += 1
+
+ def ResetCurrentTry(self):
+ self._current_try = 0
+
+ @property
+ def denylist(self):
+ return self._denylist
+
+ @property
+ def concurrent_adb(self):
+ return self._concurrent_adb
+
+ @property
+ def devices(self):
+ # Initialize lazily so that host-only tests do not fail when no devices are
+ # attached.
+ if self._devices is None:
+ self._InitDevices()
+ return self._devices
+
+ @property
+ def max_tries(self):
+ return self._max_tries
+
+ @property
+ def parallel_devices(self):
+ return parallelizer.SyncParallelizer(self.devices)
+
+ @property
+ def recover_devices(self):
+ return self._recover_devices
+
+ @property
+ def skip_clear_data(self):
+ return self._skip_clear_data
+
+ @property
+ def tool(self):
+ return self._tool_name
+
+ @property
+ def trace_output(self):
+ return self._trace_output
+
+ #override
+ def TearDown(self):
+ if self.trace_output and self._trace_all:
+ instrumentation_tracing.stop_instrumenting()
+ elif self.trace_output:
+ self.DisableTracing()
+
+ # By default, teardown will invoke ADB. When receiving SIGTERM due to a
+ # timeout, there's a high probability that ADB is non-responsive. In these
+ # cases, sending an ADB command will potentially take a long time to time
+ # out. Before this happens, the process will be hard-killed for not
+ # responding to SIGTERM fast enough.
+ if self._received_sigterm:
+ return
+
+ if not self._devices:
+ return
+
+ @handle_shard_failures_with(on_failure=self.DenylistDevice)
+ def tear_down_device(d):
+ # Write the cache even when not using it so that it will be ready the
+ # first time that it is enabled. Writing it every time is also necessary
+ # so that an invalid cache can be flushed just by disabling it for one
+ # run.
+ cache_path = _DeviceCachePath(d)
+ if os.path.exists(os.path.dirname(cache_path)):
+ with open(cache_path, 'w') as f:
+ f.write(d.DumpCacheData())
+ logging.info('Wrote device cache: %s', cache_path)
+ else:
+ logging.warning(
+ 'Unable to write device cache as %s directory does not exist',
+ os.path.dirname(cache_path))
+
+ self.parallel_devices.pMap(tear_down_device)
+
+ for m in self._logcat_monitors:
+ try:
+ m.Stop()
+ m.Close()
+ _, temp_path = tempfile.mkstemp()
+ with open(m.output_file, 'r') as infile:
+ with open(temp_path, 'w') as outfile:
+ for line in infile:
+ outfile.write('Device(%s) %s' % (m.adb.GetDeviceSerial(), line))
+ shutil.move(temp_path, m.output_file)
+ except base_error.BaseError:
+ logging.exception('Failed to stop logcat monitor for %s',
+ m.adb.GetDeviceSerial())
+ except IOError:
+ logging.exception('Failed to locate logcat for device %s',
+ m.adb.GetDeviceSerial())
+
+ if self._logcat_output_file:
+ file_utils.MergeFiles(
+ self._logcat_output_file,
+ [m.output_file for m in self._logcat_monitors
+ if os.path.exists(m.output_file)])
+ shutil.rmtree(self._logcat_output_dir)
+
+ def DenylistDevice(self, device, reason='local_device_failure'):
+ device_serial = device.adb.GetDeviceSerial()
+ if self._denylist:
+ self._denylist.Extend([device_serial], reason=reason)
+ with self._devices_lock:
+ self._devices = [d for d in self._devices if str(d) != device_serial]
+ logging.error('Device %s denylisted: %s', device_serial, reason)
+ if not self._devices:
+ raise device_errors.NoDevicesError(
+ 'All devices were denylisted due to errors')
+
+ @staticmethod
+ def DisableTracing():
+ if not trace_event.trace_is_enabled():
+ logging.warning('Tracing is not running.')
+ else:
+ trace_event.trace_disable()
+
+ def EnableTracing(self):
+ if trace_event.trace_is_enabled():
+ logging.warning('Tracing is already running.')
+ else:
+ trace_event.trace_enable(self._trace_output)
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py
new file mode 100644
index 0000000000..c81722da6e
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py
@@ -0,0 +1,896 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import contextlib
+import collections
+import itertools
+import logging
+import math
+import os
+import posixpath
+import subprocess
+import shutil
+import time
+
+from six.moves import range # pylint: disable=redefined-builtin
+from devil import base_error
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android import device_temp_file
+from devil.android import logcat_monitor
+from devil.android import ports
+from devil.android.sdk import version_codes
+from devil.utils import reraiser_thread
+from incremental_install import installer
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.gtest import gtest_test_instance
+from pylib.local import local_test_server_spawner
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_test_run
+from pylib.utils import google_storage_helper
+from pylib.utils import logdog_helper
+from py_trace_event import trace_event
+from py_utils import contextlib_ext
+from py_utils import tempfile_ext
+import tombstones
+
+_MAX_INLINE_FLAGS_LENGTH = 50 # Arbitrarily chosen.
+_EXTRA_COMMAND_LINE_FILE = (
+ 'org.chromium.native_test.NativeTest.CommandLineFile')
+_EXTRA_COMMAND_LINE_FLAGS = (
+ 'org.chromium.native_test.NativeTest.CommandLineFlags')
+_EXTRA_COVERAGE_DEVICE_FILE = (
+ 'org.chromium.native_test.NativeTest.CoverageDeviceFile')
+_EXTRA_STDOUT_FILE = (
+ 'org.chromium.native_test.NativeTestInstrumentationTestRunner'
+ '.StdoutFile')
+_EXTRA_TEST = (
+ 'org.chromium.native_test.NativeTestInstrumentationTestRunner'
+ '.Test')
+_EXTRA_TEST_LIST = (
+ 'org.chromium.native_test.NativeTestInstrumentationTestRunner'
+ '.TestList')
+
+_SECONDS_TO_NANOS = int(1e9)
+
+# Tests that use SpawnedTestServer must run the LocalTestServerSpawner on the
+# host machine.
+# TODO(jbudorick): Move this up to the test instance if the net test server is
+# handled outside of the APK for the remote_device environment.
+_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
+ 'components_browsertests', 'content_unittests', 'content_browsertests',
+ 'net_unittests', 'services_unittests', 'unit_tests'
+]
+
+# These are used for code coverage.
+_LLVM_PROFDATA_PATH = os.path.join(constants.DIR_SOURCE_ROOT, 'third_party',
+ 'llvm-build', 'Release+Asserts', 'bin',
+ 'llvm-profdata')
+# Name of the file extension for profraw data files.
+_PROFRAW_FILE_EXTENSION = 'profraw'
+# Name of the file where profraw data files are merged.
+_MERGE_PROFDATA_FILE_NAME = 'coverage_merged.' + _PROFRAW_FILE_EXTENSION
+
+# No-op context manager. If we used Python 3, we could change this to
+# contextlib.ExitStack()
+class _NullContextManager(object):
+ def __enter__(self):
+ pass
+ def __exit__(self, *args):
+ pass
+
+
+def _GenerateSequentialFileNames(filename):
+ """Infinite generator of names: 'name.ext', 'name_1.ext', 'name_2.ext', ..."""
+ yield filename
+ base, ext = os.path.splitext(filename)
+ for i in itertools.count(1):
+ yield '%s_%d%s' % (base, i, ext)
+
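
For illustration, the first three names the generator above yields for a hypothetical 'perf_results.json' (itertools is already imported by this module):

names = list(itertools.islice(
    _GenerateSequentialFileNames('perf_results.json'), 3))
assert names == ['perf_results.json', 'perf_results_1.json',
                 'perf_results_2.json']
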
+
+def _ExtractTestsFromFilter(gtest_filter):
+ """Returns the list of tests specified by the given filter.
+
+ Returns:
+ None if the device should be queried for the test list instead.
+ """
+ # Empty means all tests, - means exclude filter.
+ if not gtest_filter or '-' in gtest_filter:
+ return None
+
+ patterns = gtest_filter.split(':')
+ # For a single pattern, allow it even if it has a wildcard so long as the
+ # wildcard comes at the end and there is at least one . to prove the scope is
+ # not too large.
+ # This heuristic is not guaranteed to be faster, but it normally is.
+ if len(patterns) == 1 and patterns[0].endswith('*'):
+ no_suffix = patterns[0].rstrip('*')
+ if '*' not in no_suffix and '.' in no_suffix:
+ return patterns
+
+ if '*' in gtest_filter:
+ return None
+ return patterns
+
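
Worked examples of the heuristic above, using illustrative filter strings (the same cases are exercised by local_device_gtest_run_test.py further down):

assert _ExtractTestsFromFilter('FooTest.Bar:FooTest.Baz') == [
    'FooTest.Bar', 'FooTest.Baz']                       # colon-separated list
assert _ExtractTestsFromFilter('FooTest.*') == ['FooTest.*']  # scoped trailing wildcard
assert _ExtractTestsFromFilter('Foo*') is None          # no '.', scope may be too large
assert _ExtractTestsFromFilter('-FooTest.Bar') is None  # exclusion filter
assert _ExtractTestsFromFilter('') is None              # empty filter means all tests
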
+
+def _GetDeviceTimeoutMultiplier():
+ # Emulated devices typically run 20-150x slower than real-time.
+ # Give a way to control this through the DEVICE_TIMEOUT_MULTIPLIER
+ # environment variable.
+ multiplier = os.getenv("DEVICE_TIMEOUT_MULTIPLIER")
+ if multiplier:
+ return int(multiplier)
+ return 1
+
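
A sketch of how the multiplier is consumed, assuming the invoking script exports DEVICE_TIMEOUT_MULTIPLIER for a slow emulator; fixed timeouts in this file are scaled by it (for example the 30-second KillAll timeout in _ExeDelegate.Clear below):

os.environ['DEVICE_TIMEOUT_MULTIPLIER'] = '20'  # hypothetical emulator run
assert _GetDeviceTimeoutMultiplier() == 20
assert 30 * _GetDeviceTimeoutMultiplier() == 600  # scaled KillAll timeout
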
+
+def _MergeCoverageFiles(coverage_dir, profdata_dir):
+ """Merge coverage data files.
+
+ Each instrumentation activity generates a separate profraw data file. This
+ merges all profraw files in profdata_dir into a single file in
+ coverage_dir. This happens after each test, rather than waiting until after
+ all tests have run, to reduce the memory footprint used by all the profraw
+ files.
+
+ Args:
+ coverage_dir: The path to the coverage directory.
+ profdata_dir: The directory where the profraw data file(s) are located.
+
+ Returns:
+ None
+ """
+ # profdata_dir may not exist if pulling coverage files failed.
+ if not os.path.exists(profdata_dir):
+ logging.debug('Profraw directory does not exist.')
+ return
+
+ merge_file = os.path.join(coverage_dir, _MERGE_PROFDATA_FILE_NAME)
+ profraw_files = [
+ os.path.join(profdata_dir, f) for f in os.listdir(profdata_dir)
+ if f.endswith(_PROFRAW_FILE_EXTENSION)
+ ]
+
+ try:
+ logging.debug('Merging target profraw files into merged profraw file.')
+ subprocess_cmd = [
+ _LLVM_PROFDATA_PATH,
+ 'merge',
+ '-o',
+ merge_file,
+ '-sparse=true',
+ ]
+ # Grow the merge file by merging it with itself and the new files.
+ if os.path.exists(merge_file):
+ subprocess_cmd.append(merge_file)
+ subprocess_cmd.extend(profraw_files)
+ output = subprocess.check_output(subprocess_cmd)
+ logging.debug('Merge output: %s', output)
+ except subprocess.CalledProcessError:
+ # Don't raise error as that will kill the test run. When code coverage
+ # generates a report, that will raise the error in the report generation.
+ logging.error(
+ 'Failed to merge target profdata files to create merged profraw file.')
+
+ # Free up memory space on bot as all data is in the merge file.
+ for f in profraw_files:
+ os.remove(f)
+
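
For reference, a sketch of the command list assembled above for a run where the merge file already exists and two new profraw files were pulled (host paths and file names are hypothetical):

coverage_dir = '/tmp/coverage'
profdata_dir = '/tmp/coverage/0/profraw'
merge_file = os.path.join(coverage_dir, _MERGE_PROFDATA_FILE_NAME)
profraw_files = [
    os.path.join(profdata_dir, f)
    for f in ('base_unittests_0_1.profraw', 'base_unittests_0_2.profraw')
]
# Growing an existing merge file: it appears both as the output and as an
# input alongside the newly pulled profraw files.
expected_cmd = [_LLVM_PROFDATA_PATH, 'merge', '-o', merge_file,
                '-sparse=true', merge_file] + profraw_files
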
+
+def _PullCoverageFiles(device, device_coverage_dir, output_dir):
+ """Pulls coverage files on device to host directory.
+
+ Args:
+ device: The working device.
+ device_coverage_dir: The directory to store coverage data on device.
+ output_dir: The output directory on host.
+ """
+ try:
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ device.PullFile(device_coverage_dir, output_dir)
+ if not os.listdir(os.path.join(output_dir, 'profraw')):
+ logging.warning('No coverage data was generated for this run')
+ except (OSError, base_error.BaseError) as e:
+ logging.warning('Failed to handle coverage data after tests: %s', e)
+ finally:
+ device.RemovePath(device_coverage_dir, force=True, recursive=True)
+
+
+def _GetDeviceCoverageDir(device):
+ """Gets the directory to generate coverage data on device.
+
+ Args:
+ device: The working device.
+
+ Returns:
+ The directory path on the device.
+ """
+ return posixpath.join(device.GetExternalStoragePath(), 'chrome', 'test',
+ 'coverage', 'profraw')
+
+
+def _GetLLVMProfilePath(device_coverage_dir, suite, coverage_index):
+ """Gets 'LLVM_PROFILE_FILE' environment variable path.
+
+ Dumping data to only one file may cause warnings and data overwrites in
+ browser tests, so the pattern "%2m" is used to expand to two raw profiles
+ at runtime.
+
+ Args:
+ device_coverage_dir: The directory to generate data on device.
+ suite: Test suite name.
+ coverage_index: The incremental index for this test suite.
+
+ Returns:
+ The path pattern for environment variable 'LLVM_PROFILE_FILE'.
+ """
+ return posixpath.join(device_coverage_dir,
+ '_'.join([suite,
+ str(coverage_index), '%2m.profraw']))
+
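
The resulting LLVM_PROFILE_FILE value looks like the following; '%2m' is substituted by the LLVM profile runtime so that raw profiles are pooled into two files rather than one (the storage path, suite name, and index here are illustrative):

profile_path = _GetLLVMProfilePath(
    '/sdcard/chrome/test/coverage/profraw', 'base_unittests', 3)
assert profile_path == ('/sdcard/chrome/test/coverage/profraw/'
                        'base_unittests_3_%2m.profraw')
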
+
+class _ApkDelegate(object):
+ def __init__(self, test_instance, tool):
+ self._activity = test_instance.activity
+ self._apk_helper = test_instance.apk_helper
+ self._test_apk_incremental_install_json = (
+ test_instance.test_apk_incremental_install_json)
+ self._package = test_instance.package
+ self._runner = test_instance.runner
+ self._permissions = test_instance.permissions
+ self._suite = test_instance.suite
+ self._component = '%s/%s' % (self._package, self._runner)
+ self._extras = test_instance.extras
+ self._wait_for_java_debugger = test_instance.wait_for_java_debugger
+ self._tool = tool
+ self._coverage_dir = test_instance.coverage_dir
+ self._coverage_index = 0
+ self._use_existing_test_data = test_instance.use_existing_test_data
+
+ def GetTestDataRoot(self, device):
+ # pylint: disable=no-self-use
+ return posixpath.join(device.GetExternalStoragePath(),
+ 'chromium_tests_root')
+
+ def Install(self, device):
+ if self._use_existing_test_data:
+ return
+ if self._test_apk_incremental_install_json:
+ installer.Install(device, self._test_apk_incremental_install_json,
+ apk=self._apk_helper, permissions=self._permissions)
+ else:
+ device.Install(
+ self._apk_helper,
+ allow_downgrade=True,
+ reinstall=True,
+ permissions=self._permissions)
+
+ def ResultsDirectory(self, device):
+ return device.GetApplicationDataDirectory(self._package)
+
+ def Run(self, test, device, flags=None, **kwargs):
+ extras = dict(self._extras)
+ device_api = device.build_version_sdk
+
+ if self._coverage_dir and device_api >= version_codes.LOLLIPOP:
+ device_coverage_dir = _GetDeviceCoverageDir(device)
+ extras[_EXTRA_COVERAGE_DEVICE_FILE] = _GetLLVMProfilePath(
+ device_coverage_dir, self._suite, self._coverage_index)
+ self._coverage_index += 1
+
+ if ('timeout' in kwargs
+ and gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT not in extras):
+ # Make sure the instrumentation doesn't kill the test before the
+ # scripts do. The provided timeout value is in seconds, but the
+ # instrumentation deals with nanoseconds because that's how Android
+ # handles time.
+ extras[gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT] = int(
+ kwargs['timeout'] * _SECONDS_TO_NANOS)
+
+ # pylint: disable=redefined-variable-type
+ command_line_file = _NullContextManager()
+ if flags:
+ if len(flags) > _MAX_INLINE_FLAGS_LENGTH:
+ command_line_file = device_temp_file.DeviceTempFile(device.adb)
+ device.WriteFile(command_line_file.name, '_ %s' % flags)
+ extras[_EXTRA_COMMAND_LINE_FILE] = command_line_file.name
+ else:
+ extras[_EXTRA_COMMAND_LINE_FLAGS] = flags
+
+ test_list_file = _NullContextManager()
+ if test:
+ if len(test) > 1:
+ test_list_file = device_temp_file.DeviceTempFile(device.adb)
+ device.WriteFile(test_list_file.name, '\n'.join(test))
+ extras[_EXTRA_TEST_LIST] = test_list_file.name
+ else:
+ extras[_EXTRA_TEST] = test[0]
+ # pylint: enable=redefined-variable-type
+
+ # We need to use GetAppWritablePath here instead of GetExternalStoragePath
+ # since we will not have yet applied legacy storage permission workarounds
+ # on R+.
+ stdout_file = device_temp_file.DeviceTempFile(
+ device.adb, dir=device.GetAppWritablePath(), suffix='.gtest_out')
+ extras[_EXTRA_STDOUT_FILE] = stdout_file.name
+
+ if self._wait_for_java_debugger:
+ cmd = ['am', 'set-debug-app', '-w', self._package]
+ device.RunShellCommand(cmd, check_return=True)
+ logging.warning('*' * 80)
+ logging.warning('Waiting for debugger to attach to process: %s',
+ self._package)
+ logging.warning('*' * 80)
+
+ with command_line_file, test_list_file, stdout_file:
+ try:
+ device.StartInstrumentation(
+ self._component, extras=extras, raw=False, **kwargs)
+ except device_errors.CommandFailedError:
+ logging.exception('gtest shard failed.')
+ except device_errors.CommandTimeoutError:
+ logging.exception('gtest shard timed out.')
+ except device_errors.DeviceUnreachableError:
+ logging.exception('gtest shard device unreachable.')
+ except Exception:
+ device.ForceStop(self._package)
+ raise
+ finally:
+ if self._coverage_dir and device_api >= version_codes.LOLLIPOP:
+ if not os.path.isdir(self._coverage_dir):
+ os.makedirs(self._coverage_dir)
+ # TODO(crbug.com/1179004) Use _MergeCoverageFiles once the llvm-profdata
+ # not-found issue is fixed.
+ _PullCoverageFiles(
+ device, device_coverage_dir,
+ os.path.join(self._coverage_dir, str(self._coverage_index)))
+
+ return device.ReadFile(stdout_file.name).splitlines()
+
+ def PullAppFiles(self, device, files, directory):
+ device_dir = device.GetApplicationDataDirectory(self._package)
+ host_dir = os.path.join(directory, str(device))
+ for f in files:
+ device_file = posixpath.join(device_dir, f)
+ host_file = os.path.join(host_dir, *f.split(posixpath.sep))
+ for host_file in _GenerateSequentialFileNames(host_file):
+ if not os.path.exists(host_file):
+ break
+ device.PullFile(device_file, host_file)
+
+ def Clear(self, device):
+ device.ClearApplicationState(self._package, permissions=self._permissions)
+
+
+class _ExeDelegate(object):
+
+ def __init__(self, tr, test_instance, tool):
+ self._host_dist_dir = test_instance.exe_dist_dir
+ self._exe_file_name = os.path.basename(
+ test_instance.exe_dist_dir)[:-len('__dist')]
+ self._device_dist_dir = posixpath.join(
+ constants.TEST_EXECUTABLE_DIR,
+ os.path.basename(test_instance.exe_dist_dir))
+ self._test_run = tr
+ self._tool = tool
+ self._suite = test_instance.suite
+ self._coverage_dir = test_instance.coverage_dir
+ self._coverage_index = 0
+
+ def GetTestDataRoot(self, device):
+ # pylint: disable=no-self-use
+ # pylint: disable=unused-argument
+ return posixpath.join(constants.TEST_EXECUTABLE_DIR, 'chromium_tests_root')
+
+ def Install(self, device):
+ # TODO(jbudorick): Look into merging this with normal data deps pushing if
+ # executables become supported on nonlocal environments.
+ device.PushChangedFiles([(self._host_dist_dir, self._device_dist_dir)],
+ delete_device_stale=True)
+
+ def ResultsDirectory(self, device):
+ # pylint: disable=no-self-use
+ # pylint: disable=unused-argument
+ return constants.TEST_EXECUTABLE_DIR
+
+ def Run(self, test, device, flags=None, **kwargs):
+ tool = self._test_run.GetTool(device).GetTestWrapper()
+ if tool:
+ cmd = [tool]
+ else:
+ cmd = []
+ cmd.append(posixpath.join(self._device_dist_dir, self._exe_file_name))
+
+ if test:
+ cmd.append('--gtest_filter=%s' % ':'.join(test))
+ if flags:
+ # TODO(agrieve): This won't work if multiple flags are passed.
+ cmd.append(flags)
+ cwd = constants.TEST_EXECUTABLE_DIR
+
+ env = {
+ 'LD_LIBRARY_PATH': self._device_dist_dir
+ }
+
+ if self._coverage_dir:
+ device_coverage_dir = _GetDeviceCoverageDir(device)
+ env['LLVM_PROFILE_FILE'] = _GetLLVMProfilePath(
+ device_coverage_dir, self._suite, self._coverage_index)
+ self._coverage_index += 1
+
+ if self._tool != 'asan':
+ env['UBSAN_OPTIONS'] = constants.UBSAN_OPTIONS
+
+ try:
+ gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
+ external = device.GetExternalStoragePath()
+ env['GCOV_PREFIX'] = '%s/gcov' % external
+ env['GCOV_PREFIX_STRIP'] = gcov_strip_depth
+ except (device_errors.CommandFailedError, KeyError):
+ pass
+
+ # Executable tests return a nonzero exit code on test failure, which is
+ # fine from the test runner's perspective; thus check_return=False.
+ output = device.RunShellCommand(
+ cmd, cwd=cwd, env=env, check_return=False, large_output=True, **kwargs)
+
+ if self._coverage_dir:
+ _PullCoverageFiles(
+ device, device_coverage_dir,
+ os.path.join(self._coverage_dir, str(self._coverage_index)))
+
+ return output
+
+ def PullAppFiles(self, device, files, directory):
+ pass
+
+ def Clear(self, device):
+ device.KillAll(self._exe_file_name,
+ blocking=True,
+ timeout=30 * _GetDeviceTimeoutMultiplier(),
+ quiet=True)
+
+
+class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
+
+ def __init__(self, env, test_instance):
+ assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
+ assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
+ super(LocalDeviceGtestRun, self).__init__(env, test_instance)
+
+ if self._test_instance.apk_helper:
+ self._installed_packages = [
+ self._test_instance.apk_helper.GetPackageName()
+ ]
+
+ # pylint: disable=redefined-variable-type
+ if self._test_instance.apk:
+ self._delegate = _ApkDelegate(self._test_instance, env.tool)
+ elif self._test_instance.exe_dist_dir:
+ self._delegate = _ExeDelegate(self, self._test_instance, self._env.tool)
+ if self._test_instance.isolated_script_test_perf_output:
+ self._test_perf_output_filenames = _GenerateSequentialFileNames(
+ self._test_instance.isolated_script_test_perf_output)
+ else:
+ self._test_perf_output_filenames = itertools.repeat(None)
+ # pylint: enable=redefined-variable-type
+ self._crashes = set()
+ self._servers = collections.defaultdict(list)
+
+ #override
+ def TestPackage(self):
+ return self._test_instance.suite
+
+ #override
+ def SetUp(self):
+ @local_device_environment.handle_shard_failures_with(
+ on_failure=self._env.DenylistDevice)
+ @trace_event.traced
+ def individual_device_set_up(device, host_device_tuples):
+ def install_apk(dev):
+ # Install test APK.
+ self._delegate.Install(dev)
+
+ def push_test_data(dev):
+ if self._test_instance.use_existing_test_data:
+ return
+ # Push data dependencies.
+ device_root = self._delegate.GetTestDataRoot(dev)
+ host_device_tuples_substituted = [
+ (h, local_device_test_run.SubstituteDeviceRoot(d, device_root))
+ for h, d in host_device_tuples]
+ local_device_environment.place_nomedia_on_device(dev, device_root)
+ dev.PushChangedFiles(
+ host_device_tuples_substituted,
+ delete_device_stale=True,
+ # Some gtest suites, e.g. unit_tests, have data dependencies that
+ # can take longer than the default timeout to push. See
+ # crbug.com/791632 for context.
+ timeout=600 * math.ceil(_GetDeviceTimeoutMultiplier() / 10))
+ if not host_device_tuples:
+ dev.RemovePath(device_root, force=True, recursive=True, rename=True)
+ dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)
+
+ def init_tool_and_start_servers(dev):
+ tool = self.GetTool(dev)
+ tool.CopyFiles(dev)
+ tool.SetupEnvironment()
+
+ try:
+ # See https://crbug.com/1030827.
+ # This is a hack that may break in the future. We're relying on the
+ # fact that adb doesn't use ipv6 for its server, and so doesn't
+ # listen on ipv6, but ssh remote forwarding does. 5037 is the port
+ # number adb uses for its server.
+ if "[::1]:5037" in subprocess.check_output(
+ "ss -o state listening 'sport = 5037'", shell=True):
+ logging.error(
+ 'Test Server cannot be started with a remote-forwarded adb '
+ 'server. Continuing anyways, but some tests may fail.')
+ return
+ except subprocess.CalledProcessError:
+ pass
+
+ self._servers[str(dev)] = []
+ if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
+ self._servers[str(dev)].append(
+ local_test_server_spawner.LocalTestServerSpawner(
+ ports.AllocateTestServerPort(), dev, tool))
+
+ for s in self._servers[str(dev)]:
+ s.SetUp()
+
+ def bind_crash_handler(step, dev):
+ return lambda: crash_handler.RetryOnSystemCrash(step, dev)
+
+ # Explicitly enable root to ensure that tests run under deterministic
+ # conditions. Without this explicit call, EnableRoot() is called from
+ # push_test_data() when PushChangedFiles() determines that it should use
+ # _PushChangedFilesZipped(), which only happens most of the time.
+ # Root is required (amongst maybe other reasons) to pull the results file
+ # from the device, since it lives within the application's data directory
+ # (via GetApplicationDataDirectory()).
+ device.EnableRoot()
+
+ steps = [
+ bind_crash_handler(s, device)
+ for s in (install_apk, push_test_data, init_tool_and_start_servers)]
+ if self._env.concurrent_adb:
+ reraiser_thread.RunAsync(steps)
+ else:
+ for step in steps:
+ step()
+
+ self._env.parallel_devices.pMap(
+ individual_device_set_up,
+ self._test_instance.GetDataDependencies())
+
+ #override
+ def _ShouldShard(self):
+ return True
+
+ #override
+ def _CreateShards(self, tests):
+ # _crashes holds tests that might crash; a crash would prevent the tests
+ # that follow the crashed test case in the same shard from running.
+ # Thus we need to create a separate shard for each crashed test case,
+ # so that the other tests can still run.
+ device_count = len(self._env.devices)
+ shards = []
+
+ # Add shards with only one suspect testcase.
+ shards += [[crash] for crash in self._crashes if crash in tests]
+
+ # Delete suspect testcase from tests.
+ tests = [test for test in tests if not test in self._crashes]
+
+ max_shard_size = self._test_instance.test_launcher_batch_limit
+
+ shards.extend(self._PartitionTests(tests, device_count, max_shard_size))
+ return shards
+
+ #override
+ def _GetTests(self):
+ if self._test_instance.extract_test_list_from_filter:
+ # When the exact list of tests to run is given via the command line (e.g. when
+ # locally iterating on a specific test), skip querying the device (which
+ # takes ~3 seconds).
+ tests = _ExtractTestsFromFilter(self._test_instance.gtest_filter)
+ if tests:
+ return tests
+
+ # Even when there's only one device, it still makes sense to retrieve the
+ # test list so that tests can be split up and run in batches rather than all
+ # at once (since test output is not streamed).
+ @local_device_environment.handle_shard_failures_with(
+ on_failure=self._env.DenylistDevice)
+ def list_tests(dev):
+ timeout = 30 * _GetDeviceTimeoutMultiplier()
+ retries = 1
+ if self._test_instance.wait_for_java_debugger:
+ timeout = None
+
+ flags = [
+ f for f in self._test_instance.flags
+ if f not in ['--wait-for-debugger', '--wait-for-java-debugger']
+ ]
+ flags.append('--gtest_list_tests')
+
+ # TODO(crbug.com/726880): Remove retries when no longer necessary.
+ for i in range(0, retries+1):
+ logging.info('flags:')
+ for f in flags:
+ logging.info(' %s', f)
+
+ with self._ArchiveLogcat(dev, 'list_tests'):
+ raw_test_list = crash_handler.RetryOnSystemCrash(
+ lambda d: self._delegate.Run(
+ None, d, flags=' '.join(flags), timeout=timeout),
+ device=dev)
+
+ tests = gtest_test_instance.ParseGTestListTests(raw_test_list)
+ if not tests:
+ logging.info('No tests found. Output:')
+ for l in raw_test_list:
+ logging.info(' %s', l)
+ if i < retries:
+ logging.info('Retrying...')
+ else:
+ break
+ return tests
+
+ # Query all devices in case one fails.
+ test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)
+
+ # If all devices failed to list tests, raise an exception.
+ # Check that tl is not None and is not empty.
+ if all(not tl for tl in test_lists):
+ raise device_errors.CommandFailedError(
+ 'Failed to list tests on any device')
+ tests = list(sorted(set().union(*[set(tl) for tl in test_lists if tl])))
+ tests = self._test_instance.FilterTests(tests)
+ tests = self._ApplyExternalSharding(
+ tests, self._test_instance.external_shard_index,
+ self._test_instance.total_external_shards)
+ return tests
+
+ def _UploadTestArtifacts(self, device, test_artifacts_dir):
+ # TODO(jbudorick): Reconcile this with the output manager once
+ # https://codereview.chromium.org/2933993002/ lands.
+ if test_artifacts_dir:
+ with tempfile_ext.NamedTemporaryDirectory() as test_artifacts_host_dir:
+ device.PullFile(test_artifacts_dir.name, test_artifacts_host_dir)
+ with tempfile_ext.NamedTemporaryDirectory() as temp_zip_dir:
+ zip_base_name = os.path.join(temp_zip_dir, 'test_artifacts')
+ test_artifacts_zip = shutil.make_archive(
+ zip_base_name, 'zip', test_artifacts_host_dir)
+ link = google_storage_helper.upload(
+ google_storage_helper.unique_name(
+ 'test_artifacts', device=device),
+ test_artifacts_zip,
+ bucket='%s/test_artifacts' % (
+ self._test_instance.gs_test_artifacts_bucket))
+ logging.info('Uploading test artifacts to %s.', link)
+ return link
+ return None
+
+ def _PullRenderTestOutput(self, device, render_test_output_device_dir):
+ # We pull the render tests into a temp directory then copy them over
+ # individually. Otherwise we end up with a temporary directory name
+ # in the host output directory.
+ with tempfile_ext.NamedTemporaryDirectory() as tmp_host_dir:
+ try:
+ device.PullFile(render_test_output_device_dir, tmp_host_dir)
+ except device_errors.CommandFailedError:
+ logging.exception('Failed to pull render test output dir %s',
+ render_test_output_device_dir)
+ temp_host_dir = os.path.join(
+ tmp_host_dir, os.path.basename(render_test_output_device_dir))
+ for output_file in os.listdir(temp_host_dir):
+ src_path = os.path.join(temp_host_dir, output_file)
+ dst_path = os.path.join(self._test_instance.render_test_output_dir,
+ output_file)
+ shutil.move(src_path, dst_path)
+
+ @contextlib.contextmanager
+ def _ArchiveLogcat(self, device, test):
+ if isinstance(test, str):
+ desc = test
+ else:
+ desc = hash(tuple(test))
+
+ stream_name = 'logcat_%s_shard%s_%s_%s' % (
+ desc, self._test_instance.external_shard_index,
+ time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
+
+ logcat_file = None
+ logmon = None
+ try:
+ with self._env.output_manager.ArchivedTempfile(stream_name,
+ 'logcat') as logcat_file:
+ with logcat_monitor.LogcatMonitor(
+ device.adb,
+ filter_specs=local_device_environment.LOGCAT_FILTERS,
+ output_file=logcat_file.name,
+ check_error=False) as logmon:
+ with contextlib_ext.Optional(trace_event.trace(str(test)),
+ self._env.trace_output):
+ yield logcat_file
+ finally:
+ if logmon:
+ logmon.Close()
+ if logcat_file and logcat_file.Link():
+ logging.info('Logcat saved to %s', logcat_file.Link())
+
+ #override
+ def _RunTest(self, device, test):
+ # Run the test.
+ timeout = (self._test_instance.shard_timeout *
+ self.GetTool(device).GetTimeoutScale() *
+ _GetDeviceTimeoutMultiplier())
+ if self._test_instance.wait_for_java_debugger:
+ timeout = None
+ if self._test_instance.store_tombstones:
+ tombstones.ClearAllTombstones(device)
+ test_perf_output_filename = next(self._test_perf_output_filenames)
+
+ if self._test_instance.isolated_script_test_output:
+ suffix = '.json'
+ else:
+ suffix = '.xml'
+
+ with device_temp_file.DeviceTempFile(
+ adb=device.adb,
+ dir=self._delegate.ResultsDirectory(device),
+ suffix=suffix) as device_tmp_results_file:
+ with contextlib_ext.Optional(
+ device_temp_file.NamedDeviceTemporaryDirectory(
+ adb=device.adb, dir='/sdcard/'),
+ self._test_instance.gs_test_artifacts_bucket) as test_artifacts_dir:
+ with (contextlib_ext.Optional(
+ device_temp_file.DeviceTempFile(
+ adb=device.adb, dir=self._delegate.ResultsDirectory(device)),
+ test_perf_output_filename)) as isolated_script_test_perf_output:
+ with contextlib_ext.Optional(
+ device_temp_file.NamedDeviceTemporaryDirectory(adb=device.adb,
+ dir='/sdcard/'),
+ self._test_instance.render_test_output_dir
+ ) as render_test_output_dir:
+
+ flags = list(self._test_instance.flags)
+ if self._test_instance.enable_xml_result_parsing:
+ flags.append('--gtest_output=xml:%s' %
+ device_tmp_results_file.name)
+
+ if self._test_instance.gs_test_artifacts_bucket:
+ flags.append('--test_artifacts_dir=%s' % test_artifacts_dir.name)
+
+ if self._test_instance.isolated_script_test_output:
+ flags.append('--isolated-script-test-output=%s' %
+ device_tmp_results_file.name)
+
+ if test_perf_output_filename:
+ flags.append('--isolated_script_test_perf_output=%s' %
+ isolated_script_test_perf_output.name)
+
+ if self._test_instance.render_test_output_dir:
+ flags.append('--render-test-output-dir=%s' %
+ render_test_output_dir.name)
+
+ logging.info('flags:')
+ for f in flags:
+ logging.info(' %s', f)
+
+ with self._ArchiveLogcat(device, test) as logcat_file:
+ output = self._delegate.Run(test,
+ device,
+ flags=' '.join(flags),
+ timeout=timeout,
+ retries=0)
+
+ if self._test_instance.enable_xml_result_parsing:
+ try:
+ gtest_xml = device.ReadFile(device_tmp_results_file.name)
+ except device_errors.CommandFailedError:
+ logging.exception('Failed to pull gtest results XML file %s',
+ device_tmp_results_file.name)
+ gtest_xml = None
+
+ if self._test_instance.isolated_script_test_output:
+ try:
+ gtest_json = device.ReadFile(device_tmp_results_file.name)
+ except device_errors.CommandFailedError:
+ logging.exception('Failed to pull gtest results JSON file %s',
+ device_tmp_results_file.name)
+ gtest_json = None
+
+ if test_perf_output_filename:
+ try:
+ device.PullFile(isolated_script_test_perf_output.name,
+ test_perf_output_filename)
+ except device_errors.CommandFailedError:
+ logging.exception('Failed to pull chartjson results %s',
+ isolated_script_test_perf_output.name)
+
+ test_artifacts_url = self._UploadTestArtifacts(
+ device, test_artifacts_dir)
+
+ if render_test_output_dir:
+ self._PullRenderTestOutput(device, render_test_output_dir.name)
+
+ for s in self._servers[str(device)]:
+ s.Reset()
+ if self._test_instance.app_files:
+ self._delegate.PullAppFiles(device, self._test_instance.app_files,
+ self._test_instance.app_file_dir)
+ if not self._env.skip_clear_data:
+ self._delegate.Clear(device)
+
+ for l in output:
+ logging.info(l)
+
+ # Parse the output.
+ # TODO(jbudorick): Transition test scripts away from parsing stdout.
+ if self._test_instance.enable_xml_result_parsing:
+ results = gtest_test_instance.ParseGTestXML(gtest_xml)
+ elif self._test_instance.isolated_script_test_output:
+ results = gtest_test_instance.ParseGTestJSON(gtest_json)
+ else:
+ results = gtest_test_instance.ParseGTestOutput(
+ output, self._test_instance.symbolizer, device.product_cpu_abi)
+
+ tombstones_url = None
+ for r in results:
+ if logcat_file:
+ r.SetLink('logcat', logcat_file.Link())
+
+ if self._test_instance.gs_test_artifacts_bucket:
+ r.SetLink('test_artifacts', test_artifacts_url)
+
+ if r.GetType() == base_test_result.ResultType.CRASH:
+ self._crashes.add(r.GetName())
+ if self._test_instance.store_tombstones:
+ if not tombstones_url:
+ resolved_tombstones = tombstones.ResolveTombstones(
+ device,
+ resolve_all_tombstones=True,
+ include_stack_symbols=False,
+ wipe_tombstones=True)
+ stream_name = 'tombstones_%s_%s' % (
+ time.strftime('%Y%m%dT%H%M%S', time.localtime()),
+ device.serial)
+ tombstones_url = logdog_helper.text(
+ stream_name, '\n'.join(resolved_tombstones))
+ r.SetLink('tombstones', tombstones_url)
+
+ tests_stripped_disabled_prefix = set()
+ for t in test:
+ tests_stripped_disabled_prefix.add(
+ gtest_test_instance.TestNameWithoutDisabledPrefix(t))
+ not_run_tests = tests_stripped_disabled_prefix.difference(
+ set(r.GetName() for r in results))
+ return results, list(not_run_tests) if results else None
+
+ #override
+ def TearDown(self):
+ # By default, teardown will invoke ADB. When receiving SIGTERM due to a
+ # timeout, there's a high probability that ADB is non-responsive. In these
+ # cases, sending an ADB command will potentially take a long time to time
+ # out. Before this happens, the process will be hard-killed for not
+ # responding to SIGTERM fast enough.
+ if self._received_sigterm:
+ return
+
+ @local_device_environment.handle_shard_failures
+ @trace_event.traced
+ def individual_device_tear_down(dev):
+ for s in self._servers.get(str(dev), []):
+ s.TearDown()
+
+ tool = self.GetTool(dev)
+ tool.CleanUpEnvironment()
+
+ self._env.parallel_devices.pMap(individual_device_tear_down)
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py
new file mode 100755
index 0000000000..b664d58131
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env vpython3
+# Copyright 2021 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Tests for local_device_gtest_test_run."""
+
+# pylint: disable=protected-access
+
+
+import os
+import tempfile
+import unittest
+
+from pylib.gtest import gtest_test_instance
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_gtest_run
+from py_utils import tempfile_ext
+
+import mock # pylint: disable=import-error
+
+
+class LocalDeviceGtestRunTest(unittest.TestCase):
+ def setUp(self):
+ self._obj = local_device_gtest_run.LocalDeviceGtestRun(
+ mock.MagicMock(spec=local_device_environment.LocalDeviceEnvironment),
+ mock.MagicMock(spec=gtest_test_instance.GtestTestInstance))
+
+ def testExtractTestsFromFilter(self):
+ # Checks splitting by colons.
+ self.assertEqual([
+ 'b17',
+ 'm4e3',
+ 'p51',
+ ], local_device_gtest_run._ExtractTestsFromFilter('b17:m4e3:p51'))
+ # Checks the '-' sign.
+ self.assertIsNone(local_device_gtest_run._ExtractTestsFromFilter('-mk2'))
+ # Checks more than one asterisk.
+ self.assertIsNone(
+ local_device_gtest_run._ExtractTestsFromFilter('.mk2*:.M67*'))
+ # Checks just an asterisk without a period.
+ self.assertIsNone(local_device_gtest_run._ExtractTestsFromFilter('M67*'))
+ # Checks an asterisk at the end with a period.
+ self.assertEqual(['.M67*'],
+ local_device_gtest_run._ExtractTestsFromFilter('.M67*'))
+
+ def testGetLLVMProfilePath(self):
+ path = local_device_gtest_run._GetLLVMProfilePath('test_dir', 'sr71', '5')
+ self.assertEqual(path, os.path.join('test_dir', 'sr71_5_%2m.profraw'))
+
+ @mock.patch('subprocess.check_output')
+ def testMergeCoverageFiles(self, mock_sub):
+ with tempfile_ext.NamedTemporaryDirectory() as cov_tempd:
+ pro_tempd = os.path.join(cov_tempd, 'profraw')
+ os.mkdir(pro_tempd)
+ profdata = tempfile.NamedTemporaryFile(
+ dir=pro_tempd,
+ delete=False,
+ suffix=local_device_gtest_run._PROFRAW_FILE_EXTENSION)
+ local_device_gtest_run._MergeCoverageFiles(cov_tempd, pro_tempd)
+ # The source profraw file should be deleted after merging.
+ self.assertFalse(os.path.exists(profdata.name))
+ self.assertTrue(mock_sub.called)
+
+ @mock.patch('pylib.utils.google_storage_helper.upload')
+ def testUploadTestArtifacts(self, mock_gsh):
+ link = self._obj._UploadTestArtifacts(mock.MagicMock(), None)
+ self.assertFalse(mock_gsh.called)
+ self.assertIsNone(link)
+
+ result = 'A/10/warthog/path'
+ mock_gsh.return_value = result
+ with tempfile_ext.NamedTemporaryFile() as temp_f:
+ link = self._obj._UploadTestArtifacts(mock.MagicMock(), temp_f)
+ self.assertTrue(mock_gsh.called)
+ self.assertEqual(result, link)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py
new file mode 100644
index 0000000000..54cb92a39c
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py
@@ -0,0 +1,1512 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import collections
+import contextlib
+import copy
+import hashlib
+import json
+import logging
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import time
+
+from six.moves import range # pylint: disable=redefined-builtin
+from six.moves import zip # pylint: disable=redefined-builtin
+from devil import base_error
+from devil.android import apk_helper
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android import device_temp_file
+from devil.android import flag_changer
+from devil.android.sdk import shared_prefs
+from devil.android import logcat_monitor
+from devil.android.tools import system_app
+from devil.android.tools import webview_app
+from devil.utils import reraiser_thread
+from incremental_install import installer
+from pylib import constants
+from pylib import valgrind_tools
+from pylib.base import base_test_result
+from pylib.base import output_manager
+from pylib.constants import host_paths
+from pylib.instrumentation import instrumentation_test_instance
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_test_run
+from pylib.output import remote_output_manager
+from pylib.utils import chrome_proxy_utils
+from pylib.utils import gold_utils
+from pylib.utils import instrumentation_tracing
+from pylib.utils import shared_preference_utils
+from py_trace_event import trace_event
+from py_trace_event import trace_time
+from py_utils import contextlib_ext
+from py_utils import tempfile_ext
+import tombstones
+
+with host_paths.SysPath(
+ os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'), 0):
+ import jinja2 # pylint: disable=import-error
+ import markupsafe # pylint: disable=import-error,unused-import
+
+
+_JINJA_TEMPLATE_DIR = os.path.join(
+ host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'pylib', 'instrumentation')
+_JINJA_TEMPLATE_FILENAME = 'render_test.html.jinja'
+
+_WPR_GO_LINUX_X86_64_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT,
+ 'third_party', 'webpagereplay', 'bin',
+ 'linux', 'x86_64', 'wpr')
+
+_TAG = 'test_runner_py'
+
+TIMEOUT_ANNOTATIONS = [
+ ('Manual', 10 * 60 * 60),
+ ('IntegrationTest', 10 * 60),
+ ('External', 10 * 60),
+ ('EnormousTest', 5 * 60),
+ ('LargeTest', 2 * 60),
+ ('MediumTest', 30),
+ ('SmallTest', 10),
+]
+
+# Account for Instrumentation and process init overhead.
+FIXED_TEST_TIMEOUT_OVERHEAD = 60
+
+# 30 minute max timeout for an instrumentation invocation to avoid shard
+# timeouts when tests never finish. The shard timeout is currently 60 minutes,
+# so this needs to be less than that.
+MAX_BATCH_TEST_TIMEOUT = 30 * 60
+
+LOGCAT_FILTERS = ['*:e', 'chromium:v', 'cr_*:v', 'DEBUG:I',
+ 'StrictMode:D', '%s:I' % _TAG]
+
+EXTRA_SCREENSHOT_FILE = (
+ 'org.chromium.base.test.ScreenshotOnFailureStatement.ScreenshotFile')
+
+EXTRA_UI_CAPTURE_DIR = (
+ 'org.chromium.base.test.util.Screenshooter.ScreenshotDir')
+
+EXTRA_TRACE_FILE = ('org.chromium.base.test.BaseJUnit4ClassRunner.TraceFile')
+
+_EXTRA_TEST_LIST = (
+ 'org.chromium.base.test.BaseChromiumAndroidJUnitRunner.TestList')
+
+_EXTRA_PACKAGE_UNDER_TEST = ('org.chromium.chrome.test.pagecontroller.rules.'
+ 'ChromeUiApplicationTestRule.PackageUnderTest')
+
+FEATURE_ANNOTATION = 'Feature'
+RENDER_TEST_FEATURE_ANNOTATION = 'RenderTest'
+WPR_ARCHIVE_FILE_PATH_ANNOTATION = 'WPRArchiveDirectory'
+WPR_RECORD_REPLAY_TEST_FEATURE_ANNOTATION = 'WPRRecordReplayTest'
+
+_DEVICE_GOLD_DIR = 'skia_gold'
+# A map of Android product models to SDK ints.
+RENDER_TEST_MODEL_SDK_CONFIGS = {
+ # Android x86 emulator.
+ 'Android SDK built for x86': [23],
+ # We would like this to be supported, but it is currently too prone to
+ # introducing flakiness due to a combination of Gold and Chromium issues.
+ # See crbug.com/1233700 and skbug.com/12149 for more information.
+ # 'Pixel 2': [28],
+}
+
+_BATCH_SUFFIX = '_batch'
+_TEST_BATCH_MAX_GROUP_SIZE = 256
+
+
+@contextlib.contextmanager
+def _LogTestEndpoints(device, test_name):
+ device.RunShellCommand(
+ ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name],
+ check_return=True)
+ try:
+ yield
+ finally:
+ device.RunShellCommand(
+ ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name],
+ check_return=True)
+
+
+def DismissCrashDialogs(device):
+ # Dismiss any error dialogs. Limit the number in case we have an error
+ # loop or we are failing to dismiss.
+ packages = set()
+ try:
+ for _ in range(10):
+ package = device.DismissCrashDialogIfNeeded(timeout=10, retries=1)
+ if not package:
+ break
+ packages.add(package)
+ except device_errors.CommandFailedError:
+ logging.exception('Error while attempting to dismiss crash dialog.')
+ return packages
+
+
+_CURRENT_FOCUS_CRASH_RE = re.compile(
+ r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
+
+
+def _GetTargetPackageName(test_apk):
+ # apk_under_test does not work for smoke tests, where it is set to an
+ # apk that is not listed as the targetPackage in the test apk's manifest.
+ return test_apk.GetAllInstrumentations()[0]['android:targetPackage']
+
+
+class LocalDeviceInstrumentationTestRun(
+ local_device_test_run.LocalDeviceTestRun):
+ def __init__(self, env, test_instance):
+ super(LocalDeviceInstrumentationTestRun, self).__init__(
+ env, test_instance)
+ self._chrome_proxy = None
+ self._context_managers = collections.defaultdict(list)
+ self._flag_changers = {}
+ self._render_tests_device_output_dir = None
+ self._shared_prefs_to_restore = []
+ self._skia_gold_session_manager = None
+ self._skia_gold_work_dir = None
+
+ #override
+ def TestPackage(self):
+ return self._test_instance.suite
+
+ #override
+ def SetUp(self):
+ target_package = _GetTargetPackageName(self._test_instance.test_apk)
+
+ @local_device_environment.handle_shard_failures_with(
+ self._env.DenylistDevice)
+ @trace_event.traced
+ def individual_device_set_up(device, host_device_tuples):
+ steps = []
+
+ if self._test_instance.replace_system_package:
+ @trace_event.traced
+ def replace_package(dev):
+ # We need the context manager to be applied before modifying any
+ # shared preference files in case the replacement APK needs to be
+ # set up, and it needs to be applied while the test is running.
+ # Thus, it needs to be applied early during setup, but must still be
+ # applied during _RunTest, which isn't possible using 'with' without
+ # applying the context manager up in test_runner. Instead, we
+ # manually invoke its __enter__ and __exit__ methods in setup and
+ # teardown.
+ system_app_context = system_app.ReplaceSystemApp(
+ dev, self._test_instance.replace_system_package.package,
+ self._test_instance.replace_system_package.replacement_apk)
+ # Pylint is not smart enough to realize that this field has
+ # an __enter__ method, and will complain loudly.
+ # pylint: disable=no-member
+ system_app_context.__enter__()
+ # pylint: enable=no-member
+ self._context_managers[str(dev)].append(system_app_context)
+
+ steps.append(replace_package)
+
+ if self._test_instance.system_packages_to_remove:
+
+ @trace_event.traced
+ def remove_packages(dev):
+ logging.info('Attempting to remove system packages %s',
+ self._test_instance.system_packages_to_remove)
+ system_app.RemoveSystemApps(
+ dev, self._test_instance.system_packages_to_remove)
+ logging.info('Done removing system packages')
+
+ # This should be at the front in case we're removing the package to make
+ # room for another APK installation later on. Since we disallow
+ # concurrent adb with this option specified, this should be safe.
+ steps.insert(0, remove_packages)
+
+ if self._test_instance.use_webview_provider:
+ @trace_event.traced
+ def use_webview_provider(dev):
+ # We need the context manager to be applied before modifying any
+ # shared preference files in case the replacement APK needs to be
+ # set up, and it needs to be applied while the test is running.
+ # Thus, it needs to be applied early during setup, but must still be
+ # applied during _RunTest, which isn't possible using 'with' without
+ # applying the context manager up in test_runner. Instead, we
+ # manually invoke its __enter__ and __exit__ methods in setup and
+ # teardown.
+ webview_context = webview_app.UseWebViewProvider(
+ dev, self._test_instance.use_webview_provider)
+ # Pylint is not smart enough to realize that this field has
+ # an __enter__ method, and will complain loudly.
+ # pylint: disable=no-member
+ webview_context.__enter__()
+ # pylint: enable=no-member
+ self._context_managers[str(dev)].append(webview_context)
+
+ steps.append(use_webview_provider)
+
+ def install_helper(apk,
+ modules=None,
+ fake_modules=None,
+ permissions=None,
+ additional_locales=None):
+
+ @instrumentation_tracing.no_tracing
+ @trace_event.traced
+ def install_helper_internal(d, apk_path=None):
+ # pylint: disable=unused-argument
+ d.Install(apk,
+ modules=modules,
+ fake_modules=fake_modules,
+ permissions=permissions,
+ additional_locales=additional_locales)
+
+ return install_helper_internal
+
+ def incremental_install_helper(apk, json_path, permissions):
+
+ @trace_event.traced
+ def incremental_install_helper_internal(d, apk_path=None):
+ # pylint: disable=unused-argument
+ installer.Install(d, json_path, apk=apk, permissions=permissions)
+ return incremental_install_helper_internal
+
+ permissions = self._test_instance.test_apk.GetPermissions()
+ if self._test_instance.test_apk_incremental_install_json:
+ steps.append(incremental_install_helper(
+ self._test_instance.test_apk,
+ self._test_instance.
+ test_apk_incremental_install_json,
+ permissions))
+ else:
+ steps.append(
+ install_helper(
+ self._test_instance.test_apk, permissions=permissions))
+
+ steps.extend(
+ install_helper(apk) for apk in self._test_instance.additional_apks)
+
+ # We'll potentially need the package names later for setting app
+ # compatibility workarounds.
+ for apk in (self._test_instance.additional_apks +
+ [self._test_instance.test_apk]):
+ self._installed_packages.append(apk_helper.GetPackageName(apk))
+
+ # The apk under test needs to be installed last since installing other
+ # apks after will unintentionally clear the fake module directory.
+ # TODO(wnwen): Make this more robust, fix crbug.com/1010954.
+ if self._test_instance.apk_under_test:
+ self._installed_packages.append(
+ apk_helper.GetPackageName(self._test_instance.apk_under_test))
+ permissions = self._test_instance.apk_under_test.GetPermissions()
+ if self._test_instance.apk_under_test_incremental_install_json:
+ steps.append(
+ incremental_install_helper(
+ self._test_instance.apk_under_test,
+ self._test_instance.apk_under_test_incremental_install_json,
+ permissions))
+ else:
+ steps.append(
+ install_helper(self._test_instance.apk_under_test,
+ self._test_instance.modules,
+ self._test_instance.fake_modules, permissions,
+ self._test_instance.additional_locales))
+
+ @trace_event.traced
+ def set_debug_app(dev):
+      # Set the debug app in order to enable reading command-line flags on
+      # user builds.
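+      # The assembled command looks like (illustrative):
+      #   am set-debug-app --persistent [-w] <target package>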
+ cmd = ['am', 'set-debug-app', '--persistent']
+ if self._test_instance.wait_for_java_debugger:
+ cmd.append('-w')
+ cmd.append(target_package)
+ dev.RunShellCommand(cmd, check_return=True)
+
+ @trace_event.traced
+ def edit_shared_prefs(dev):
+ for setting in self._test_instance.edit_shared_prefs:
+ shared_pref = shared_prefs.SharedPrefs(
+ dev, setting['package'], setting['filename'],
+ use_encrypted_path=setting.get('supports_encrypted_path', False))
+ pref_to_restore = copy.copy(shared_pref)
+ pref_to_restore.Load()
+ self._shared_prefs_to_restore.append(pref_to_restore)
+
+ shared_preference_utils.ApplySharedPreferenceSetting(
+ shared_pref, setting)
+
+ @trace_event.traced
+ def set_vega_permissions(dev):
+ # Normally, installation of VrCore automatically grants storage
+ # permissions. However, since VrCore is part of the system image on
+ # the Vega standalone headset, we don't install the APK as part of test
+ # setup. Instead, grant the permissions here so that it can take
+ # screenshots.
+ if dev.product_name == 'vega':
+ dev.GrantPermissions('com.google.vr.vrcore', [
+ 'android.permission.WRITE_EXTERNAL_STORAGE',
+ 'android.permission.READ_EXTERNAL_STORAGE'
+ ])
+
+ @instrumentation_tracing.no_tracing
+ def push_test_data(dev):
+ device_root = posixpath.join(dev.GetExternalStoragePath(),
+ 'chromium_tests_root')
+ host_device_tuples_substituted = [
+ (h, local_device_test_run.SubstituteDeviceRoot(d, device_root))
+ for h, d in host_device_tuples]
+ logging.info('Pushing data dependencies.')
+ for h, d in host_device_tuples_substituted:
+ logging.debug(' %r -> %r', h, d)
+ local_device_environment.place_nomedia_on_device(dev, device_root)
+ dev.PushChangedFiles(host_device_tuples_substituted,
+ delete_device_stale=True)
+ if not host_device_tuples_substituted:
+ dev.RunShellCommand(['rm', '-rf', device_root], check_return=True)
+ dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)
+
+ @trace_event.traced
+ def create_flag_changer(dev):
+ if self._test_instance.flags:
+ self._CreateFlagChangerIfNeeded(dev)
+ logging.debug('Attempting to set flags: %r',
+ self._test_instance.flags)
+ self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)
+
+ valgrind_tools.SetChromeTimeoutScale(
+ dev, self._test_instance.timeout_scale)
+
+ steps += [
+ set_debug_app, edit_shared_prefs, push_test_data, create_flag_changer,
+ set_vega_permissions, DismissCrashDialogs
+ ]
+
+ def bind_crash_handler(step, dev):
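+        # Each step is wrapped so that a system crash during the step triggers
+        # a retry; binding |step| here avoids the late-binding-in-a-loop
+        # pitfall.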
+ return lambda: crash_handler.RetryOnSystemCrash(step, dev)
+
+ steps = [bind_crash_handler(s, device) for s in steps]
+
+ try:
+ if self._env.concurrent_adb:
+ reraiser_thread.RunAsync(steps)
+ else:
+ for step in steps:
+ step()
+ if self._test_instance.store_tombstones:
+ tombstones.ClearAllTombstones(device)
+ except device_errors.CommandFailedError:
+ if not device.IsOnline():
+ raise
+
+ # A bugreport can be large and take a while to generate, so only capture
+ # one if we're using a remote manager.
+ if isinstance(
+ self._env.output_manager,
+ remote_output_manager.RemoteOutputManager):
+ logging.error(
+ 'Error when setting up device for tests. Taking a bugreport for '
+ 'investigation. This may take a while...')
+ report_name = '%s.bugreport' % device.serial
+ with self._env.output_manager.ArchivedTempfile(
+ report_name, 'bug_reports') as report_file:
+ device.TakeBugReport(report_file.name)
+ logging.error('Bug report saved to %s', report_file.Link())
+ raise
+
+ self._env.parallel_devices.pMap(
+ individual_device_set_up,
+ self._test_instance.GetDataDependencies())
+ # Created here instead of on a per-test basis so that the downloaded
+ # expectations can be re-used between tests, saving a significant amount
+ # of time.
+ self._skia_gold_work_dir = tempfile.mkdtemp()
+ self._skia_gold_session_manager = gold_utils.AndroidSkiaGoldSessionManager(
+ self._skia_gold_work_dir, self._test_instance.skia_gold_properties)
+ if self._test_instance.wait_for_java_debugger:
+ logging.warning('*' * 80)
+ logging.warning('Waiting for debugger to attach to process: %s',
+ target_package)
+ logging.warning('*' * 80)
+
+ #override
+ def TearDown(self):
+ shutil.rmtree(self._skia_gold_work_dir)
+ self._skia_gold_work_dir = None
+ self._skia_gold_session_manager = None
+ # By default, teardown will invoke ADB. When receiving SIGTERM due to a
+ # timeout, there's a high probability that ADB is non-responsive. In these
+ # cases, sending an ADB command will potentially take a long time to time
+ # out. Before this happens, the process will be hard-killed for not
+ # responding to SIGTERM fast enough.
+ if self._received_sigterm:
+ return
+
+ @local_device_environment.handle_shard_failures_with(
+ self._env.DenylistDevice)
+ @trace_event.traced
+ def individual_device_tear_down(dev):
+ if str(dev) in self._flag_changers:
+ self._flag_changers[str(dev)].Restore()
+
+ # Remove package-specific configuration
+ dev.RunShellCommand(['am', 'clear-debug-app'], check_return=True)
+
+ valgrind_tools.SetChromeTimeoutScale(dev, None)
+
+ # Restore any shared preference files that we stored during setup.
+ # This should be run sometime before the replace package contextmanager
+ # gets exited so we don't have to special case restoring files of
+ # replaced system apps.
+ for pref_to_restore in self._shared_prefs_to_restore:
+ pref_to_restore.Commit(force_commit=True)
+
+ # Context manager exit handlers are applied in reverse order
+ # of the enter handlers.
+ for context in reversed(self._context_managers[str(dev)]):
+ # See pylint-related comment above with __enter__()
+ # pylint: disable=no-member
+ context.__exit__(*sys.exc_info())
+ # pylint: enable=no-member
+
+ self._env.parallel_devices.pMap(individual_device_tear_down)
+
+ def _CreateFlagChangerIfNeeded(self, device):
+ if str(device) not in self._flag_changers:
+ cmdline_file = 'test-cmdline-file'
+ if self._test_instance.use_apk_under_test_flags_file:
+ if self._test_instance.package_info:
+ cmdline_file = self._test_instance.package_info.cmdline_file
+ else:
+          raise Exception('No PackageInfo found, but '
+                          '--use-apk-under-test-flags-file is specified.')
+ self._flag_changers[str(device)] = flag_changer.FlagChanger(
+ device, cmdline_file)
+
+ #override
+ def _CreateShards(self, tests):
+ return tests
+
+ #override
+ def _GetTests(self):
+ if self._test_instance.junit4_runner_supports_listing:
+ raw_tests = self._GetTestsFromRunner()
+ tests = self._test_instance.ProcessRawTests(raw_tests)
+ else:
+ tests = self._test_instance.GetTests()
+ tests = self._ApplyExternalSharding(
+ tests, self._test_instance.external_shard_index,
+ self._test_instance.total_external_shards)
+ return tests
+
+ #override
+ def _GroupTests(self, tests):
+ batched_tests = dict()
+ other_tests = []
+ for test in tests:
+ annotations = test['annotations']
+ if 'Batch' in annotations and 'RequiresRestart' not in annotations:
+ batch_name = annotations['Batch']['value']
+ if not batch_name:
+ batch_name = test['class']
+
+ # Feature flags won't work in instrumentation tests unless the activity
+ # is restarted.
+ # Tests with identical features are grouped to minimize restarts.
+ if 'Features$EnableFeatures' in annotations:
+ batch_name += '|enabled:' + ','.join(
+ sorted(annotations['Features$EnableFeatures']['value']))
+ if 'Features$DisableFeatures' in annotations:
+ batch_name += '|disabled:' + ','.join(
+ sorted(annotations['Features$DisableFeatures']['value']))
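+      # The resulting name looks like, e.g. (illustrative):
+      #   MyBatch|enabled:FeatureA,FeatureB|disabled:FeatureC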
+
+      if batch_name not in batched_tests:
+ batched_tests[batch_name] = []
+ batched_tests[batch_name].append(test)
+ else:
+ other_tests.append(test)
+
+ all_tests = []
+ for _, tests in list(batched_tests.items()):
+ tests.sort() # Ensure a consistent ordering across external shards.
+ all_tests.extend([
+ tests[i:i + _TEST_BATCH_MAX_GROUP_SIZE]
+ for i in range(0, len(tests), _TEST_BATCH_MAX_GROUP_SIZE)
+ ])
+ all_tests.extend(other_tests)
+ return all_tests
+
+ #override
+ def _GetUniqueTestName(self, test):
+ return instrumentation_test_instance.GetUniqueTestName(test)
+
+ #override
+ def _RunTest(self, device, test):
+ extras = {}
+
+ # Provide package name under test for apk_under_test.
+ if self._test_instance.apk_under_test:
+ package_name = self._test_instance.apk_under_test.GetPackageName()
+ extras[_EXTRA_PACKAGE_UNDER_TEST] = package_name
+
+ flags_to_add = []
+ test_timeout_scale = None
+ if self._test_instance.coverage_directory:
+      if isinstance(test, list):
+        coverage_basename = '%s_%s_group' % (test[0]['class'],
+                                             test[0]['method'])
+      else:
+        coverage_basename = '%s_%s' % (test['class'], test['method'])
+ extras['coverage'] = 'true'
+ coverage_directory = os.path.join(
+ device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
+ if not device.PathExists(coverage_directory):
+ device.RunShellCommand(['mkdir', '-p', coverage_directory],
+ check_return=True)
+ coverage_device_file = os.path.join(coverage_directory, coverage_basename)
+ coverage_device_file += '.exec'
+ extras['coverageFile'] = coverage_device_file
+
+ if self._test_instance.enable_breakpad_dump:
+ # Use external storage directory so that the breakpad dump can be accessed
+ # by the test APK in addition to the apk_under_test.
+ breakpad_dump_directory = os.path.join(device.GetExternalStoragePath(),
+ 'chromium_dumps')
+ if device.PathExists(breakpad_dump_directory):
+ device.RemovePath(breakpad_dump_directory, recursive=True)
+ flags_to_add.append('--breakpad-dump-location=' + breakpad_dump_directory)
+
+ # Save screenshot if screenshot dir is specified (save locally) or if
+ # a GS bucket is passed (save in cloud).
+ screenshot_device_file = device_temp_file.DeviceTempFile(
+ device.adb, suffix='.png', dir=device.GetExternalStoragePath())
+ extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name
+
+ # Set up the screenshot directory. This needs to be done for each test so
+ # that we only get screenshots created by that test. It has to be on
+ # external storage since the default location doesn't allow file creation
+ # from the instrumentation test app on Android L and M.
+ ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
+ device.adb,
+ dir=device.GetExternalStoragePath())
+ extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name
+
+ if self._env.trace_output:
+ trace_device_file = device_temp_file.DeviceTempFile(
+ device.adb, suffix='.json', dir=device.GetExternalStoragePath())
+ extras[EXTRA_TRACE_FILE] = trace_device_file.name
+
+ target = '%s/%s' % (self._test_instance.test_package,
+ self._test_instance.junit4_runner_class)
+ if isinstance(test, list):
+
+ def name_and_timeout(t):
+ n = instrumentation_test_instance.GetTestName(t)
+ i = self._GetTimeoutFromAnnotations(t['annotations'], n)
+ return (n, i)
+
+ test_names, timeouts = list(zip(*(name_and_timeout(t) for t in test)))
+
+ test_name = instrumentation_test_instance.GetTestName(
+ test[0]) + _BATCH_SUFFIX
+ extras['class'] = ','.join(test_names)
+ test_display_name = test_name
+ timeout = min(MAX_BATCH_TEST_TIMEOUT,
+ FIXED_TEST_TIMEOUT_OVERHEAD + sum(timeouts))
+ else:
+ assert test['is_junit4']
+ test_name = instrumentation_test_instance.GetTestName(test)
+ test_display_name = self._GetUniqueTestName(test)
+
+ extras['class'] = test_name
+ if 'flags' in test and test['flags']:
+ flags_to_add.extend(test['flags'])
+ timeout = FIXED_TEST_TIMEOUT_OVERHEAD + self._GetTimeoutFromAnnotations(
+ test['annotations'], test_display_name)
+
+ test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
+ test['annotations'])
+ if test_timeout_scale and test_timeout_scale != 1:
+ valgrind_tools.SetChromeTimeoutScale(
+ device, test_timeout_scale * self._test_instance.timeout_scale)
+
+ if self._test_instance.wait_for_java_debugger:
+ timeout = None
+ logging.info('preparing to run %s: %s', test_display_name, test)
+
+ if _IsRenderTest(test):
+ # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
+ self._render_tests_device_output_dir = posixpath.join(
+ device.GetExternalStoragePath(), 'render_test_output_dir')
+ flags_to_add.append('--render-test-output-dir=%s' %
+ self._render_tests_device_output_dir)
+
+ if _IsWPRRecordReplayTest(test):
+ wpr_archive_relative_path = _GetWPRArchivePath(test)
+ if not wpr_archive_relative_path:
+ raise RuntimeError('Could not find the WPR archive file path '
+ 'from annotation.')
+ wpr_archive_path = os.path.join(host_paths.DIR_SOURCE_ROOT,
+ wpr_archive_relative_path)
+ if not os.path.isdir(wpr_archive_path):
+ raise RuntimeError('WPRArchiveDirectory annotation should point '
+ 'to a directory only. '
+                           '{0} exists: {1}'.format(
+ wpr_archive_path,
+ os.path.exists(wpr_archive_path)))
+
+      # Some Linux versions do not like '#' in file names, so replace it
+      # with '__'.
+ archive_path = os.path.join(
+ wpr_archive_path,
+ _ReplaceUncommonChars(self._GetUniqueTestName(test)) + '.wprgo')
+
+ if not os.path.exists(_WPR_GO_LINUX_X86_64_PATH):
+ # If we got to this stage, then we should have
+ # checkout_android set.
+ raise RuntimeError(
+ 'WPR Go binary not found at {}'.format(_WPR_GO_LINUX_X86_64_PATH))
+ # Tells the server to use the binaries retrieved from CIPD.
+ chrome_proxy_utils.ChromeProxySession.SetWPRServerBinary(
+ _WPR_GO_LINUX_X86_64_PATH)
+ self._chrome_proxy = chrome_proxy_utils.ChromeProxySession()
+ self._chrome_proxy.wpr_record_mode = self._test_instance.wpr_record_mode
+ self._chrome_proxy.Start(device, archive_path)
+ flags_to_add.extend(self._chrome_proxy.GetFlags())
+
+ if flags_to_add:
+ self._CreateFlagChangerIfNeeded(device)
+ self._flag_changers[str(device)].PushFlags(add=flags_to_add)
+
+ time_ms = lambda: int(time.time() * 1e3)
+ start_ms = time_ms()
+
+ with ui_capture_dir:
+ with self._ArchiveLogcat(device, test_name) as logcat_file:
+ output = device.StartInstrumentation(
+ target, raw=True, extras=extras, timeout=timeout, retries=0)
+
+ duration_ms = time_ms() - start_ms
+
+ with contextlib_ext.Optional(
+ trace_event.trace('ProcessResults'),
+ self._env.trace_output):
+ output = self._test_instance.MaybeDeobfuscateLines(output)
+ # TODO(jbudorick): Make instrumentation tests output a JSON so this
+ # doesn't have to parse the output.
+ result_code, result_bundle, statuses = (
+ self._test_instance.ParseAmInstrumentRawOutput(output))
+ results = self._test_instance.GenerateTestResults(
+ result_code, result_bundle, statuses, duration_ms,
+ device.product_cpu_abi, self._test_instance.symbolizer)
+
+ if self._env.trace_output:
+ self._SaveTraceData(trace_device_file, device, test['class'])
+
+
+ def restore_flags():
+ if flags_to_add:
+ self._flag_changers[str(device)].Restore()
+
+ def restore_timeout_scale():
+ if test_timeout_scale:
+ valgrind_tools.SetChromeTimeoutScale(
+ device, self._test_instance.timeout_scale)
+
+ def handle_coverage_data():
+ if self._test_instance.coverage_directory:
+ try:
+ if not os.path.exists(self._test_instance.coverage_directory):
+ os.makedirs(self._test_instance.coverage_directory)
+ device.PullFile(coverage_device_file,
+ self._test_instance.coverage_directory)
+ device.RemovePath(coverage_device_file, True)
+ except (OSError, base_error.BaseError) as e:
+ logging.warning('Failed to handle coverage data after tests: %s', e)
+
+ def handle_render_test_data():
+ if _IsRenderTest(test):
+ # Render tests do not cause test failure by default. So we have to
+ # check to see if any failure images were generated even if the test
+ # does not fail.
+ try:
+ self._ProcessRenderTestResults(device, results)
+ finally:
+ device.RemovePath(self._render_tests_device_output_dir,
+ recursive=True,
+ force=True)
+ self._render_tests_device_output_dir = None
+
+ def pull_ui_screen_captures():
+ screenshots = []
+ for filename in device.ListDirectory(ui_capture_dir.name):
+ if filename.endswith('.json'):
+ screenshots.append(pull_ui_screenshot(filename))
+ if screenshots:
+ json_archive_name = 'ui_capture_%s_%s.json' % (
+ test_name.replace('#', '.'),
+ time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
+ with self._env.output_manager.ArchivedTempfile(
+ json_archive_name, 'ui_capture', output_manager.Datatype.JSON
+ ) as json_archive:
+ json.dump(screenshots, json_archive)
+ _SetLinkOnResults(results, test_name, 'ui screenshot',
+ json_archive.Link())
+
+ def pull_ui_screenshot(filename):
+ source_dir = ui_capture_dir.name
+ json_path = posixpath.join(source_dir, filename)
+ json_data = json.loads(device.ReadFile(json_path))
+ image_file_path = posixpath.join(source_dir, json_data['location'])
+ with self._env.output_manager.ArchivedTempfile(
+ json_data['location'], 'ui_capture', output_manager.Datatype.PNG
+ ) as image_archive:
+ device.PullFile(image_file_path, image_archive.name)
+ json_data['image_link'] = image_archive.Link()
+ return json_data
+
+ def stop_chrome_proxy():
+ # Removes the port forwarding
+ if self._chrome_proxy:
+ self._chrome_proxy.Stop(device)
+ if not self._chrome_proxy.wpr_replay_mode:
+ logging.info('WPR Record test generated archive file %s',
+ self._chrome_proxy.wpr_archive_path)
+ self._chrome_proxy = None
+
+
+ # While constructing the TestResult objects, we can parallelize several
+ # steps that involve ADB. These steps should NOT depend on any info in
+ # the results! Things such as whether the test CRASHED have not yet been
+ # determined.
+ post_test_steps = [
+ restore_flags, restore_timeout_scale, stop_chrome_proxy,
+ handle_coverage_data, handle_render_test_data, pull_ui_screen_captures
+ ]
+ if self._env.concurrent_adb:
+ reraiser_thread.RunAsync(post_test_steps)
+ else:
+ for step in post_test_steps:
+ step()
+
+ if logcat_file:
+ _SetLinkOnResults(results, test_name, 'logcat', logcat_file.Link())
+
+ # Update the result name if the test used flags.
+ if flags_to_add:
+ for r in results:
+ if r.GetName() == test_name:
+ r.SetName(test_display_name)
+
+ # Add UNKNOWN results for any missing tests.
+ iterable_test = test if isinstance(test, list) else [test]
+ test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
+ results_names = set(r.GetName() for r in results)
+ results.extend(
+ base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
+ for u in test_names.difference(results_names))
+
+ # Update the result type if we detect a crash.
+ try:
+ crashed_packages = DismissCrashDialogs(device)
+      # The test package name conventionally appends a '.test' suffix to the
+      # package under test, so a crashed package name will be a substring of
+      # the test package name.
+ if any(p in self._test_instance.test_package for p in crashed_packages):
+ for r in results:
+ if r.GetType() == base_test_result.ResultType.UNKNOWN:
+ r.SetType(base_test_result.ResultType.CRASH)
+ elif (crashed_packages and len(results) == 1
+ and results[0].GetType() != base_test_result.ResultType.PASS):
+ # Add log message and set failure reason if:
+ # 1) The app crash was likely not caused by the test.
+ # AND
+ # 2) The app crash possibly caused the test to fail.
+ # Crashes of the package under test are assumed to be the test's fault.
+ _AppendToLogForResult(
+ results[0], 'OS displayed error dialogs for {}'.format(
+ ', '.join(crashed_packages)))
+ results[0].SetFailureReason('{} Crashed'.format(
+ ','.join(crashed_packages)))
+ except device_errors.CommandTimeoutError:
+ logging.warning('timed out when detecting/dismissing error dialogs')
+ # Attach screenshot to the test to help with debugging the dialog boxes.
+ self._SaveScreenshot(device, screenshot_device_file, test_display_name,
+ results, 'dialog_box_screenshot')
+
+ # The crash result can be set above or in
+ # InstrumentationTestRun.GenerateTestResults. If a test crashes,
+ # subprocesses such as the one used by EmbeddedTestServerRule can be left
+ # alive in a bad state, so kill them now.
+ for r in results:
+ if r.GetType() == base_test_result.ResultType.CRASH:
+ for apk in self._test_instance.additional_apks:
+ device.ForceStop(apk.GetPackageName())
+
+ # Handle failures by:
+ # - optionally taking a screenshot
+ # - logging the raw output at INFO level
+ # - clearing the application state while persisting permissions
+ if any(r.GetType() not in (base_test_result.ResultType.PASS,
+ base_test_result.ResultType.SKIP)
+ for r in results):
+ self._SaveScreenshot(device, screenshot_device_file, test_display_name,
+ results, 'post_test_screenshot')
+
+ logging.info('detected failure in %s. raw output:', test_display_name)
+ for l in output:
+ logging.info(' %s', l)
+ if not self._env.skip_clear_data:
+ if self._test_instance.package_info:
+ permissions = (self._test_instance.apk_under_test.GetPermissions()
+ if self._test_instance.apk_under_test else None)
+ device.ClearApplicationState(self._test_instance.package_info.package,
+ permissions=permissions)
+ if self._test_instance.enable_breakpad_dump:
+ device.RemovePath(breakpad_dump_directory, recursive=True)
+ else:
+ logging.debug('raw output from %s:', test_display_name)
+ for l in output:
+ logging.debug(' %s', l)
+
+ if self._test_instance.store_tombstones:
+ resolved_tombstones = tombstones.ResolveTombstones(
+ device,
+ resolve_all_tombstones=True,
+ include_stack_symbols=False,
+ wipe_tombstones=True,
+ tombstone_symbolizer=self._test_instance.symbolizer)
+ if resolved_tombstones:
+ tombstone_filename = 'tombstones_%s_%s' % (time.strftime(
+ '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
+ with self._env.output_manager.ArchivedTempfile(
+ tombstone_filename, 'tombstones') as tombstone_file:
+ tombstone_file.write('\n'.join(resolved_tombstones))
+
+ # Associate tombstones with first crashing test.
+ for result in results:
+ if result.GetType() == base_test_result.ResultType.CRASH:
+ result.SetLink('tombstones', tombstone_file.Link())
+ break
+ else:
+ # We don't always detect crashes correctly. In this case,
+ # associate with the first test.
+ results[0].SetLink('tombstones', tombstone_file.Link())
+
+ unknown_tests = set(r.GetName() for r in results
+ if r.GetType() == base_test_result.ResultType.UNKNOWN)
+
+    # If a batched test crashes, the rest of the tests in that batch won't be
+    # run and will have their status left as UNKNOWN in the results, so rerun
+    # them (see crbug/1127935). The tests need to be "unbatched" so that, on
+    # subsequent tries, they can be run individually; this prevents an
+    # unrecognized crash from blocking the rest of the batch. Running the
+    # tests unbatched does not happen until a retry at the
+    # local_device_test_run/environment level.
+ tests_to_rerun = []
+ for t in iterable_test:
+ if self._GetUniqueTestName(t) in unknown_tests:
+ prior_attempts = t.get('run_attempts', 0)
+ t['run_attempts'] = prior_attempts + 1
+ # It's possible every test in the batch could crash, so need to
+ # try up to as many times as tests that there are.
+ if prior_attempts < len(results):
+ if t['annotations']:
+ t['annotations'].pop('Batch', None)
+ tests_to_rerun.append(t)
+
+    # If a crash in a batch isn't recognized as a crash, the tests will be
+    # marked as UNKNOWN. Sometimes a test failure causes a crash, but the
+    # crash isn't recorded because the failure was detected first. If the
+    # UNKNOWN tests were rerun here while unbatched and passed, they would
+    # have an UNKNOWN, PASS status and be improperly marked as flaky.
+    # Instead, change their status to NOTRUN and don't rerun them here; they
+    # will get rerun individually at the local_device_test_run/environment
+    # level, as the "Batch" annotation was removed.
+ found_crash_or_fail = False
+ for r in results:
+ if (r.GetType() == base_test_result.ResultType.CRASH
+ or r.GetType() == base_test_result.ResultType.FAIL):
+ found_crash_or_fail = True
+ break
+ if not found_crash_or_fail:
+ # Don't bother rerunning since the unrecognized crashes in
+ # the batch will keep failing.
+ tests_to_rerun = None
+ for r in results:
+ if r.GetType() == base_test_result.ResultType.UNKNOWN:
+ r.SetType(base_test_result.ResultType.NOTRUN)
+
+ return results, tests_to_rerun if tests_to_rerun else None
+
+ def _GetTestsFromRunner(self):
+ test_apk_path = self._test_instance.test_apk.path
+ pickle_path = '%s-runner.pickle' % test_apk_path
+ # For incremental APKs, the code doesn't live in the apk, so instead check
+ # the timestamp of the target's .stamp file.
+ if self._test_instance.test_apk_incremental_install_json:
+ with open(self._test_instance.test_apk_incremental_install_json) as f:
+ data = json.load(f)
+ out_dir = constants.GetOutDirectory()
+ test_mtime = max(
+ os.path.getmtime(os.path.join(out_dir, p)) for p in data['dex_files'])
+ else:
+ test_mtime = os.path.getmtime(test_apk_path)
+
+ try:
+ return instrumentation_test_instance.GetTestsFromPickle(
+ pickle_path, test_mtime)
+ except instrumentation_test_instance.TestListPickleException as e:
+ logging.info('Could not get tests from pickle: %s', e)
+ logging.info('Getting tests by having %s list them.',
+ self._test_instance.junit4_runner_class)
+ def list_tests(d):
+ def _run(dev):
+ # We need to use GetAppWritablePath instead of GetExternalStoragePath
+ # here because we will not have applied legacy storage workarounds on R+
+ # yet.
+ with device_temp_file.DeviceTempFile(
+ dev.adb, suffix='.json',
+ dir=dev.GetAppWritablePath()) as dev_test_list_json:
+ junit4_runner_class = self._test_instance.junit4_runner_class
+ test_package = self._test_instance.test_package
+ extras = {
+ 'log': 'true',
+ # Workaround for https://github.com/mockito/mockito/issues/922
+ 'notPackage': 'net.bytebuddy',
+ }
+ extras[_EXTRA_TEST_LIST] = dev_test_list_json.name
+ target = '%s/%s' % (test_package, junit4_runner_class)
+ timeout = 240
+ if self._test_instance.wait_for_java_debugger:
+ timeout = None
+ with self._ArchiveLogcat(dev, 'list_tests'):
+ test_list_run_output = dev.StartInstrumentation(
+ target, extras=extras, retries=0, timeout=timeout)
+ if any(test_list_run_output):
+ logging.error('Unexpected output while listing tests:')
+ for line in test_list_run_output:
+ logging.error(' %s', line)
+ with tempfile_ext.NamedTemporaryDirectory() as host_dir:
+ host_file = os.path.join(host_dir, 'list_tests.json')
+ dev.PullFile(dev_test_list_json.name, host_file)
+ with open(host_file, 'r') as host_file:
+ return json.load(host_file)
+
+ return crash_handler.RetryOnSystemCrash(_run, d)
+
+ raw_test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)
+
+ # If all devices failed to list tests, raise an exception.
+ # Check that tl is not None and is not empty.
+ if all(not tl for tl in raw_test_lists):
+ raise device_errors.CommandFailedError(
+ 'Failed to list tests on any device')
+
+ # Get the first viable list of raw tests
+ raw_tests = [tl for tl in raw_test_lists if tl][0]
+
+ instrumentation_test_instance.SaveTestsToPickle(pickle_path, raw_tests)
+ return raw_tests
+
+ @contextlib.contextmanager
+ def _ArchiveLogcat(self, device, test_name):
+ stream_name = 'logcat_%s_shard%s_%s_%s' % (
+ test_name.replace('#', '.'), self._test_instance.external_shard_index,
+ time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
+
+ logcat_file = None
+ logmon = None
+ try:
+ with self._env.output_manager.ArchivedTempfile(
+ stream_name, 'logcat') as logcat_file:
+ with logcat_monitor.LogcatMonitor(
+ device.adb,
+ filter_specs=local_device_environment.LOGCAT_FILTERS,
+ output_file=logcat_file.name,
+ transform_func=self._test_instance.MaybeDeobfuscateLines,
+ check_error=False) as logmon:
+ with _LogTestEndpoints(device, test_name):
+ with contextlib_ext.Optional(
+ trace_event.trace(test_name),
+ self._env.trace_output):
+ yield logcat_file
+ finally:
+ if logmon:
+ logmon.Close()
+ if logcat_file and logcat_file.Link():
+ logging.info('Logcat saved to %s', logcat_file.Link())
+
+ def _SaveTraceData(self, trace_device_file, device, test_class):
+ trace_host_file = self._env.trace_output
+
+ if device.FileExists(trace_device_file.name):
+ try:
+ java_trace_json = device.ReadFile(trace_device_file.name)
+ except IOError:
+ raise Exception('error pulling trace file from device')
+ finally:
+ trace_device_file.close()
+
+ process_name = '%s (device %s)' % (test_class, device.serial)
+    process_hash = int(
+        hashlib.md5(process_name.encode('utf-8')).hexdigest()[:6], 16)
+
+ java_trace = json.loads(java_trace_json)
+ java_trace.sort(key=lambda event: event['ts'])
+
+ get_date_command = 'echo $EPOCHREALTIME'
+ device_time = device.RunShellCommand(get_date_command, single_line=True)
+ device_time = float(device_time) * 1e6
+ system_time = trace_time.Now()
+ time_difference = system_time - device_time
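+    # |device_time| (EPOCHREALTIME, in seconds) was scaled to microseconds
+    # above; |time_difference| is the device-to-host clock offset added to
+    # each event's 'ts' below.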
+
+ threads_to_add = set()
+ for event in java_trace:
+ # Ensure thread ID and thread name will be linked in the metadata.
+ threads_to_add.add((event['tid'], event['name']))
+
+ event['pid'] = process_hash
+
+ # Adjust time stamp to align with Python trace times (from
+ # trace_time.Now()).
+ event['ts'] += time_difference
+
+ for tid, thread_name in threads_to_add:
+ thread_name_metadata = {'pid': process_hash, 'tid': tid,
+ 'ts': 0, 'ph': 'M', 'cat': '__metadata',
+ 'name': 'thread_name',
+ 'args': {'name': thread_name}}
+ java_trace.append(thread_name_metadata)
+
+ process_name_metadata = {'pid': process_hash, 'tid': 0, 'ts': 0,
+ 'ph': 'M', 'cat': '__metadata',
+ 'name': 'process_name',
+ 'args': {'name': process_name}}
+ java_trace.append(process_name_metadata)
+
+ java_trace_json = json.dumps(java_trace)
+ java_trace_json = java_trace_json.rstrip(' ]')
+
+ with open(trace_host_file, 'r') as host_handle:
+ host_contents = host_handle.readline()
+
+ if host_contents:
+ java_trace_json = ',%s' % java_trace_json.lstrip(' [')
+
+ with open(trace_host_file, 'a') as host_handle:
+ host_handle.write(java_trace_json)
+
+ def _SaveScreenshot(self, device, screenshot_device_file, test_name, results,
+ link_name):
+ screenshot_filename = '%s-%s.png' % (
+ test_name, time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
+ if device.FileExists(screenshot_device_file.name):
+ with self._env.output_manager.ArchivedTempfile(
+ screenshot_filename, 'screenshot',
+ output_manager.Datatype.PNG) as screenshot_host_file:
+ try:
+ device.PullFile(screenshot_device_file.name,
+ screenshot_host_file.name)
+ finally:
+ screenshot_device_file.close()
+ _SetLinkOnResults(results, test_name, link_name,
+ screenshot_host_file.Link())
+
+ def _ProcessRenderTestResults(self, device, results):
+ if not self._render_tests_device_output_dir:
+ return
+ self._ProcessSkiaGoldRenderTestResults(device, results)
+
+ def _IsRetryWithoutPatch(self):
+ """Checks whether this test run is a retry without a patch/CL.
+
+ Returns:
+ True iff this is being run on a trybot and the current step is a retry
+ without the patch applied, otherwise False.
+ """
+ is_tryjob = self._test_instance.skia_gold_properties.IsTryjobRun()
+ # Builders automatically pass in --gtest_repeat,
+ # --test-launcher-retry-limit, --test-launcher-batch-limit, and
+ # --gtest_filter when running a step without a CL applied, but not for
+ # steps with the CL applied.
+ # TODO(skbug.com/12100): Check this in a less hacky way if a way can be
+ # found to check the actual step name. Ideally, this would not be necessary
+ # at all, but will be until Chromium stops doing step retries on trybots
+ # (extremely unlikely) or Gold is updated to not clobber earlier results
+ # (more likely, but a ways off).
+ has_filter = bool(self._test_instance.test_filter)
+ has_batch_limit = self._test_instance.test_launcher_batch_limit is not None
+ return is_tryjob and has_filter and has_batch_limit
+
+ def _ProcessSkiaGoldRenderTestResults(self, device, results):
+ gold_dir = posixpath.join(self._render_tests_device_output_dir,
+ _DEVICE_GOLD_DIR)
+ if not device.FileExists(gold_dir):
+ return
+
+ gold_properties = self._test_instance.skia_gold_properties
+ with tempfile_ext.NamedTemporaryDirectory() as host_dir:
+ use_luci = not (gold_properties.local_pixel_tests
+ or gold_properties.no_luci_auth)
+
+ # Pull everything at once instead of pulling individually, as it's
+ # slightly faster since each command over adb has some overhead compared
+ # to doing the same thing locally.
+ host_dir = os.path.join(host_dir, _DEVICE_GOLD_DIR)
+ device.PullFile(gold_dir, host_dir)
+ for image_name in os.listdir(host_dir):
+ if not image_name.endswith('.png'):
+ continue
+
+ render_name = image_name[:-4]
+ json_name = render_name + '.json'
+ json_path = os.path.join(host_dir, json_name)
+ image_path = os.path.join(host_dir, image_name)
+ full_test_name = None
+ if not os.path.exists(json_path):
+ _FailTestIfNecessary(results, full_test_name)
+ _AppendToLog(
+ results, full_test_name,
+ 'Unable to find corresponding JSON file for image %s '
+ 'when doing Skia Gold comparison.' % image_name)
+ continue
+
+ # Add 'ignore': '1' if a comparison failure would not be surfaced, as
+ # that implies that we aren't actively maintaining baselines for the
+ # test. This helps prevent unrelated CLs from getting comments posted to
+ # them.
+ should_rewrite = False
+ with open(json_path) as infile:
+ # All the key/value pairs in the JSON file are strings, so convert
+ # to a bool.
+ json_dict = json.load(infile)
+ fail_on_unsupported = json_dict.get('fail_on_unsupported_configs',
+ 'false')
+ fail_on_unsupported = fail_on_unsupported.lower() == 'true'
+ # Grab the full test name so we can associate the comparison with a
+ # particular test, which is necessary if tests are batched together.
+ # Remove the key/value pair from the JSON since we don't need/want to
+ # upload it to Gold.
+ full_test_name = json_dict.get('full_test_name')
+ if 'full_test_name' in json_dict:
+ should_rewrite = True
+ del json_dict['full_test_name']
+
+ running_on_unsupported = (
+ device.build_version_sdk not in RENDER_TEST_MODEL_SDK_CONFIGS.get(
+ device.product_model, []) and not fail_on_unsupported)
+ should_ignore_in_gold = running_on_unsupported
+ # We still want to fail the test even if we're ignoring the image in
+ # Gold if we're running on a supported configuration, so
+ # should_ignore_in_gold != should_hide_failure.
+ should_hide_failure = running_on_unsupported
+ if should_ignore_in_gold:
+ should_rewrite = True
+ json_dict['ignore'] = '1'
+ if should_rewrite:
+ with open(json_path, 'w') as outfile:
+ json.dump(json_dict, outfile)
+
+ gold_session = self._skia_gold_session_manager.GetSkiaGoldSession(
+ keys_input=json_path)
+
+ try:
+ status, error = gold_session.RunComparison(
+ name=render_name,
+ png_file=image_path,
+ output_manager=self._env.output_manager,
+ use_luci=use_luci,
+ force_dryrun=self._IsRetryWithoutPatch())
+ except Exception as e: # pylint: disable=broad-except
+ _FailTestIfNecessary(results, full_test_name)
+ _AppendToLog(results, full_test_name,
+ 'Skia Gold comparison raised exception: %s' % e)
+ continue
+
+ if not status:
+ continue
+
+ # Don't fail the test if we ran on an unsupported configuration unless
+ # the test has explicitly opted in, as it's likely that baselines
+ # aren't maintained for that configuration.
+ if should_hide_failure:
+ if self._test_instance.skia_gold_properties.local_pixel_tests:
+ _AppendToLog(
+ results, full_test_name,
+ 'Gold comparison for %s failed, but model %s with SDK '
+ '%d is not a supported configuration. This failure would be '
+ 'ignored on the bots, but failing since tests are being run '
+ 'locally.' %
+ (render_name, device.product_model, device.build_version_sdk))
+ else:
+ _AppendToLog(
+ results, full_test_name,
+ 'Gold comparison for %s failed, but model %s with SDK '
+ '%d is not a supported configuration, so ignoring failure.' %
+ (render_name, device.product_model, device.build_version_sdk))
+ continue
+
+ _FailTestIfNecessary(results, full_test_name)
+ failure_log = (
+ 'Skia Gold reported failure for RenderTest %s. See '
+ 'RENDER_TESTS.md for how to fix this failure.' % render_name)
+      status_codes = (
+          self._skia_gold_session_manager.GetSessionClass().StatusCodes)
+ if status == status_codes.AUTH_FAILURE:
+ _AppendToLog(results, full_test_name,
+ 'Gold authentication failed with output %s' % error)
+ elif status == status_codes.INIT_FAILURE:
+ _AppendToLog(results, full_test_name,
+ 'Gold initialization failed with output %s' % error)
+ elif status == status_codes.COMPARISON_FAILURE_REMOTE:
+        public_triage_link, internal_triage_link = (
+            gold_session.GetTriageLinks(render_name))
+ if not public_triage_link:
+ _AppendToLog(
+ results, full_test_name,
+ 'Failed to get triage link for %s, raw output: %s' %
+ (render_name, error))
+ _AppendToLog(
+ results, full_test_name, 'Reason for no triage link: %s' %
+ gold_session.GetTriageLinkOmissionReason(render_name))
+ continue
+ if gold_properties.IsTryjobRun():
+ _SetLinkOnResults(results, full_test_name,
+ 'Public Skia Gold triage link for entire CL',
+ public_triage_link)
+ _SetLinkOnResults(results, full_test_name,
+ 'Internal Skia Gold triage link for entire CL',
+ internal_triage_link)
+ else:
+ _SetLinkOnResults(
+ results, full_test_name,
+ 'Public Skia Gold triage link for %s' % render_name,
+ public_triage_link)
+ _SetLinkOnResults(
+ results, full_test_name,
+ 'Internal Skia Gold triage link for %s' % render_name,
+ internal_triage_link)
+ _AppendToLog(results, full_test_name, failure_log)
+
+ elif status == status_codes.COMPARISON_FAILURE_LOCAL:
+ given_link = gold_session.GetGivenImageLink(render_name)
+ closest_link = gold_session.GetClosestImageLink(render_name)
+ diff_link = gold_session.GetDiffImageLink(render_name)
+
+ processed_template_output = _GenerateRenderTestHtml(
+ render_name, given_link, closest_link, diff_link)
+ with self._env.output_manager.ArchivedTempfile(
+ '%s.html' % render_name, 'gold_local_diffs',
+ output_manager.Datatype.HTML) as html_results:
+ html_results.write(processed_template_output)
+ _SetLinkOnResults(results, full_test_name, render_name,
+ html_results.Link())
+ _AppendToLog(
+ results, full_test_name,
+ 'See %s link for diff image with closest positive.' % render_name)
+ elif status == status_codes.LOCAL_DIFF_FAILURE:
+ _AppendToLog(results, full_test_name,
+ 'Failed to generate diffs from Gold: %s' % error)
+ else:
+ logging.error(
+ 'Given unhandled SkiaGoldSession StatusCode %s with error %s',
+ status, error)
+
+ #override
+ def _ShouldRetry(self, test, result):
+ # We've tried to disable retries in the past with mixed results.
+ # See crbug.com/619055 for historical context and crbug.com/797002
+ # for ongoing efforts.
+ if 'Batch' in test['annotations'] and test['annotations']['Batch'][
+ 'value'] == 'UnitTests':
+ return False
+ del test, result
+ return True
+
+ #override
+ def _ShouldShard(self):
+ return True
+
+ @classmethod
+ def _GetTimeoutScaleFromAnnotations(cls, annotations):
+ try:
+ return int(annotations.get('TimeoutScale', {}).get('value', 1))
+ except ValueError as e:
+      logging.warning('Non-integer value of TimeoutScale ignored. (%s)', e)
+ return 1
+
+ @classmethod
+ def _GetTimeoutFromAnnotations(cls, annotations, test_name):
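+    # TIMEOUT_ANNOTATIONS is an ordered list of (annotation name, seconds)
+    # pairs; the first annotation present on the test determines the base
+    # timeout, which is then multiplied by any TimeoutScale annotation.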
+ for k, v in TIMEOUT_ANNOTATIONS:
+ if k in annotations:
+ timeout = v
+ break
+ else:
+ logging.warning('Using default 1 minute timeout for %s', test_name)
+ timeout = 60
+
+ timeout *= cls._GetTimeoutScaleFromAnnotations(annotations)
+
+ return timeout
+
+
+def _IsWPRRecordReplayTest(test):
+ """Determines whether a test or a list of tests is a WPR RecordReplay Test."""
+ if not isinstance(test, list):
+ test = [test]
+ return any([
+ WPR_RECORD_REPLAY_TEST_FEATURE_ANNOTATION in t['annotations'].get(
+ FEATURE_ANNOTATION, {}).get('value', ()) for t in test
+ ])
+
+
+def _GetWPRArchivePath(test):
+ """Retrieves the archive path from the WPRArchiveDirectory annotation."""
+ return test['annotations'].get(WPR_ARCHIVE_FILE_PATH_ANNOTATION,
+ {}).get('value', ())
+
+
+def _ReplaceUncommonChars(original):
+ """Replaces uncommon characters with __."""
+ if not original:
+ raise ValueError('parameter should not be empty')
+
+ uncommon_chars = ['#']
+ for char in uncommon_chars:
+ original = original.replace(char, '__')
+ return original
+
+
+def _IsRenderTest(test):
+ """Determines if a test or list of tests has a RenderTest amongst them."""
+ if not isinstance(test, list):
+ test = [test]
+ return any([RENDER_TEST_FEATURE_ANNOTATION in t['annotations'].get(
+ FEATURE_ANNOTATION, {}).get('value', ()) for t in test])
+
+
+def _GenerateRenderTestHtml(image_name, failure_link, golden_link, diff_link):
+ """Generates a RenderTest results page.
+
+ Displays the generated (failure) image, the golden image, and the diff
+ between them.
+
+ Args:
+ image_name: The name of the image whose comparison failed.
+ failure_link: The URL to the generated/failure image.
+ golden_link: The URL to the golden image.
+ diff_link: The URL to the diff image between the failure and golden images.
+
+ Returns:
+ A string containing the generated HTML.
+ """
+ jinja2_env = jinja2.Environment(
+ loader=jinja2.FileSystemLoader(_JINJA_TEMPLATE_DIR), trim_blocks=True)
+ template = jinja2_env.get_template(_JINJA_TEMPLATE_FILENAME)
+ # pylint: disable=no-member
+ return template.render(
+ test_name=image_name,
+ failure_link=failure_link,
+ golden_link=golden_link,
+ diff_link=diff_link)
+
+
+def _FailTestIfNecessary(results, full_test_name):
+ """Marks the given results as failed if it wasn't already.
+
+ Marks the result types as ResultType.FAIL unless they were already some sort
+ of failure type, e.g. ResultType.CRASH.
+
+ Args:
+ results: A list of base_test_result.BaseTestResult objects.
+ full_test_name: A string containing the full name of the test, e.g.
+ org.chromium.chrome.SomeTestClass#someTestMethod.
+ """
+ found_matching_test = _MatchingTestInResults(results, full_test_name)
+ if not found_matching_test and _ShouldReportNoMatchingResult(full_test_name):
+ logging.error(
+ 'Could not find result specific to %s, failing all tests in the batch.',
+ full_test_name)
+ for result in results:
+ if found_matching_test and result.GetName() != full_test_name:
+ continue
+ if result.GetType() not in [
+ base_test_result.ResultType.FAIL, base_test_result.ResultType.CRASH,
+ base_test_result.ResultType.TIMEOUT, base_test_result.ResultType.UNKNOWN
+ ]:
+ result.SetType(base_test_result.ResultType.FAIL)
+
+
+def _AppendToLog(results, full_test_name, line):
+ """Appends the given line to the end of the logs of the given results.
+
+ Args:
+ results: A list of base_test_result.BaseTestResult objects.
+ full_test_name: A string containing the full name of the test, e.g.
+ org.chromium.chrome.SomeTestClass#someTestMethod.
+    line: A string to be appended as a new line to the log of each result.
+ """
+ found_matching_test = _MatchingTestInResults(results, full_test_name)
+ if not found_matching_test and _ShouldReportNoMatchingResult(full_test_name):
+ logging.error(
+ 'Could not find result specific to %s, appending to log of all tests '
+ 'in the batch.', full_test_name)
+ for result in results:
+ if found_matching_test and result.GetName() != full_test_name:
+ continue
+ _AppendToLogForResult(result, line)
+
+
+def _AppendToLogForResult(result, line):
+ result.SetLog(result.GetLog() + '\n' + line)
+
+
+def _SetLinkOnResults(results, full_test_name, link_name, link):
+ """Sets the given link on the given results.
+
+ Args:
+ results: A list of base_test_result.BaseTestResult objects.
+ full_test_name: A string containing the full name of the test, e.g.
+ org.chromium.chrome.SomeTestClass#someTestMethod.
+ link_name: A string containing the name of the link being set.
+    link: A string containing the link being set.
+ """
+ found_matching_test = _MatchingTestInResults(results, full_test_name)
+ if not found_matching_test and _ShouldReportNoMatchingResult(full_test_name):
+ logging.error(
+ 'Could not find result specific to %s, adding link to results of all '
+ 'tests in the batch.', full_test_name)
+ for result in results:
+ if found_matching_test and result.GetName() != full_test_name:
+ continue
+ result.SetLink(link_name, link)
+
+
+def _MatchingTestInResults(results, full_test_name):
+ """Checks if any tests named |full_test_name| are in |results|.
+
+ Args:
+ results: A list of base_test_result.BaseTestResult objects.
+ full_test_name: A string containing the full name of the test, e.g.
+        org.chromium.chrome.SomeTestClass#someTestMethod.
+
+ Returns:
+ True if one of the results in |results| has the same name as
+ |full_test_name|, otherwise False.
+ """
+  return any(r.GetName() == full_test_name for r in results)
+
+
+def _ShouldReportNoMatchingResult(full_test_name):
+ """Determines whether a failure to find a matching result is actually bad.
+
+ Args:
+ full_test_name: A string containing the full name of the test, e.g.
+        org.chromium.chrome.SomeTestClass#someTestMethod.
+
+ Returns:
+ False if the failure to find a matching result is expected and should not
+ be reported, otherwise True.
+ """
+ if full_test_name is not None and full_test_name.endswith(_BATCH_SUFFIX):
+ # Handle batched tests, whose reported name is the first test's name +
+ # "_batch".
+ return False
+ return True
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py
new file mode 100755
index 0000000000..948e34c17a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env vpython3
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for local_device_instrumentation_test_run."""
+
+# pylint: disable=protected-access
+
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.base import mock_environment
+from pylib.base import mock_test_instance
+from pylib.local.device import local_device_instrumentation_test_run
+
+
+class LocalDeviceInstrumentationTestRunTest(unittest.TestCase):
+
+ def setUp(self):
+ super(LocalDeviceInstrumentationTestRunTest, self).setUp()
+ self._env = mock_environment.MockEnvironment()
+ self._ti = mock_test_instance.MockTestInstance()
+ self._obj = (
+ local_device_instrumentation_test_run.LocalDeviceInstrumentationTestRun(
+ self._env, self._ti))
+
+ # TODO(crbug.com/797002): Decide whether the _ShouldRetry hook is worth
+ # retaining and remove these tests if not.
+
+ def testShouldRetry_failure(self):
+ test = {
+ 'annotations': {},
+ 'class': 'SadTest',
+ 'method': 'testFailure',
+ 'is_junit4': True,
+ }
+ result = base_test_result.BaseTestResult(
+ 'SadTest.testFailure', base_test_result.ResultType.FAIL)
+ self.assertTrue(self._obj._ShouldRetry(test, result))
+
+ def testShouldRetry_retryOnFailure(self):
+ test = {
+ 'annotations': {'RetryOnFailure': None},
+ 'class': 'SadTest',
+ 'method': 'testRetryOnFailure',
+ 'is_junit4': True,
+ }
+ result = base_test_result.BaseTestResult(
+ 'SadTest.testRetryOnFailure', base_test_result.ResultType.FAIL)
+ self.assertTrue(self._obj._ShouldRetry(test, result))
+
+ def testShouldRetry_notRun(self):
+ test = {
+ 'annotations': {},
+ 'class': 'SadTest',
+ 'method': 'testNotRun',
+ 'is_junit4': True,
+ }
+ result = base_test_result.BaseTestResult(
+ 'SadTest.testNotRun', base_test_result.ResultType.NOTRUN)
+ self.assertTrue(self._obj._ShouldRetry(test, result))
+
+ def testIsWPRRecordReplayTest_matchedWithKey(self):
+ test = {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['WPRRecordReplayTest', 'dummy']
+ }
+ },
+ 'class': 'WPRDummyTest',
+ 'method': 'testRun',
+ 'is_junit4': True,
+ }
+ self.assertTrue(
+ local_device_instrumentation_test_run._IsWPRRecordReplayTest(test))
+
+ def testIsWPRRecordReplayTest_noMatchedKey(self):
+ test = {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['abc', 'dummy']
+ }
+ },
+ 'class': 'WPRDummyTest',
+ 'method': 'testRun',
+ 'is_junit4': True,
+ }
+ self.assertFalse(
+ local_device_instrumentation_test_run._IsWPRRecordReplayTest(test))
+
+ def testGetWPRArchivePath_matchedWithKey(self):
+ test = {
+ 'annotations': {
+ 'WPRArchiveDirectory': {
+ 'value': 'abc'
+ }
+ },
+ 'class': 'WPRDummyTest',
+ 'method': 'testRun',
+ 'is_junit4': True,
+ }
+ self.assertEqual(
+ local_device_instrumentation_test_run._GetWPRArchivePath(test), 'abc')
+
+ def testGetWPRArchivePath_noMatchedWithKey(self):
+ test = {
+ 'annotations': {
+ 'Feature': {
+ 'value': 'abc'
+ }
+ },
+ 'class': 'WPRDummyTest',
+ 'method': 'testRun',
+ 'is_junit4': True,
+ }
+ self.assertFalse(
+ local_device_instrumentation_test_run._GetWPRArchivePath(test))
+
+ def testIsRenderTest_matchedWithKey(self):
+ test = {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['RenderTest', 'dummy']
+ }
+ },
+ 'class': 'DummyTest',
+ 'method': 'testRun',
+ 'is_junit4': True,
+ }
+ self.assertTrue(local_device_instrumentation_test_run._IsRenderTest(test))
+
+ def testIsRenderTest_noMatchedKey(self):
+ test = {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['abc', 'dummy']
+ }
+ },
+ 'class': 'DummyTest',
+ 'method': 'testRun',
+ 'is_junit4': True,
+ }
+ self.assertFalse(local_device_instrumentation_test_run._IsRenderTest(test))
+
+ def testReplaceUncommonChars(self):
+ original = 'abc#edf'
+ self.assertEqual(
+ local_device_instrumentation_test_run._ReplaceUncommonChars(original),
+ 'abc__edf')
+ original = 'abc#edf#hhf'
+ self.assertEqual(
+ local_device_instrumentation_test_run._ReplaceUncommonChars(original),
+ 'abc__edf__hhf')
+ original = 'abcedfhhf'
+ self.assertEqual(
+ local_device_instrumentation_test_run._ReplaceUncommonChars(original),
+ 'abcedfhhf')
+ original = None
+ with self.assertRaises(ValueError):
+ local_device_instrumentation_test_run._ReplaceUncommonChars(original)
+ original = ''
+ with self.assertRaises(ValueError):
+ local_device_instrumentation_test_run._ReplaceUncommonChars(original)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py
new file mode 100644
index 0000000000..71dd9bd793
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py
@@ -0,0 +1,128 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+
+from six.moves import range # pylint: disable=redefined-builtin
+from devil.android import device_errors
+from devil.android.sdk import intent
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.local.device import local_device_test_run
+
+
+_CHROME_PACKAGE = constants.PACKAGE_INFO['chrome'].package
+
+class LocalDeviceMonkeyTestRun(local_device_test_run.LocalDeviceTestRun):
+ def __init__(self, env, test_instance):
+ super(LocalDeviceMonkeyTestRun, self).__init__(env, test_instance)
+
+ def TestPackage(self):
+ return 'monkey'
+
+ #override
+ def SetUp(self):
+ pass
+
+ #override
+ def _RunTest(self, device, test):
+ device.ClearApplicationState(self._test_instance.package)
+
+ # Chrome crashes are not always caught by Monkey test runner.
+ # Launch Chrome and verify Chrome has the same PID before and after
+ # the test.
+ device.StartActivity(
+ intent.Intent(package=self._test_instance.package,
+ activity=self._test_instance.activity,
+ action='android.intent.action.MAIN'),
+ blocking=True, force_stop=True)
+ before_pids = device.GetPids(self._test_instance.package)
+
+ output = ''
+ if before_pids:
+ if len(before_pids.get(self._test_instance.package, [])) > 1:
+ raise Exception(
+ 'At most one instance of process %s expected but found pids: '
+ '%s' % (self._test_instance.package, before_pids))
+ output = '\n'.join(self._LaunchMonkeyTest(device))
+ after_pids = device.GetPids(self._test_instance.package)
+
+ crashed = True
+ if not self._test_instance.package in before_pids:
+ logging.error('Failed to start the process.')
+ elif not self._test_instance.package in after_pids:
+ logging.error('Process %s has died.',
+ before_pids[self._test_instance.package])
+ elif (before_pids[self._test_instance.package] !=
+ after_pids[self._test_instance.package]):
+ logging.error('Detected process restart %s -> %s',
+ before_pids[self._test_instance.package],
+ after_pids[self._test_instance.package])
+ else:
+ crashed = False
+
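+    # The monkey tool prints 'Events injected: <N>' when it completes all
+    # requested events without aborting.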
+ success_pattern = 'Events injected: %d' % self._test_instance.event_count
+ if success_pattern in output and not crashed:
+ result = base_test_result.BaseTestResult(
+ test, base_test_result.ResultType.PASS, log=output)
+ else:
+ result = base_test_result.BaseTestResult(
+ test, base_test_result.ResultType.FAIL, log=output)
+ if 'chrome' in self._test_instance.package:
+ logging.warning('Starting MinidumpUploadService...')
+ # TODO(jbudorick): Update this after upstreaming.
+ minidump_intent = intent.Intent(
+ action='%s.crash.ACTION_FIND_ALL' % _CHROME_PACKAGE,
+ package=self._test_instance.package,
+ activity='%s.crash.MinidumpUploadService' % _CHROME_PACKAGE)
+ try:
+ device.RunShellCommand(
+ ['am', 'startservice'] + minidump_intent.am_args,
+ as_root=True, check_return=True)
+ except device_errors.CommandFailedError:
+ logging.exception('Failed to start MinidumpUploadService')
+
+ return result, None
+
+ #override
+ def TearDown(self):
+ pass
+
+ #override
+ def _CreateShards(self, tests):
+ return tests
+
+ #override
+ def _ShouldShard(self):
+ # TODO(mikecase): Run Monkey test concurrently on each attached device.
+ return False
+
+ #override
+ def _GetTests(self):
+ return ['MonkeyTest']
+
+ def _LaunchMonkeyTest(self, device):
+ try:
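+      # The assembled command looks like, e.g. (illustrative):
+      #   monkey -p <package> --throttle <ms> -s <seed>
+      #     --monitor-native-crashes --kill-process-after-error
+      #     [-c <category> ...] [-v ...] <event count>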
+ cmd = ['monkey',
+ '-p', self._test_instance.package,
+ '--throttle', str(self._test_instance.throttle),
+ '-s', str(self._test_instance.seed),
+ '--monitor-native-crashes',
+ '--kill-process-after-error']
+ for category in self._test_instance.categories:
+ cmd.extend(['-c', category])
+ for _ in range(self._test_instance.verbose_count):
+ cmd.append('-v')
+ cmd.append(str(self._test_instance.event_count))
+ return device.RunShellCommand(
+ cmd, timeout=self._test_instance.timeout, check_return=True)
+ finally:
+ try:
+ # Kill the monkey test process on the device. If you manually
+ # interrupt the test run, this will prevent the monkey test from
+ # continuing to run.
+ device.KillAll('com.android.commands.monkey')
+ except device_errors.CommandFailedError:
+ pass
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py
new file mode 100644
index 0000000000..645d9c7471
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py
@@ -0,0 +1,395 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import logging
+import posixpath
+import signal
+try:
+ import _thread as thread
+except ImportError:
+ import thread
+import threading
+
+from devil import base_error
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android.sdk import version_codes
+from devil.android.tools import device_recovery
+from devil.utils import signal_handler
+from pylib import valgrind_tools
+from pylib.base import base_test_result
+from pylib.base import test_run
+from pylib.base import test_collection
+from pylib.local.device import local_device_environment
+
+
+_SIGTERM_TEST_LOG = (
+ ' Suite execution terminated, probably due to swarming timeout.\n'
+ ' Your test may not have run.')
+
+
+def SubstituteDeviceRoot(device_path, device_root):
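+ """Substitutes |device_root| for missing or empty parts of |device_path|.
+
+ If |device_path| is falsy, |device_root| itself is returned. If it is a
+ list, falsy elements are replaced with |device_root| and the result is
+ joined with posixpath.join; otherwise |device_path| is returned unchanged.
+ E.g. SubstituteDeviceRoot([None, 'subpath'], '/fake/device/root')
+ returns '/fake/device/root/subpath'.
+ """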
+ if not device_path:
+ return device_root
+ elif isinstance(device_path, list):
+ return posixpath.join(*(p if p else device_root for p in device_path))
+ else:
+ return device_path
+
+
+class TestsTerminated(Exception):
+ pass
+
+
+class InvalidShardingSettings(Exception):
+ def __init__(self, shard_index, total_shards):
+ super(InvalidShardingSettings, self).__init__(
+ 'Invalid sharding settings. shard_index: %d total_shards: %d'
+ % (shard_index, total_shards))
+
+
+class LocalDeviceTestRun(test_run.TestRun):
+
+ def __init__(self, env, test_instance):
+ super(LocalDeviceTestRun, self).__init__(env, test_instance)
+ self._tools = {}
+ # This is intended to be filled by a child class.
+ self._installed_packages = []
+ env.SetPreferredAbis(test_instance.GetPreferredAbis())
+
+ #override
+ def RunTests(self, results):
+ tests = self._GetTests()
+
+ exit_now = threading.Event()
+
+ @local_device_environment.handle_shard_failures
+ def run_tests_on_device(dev, tests, results):
+ # This is done here instead of during setup because app compatibility
+ # flags are cleared when a device restarts, which happens whenever a
+ # device needs to be recovered.
+ SetAppCompatibilityFlagsIfNecessary(self._installed_packages, dev)
+ consecutive_device_errors = 0
+ for test in tests:
+ if not test:
+ logging.warning('No tests in shard. Continuing.')
+ tests.test_completed()
+ continue
+ if exit_now.isSet():
+ thread.exit()
+
+ result = None
+ rerun = None
+ try:
+ result, rerun = crash_handler.RetryOnSystemCrash(
+ lambda d, t=test: self._RunTest(d, t),
+ device=dev)
+ consecutive_device_errors = 0
+ if isinstance(result, base_test_result.BaseTestResult):
+ results.AddResult(result)
+ elif isinstance(result, list):
+ results.AddResults(result)
+ else:
+ raise Exception(
+ 'Unexpected result type: %s' % type(result).__name__)
+ except device_errors.CommandTimeoutError:
+ # Test timeouts don't count as device errors for the purpose
+ # of bad device detection.
+ consecutive_device_errors = 0
+
+ if isinstance(test, list):
+ results.AddResults(
+ base_test_result.BaseTestResult(
+ self._GetUniqueTestName(t),
+ base_test_result.ResultType.TIMEOUT) for t in test)
+ else:
+ results.AddResult(
+ base_test_result.BaseTestResult(
+ self._GetUniqueTestName(test),
+ base_test_result.ResultType.TIMEOUT))
+ except Exception as e: # pylint: disable=broad-except
+ if isinstance(tests, test_collection.TestCollection):
+ rerun = test
+ if (isinstance(e, device_errors.DeviceUnreachableError)
+ or not isinstance(e, base_error.BaseError)):
+ # If we get a device error but believe the device is still
+ # reachable, attempt to continue using it. Otherwise, raise
+ # the exception and terminate this run_tests_on_device call.
+ raise
+
+ consecutive_device_errors += 1
+ if consecutive_device_errors >= 3:
+ # We believe the device is still reachable and may still be usable,
+ # but if it fails repeatedly, we shouldn't attempt to keep using
+ # it.
+ logging.error('Repeated failures on device %s. Abandoning.',
+ str(dev))
+ raise
+
+ logging.exception(
+ 'Attempting to continue using device %s despite failure (%d/3).',
+ str(dev), consecutive_device_errors)
+
+ finally:
+ if isinstance(tests, test_collection.TestCollection):
+ if rerun:
+ tests.add(rerun)
+ tests.test_completed()
+
+ logging.info('Finished running tests on this device.')
+
+ def stop_tests(_signum, _frame):
+ logging.critical('Received SIGTERM. Stopping test execution.')
+ exit_now.set()
+ raise TestsTerminated()
+
+ try:
+ with signal_handler.AddSignalHandler(signal.SIGTERM, stop_tests):
+ self._env.ResetCurrentTry()
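+ # Each try runs every remaining test across the available devices;
+ # tests that fail (per _GetTestsToRetry) are carried over to the next
+ # try, up to max_tries attempts.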
+ while self._env.current_try < self._env.max_tries and tests:
+ tries = self._env.current_try
+ grouped_tests = self._GroupTests(tests)
+ logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
+ if tries > 0 and self._env.recover_devices:
+ if any(d.build_version_sdk == version_codes.LOLLIPOP_MR1
+ for d in self._env.devices):
+ logging.info(
+ 'Attempting to recover devices due to known issue on L MR1. '
+ 'See crbug.com/787056 for details.')
+ self._env.parallel_devices.pMap(
+ device_recovery.RecoverDevice, None)
+ elif tries + 1 == self._env.max_tries:
+ logging.info(
+ 'Attempting to recover devices prior to last test attempt.')
+ self._env.parallel_devices.pMap(
+ device_recovery.RecoverDevice, None)
+ logging.info('Will run %d tests on %d devices: %s',
+ len(tests), len(self._env.devices),
+ ', '.join(str(d) for d in self._env.devices))
+ for t in tests:
+ logging.debug(' %s', t)
+
+ try_results = base_test_result.TestRunResults()
+ test_names = (self._GetUniqueTestName(t) for t in tests)
+ try_results.AddResults(
+ base_test_result.BaseTestResult(
+ t, base_test_result.ResultType.NOTRUN)
+ for t in test_names if not t.endswith('*'))
+
+ # As soon as we know the names of the tests, we populate |results|.
+ # The tests in try_results will have their results updated by
+ # try_results.AddResult() as they are run.
+ results.append(try_results)
+
+ try:
+ if self._ShouldShard():
+ tc = test_collection.TestCollection(
+ self._CreateShards(grouped_tests))
+ self._env.parallel_devices.pMap(
+ run_tests_on_device, tc, try_results).pGet(None)
+ else:
+ self._env.parallel_devices.pMap(run_tests_on_device,
+ grouped_tests,
+ try_results).pGet(None)
+ except TestsTerminated:
+ for unknown_result in try_results.GetUnknown():
+ try_results.AddResult(
+ base_test_result.BaseTestResult(
+ unknown_result.GetName(),
+ base_test_result.ResultType.TIMEOUT,
+ log=_SIGTERM_TEST_LOG))
+ raise
+
+ self._env.IncrementCurrentTry()
+ tests = self._GetTestsToRetry(tests, try_results)
+
+ logging.info('FINISHED TRY #%d/%d', tries + 1, self._env.max_tries)
+ if tests:
+ logging.info('%d failed tests remain.', len(tests))
+ else:
+ logging.info('All tests completed.')
+ except TestsTerminated:
+ pass
+
+ def _GetTestsToRetry(self, tests, try_results):
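+ """Returns the subset of |tests| that should be retried.
+
+ A test is considered failed if its result is missing or is neither PASS
+ nor SKIP. Test names ending in '*' are treated as wildcard filters and
+ fail if any matching result failed. _ShouldRetry() then decides whether
+ each failed test is actually retried.
+ """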
+
+ def is_failure_result(test_result):
+ if isinstance(test_result, list):
+ return any(is_failure_result(r) for r in test_result)
+ return (
+ test_result is None
+ or test_result.GetType() not in (
+ base_test_result.ResultType.PASS,
+ base_test_result.ResultType.SKIP))
+
+ all_test_results = {r.GetName(): r for r in try_results.GetAll()}
+
+ tests_and_names = ((t, self._GetUniqueTestName(t)) for t in tests)
+
+ tests_and_results = {}
+ for test, name in tests_and_names:
+ if name.endswith('*'):
+ tests_and_results[name] = (test, [
+ r for n, r in all_test_results.items() if fnmatch.fnmatch(n, name)
+ ])
+ else:
+ tests_and_results[name] = (test, all_test_results.get(name))
+
+ failed_tests_and_results = ((test, result)
+ for test, result in tests_and_results.values()
+ if is_failure_result(result))
+
+ return [t for t, r in failed_tests_and_results if self._ShouldRetry(t, r)]
+
+ def _ApplyExternalSharding(self, tests, shard_index, total_shards):
+ logging.info('Using external sharding settings. This is shard %d/%d',
+ shard_index, total_shards)
+
+ if total_shards < 0 or shard_index < 0 or total_shards <= shard_index:
+ raise InvalidShardingSettings(shard_index, total_shards)
+
+ sharded_tests = []
+
+ # Group tests by tests that should run in the same test invocation - either
+ # unit tests or batched tests.
+ grouped_tests = self._GroupTests(tests)
+
+ # Partition grouped tests approximately evenly across shards.
+ partitioned_tests = self._PartitionTests(grouped_tests, total_shards,
+ float('inf'))
+ if len(partitioned_tests) <= shard_index:
+ return []
+ for t in partitioned_tests[shard_index]:
+ if isinstance(t, list):
+ sharded_tests.extend(t)
+ else:
+ sharded_tests.append(t)
+ return sharded_tests
+
+ # Partition tests as evenly as possible into |num_desired_partitions|
+ # partitions. However, several constraints can make a perfect partitioning
+ # impossible. If |max_partition_size| isn't large enough, extra partitions
+ # may be created (an infinite max size should always return precisely the
+ # desired number of partitions). Even if |max_partition_size| is technically
+ # large enough to hold all of the tests in |num_desired_partitions|, we
+ # attempt to keep test order relatively stable to minimize flakes, so when
+ # tests are grouped (e.g. batched tests), we cannot perfectly fill all
+ # partitions as that would require breaking up groups.
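+ # For example (illustrative): partitioning 10 individually-counted tests
+ # into 4 partitions with an unlimited max size yields partition sizes of
+ # [2, 2, 3, 3], since the target size is re-balanced as partitions fill up.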
+ def _PartitionTests(self, tests, num_desired_partitions, max_partition_size):
+ # pylint: disable=no-self-use
+ partitions = []
+
+ # Sort by hash so we don't put all tests in a slow suite in the same
+ # partition.
+ tests = sorted(
+ tests,
+ key=lambda t: hash(
+ self._GetUniqueTestName(t[0] if isinstance(t, list) else t)))
+
+ def CountTestsIndividually(test):
+ if not isinstance(test, list):
+ return False
+ annotations = test[0]['annotations']
+ # UnitTests tests are really fast, so to balance shards better, count
+ # UnitTests Batches as single tests.
+ return ('Batch' not in annotations
+ or annotations['Batch']['value'] != 'UnitTests')
+
+ num_not_yet_allocated = sum(
+ [len(test) - 1 for test in tests if CountTestsIndividually(test)])
+ num_not_yet_allocated += len(tests)
+
+ # Fast linear partition approximation capped by max_partition_size. We
+ # cannot round-robin or otherwise re-order tests dynamically because we want
+ # test order to remain stable.
+ partition_size = min(num_not_yet_allocated // num_desired_partitions,
+ max_partition_size)
+ partitions.append([])
+ last_partition_size = 0
+ for test in tests:
+ test_count = len(test) if CountTestsIndividually(test) else 1
+ # Make a new shard whenever we would overfill the previous one. However,
+ # if the size of the test group is larger than the max partition size on
+ # its own, just put the group in its own shard instead of splitting up the
+ # group.
+ if (last_partition_size + test_count > partition_size
+ and last_partition_size > 0):
+ num_desired_partitions -= 1
+ if num_desired_partitions <= 0:
+ # Too many tests for number of partitions, just fill all partitions
+ # beyond num_desired_partitions.
+ partition_size = max_partition_size
+ else:
+ # Re-balance remaining partitions.
+ partition_size = min(num_not_yet_allocated // num_desired_partitions,
+ max_partition_size)
+ partitions.append([])
+ partitions[-1].append(test)
+ last_partition_size = test_count
+ else:
+ partitions[-1].append(test)
+ last_partition_size += test_count
+
+ num_not_yet_allocated -= test_count
+
+ if not partitions[-1]:
+ partitions.pop()
+ return partitions
+
+ def GetTool(self, device):
+ if str(device) not in self._tools:
+ self._tools[str(device)] = valgrind_tools.CreateTool(
+ self._env.tool, device)
+ return self._tools[str(device)]
+
+ def _CreateShards(self, tests):
+ raise NotImplementedError
+
+ def _GetUniqueTestName(self, test):
+ # pylint: disable=no-self-use
+ return test
+
+ def _ShouldRetry(self, test, result):
+ # pylint: disable=no-self-use,unused-argument
+ return True
+
+ def _GetTests(self):
+ raise NotImplementedError
+
+ def _GroupTests(self, tests):
+ # pylint: disable=no-self-use
+ return tests
+
+ def _RunTest(self, device, test):
+ raise NotImplementedError
+
+ def _ShouldShard(self):
+ raise NotImplementedError
+
+
+def SetAppCompatibilityFlagsIfNecessary(packages, device):
+ """Sets app compatibility flags on the given packages and device.
+
+ Args:
+ packages: A list of strings containing package names to apply flags to.
+ device: A DeviceUtils instance to apply the flags on.
+ """
+
+ def set_flag_for_packages(flag, enable):
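+ # Runs e.g. `am compat disable DEFAULT_SCOPED_STORAGE <package>` on the
+ # device for each package.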
+ enable_str = 'enable' if enable else 'disable'
+ for p in packages:
+ cmd = ['am', 'compat', enable_str, flag, p]
+ device.RunShellCommand(cmd)
+
+ sdk_version = device.build_version_sdk
+ if sdk_version >= version_codes.R:
+ # These flags are necessary to use the legacy storage permissions on R+.
+ # See crbug.com/1173699 for more information.
+ set_flag_for_packages('DEFAULT_SCOPED_STORAGE', False)
+ set_flag_for_packages('FORCE_ENABLE_SCOPED_STORAGE', False)
+
+
+class NoTestsError(Exception):
+ """Error for when no tests are found."""
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py
new file mode 100755
index 0000000000..0f6c9b5421
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env vpython3
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.local.device import local_device_test_run
+
+import mock # pylint: disable=import-error
+
+
+class SubstituteDeviceRootTest(unittest.TestCase):
+
+ def testNoneDevicePath(self):
+ self.assertEqual(
+ '/fake/device/root',
+ local_device_test_run.SubstituteDeviceRoot(None, '/fake/device/root'))
+
+ def testStringDevicePath(self):
+ self.assertEqual(
+ '/another/fake/device/path',
+ local_device_test_run.SubstituteDeviceRoot('/another/fake/device/path',
+ '/fake/device/root'))
+
+ def testListWithNoneDevicePath(self):
+ self.assertEqual(
+ '/fake/device/root/subpath',
+ local_device_test_run.SubstituteDeviceRoot([None, 'subpath'],
+ '/fake/device/root'))
+
+ def testListWithoutNoneDevicePath(self):
+ self.assertEqual(
+ '/another/fake/device/path',
+ local_device_test_run.SubstituteDeviceRoot(
+ ['/', 'another', 'fake', 'device', 'path'], '/fake/device/root'))
+
+
+class TestLocalDeviceTestRun(local_device_test_run.LocalDeviceTestRun):
+
+ # pylint: disable=abstract-method
+
+ def __init__(self):
+ super(TestLocalDeviceTestRun, self).__init__(
+ mock.MagicMock(), mock.MagicMock())
+
+
+class TestLocalDeviceNonStringTestRun(
+ local_device_test_run.LocalDeviceTestRun):
+
+ # pylint: disable=abstract-method
+
+ def __init__(self):
+ super(TestLocalDeviceNonStringTestRun, self).__init__(
+ mock.MagicMock(), mock.MagicMock())
+
+ def _GetUniqueTestName(self, test):
+ return test['name']
+
+
+class LocalDeviceTestRunTest(unittest.TestCase):
+
+ def testGetTestsToRetry_allTestsPassed(self):
+ results = [
+ base_test_result.BaseTestResult(
+ 'Test1', base_test_result.ResultType.PASS),
+ base_test_result.BaseTestResult(
+ 'Test2', base_test_result.ResultType.PASS),
+ ]
+
+ tests = [r.GetName() for r in results]
+ try_results = base_test_result.TestRunResults()
+ try_results.AddResults(results)
+
+ test_run = TestLocalDeviceTestRun()
+ tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+ self.assertEqual(0, len(tests_to_retry))
+
+ def testGetTestsToRetry_testFailed(self):
+ results = [
+ base_test_result.BaseTestResult(
+ 'Test1', base_test_result.ResultType.FAIL),
+ base_test_result.BaseTestResult(
+ 'Test2', base_test_result.ResultType.PASS),
+ ]
+
+ tests = [r.GetName() for r in results]
+ try_results = base_test_result.TestRunResults()
+ try_results.AddResults(results)
+
+ test_run = TestLocalDeviceTestRun()
+ tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+ self.assertEqual(1, len(tests_to_retry))
+ self.assertIn('Test1', tests_to_retry)
+
+ def testGetTestsToRetry_testUnknown(self):
+ results = [
+ base_test_result.BaseTestResult(
+ 'Test2', base_test_result.ResultType.PASS),
+ ]
+
+ tests = ['Test1'] + [r.GetName() for r in results]
+ try_results = base_test_result.TestRunResults()
+ try_results.AddResults(results)
+
+ test_run = TestLocalDeviceTestRun()
+ tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+ self.assertEqual(1, len(tests_to_retry))
+ self.assertIn('Test1', tests_to_retry)
+
+ def testGetTestsToRetry_wildcardFilter_allPass(self):
+ results = [
+ base_test_result.BaseTestResult(
+ 'TestCase.Test1', base_test_result.ResultType.PASS),
+ base_test_result.BaseTestResult(
+ 'TestCase.Test2', base_test_result.ResultType.PASS),
+ ]
+
+ tests = ['TestCase.*']
+ try_results = base_test_result.TestRunResults()
+ try_results.AddResults(results)
+
+ test_run = TestLocalDeviceTestRun()
+ tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+ self.assertEqual(0, len(tests_to_retry))
+
+ def testGetTestsToRetry_wildcardFilter_oneFails(self):
+ results = [
+ base_test_result.BaseTestResult(
+ 'TestCase.Test1', base_test_result.ResultType.PASS),
+ base_test_result.BaseTestResult(
+ 'TestCase.Test2', base_test_result.ResultType.FAIL),
+ ]
+
+ tests = ['TestCase.*']
+ try_results = base_test_result.TestRunResults()
+ try_results.AddResults(results)
+
+ test_run = TestLocalDeviceTestRun()
+ tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+ self.assertEqual(1, len(tests_to_retry))
+ self.assertIn('TestCase.*', tests_to_retry)
+
+ def testGetTestsToRetry_nonStringTests(self):
+ results = [
+ base_test_result.BaseTestResult(
+ 'TestCase.Test1', base_test_result.ResultType.PASS),
+ base_test_result.BaseTestResult(
+ 'TestCase.Test2', base_test_result.ResultType.FAIL),
+ ]
+
+ tests = [
+ {'name': 'TestCase.Test1'},
+ {'name': 'TestCase.Test2'},
+ ]
+ try_results = base_test_result.TestRunResults()
+ try_results.AddResults(results)
+
+ test_run = TestLocalDeviceNonStringTestRun()
+ tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+ self.assertEqual(1, len(tests_to_retry))
+ self.assertIsInstance(tests_to_retry[0], dict)
+ self.assertEqual(tests[1], tests_to_retry[0])
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/OWNERS b/third_party/libwebrtc/build/android/pylib/local/emulator/OWNERS
new file mode 100644
index 0000000000..0853590d4b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/OWNERS
@@ -0,0 +1,4 @@
+bpastene@chromium.org
+hypan@google.com
+jbudorick@chromium.org
+liaoyuke@chromium.org
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/__init__.py b/third_party/libwebrtc/build/android/pylib/local/emulator/__init__.py
new file mode 100644
index 0000000000..4a12e35c92
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/avd.py b/third_party/libwebrtc/build/android/pylib/local/emulator/avd.py
new file mode 100644
index 0000000000..d32fbd93e7
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/avd.py
@@ -0,0 +1,629 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import contextlib
+import json
+import logging
+import os
+import socket
+import stat
+import subprocess
+import threading
+
+from google.protobuf import text_format # pylint: disable=import-error
+
+from devil.android import device_utils
+from devil.android.sdk import adb_wrapper
+from devil.utils import cmd_helper
+from devil.utils import timeout_retry
+from py_utils import tempfile_ext
+from pylib import constants
+from pylib.local.emulator import ini
+from pylib.local.emulator.proto import avd_pb2
+
+_ALL_PACKAGES = object()
+_DEFAULT_AVDMANAGER_PATH = os.path.join(
+ constants.ANDROID_SDK_ROOT, 'cmdline-tools', 'latest', 'bin', 'avdmanager')
+# Default to a 480dp mdpi screen (a relatively large phone).
+# See https://developer.android.com/training/multiscreen/screensizes
+# and https://developer.android.com/training/multiscreen/screendensities
+# for more information.
+_DEFAULT_SCREEN_DENSITY = 160
+_DEFAULT_SCREEN_HEIGHT = 960
+_DEFAULT_SCREEN_WIDTH = 480
+
+# Default to swiftshader_indirect since it works for most cases.
+_DEFAULT_GPU_MODE = 'swiftshader_indirect'
+
+
+class AvdException(Exception):
+ """Raised when this module has a problem interacting with an AVD."""
+
+ def __init__(self, summary, command=None, stdout=None, stderr=None):
+ message_parts = [summary]
+ if command:
+ message_parts.append(' command: %s' % ' '.join(command))
+ if stdout:
+ message_parts.append(' stdout:')
+ message_parts.extend(' %s' % line for line in stdout.splitlines())
+ if stderr:
+ message_parts.append(' stderr:')
+ message_parts.extend(' %s' % line for line in stderr.splitlines())
+
+ super(AvdException, self).__init__('\n'.join(message_parts))
+
+
+def _Load(avd_proto_path):
+ """Loads an Avd proto from a textpb file at the given path.
+
+ Should not be called outside of this module.
+
+ Args:
+ avd_proto_path: path to a textpb file containing an Avd message.
+ """
+ with open(avd_proto_path) as avd_proto_file:
+ return text_format.Merge(avd_proto_file.read(), avd_pb2.Avd())
+
+
+class _AvdManagerAgent(object):
+ """Private utility for interacting with avdmanager."""
+
+ def __init__(self, avd_home, sdk_root):
+ """Create an _AvdManagerAgent.
+
+ Args:
+ avd_home: path to ANDROID_AVD_HOME directory.
+ Typically something like /path/to/dir/.android/avd
+ sdk_root: path to SDK root directory.
+ """
+ self._avd_home = avd_home
+ self._sdk_root = sdk_root
+
+ self._env = dict(os.environ)
+
+ # The avdmanager from cmdline-tools looks two levels up from its
+ # toolsdir to find the SDK root. Pass avdmanager a fake tools directory
+ # under the directory in which we install the system images so that
+ # avdmanager can find the system images.
+ fake_tools_dir = os.path.join(self._sdk_root, 'non-existent-tools',
+ 'non-existent-version')
+ self._env.update({
+ 'ANDROID_AVD_HOME':
+ self._avd_home,
+ 'AVDMANAGER_OPTS':
+ '-Dcom.android.sdkmanager.toolsdir=%s' % fake_tools_dir,
+ })
+
+ def Create(self, avd_name, system_image, force=False):
+ """Call `avdmanager create`.
+
+ Args:
+ avd_name: name of the AVD to create.
+ system_image: system image to use for the AVD.
+ force: whether to force creation, overwriting any existing
+ AVD with the same name.
+ """
+ create_cmd = [
+ _DEFAULT_AVDMANAGER_PATH,
+ '-v',
+ 'create',
+ 'avd',
+ '-n',
+ avd_name,
+ '-k',
+ system_image,
+ ]
+ if force:
+ create_cmd += ['--force']
+
+ create_proc = cmd_helper.Popen(
+ create_cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=self._env)
+ output, error = create_proc.communicate(input='\n')
+ if create_proc.returncode != 0:
+ raise AvdException(
+ 'AVD creation failed',
+ command=create_cmd,
+ stdout=output,
+ stderr=error)
+
+ for line in output.splitlines():
+ logging.info(' %s', line)
+
+ def Delete(self, avd_name):
+ """Call `avdmanager delete`.
+
+ Args:
+ avd_name: name of the AVD to delete.
+ """
+ delete_cmd = [
+ _DEFAULT_AVDMANAGER_PATH,
+ '-v',
+ 'delete',
+ 'avd',
+ '-n',
+ avd_name,
+ ]
+ try:
+ for line in cmd_helper.IterCmdOutputLines(delete_cmd, env=self._env):
+ logging.info(' %s', line)
+ except subprocess.CalledProcessError as e:
+ raise AvdException('AVD deletion failed: %s' % str(e), command=delete_cmd)
+
+
+class AvdConfig(object):
+ """Represents a particular AVD configuration.
+
+ This class supports creation, installation, and execution of an AVD
+ from a given Avd proto message, as defined in
+ //build/android/pylib/local/emulator/proto/avd.proto.
+ """
+
+ def __init__(self, avd_proto_path):
+ """Create an AvdConfig object.
+
+ Args:
+ avd_proto_path: path to a textpb file containing an Avd message.
+ """
+ self._config = _Load(avd_proto_path)
+
+ self._emulator_home = os.path.join(constants.DIR_SOURCE_ROOT,
+ self._config.avd_package.dest_path)
+ self._emulator_sdk_root = os.path.join(
+ constants.DIR_SOURCE_ROOT, self._config.emulator_package.dest_path)
+ self._emulator_path = os.path.join(self._emulator_sdk_root, 'emulator',
+ 'emulator')
+
+ self._initialized = False
+ self._initializer_lock = threading.Lock()
+
+ @property
+ def avd_settings(self):
+ return self._config.avd_settings
+
+ def Create(self,
+ force=False,
+ snapshot=False,
+ keep=False,
+ cipd_json_output=None,
+ dry_run=False):
+ """Create an instance of the AVD CIPD package.
+
+ This method:
+ - installs the requisite system image
+ - creates the AVD
+ - modifies the AVD's ini files to support running chromium tests
+ in chromium infrastructure
+ - optionally starts & stops the AVD for snapshotting (default no)
+ - by default, creates and uploads an instance of the AVD CIPD package
+ (this can be turned off with the dry_run flag)
+ - optionally deletes the AVD (default yes)
+
+ Args:
+ force: bool indicating whether to force create the AVD.
+ snapshot: bool indicating whether to snapshot the AVD before creating
+ the CIPD package.
+ keep: bool indicating whether to keep the AVD after creating
+ the CIPD package.
+ cipd_json_output: string path to pass to `cipd create` via -json-output.
+ dry_run: When set to True, it will skip the CIPD package creation
+ after creating the AVD.
+ """
+ logging.info('Installing required packages.')
+ self._InstallCipdPackages(packages=[
+ self._config.emulator_package,
+ self._config.system_image_package,
+ ])
+
+ android_avd_home = os.path.join(self._emulator_home, 'avd')
+
+ if not os.path.exists(android_avd_home):
+ os.makedirs(android_avd_home)
+
+ avd_manager = _AvdManagerAgent(
+ avd_home=android_avd_home, sdk_root=self._emulator_sdk_root)
+
+ logging.info('Creating AVD.')
+ avd_manager.Create(
+ avd_name=self._config.avd_name,
+ system_image=self._config.system_image_name,
+ force=force)
+
+ try:
+ logging.info('Modifying AVD configuration.')
+
+ # Clear out any previous configuration or state from this AVD.
+ root_ini = os.path.join(android_avd_home,
+ '%s.ini' % self._config.avd_name)
+ features_ini = os.path.join(self._emulator_home, 'advancedFeatures.ini')
+ avd_dir = os.path.join(android_avd_home, '%s.avd' % self._config.avd_name)
+ config_ini = os.path.join(avd_dir, 'config.ini')
+
+ with ini.update_ini_file(root_ini) as root_ini_contents:
+ root_ini_contents['path.rel'] = 'avd/%s.avd' % self._config.avd_name
+
+ with ini.update_ini_file(features_ini) as features_ini_contents:
+ # The features_ini file is not refreshed by avdmanager during creation,
+ # so explicitly clear its contents to drop any leftovers from a previous
+ # creation.
+ features_ini_contents.clear()
+ features_ini_contents.update(self.avd_settings.advanced_features)
+
+ with ini.update_ini_file(config_ini) as config_ini_contents:
+ height = self.avd_settings.screen.height or _DEFAULT_SCREEN_HEIGHT
+ width = self.avd_settings.screen.width or _DEFAULT_SCREEN_WIDTH
+ density = self.avd_settings.screen.density or _DEFAULT_SCREEN_DENSITY
+
+ config_ini_contents.update({
+ 'disk.dataPartition.size': '4G',
+ 'hw.keyboard': 'yes',
+ 'hw.lcd.density': density,
+ 'hw.lcd.height': height,
+ 'hw.lcd.width': width,
+ 'hw.mainKeys': 'no', # Show nav buttons on screen
+ })
+
+ if self.avd_settings.ram_size:
+ config_ini_contents['hw.ramSize'] = self.avd_settings.ram_size
+
+ # Start & stop the AVD.
+ self._Initialize()
+ instance = _AvdInstance(self._emulator_path, self._emulator_home,
+ self._config)
+ # Enable debug output for snapshotting when |snapshot| is True.
+ debug_tags = 'init,snapshot' if snapshot else None
+ instance.Start(read_only=False,
+ snapshot_save=snapshot,
+ debug_tags=debug_tags,
+ gpu_mode=_DEFAULT_GPU_MODE)
+ # Android devices with full-disk encryption are encrypted on first boot
+ # and then decrypted to continue the boot process (see details in
+ # https://bit.ly/3agmjcM).
+ # Wait for this step to complete since it can take a while on old OS
+ # versions like M; otherwise the AVD may show an "Encryption Unsuccessful"
+ # error.
+ device = device_utils.DeviceUtils(instance.serial)
+ device.WaitUntilFullyBooted(decrypt=True, timeout=180, retries=0)
+
+ # Skip network disabling on pre-N for now since the svc commands fail
+ # on Marshmallow.
+ if device.build_version_sdk > 23:
+ # Always disable the network to prevent built-in system apps from
+ # updating themselves, which could take over package manager and
+ # cause shell command timeout.
+ # Use svc as this also works on the images with build type "user".
+ logging.info('Disabling the network in emulator.')
+ device.RunShellCommand(['svc', 'wifi', 'disable'], check_return=True)
+ device.RunShellCommand(['svc', 'data', 'disable'], check_return=True)
+
+ instance.Stop()
+
+ # The multiinstance lock file seems to interfere with the emulator's
+ # operation in some circumstances (beyond the obvious -read-only ones),
+ # and there seems to be no mechanism by which it gets closed or deleted.
+ # See https://bit.ly/2pWQTH7 for context.
+ multiInstanceLockFile = os.path.join(avd_dir, 'multiinstance.lock')
+ if os.path.exists(multiInstanceLockFile):
+ os.unlink(multiInstanceLockFile)
+
+ package_def_content = {
+ 'package':
+ self._config.avd_package.package_name,
+ 'root':
+ self._emulator_home,
+ 'install_mode':
+ 'copy',
+ 'data': [{
+ 'dir': os.path.relpath(avd_dir, self._emulator_home)
+ }, {
+ 'file': os.path.relpath(root_ini, self._emulator_home)
+ }, {
+ 'file': os.path.relpath(features_ini, self._emulator_home)
+ }],
+ }
+
+ logging.info('Creating AVD CIPD package.')
+ logging.debug('ensure file content: %s',
+ json.dumps(package_def_content, indent=2))
+
+ with tempfile_ext.TemporaryFileName(suffix='.json') as package_def_path:
+ with open(package_def_path, 'w') as package_def_file:
+ json.dump(package_def_content, package_def_file)
+
+ logging.info(' %s', self._config.avd_package.package_name)
+ cipd_create_cmd = [
+ 'cipd',
+ 'create',
+ '-pkg-def',
+ package_def_path,
+ '-tag',
+ 'emulator_version:%s' % self._config.emulator_package.version,
+ '-tag',
+ 'system_image_version:%s' %
+ self._config.system_image_package.version,
+ ]
+ if cipd_json_output:
+ cipd_create_cmd.extend([
+ '-json-output',
+ cipd_json_output,
+ ])
+ logging.info('running %r%s', cipd_create_cmd,
+ ' (dry_run)' if dry_run else '')
+ if not dry_run:
+ try:
+ for line in cmd_helper.IterCmdOutputLines(cipd_create_cmd):
+ logging.info(' %s', line)
+ except subprocess.CalledProcessError as e:
+ raise AvdException(
+ 'CIPD package creation failed: %s' % str(e),
+ command=cipd_create_cmd)
+
+ finally:
+ if not keep:
+ logging.info('Deleting AVD.')
+ avd_manager.Delete(avd_name=self._config.avd_name)
+
+ def Install(self, packages=_ALL_PACKAGES):
+ """Installs the requested CIPD packages and prepares them for use.
+
+ This includes making files writeable and revising some of the
+ emulator's internal config files.
+
+ Returns: None
+ Raises: AvdException on failure to install.
+ """
+ self._InstallCipdPackages(packages=packages)
+ self._MakeWriteable()
+ self._EditConfigs()
+
+ def _InstallCipdPackages(self, packages):
+ pkgs_by_dir = {}
+ if packages is _ALL_PACKAGES:
+ packages = [
+ self._config.avd_package,
+ self._config.emulator_package,
+ self._config.system_image_package,
+ ]
+ for pkg in packages:
+ if pkg.dest_path not in pkgs_by_dir:
+ pkgs_by_dir[pkg.dest_path] = []
+ pkgs_by_dir[pkg.dest_path].append(pkg)
+
+ for pkg_dir, pkgs in list(pkgs_by_dir.items()):
+ logging.info('Installing packages in %s', pkg_dir)
+ cipd_root = os.path.join(constants.DIR_SOURCE_ROOT, pkg_dir)
+ if not os.path.exists(cipd_root):
+ os.makedirs(cipd_root)
+ ensure_path = os.path.join(cipd_root, '.ensure')
+ with open(ensure_path, 'w') as ensure_file:
+ # Make CIPD ensure that all files are present and correct,
+ # even if it thinks the package is installed.
+ ensure_file.write('$ParanoidMode CheckIntegrity\n\n')
+ for pkg in pkgs:
+ ensure_file.write('%s %s\n' % (pkg.package_name, pkg.version))
+ logging.info(' %s %s', pkg.package_name, pkg.version)
+ ensure_cmd = [
+ 'cipd',
+ 'ensure',
+ '-ensure-file',
+ ensure_path,
+ '-root',
+ cipd_root,
+ ]
+ try:
+ for line in cmd_helper.IterCmdOutputLines(ensure_cmd):
+ logging.info(' %s', line)
+ except subprocess.CalledProcessError as e:
+ raise AvdException(
+ 'Failed to install CIPD package %s: %s' % (pkg.package_name,
+ str(e)),
+ command=ensure_cmd)
+
+ def _MakeWriteable(self):
+ # The emulator requires that some files are writable.
+ for dirname, _, filenames in os.walk(self._emulator_home):
+ for f in filenames:
+ path = os.path.join(dirname, f)
+ mode = os.lstat(path).st_mode
+ if mode & stat.S_IRUSR:
+ mode = mode | stat.S_IWUSR
+ os.chmod(path, mode)
+
+ def _EditConfigs(self):
+ android_avd_home = os.path.join(self._emulator_home, 'avd')
+ avd_dir = os.path.join(android_avd_home, '%s.avd' % self._config.avd_name)
+
+ config_path = os.path.join(avd_dir, 'config.ini')
+ if os.path.exists(config_path):
+ with open(config_path) as config_file:
+ config_contents = ini.load(config_file)
+ else:
+ config_contents = {}
+
+ config_contents['hw.sdCard'] = 'true'
+ if self.avd_settings.sdcard.size:
+ sdcard_path = os.path.join(avd_dir, 'cr-sdcard.img')
+ if not os.path.exists(sdcard_path):
+ mksdcard_path = os.path.join(
+ os.path.dirname(self._emulator_path), 'mksdcard')
+ mksdcard_cmd = [
+ mksdcard_path,
+ self.avd_settings.sdcard.size,
+ sdcard_path,
+ ]
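+ # Runs e.g. `mksdcard 512M <avd_dir>/cr-sdcard.img`; the size string
+ # comes from SdcardSettings.size in the AVD proto (illustrative value).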
+ cmd_helper.RunCmd(mksdcard_cmd)
+
+ config_contents['hw.sdCard.path'] = sdcard_path
+
+ with open(config_path, 'w') as config_file:
+ ini.dump(config_contents, config_file)
+
+ def _Initialize(self):
+ if self._initialized:
+ return
+
+ with self._initializer_lock:
+ if self._initialized:
+ return
+
+ # Emulator start-up looks for the adb daemon. Make sure it's running.
+ adb_wrapper.AdbWrapper.StartServer()
+
+ # Emulator start-up tries to check for the SDK root by looking for
+ # platforms/ and platform-tools/. Ensure they exist.
+ # See http://bit.ly/2YAkyFE for context.
+ required_dirs = [
+ os.path.join(self._emulator_sdk_root, 'platforms'),
+ os.path.join(self._emulator_sdk_root, 'platform-tools'),
+ ]
+ for d in required_dirs:
+ if not os.path.exists(d):
+ os.makedirs(d)
+
+ def CreateInstance(self):
+ """Creates an AVD instance without starting it.
+
+ Returns:
+ An _AvdInstance.
+ """
+ self._Initialize()
+ return _AvdInstance(self._emulator_path, self._emulator_home, self._config)
+
+ def StartInstance(self):
+ """Starts an AVD instance.
+
+ Returns:
+ An _AvdInstance.
+ """
+ instance = self.CreateInstance()
+ instance.Start()
+ return instance
+
+
+class _AvdInstance(object):
+ """Represents a single running instance of an AVD.
+
+ This class should only be created directly by AvdConfig.StartInstance,
+ but its other methods can be freely called.
+ """
+
+ def __init__(self, emulator_path, emulator_home, avd_config):
+ """Create an _AvdInstance object.
+
+ Args:
+ emulator_path: path to the emulator binary.
+ emulator_home: path to the emulator home directory.
+ avd_config: AVD config proto.
+ """
+ self._avd_config = avd_config
+ self._avd_name = avd_config.avd_name
+ self._emulator_home = emulator_home
+ self._emulator_path = emulator_path
+ self._emulator_proc = None
+ self._emulator_serial = None
+ self._sink = None
+
+ def __str__(self):
+ return '%s|%s' % (self._avd_name, (self._emulator_serial or id(self)))
+
+ def Start(self,
+ read_only=True,
+ snapshot_save=False,
+ window=False,
+ writable_system=False,
+ gpu_mode=_DEFAULT_GPU_MODE,
+ debug_tags=None):
+ """Starts the emulator running an instance of the given AVD."""
+
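+ # With default arguments the resulting command is roughly (illustrative):
+ #   emulator -avd <avd_name> -report-console unix:<socket_path>
+ #       -no-boot-anim -read-only -no-snapshot-save
+ #       -gpu swiftshader_indirect -no-window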
+ with tempfile_ext.TemporaryFileName() as socket_path, (contextlib.closing(
+ socket.socket(socket.AF_UNIX))) as sock:
+ sock.bind(socket_path)
+ emulator_cmd = [
+ self._emulator_path,
+ '-avd',
+ self._avd_name,
+ '-report-console',
+ 'unix:%s' % socket_path,
+ '-no-boot-anim',
+ ]
+
+ if read_only:
+ emulator_cmd.append('-read-only')
+ if not snapshot_save:
+ emulator_cmd.append('-no-snapshot-save')
+ if writable_system:
+ emulator_cmd.append('-writable-system')
+ # Note when "--gpu-mode" is set to "host":
+ # * It needs a valid DISPLAY env, even if "--emulator-window" is false.
+ #   Otherwise it may throw errors like "Failed to initialize backend
+ #   EGL display". See the code in https://bit.ly/3ruiMlB for an example
+ #   of setting up the DISPLAY env with xvfb.
+ # * It will not work under remote sessions like Chrome Remote Desktop.
+ if gpu_mode:
+ emulator_cmd.extend(['-gpu', gpu_mode])
+ if debug_tags:
+ emulator_cmd.extend(['-debug', debug_tags])
+
+ emulator_env = {}
+ if self._emulator_home:
+ emulator_env['ANDROID_EMULATOR_HOME'] = self._emulator_home
+ if 'DISPLAY' in os.environ:
+ emulator_env['DISPLAY'] = os.environ.get('DISPLAY')
+ if window:
+ if 'DISPLAY' not in emulator_env:
+ raise AvdException('Emulator failed to start: DISPLAY not defined')
+ else:
+ emulator_cmd.append('-no-window')
+
+ sock.listen(1)
+
+ logging.info('Starting emulator with commands: %s',
+ ' '.join(emulator_cmd))
+
+ # TODO(jbudorick): Add support for logging emulator stdout & stderr at
+ # higher logging levels.
+ # Enable the emulator log when debug_tags is set.
+ if not debug_tags:
+ self._sink = open('/dev/null', 'w')
+ self._emulator_proc = cmd_helper.Popen(
+ emulator_cmd, stdout=self._sink, stderr=self._sink, env=emulator_env)
+
+ # Waits for the emulator to report its serial as requested via
+ # -report-console. See http://bit.ly/2lK3L18 for more.
+ def listen_for_serial(s):
+ logging.info('Waiting for connection from emulator.')
+ with contextlib.closing(s.accept()[0]) as conn:
+ val = conn.recv(1024)
+ return 'emulator-%d' % int(val)
+
+ try:
+ self._emulator_serial = timeout_retry.Run(
+ listen_for_serial, timeout=30, retries=0, args=[sock])
+ logging.info('%s started', self._emulator_serial)
+ except Exception as e:
+ self.Stop()
+ raise AvdException('Emulator failed to start: %s' % str(e))
+
+ def Stop(self):
+ """Stops the emulator process."""
+ if self._emulator_proc:
+ if self._emulator_proc.poll() is None:
+ if self._emulator_serial:
+ device_utils.DeviceUtils(self._emulator_serial).adb.Emu('kill')
+ else:
+ self._emulator_proc.terminate()
+ self._emulator_proc.wait()
+ self._emulator_proc = None
+
+ if self._sink:
+ self._sink.close()
+ self._sink = None
+
+ @property
+ def serial(self):
+ return self._emulator_serial
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/ini.py b/third_party/libwebrtc/build/android/pylib/local/emulator/ini.py
new file mode 100644
index 0000000000..2c5409934b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/ini.py
@@ -0,0 +1,58 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Basic .ini encoding and decoding."""
+
+
+import contextlib
+import os
+
+
+def loads(ini_str, strict=True):
+ ret = {}
+ for line in ini_str.splitlines():
+ key, val = line.split('=', 1)
+ key = key.strip()
+ val = val.strip()
+ if strict and key in ret:
+ raise ValueError('Multiple entries present for key "%s"' % key)
+ ret[key] = val
+
+ return ret
+
+
+def load(fp):
+ return loads(fp.read())
+
+
+def dumps(obj):
+ ret = ''
+ for k, v in sorted(obj.items()):
+ ret += '%s = %s\n' % (k, str(v))
+ return ret
+
+
+def dump(obj, fp):
+ fp.write(dumps(obj))
+
+
+@contextlib.contextmanager
+def update_ini_file(ini_file_path):
+ """Load and update the contents of an ini file.
+
+ Args:
+ ini_file_path: A string containing the absolute path of the ini file.
+ Yields:
+ The contents of the file, as a dict
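+
+ Example (illustrative):
+   with update_ini_file('/path/to/config.ini') as contents:
+     contents['hw.keyboard'] = 'yes'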
+ """
+ if os.path.exists(ini_file_path):
+ with open(ini_file_path) as ini_file:
+ ini_contents = load(ini_file)
+ else:
+ ini_contents = {}
+
+ yield ini_contents
+
+ with open(ini_file_path, 'w') as ini_file:
+ dump(ini_contents, ini_file)
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/ini_test.py b/third_party/libwebrtc/build/android/pylib/local/emulator/ini_test.py
new file mode 100755
index 0000000000..279e964304
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/ini_test.py
@@ -0,0 +1,69 @@
+#! /usr/bin/env vpython3
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Tests for ini.py."""
+
+
+import textwrap
+import unittest
+
+from pylib.local.emulator import ini
+
+
+class IniTest(unittest.TestCase):
+ def testLoadsBasic(self):
+ ini_str = textwrap.dedent("""\
+ foo.bar = 1
+ foo.baz= example
+ bar.bad =/path/to/thing
+ """)
+ expected = {
+ 'foo.bar': '1',
+ 'foo.baz': 'example',
+ 'bar.bad': '/path/to/thing',
+ }
+ self.assertEqual(expected, ini.loads(ini_str))
+
+ def testLoadsStrictFailure(self):
+ ini_str = textwrap.dedent("""\
+ foo.bar = 1
+ foo.baz = example
+ bar.bad = /path/to/thing
+ foo.bar = duplicate
+ """)
+ with self.assertRaises(ValueError):
+ ini.loads(ini_str, strict=True)
+
+ def testLoadsPermissive(self):
+ ini_str = textwrap.dedent("""\
+ foo.bar = 1
+ foo.baz = example
+ bar.bad = /path/to/thing
+ foo.bar = duplicate
+ """)
+ expected = {
+ 'foo.bar': 'duplicate',
+ 'foo.baz': 'example',
+ 'bar.bad': '/path/to/thing',
+ }
+ self.assertEqual(expected, ini.loads(ini_str, strict=False))
+
+ def testDumpsBasic(self):
+ ini_contents = {
+ 'foo.bar': '1',
+ 'foo.baz': 'example',
+ 'bar.bad': '/path/to/thing',
+ }
+ # ini.dumps is expected to dump to string alphabetically
+ # by key.
+ expected = textwrap.dedent("""\
+ bar.bad = /path/to/thing
+ foo.bar = 1
+ foo.baz = example
+ """)
+ self.assertEqual(expected, ini.dumps(ini_contents))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/local_emulator_environment.py b/third_party/libwebrtc/build/android/pylib/local/emulator/local_emulator_environment.py
new file mode 100644
index 0000000000..3bd3c50a19
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/local_emulator_environment.py
@@ -0,0 +1,102 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+
+from six.moves import range # pylint: disable=redefined-builtin
+from devil import base_error
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.utils import parallelizer
+from devil.utils import reraiser_thread
+from devil.utils import timeout_retry
+from pylib.local.device import local_device_environment
+from pylib.local.emulator import avd
+
+# Mirroring https://bit.ly/2OjuxcS#23
+_MAX_ANDROID_EMULATORS = 16
+
+
+class LocalEmulatorEnvironment(local_device_environment.LocalDeviceEnvironment):
+
+ def __init__(self, args, output_manager, error_func):
+ super(LocalEmulatorEnvironment, self).__init__(args, output_manager,
+ error_func)
+ self._avd_config = avd.AvdConfig(args.avd_config)
+ if args.emulator_count < 1:
+ error_func('--emulator-count must be >= 1')
+ elif args.emulator_count >= _MAX_ANDROID_EMULATORS:
+ logging.warning('--emulator-count capped at 16.')
+ self._emulator_count = min(_MAX_ANDROID_EMULATORS, args.emulator_count)
+ self._emulator_window = args.emulator_window
+ self._writable_system = ((hasattr(args, 'use_webview_provider')
+ and args.use_webview_provider)
+ or (hasattr(args, 'replace_system_package')
+ and args.replace_system_package)
+ or (hasattr(args, 'system_packages_to_remove')
+ and args.system_packages_to_remove))
+
+ self._emulator_instances = []
+ self._device_serials = []
+
+ #override
+ def SetUp(self):
+ self._avd_config.Install()
+
+ emulator_instances = [
+ self._avd_config.CreateInstance() for _ in range(self._emulator_count)
+ ]
+
+ def start_emulator_instance(e):
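+ # Starts |e| and waits for it to finish booting. Returns the instance on
+ # success, or None if the emulator itself failed to start. Timeouts while
+ # waiting for boot are retried via timeout_retry and retry_on_timeout
+ # below.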
+
+ def impl(e):
+ try:
+ e.Start(
+ window=self._emulator_window,
+ writable_system=self._writable_system)
+ except avd.AvdException:
+ logging.exception('Failed to start emulator instance.')
+ return None
+ try:
+ device_utils.DeviceUtils(e.serial).WaitUntilFullyBooted()
+ except base_error.BaseError:
+ e.Stop()
+ raise
+ return e
+
+ def retry_on_timeout(exc):
+ return (isinstance(exc, device_errors.CommandTimeoutError)
+ or isinstance(exc, reraiser_thread.TimeoutError))
+
+ return timeout_retry.Run(
+ impl,
+ timeout=120 if self._writable_system else 30,
+ retries=2,
+ args=[e],
+ retry_if_func=retry_on_timeout)
+
+ parallel_emulators = parallelizer.SyncParallelizer(emulator_instances)
+ self._emulator_instances = [
+ emu
+ for emu in parallel_emulators.pMap(start_emulator_instance).pGet(None)
+ if emu is not None
+ ]
+ self._device_serials = [e.serial for e in self._emulator_instances]
+
+ if not self._emulator_instances:
+ raise Exception('Failed to start any instances of the emulator.')
+ elif len(self._emulator_instances) < self._emulator_count:
+ logging.warning(
+ 'Running with fewer emulator instances than requested (%d vs %d)',
+ len(self._emulator_instances), self._emulator_count)
+
+ super(LocalEmulatorEnvironment, self).SetUp()
+
+ #override
+ def TearDown(self):
+ try:
+ super(LocalEmulatorEnvironment, self).TearDown()
+ finally:
+ parallelizer.SyncParallelizer(self._emulator_instances).Stop()
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/proto/__init__.py b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/__init__.py
new file mode 100644
index 0000000000..4a12e35c92
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd.proto b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd.proto
new file mode 100644
index 0000000000..b06da4900b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd.proto
@@ -0,0 +1,75 @@
+
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+syntax = "proto3";
+
+package tools.android.avd.proto;
+
+message CIPDPackage {
+ // CIPD package name.
+ string package_name = 1;
+ // CIPD package version to use.
+ // Ignored when creating AVD packages.
+ string version = 2;
+ // Path into which the package should be installed.
+ // src-relative.
+ string dest_path = 3;
+}
+
+message ScreenSettings {
+ // Screen height in pixels.
+ uint32 height = 1;
+
+ // Screen width in pixels.
+ uint32 width = 2;
+
+ // Screen density in dpi.
+ uint32 density = 3;
+}
+
+message SdcardSettings {
+ // Size of the sdcard that should be created for this AVD.
+ // Can be anything that `mksdcard` or `avdmanager -c` would accept:
+ // - a number of bytes
+ // - a number followed by K, M, or G, indicating that many
+ // KiB, MiB, or GiB, respectively.
+ string size = 1;
+}
+
+message AvdSettings {
+ // Settings pertaining to the AVD's screen.
+ ScreenSettings screen = 1;
+
+ // Settings pertaining to the AVD's sdcard.
+ SdcardSettings sdcard = 2;
+
+ // Advanced Features for AVD. The <key,value> pairs here will override the
+ // default ones in the given system image.
+ // See https://bit.ly/2P1qK2X for all the available keys.
+ // The values should be on, off, default, or null.
+ map<string, string> advanced_features = 3;
+
+ // The physical RAM size on the device, in megabytes.
+ uint32 ram_size = 4;
+}
+
+message Avd {
+ // The emulator to use in running the AVD.
+ CIPDPackage emulator_package = 1;
+
+ // The system image to use.
+ CIPDPackage system_image_package = 2;
+ // The name of the system image to use, as reported by sdkmanager.
+ string system_image_name = 3;
+
+ // The AVD to create or use.
+ // (Only the package_name is used during AVD creation.)
+ CIPDPackage avd_package = 4;
+ // The name of the AVD to create or use.
+ string avd_name = 5;
+
+ // How to configure the AVD at creation.
+ AvdSettings avd_settings = 6;
+}
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd_pb2.py b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd_pb2.py
new file mode 100644
index 0000000000..49cc1aa830
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd_pb2.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: avd.proto
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='avd.proto',
+ package='tools.android.avd.proto',
+ syntax='proto3',
+ serialized_options=None,
+ serialized_pb=b'\n\tavd.proto\x12\x17tools.android.avd.proto\"G\n\x0b\x43IPDPackage\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x11\n\tdest_path\x18\x03 \x01(\t\"@\n\x0eScreenSettings\x12\x0e\n\x06height\x18\x01 \x01(\r\x12\r\n\x05width\x18\x02 \x01(\r\x12\x0f\n\x07\x64\x65nsity\x18\x03 \x01(\r\"\x1e\n\x0eSdcardSettings\x12\x0c\n\x04size\x18\x01 \x01(\t\"\xa1\x02\n\x0b\x41vdSettings\x12\x37\n\x06screen\x18\x01 \x01(\x0b\x32\'.tools.android.avd.proto.ScreenSettings\x12\x37\n\x06sdcard\x18\x02 \x01(\x0b\x32\'.tools.android.avd.proto.SdcardSettings\x12U\n\x11\x61\x64vanced_features\x18\x03 \x03(\x0b\x32:.tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry\x12\x10\n\x08ram_size\x18\x04 \x01(\r\x1a\x37\n\x15\x41\x64vancedFeaturesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xad\x02\n\x03\x41vd\x12>\n\x10\x65mulator_package\x18\x01 \x01(\x0b\x32$.tools.android.avd.proto.CIPDPackage\x12\x42\n\x14system_image_package\x18\x02 \x01(\x0b\x32$.tools.android.avd.proto.CIPDPackage\x12\x19\n\x11system_image_name\x18\x03 \x01(\t\x12\x39\n\x0b\x61vd_package\x18\x04 \x01(\x0b\x32$.tools.android.avd.proto.CIPDPackage\x12\x10\n\x08\x61vd_name\x18\x05 \x01(\t\x12:\n\x0c\x61vd_settings\x18\x06 \x01(\x0b\x32$.tools.android.avd.proto.AvdSettingsb\x06proto3'
+)
+
+
+
+
+_CIPDPACKAGE = _descriptor.Descriptor(
+ name='CIPDPackage',
+ full_name='tools.android.avd.proto.CIPDPackage',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='package_name', full_name='tools.android.avd.proto.CIPDPackage.package_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='version', full_name='tools.android.avd.proto.CIPDPackage.version', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='dest_path', full_name='tools.android.avd.proto.CIPDPackage.dest_path', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=38,
+ serialized_end=109,
+)
+
+
+_SCREENSETTINGS = _descriptor.Descriptor(
+ name='ScreenSettings',
+ full_name='tools.android.avd.proto.ScreenSettings',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='height', full_name='tools.android.avd.proto.ScreenSettings.height', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='width', full_name='tools.android.avd.proto.ScreenSettings.width', index=1,
+ number=2, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='density', full_name='tools.android.avd.proto.ScreenSettings.density', index=2,
+ number=3, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=111,
+ serialized_end=175,
+)
+
+
+_SDCARDSETTINGS = _descriptor.Descriptor(
+ name='SdcardSettings',
+ full_name='tools.android.avd.proto.SdcardSettings',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='size', full_name='tools.android.avd.proto.SdcardSettings.size', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=177,
+ serialized_end=207,
+)
+
+
+_AVDSETTINGS_ADVANCEDFEATURESENTRY = _descriptor.Descriptor(
+ name='AdvancedFeaturesEntry',
+ full_name='tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry.key', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry.value', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=b'8\001',
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=444,
+ serialized_end=499,
+)
+
+_AVDSETTINGS = _descriptor.Descriptor(
+ name='AvdSettings',
+ full_name='tools.android.avd.proto.AvdSettings',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='screen', full_name='tools.android.avd.proto.AvdSettings.screen', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='sdcard', full_name='tools.android.avd.proto.AvdSettings.sdcard', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='advanced_features', full_name='tools.android.avd.proto.AvdSettings.advanced_features', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='ram_size', full_name='tools.android.avd.proto.AvdSettings.ram_size', index=3,
+ number=4, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_AVDSETTINGS_ADVANCEDFEATURESENTRY, ],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=210,
+ serialized_end=499,
+)
+
+
+_AVD = _descriptor.Descriptor(
+ name='Avd',
+ full_name='tools.android.avd.proto.Avd',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='emulator_package', full_name='tools.android.avd.proto.Avd.emulator_package', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='system_image_package', full_name='tools.android.avd.proto.Avd.system_image_package', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='system_image_name', full_name='tools.android.avd.proto.Avd.system_image_name', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='avd_package', full_name='tools.android.avd.proto.Avd.avd_package', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='avd_name', full_name='tools.android.avd.proto.Avd.avd_name', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='avd_settings', full_name='tools.android.avd.proto.Avd.avd_settings', index=5,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=502,
+ serialized_end=803,
+)
+
+_AVDSETTINGS_ADVANCEDFEATURESENTRY.containing_type = _AVDSETTINGS
+_AVDSETTINGS.fields_by_name['screen'].message_type = _SCREENSETTINGS
+_AVDSETTINGS.fields_by_name['sdcard'].message_type = _SDCARDSETTINGS
+_AVDSETTINGS.fields_by_name['advanced_features'].message_type = _AVDSETTINGS_ADVANCEDFEATURESENTRY
+_AVD.fields_by_name['emulator_package'].message_type = _CIPDPACKAGE
+_AVD.fields_by_name['system_image_package'].message_type = _CIPDPACKAGE
+_AVD.fields_by_name['avd_package'].message_type = _CIPDPACKAGE
+_AVD.fields_by_name['avd_settings'].message_type = _AVDSETTINGS
+DESCRIPTOR.message_types_by_name['CIPDPackage'] = _CIPDPACKAGE
+DESCRIPTOR.message_types_by_name['ScreenSettings'] = _SCREENSETTINGS
+DESCRIPTOR.message_types_by_name['SdcardSettings'] = _SDCARDSETTINGS
+DESCRIPTOR.message_types_by_name['AvdSettings'] = _AVDSETTINGS
+DESCRIPTOR.message_types_by_name['Avd'] = _AVD
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+CIPDPackage = _reflection.GeneratedProtocolMessageType('CIPDPackage', (_message.Message,), {
+ 'DESCRIPTOR' : _CIPDPACKAGE,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.CIPDPackage)
+ })
+_sym_db.RegisterMessage(CIPDPackage)
+
+ScreenSettings = _reflection.GeneratedProtocolMessageType('ScreenSettings', (_message.Message,), {
+ 'DESCRIPTOR' : _SCREENSETTINGS,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.ScreenSettings)
+ })
+_sym_db.RegisterMessage(ScreenSettings)
+
+SdcardSettings = _reflection.GeneratedProtocolMessageType('SdcardSettings', (_message.Message,), {
+ 'DESCRIPTOR' : _SDCARDSETTINGS,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.SdcardSettings)
+ })
+_sym_db.RegisterMessage(SdcardSettings)
+
+AvdSettings = _reflection.GeneratedProtocolMessageType('AvdSettings', (_message.Message,), {
+
+ 'AdvancedFeaturesEntry' : _reflection.GeneratedProtocolMessageType('AdvancedFeaturesEntry', (_message.Message,), {
+ 'DESCRIPTOR' : _AVDSETTINGS_ADVANCEDFEATURESENTRY,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry)
+ })
+ ,
+ 'DESCRIPTOR' : _AVDSETTINGS,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.AvdSettings)
+ })
+_sym_db.RegisterMessage(AvdSettings)
+_sym_db.RegisterMessage(AvdSettings.AdvancedFeaturesEntry)
+
+Avd = _reflection.GeneratedProtocolMessageType('Avd', (_message.Message,), {
+ 'DESCRIPTOR' : _AVD,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.Avd)
+ })
+_sym_db.RegisterMessage(Avd)
+
+
+_AVDSETTINGS_ADVANCEDFEATURESENTRY._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/third_party/libwebrtc/build/android/pylib/local/local_test_server_spawner.py b/third_party/libwebrtc/build/android/pylib/local/local_test_server_spawner.py
new file mode 100644
index 0000000000..f5f9875c24
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/local_test_server_spawner.py
@@ -0,0 +1,101 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import json
+import time
+
+from six.moves import range # pylint: disable=redefined-builtin
+from devil.android import forwarder
+from devil.android import ports
+from pylib.base import test_server
+from pylib.constants import host_paths
+
+with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
+ import chrome_test_server_spawner
+
+
+# The tests should not need more than one test server instance.
+MAX_TEST_SERVER_INSTANCES = 1
+
+
+def _WaitUntil(predicate, max_attempts=5):
+ """Blocks until the provided predicate (function) is true.
+
+ Returns:
+ Whether the provided predicate was satisfied once (before the timeout).
+ """
+ sleep_time_sec = 0.025
+ for _ in range(1, max_attempts):
+ if predicate():
+ return True
+ time.sleep(sleep_time_sec)
+ sleep_time_sec = min(1, sleep_time_sec * 2) # Don't wait more than 1 sec.
+ return False
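+
+# A minimal usage sketch (hypothetical, not part of the upstream module):
+# _WaitUntil checks the predicate up to max_attempts - 1 times, doubling a
+# 0.025s sleep between checks (capped at 1s), e.g.:
+#
+#   if not _WaitUntil(lambda: ports.IsHostPortAvailable(8001)):
+#     raise RuntimeError('Port 8001 never became available.')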
+
+
+class PortForwarderAndroid(chrome_test_server_spawner.PortForwarder):
+ def __init__(self, device, tool):
+ self.device = device
+ self.tool = tool
+
+ def Map(self, port_pairs):
+ forwarder.Forwarder.Map(port_pairs, self.device, self.tool)
+
+ def GetDevicePortForHostPort(self, host_port):
+ return forwarder.Forwarder.DevicePortForHostPort(host_port)
+
+ def WaitHostPortAvailable(self, port):
+ return _WaitUntil(lambda: ports.IsHostPortAvailable(port))
+
+ def WaitPortNotAvailable(self, port):
+ return _WaitUntil(lambda: not ports.IsHostPortAvailable(port))
+
+ def WaitDevicePortReady(self, port):
+ return _WaitUntil(lambda: ports.IsDevicePortUsed(self.device, port))
+
+ def Unmap(self, device_port):
+ forwarder.Forwarder.UnmapDevicePort(device_port, self.device)
+
+
+class LocalTestServerSpawner(test_server.TestServer):
+
+ def __init__(self, port, device, tool):
+ super(LocalTestServerSpawner, self).__init__()
+ self._device = device
+ self._spawning_server = chrome_test_server_spawner.SpawningServer(
+ port, PortForwarderAndroid(device, tool), MAX_TEST_SERVER_INSTANCES)
+ self._tool = tool
+
+ @property
+ def server_address(self):
+ return self._spawning_server.server.server_address
+
+ @property
+ def port(self):
+ return self.server_address[1]
+
+ #override
+ def SetUp(self):
+ # See net/test/spawned_test_server/remote_test_server.h for description of
+ # the fields in the config file.
+ test_server_config = json.dumps({
+ 'spawner_url_base': 'http://localhost:%d' % self.port
+ })
+ self._device.WriteFile(
+ '%s/net-test-server-config' % self._device.GetExternalStoragePath(),
+ test_server_config)
+ forwarder.Forwarder.Map(
+ [(self.port, self.port)], self._device, self._tool)
+ self._spawning_server.Start()
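+
+  # Illustrative sketch (hypothetical port, not part of the upstream module):
+  # with a spawner port of 8001, SetUp writes the JSON
+  #   {"spawner_url_base": "http://localhost:8001"}
+  # to <external storage>/net-test-server-config on the device and forwards
+  # port 8001 between device and host so on-device tests can reach the
+  # spawner via localhost.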
+
+ #override
+ def Reset(self):
+ self._spawning_server.CleanupState()
+
+ #override
+ def TearDown(self):
+ self.Reset()
+ self._spawning_server.Stop()
+ forwarder.Forwarder.UnmapDevicePort(self.port, self._device)
diff --git a/third_party/libwebrtc/build/android/pylib/local/machine/__init__.py b/third_party/libwebrtc/build/android/pylib/local/machine/__init__.py
new file mode 100644
index 0000000000..ca3e206fdd
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/machine/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_environment.py b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_environment.py
new file mode 100644
index 0000000000..447204cfd6
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_environment.py
@@ -0,0 +1,25 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import devil_chromium
+from pylib import constants
+from pylib.base import environment
+
+
+class LocalMachineEnvironment(environment.Environment):
+
+ def __init__(self, _args, output_manager, _error_func):
+ super(LocalMachineEnvironment, self).__init__(output_manager)
+
+ devil_chromium.Initialize(
+ output_directory=constants.GetOutDirectory())
+
+ #override
+ def SetUp(self):
+ pass
+
+ #override
+ def TearDown(self):
+ pass
diff --git a/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run.py b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run.py
new file mode 100644
index 0000000000..6cdbf47570
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run.py
@@ -0,0 +1,309 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import collections
+import json
+import logging
+import multiprocessing
+import os
+import select
+import subprocess
+import sys
+import zipfile
+
+from six.moves import range # pylint: disable=redefined-builtin
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.base import test_run
+from pylib.constants import host_paths
+from pylib.results import json_results
+from py_utils import tempfile_ext
+
+
+# Classes with these prefixes are part of the test framework itself and are
+# excluded by the test runner. See:
+# https://android.googlesource.com/platform/frameworks/testing/+/android-support-test/runner/src/main/java/android/support/test/internal/runner/TestRequestBuilder.java
+# base/test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java # pylint: disable=line-too-long
+_EXCLUDED_CLASSES_PREFIXES = ('android', 'junit', 'org/bouncycastle/util',
+ 'org/hamcrest', 'org/junit', 'org/mockito')
+
+# Suites we shouldn't shard, usually because they don't contain enough test
+# cases.
+_EXCLUDED_SUITES = {
+ 'password_check_junit_tests',
+ 'touch_to_fill_junit_tests',
+}
+
+
+# It can actually take longer to run if you shard too much, especially on
+# smaller suites. Locally media_base_junit_tests takes 4.3 sec with 1 shard,
+# and 6 sec with 2 or more shards.
+_MIN_CLASSES_PER_SHARD = 8
+
+
+class LocalMachineJunitTestRun(test_run.TestRun):
+ def __init__(self, env, test_instance):
+ super(LocalMachineJunitTestRun, self).__init__(env, test_instance)
+
+ #override
+ def TestPackage(self):
+ return self._test_instance.suite
+
+ #override
+ def SetUp(self):
+ pass
+
+ def _CreateJarArgsList(self, json_result_file_paths, group_test_list, shards):
+    # Creates one jar_args list per shard. Each list writes to its own
+    # json-results file and runs its own subset of tests, selected via
+    # -gtest-filter.
+ jar_args_list = [['-json-results-file', result_file]
+ for result_file in json_result_file_paths]
+ for index, jar_arg in enumerate(jar_args_list):
+ if shards > 1:
+ jar_arg.extend(['-gtest-filter', ':'.join(group_test_list[index])])
+ elif self._test_instance.test_filter:
+ jar_arg.extend(['-gtest-filter', self._test_instance.test_filter])
+
+ if self._test_instance.package_filter:
+ jar_arg.extend(['-package-filter', self._test_instance.package_filter])
+ if self._test_instance.runner_filter:
+ jar_arg.extend(['-runner-filter', self._test_instance.runner_filter])
+
+ return jar_args_list
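+
+  # Illustrative result (hypothetical values, not part of the upstream
+  # module): with two shards, _CreateJarArgsList yields something like
+  #   [['-json-results-file', '<temp_dir>/results0.json',
+  #     '-gtest-filter', 'org.chromium.FooTest*:org.chromium.BazTest*'],
+  #    ['-json-results-file', '<temp_dir>/results1.json',
+  #     '-gtest-filter', 'org.chromium.BarTest*']]
+  # plus any requested -package-filter / -runner-filter flags.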
+
+ def _CreateJvmArgsList(self):
+ # Creates a list of jvm_args (robolectric, code coverage, etc...)
+ jvm_args = [
+ '-Drobolectric.dependency.dir=%s' %
+ self._test_instance.robolectric_runtime_deps_dir,
+ '-Ddir.source.root=%s' % constants.DIR_SOURCE_ROOT,
+ '-Drobolectric.resourcesMode=binary',
+ ]
+ if logging.getLogger().isEnabledFor(logging.INFO):
+ jvm_args += ['-Drobolectric.logging=stdout']
+ if self._test_instance.debug_socket:
+ jvm_args += [
+ '-agentlib:jdwp=transport=dt_socket'
+ ',server=y,suspend=y,address=%s' % self._test_instance.debug_socket
+ ]
+
+ if self._test_instance.coverage_dir:
+ if not os.path.exists(self._test_instance.coverage_dir):
+ os.makedirs(self._test_instance.coverage_dir)
+ elif not os.path.isdir(self._test_instance.coverage_dir):
+ raise Exception('--coverage-dir takes a directory, not file path.')
+ if self._test_instance.coverage_on_the_fly:
+ jacoco_coverage_file = os.path.join(
+ self._test_instance.coverage_dir,
+ '%s.exec' % self._test_instance.suite)
+ jacoco_agent_path = os.path.join(host_paths.DIR_SOURCE_ROOT,
+ 'third_party', 'jacoco', 'lib',
+ 'jacocoagent.jar')
+
+      # inclnolocationclasses is set to false to prevent NoClassDefFoundError.
+ jacoco_args = '-javaagent:{}=destfile={},inclnolocationclasses=false'
+ jvm_args.append(
+ jacoco_args.format(jacoco_agent_path, jacoco_coverage_file))
+ else:
+ jvm_args.append('-Djacoco-agent.destfile=%s' %
+ os.path.join(self._test_instance.coverage_dir,
+ '%s.exec' % self._test_instance.suite))
+
+ return jvm_args
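+
+  # Illustrative result (hypothetical paths, not part of the upstream
+  # module): with on-the-fly coverage enabled for suite foo_junit_tests, the
+  # appended JVM flag looks roughly like
+  #   -javaagent:<src>/third_party/jacoco/lib/jacocoagent.jar=
+  #       destfile=<coverage_dir>/foo_junit_tests.exec,inclnolocationclasses=false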
+
+ #override
+ def RunTests(self, results):
+ wrapper_path = os.path.join(constants.GetOutDirectory(), 'bin', 'helper',
+ self._test_instance.suite)
+
+    # This avoids searching through the classpath jars for test classes,
+    # which takes about 1-2 seconds.
+ # Do not shard when a test filter is present since we do not know at this
+ # point which tests will be filtered out.
+ if (self._test_instance.shards == 1 or self._test_instance.test_filter
+ or self._test_instance.suite in _EXCLUDED_SUITES):
+ test_classes = []
+ shards = 1
+ else:
+ test_classes = _GetTestClasses(wrapper_path)
+ shards = ChooseNumOfShards(test_classes, self._test_instance.shards)
+
+ logging.info('Running tests on %d shard(s).', shards)
+ group_test_list = GroupTestsForShard(shards, test_classes)
+
+ with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
+ cmd_list = [[wrapper_path] for _ in range(shards)]
+ json_result_file_paths = [
+ os.path.join(temp_dir, 'results%d.json' % i) for i in range(shards)
+ ]
+ jar_args_list = self._CreateJarArgsList(json_result_file_paths,
+ group_test_list, shards)
+ for i in range(shards):
+ cmd_list[i].extend(['--jar-args', '"%s"' % ' '.join(jar_args_list[i])])
+
+ jvm_args = self._CreateJvmArgsList()
+ if jvm_args:
+ for cmd in cmd_list:
+ cmd.extend(['--jvm-args', '"%s"' % ' '.join(jvm_args)])
+
+ AddPropertiesJar(cmd_list, temp_dir, self._test_instance.resource_apk)
+
+      # universal_newlines=True makes the pipes yield text rather than bytes
+      # under Python 3, which PrintProcessesStdout expects.
+      procs = [
+          subprocess.Popen(cmd,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.STDOUT,
+                           universal_newlines=True) for cmd in cmd_list
+      ]
+ PrintProcessesStdout(procs)
+
+ results_list = []
+ try:
+ for json_file_path in json_result_file_paths:
+ with open(json_file_path, 'r') as f:
+ results_list += json_results.ParseResultsFromJson(
+ json.loads(f.read()))
+ except IOError:
+ # In the case of a failure in the JUnit or Robolectric test runner
+ # the output json file may never be written.
+ results_list = [
+ base_test_result.BaseTestResult(
+ 'Test Runner Failure', base_test_result.ResultType.UNKNOWN)
+ ]
+
+ test_run_results = base_test_result.TestRunResults()
+ test_run_results.AddResults(results_list)
+ results.append(test_run_results)
+
+ #override
+ def TearDown(self):
+ pass
+
+
+def AddPropertiesJar(cmd_list, temp_dir, resource_apk):
+ # Create properties file for Robolectric test runners so they can find the
+ # binary resources.
+ properties_jar_path = os.path.join(temp_dir, 'properties.jar')
+ with zipfile.ZipFile(properties_jar_path, 'w') as z:
+ z.writestr('com/android/tools/test_config.properties',
+ 'android_resource_apk=%s' % resource_apk)
+
+ for cmd in cmd_list:
+ cmd.extend(['--classpath', properties_jar_path])
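+
+# Illustrative note (not part of the upstream module): the generated
+# properties.jar holds a single entry,
+#   com/android/tools/test_config.properties
+# containing "android_resource_apk=<resource_apk path>". Adding it to each
+# shard's --classpath lets the Robolectric runner locate the binary resources.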
+
+
+def ChooseNumOfShards(test_classes, shards):
+ # Don't override requests to not shard.
+ if shards == 1:
+ return 1
+
+ # Sharding doesn't reduce runtime on just a few tests.
+ if shards > (len(test_classes) // _MIN_CLASSES_PER_SHARD) or shards < 1:
+ shards = max(1, (len(test_classes) // _MIN_CLASSES_PER_SHARD))
+
+ # Local tests of explicit --shard values show that max speed is achieved
+ # at cpu_count() / 2.
+ # Using -XX:TieredStopAtLevel=1 is required for this result. The flag reduces
+ # CPU time by two-thirds, making sharding more effective.
+ shards = max(1, min(shards, multiprocessing.cpu_count() // 2))
+  # Each shard needs at least one test class.
+ shards = min(len(test_classes), shards)
+
+ return shards
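+
+# Worked examples (hypothetical values, not part of the upstream module),
+# assuming a 36-core machine (cpu_count() // 2 == 18):
+#   ChooseNumOfShards([1] * 50, shards=4)  ->  4  (request honored)
+#   ChooseNumOfShards([1] * 20, shards=8)  ->  2  (capped at 20 // 8 classes)
+#   ChooseNumOfShards([1] * 50, shards=1)  ->  1  (explicit no-shard request)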
+
+
+def GroupTestsForShard(num_of_shards, test_classes):
+ """Groups tests that will be ran on each shard.
+
+ Args:
+ num_of_shards: number of shards to split tests between.
+ test_classes: A list of test_class files in the jar.
+
+  Returns:
+    A dict mapping each shard index to its list of test classes.
+ """
+ test_dict = {i: [] for i in range(num_of_shards)}
+
+  # Round-robin test distribution reduces the chance that a sequential group of
+  # classes all have an unusually high number of tests.
+ for count, test_cls in enumerate(test_classes):
+ test_cls = test_cls.replace('.class', '*')
+ test_cls = test_cls.replace('/', '.')
+ test_dict[count % num_of_shards].append(test_cls)
+
+ return test_dict
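+
+# Worked example (hypothetical classes, not part of the upstream module):
+#   GroupTestsForShard(2, ['org/chromium/FooTest.class',
+#                          'org/chromium/BarTest.class',
+#                          'org/chromium/BazTest.class'])
+#   -> {0: ['org.chromium.FooTest*', 'org.chromium.BazTest*'],
+#       1: ['org.chromium.BarTest*']}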
+
+
+def PrintProcessesStdout(procs):
+ """Prints the stdout of all the processes.
+
+ Buffers the stdout of the processes and prints it when finished.
+
+ Args:
+ procs: A list of subprocesses.
+
+ Returns: N/A
+ """
+ streams = [p.stdout for p in procs]
+ outputs = collections.defaultdict(list)
+ first_fd = streams[0].fileno()
+
+ while streams:
+ rstreams, _, _ = select.select(streams, [], [])
+ for stream in rstreams:
+ line = stream.readline()
+ if line:
+ # Print out just one output so user can see work being done rather
+ # than waiting for it all at the end.
+ if stream.fileno() == first_fd:
+ sys.stdout.write(line)
+ else:
+ outputs[stream.fileno()].append(line)
+ else:
+ streams.remove(stream) # End of stream.
+
+ for p in procs:
+ sys.stdout.write(''.join(outputs[p.stdout.fileno()]))
+
+
+def _GetTestClasses(file_path):
+  # check_output returns bytes under Python 3; decode before splitting.
+  test_jar_paths = subprocess.check_output(
+      [file_path, '--print-classpath']).decode('utf-8')
+  test_jar_paths = test_jar_paths.split(':')
+
+ test_classes = []
+ for test_jar_path in test_jar_paths:
+ # Avoid searching through jars that are for the test runner.
+ # TODO(crbug.com/1144077): Use robolectric buildconfig file arg.
+ if 'third_party/robolectric/' in test_jar_path:
+ continue
+
+ test_classes += _GetTestClassesFromJar(test_jar_path)
+
+ logging.info('Found %d test classes in class_path jars.', len(test_classes))
+ return test_classes
+
+
+def _GetTestClassesFromJar(test_jar_path):
+ """Returns a list of test classes from a jar.
+
+  Test class names must end in "Test"; this is enforced by:
+ //tools/android/errorprone_plugin/src/org/chromium/tools/errorprone
+ /plugin/TestClassNameCheck.java
+
+ Args:
+ test_jar_path: Path to the jar.
+
+  Returns:
+    A list of the test classes found in the jar.
+ """
+ class_list = []
+ with zipfile.ZipFile(test_jar_path, 'r') as zip_f:
+ for test_class in zip_f.namelist():
+ if test_class.startswith(_EXCLUDED_CLASSES_PREFIXES):
+ continue
+ if test_class.endswith('Test.class') and '$' not in test_class:
+ class_list.append(test_class)
+
+ return class_list
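+
+# Illustrative filtering (hypothetical entries, not part of the upstream
+# module): given a jar containing
+#   org/chromium/foo/FooTest.class            (kept)
+#   org/chromium/foo/FooTest$InnerTest.class  (skipped: nested '$' class)
+#   org/chromium/foo/FooHelper.class          (skipped: does not end in Test)
+#   org/junit/SomethingTest.class             (skipped: excluded prefix)
+# _GetTestClassesFromJar returns ['org/chromium/foo/FooTest.class'].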
diff --git a/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run_test.py b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run_test.py
new file mode 100755
index 0000000000..553451d650
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run_test.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env vpython3
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+
+import os
+import unittest
+
+from pylib.local.machine import local_machine_junit_test_run
+from py_utils import tempfile_ext
+from mock import patch # pylint: disable=import-error
+
+
+class LocalMachineJunitTestRunTests(unittest.TestCase):
+ def testAddPropertiesJar(self):
+ with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
+ apk = 'resource_apk'
+ cmd_list = []
+ local_machine_junit_test_run.AddPropertiesJar(cmd_list, temp_dir, apk)
+ self.assertEqual(cmd_list, [])
+ cmd_list = [['test1']]
+ local_machine_junit_test_run.AddPropertiesJar(cmd_list, temp_dir, apk)
+ self.assertEqual(
+ cmd_list[0],
+ ['test1', '--classpath',
+ os.path.join(temp_dir, 'properties.jar')])
+ cmd_list = [['test1'], ['test2']]
+ local_machine_junit_test_run.AddPropertiesJar(cmd_list, temp_dir, apk)
+ self.assertEqual(len(cmd_list[0]), 3)
+ self.assertEqual(
+ cmd_list[1],
+ ['test2', '--classpath',
+ os.path.join(temp_dir, 'properties.jar')])
+
+ @patch('multiprocessing.cpu_count')
+ def testChooseNumOfShards(self, mock_cpu_count):
+ mock_cpu_count.return_value = 36
+    # Requesting a single shard always yields one shard.
+ test_shards = 1
+ test_classes = [1] * 50
+ shards = local_machine_junit_test_run.ChooseNumOfShards(
+ test_classes, test_shards)
+ self.assertEqual(1, shards)
+
+ # Tests setting shards.
+ test_shards = 4
+ shards = local_machine_junit_test_run.ChooseNumOfShards(
+ test_classes, test_shards)
+ self.assertEqual(4, shards)
+
+    # Tests that _MIN_CLASSES_PER_SHARD caps the shard count.
+ test_classes = [1] * 20
+ test_shards = 8
+ shards = local_machine_junit_test_run.ChooseNumOfShards(
+ test_classes, test_shards)
+ self.assertEqual(2, shards)
+
+ def testGroupTestsForShard(self):
+ test_classes = []
+ results = local_machine_junit_test_run.GroupTestsForShard(1, test_classes)
+ self.assertDictEqual(results, {0: []})
+
+ test_classes = ['dir/test.class'] * 5
+ results = local_machine_junit_test_run.GroupTestsForShard(1, test_classes)
+ self.assertDictEqual(results, {0: ['dir.test*'] * 5})
+
+ test_classes = ['dir/test.class'] * 5
+ results = local_machine_junit_test_run.GroupTestsForShard(2, test_classes)
+ ans_dict = {
+ 0: ['dir.test*'] * 3,
+ 1: ['dir.test*'] * 2,
+ }
+ self.assertDictEqual(results, ans_dict)
+
+ test_classes = ['a10 warthog', 'b17', 'SR71']
+ results = local_machine_junit_test_run.GroupTestsForShard(3, test_classes)
+ ans_dict = {
+ 0: ['a10 warthog'],
+ 1: ['b17'],
+ 2: ['SR71'],
+ }
+ self.assertDictEqual(results, ans_dict)
+
+
+if __name__ == '__main__':
+ unittest.main()