Diffstat (limited to 'third_party/libwebrtc/build/android/pylib/local/device')
9 files changed, 3681 insertions, 0 deletions
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/__init__.py b/third_party/libwebrtc/build/android/pylib/local/device/__init__.py new file mode 100644 index 0000000000..4d6aabb953 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/local/device/__init__.py @@ -0,0 +1,3 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py new file mode 100644 index 0000000000..c254d2e8ca --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py @@ -0,0 +1,328 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +import datetime +import functools +import logging +import os +import shutil +import tempfile +import threading + +import devil_chromium +from devil import base_error +from devil.android import device_denylist +from devil.android import device_errors +from devil.android import device_utils +from devil.android import logcat_monitor +from devil.android.sdk import adb_wrapper +from devil.utils import file_utils +from devil.utils import parallelizer +from pylib import constants +from pylib.constants import host_paths +from pylib.base import environment +from pylib.utils import instrumentation_tracing +from py_trace_event import trace_event + + +LOGCAT_FILTERS = [ + 'chromium:v', + 'cr_*:v', + 'DEBUG:I', + 'StrictMode:D', +] + + +def _DeviceCachePath(device): + file_name = 'device_cache_%s.json' % device.adb.GetDeviceSerial() + return os.path.join(constants.GetOutDirectory(), file_name) + + +def handle_shard_failures(f): + """A decorator that handles device failures for per-device functions. + + Args: + f: the function being decorated. The function must take at least one + argument, and that argument must be the device. + """ + return handle_shard_failures_with(None)(f) + + +# TODO(jbudorick): Refactor this to work as a decorator or context manager. +def handle_shard_failures_with(on_failure): + """A decorator that handles device failures for per-device functions. + + This calls on_failure in the event of a failure. + + Args: + f: the function being decorated. The function must take at least one + argument, and that argument must be the device. + on_failure: A binary function to call on failure. + """ + def decorator(f): + @functools.wraps(f) + def wrapper(dev, *args, **kwargs): + try: + return f(dev, *args, **kwargs) + except device_errors.CommandTimeoutError: + logging.exception('Shard timed out: %s(%s)', f.__name__, str(dev)) + except device_errors.DeviceUnreachableError: + logging.exception('Shard died: %s(%s)', f.__name__, str(dev)) + except base_error.BaseError: + logging.exception('Shard failed: %s(%s)', f.__name__, str(dev)) + except SystemExit: + logging.exception('Shard killed: %s(%s)', f.__name__, str(dev)) + raise + if on_failure: + on_failure(dev, f.__name__) + return None + + return wrapper + + return decorator + + +def place_nomedia_on_device(dev, device_root): + """Places .nomedia file in test data root. + + This helps to prevent system from scanning media files inside test data. + + Args: + dev: Device to place .nomedia file. + device_root: Base path on device to place .nomedia file. 
+ """ + + dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True) + dev.WriteFile('%s/.nomedia' % device_root, 'https://crbug.com/796640') + + +class LocalDeviceEnvironment(environment.Environment): + + def __init__(self, args, output_manager, _error_func): + super(LocalDeviceEnvironment, self).__init__(output_manager) + self._current_try = 0 + self._denylist = (device_denylist.Denylist(args.denylist_file) + if args.denylist_file else None) + self._device_serials = args.test_devices + self._devices_lock = threading.Lock() + self._devices = None + self._concurrent_adb = args.enable_concurrent_adb + self._enable_device_cache = args.enable_device_cache + self._logcat_monitors = [] + self._logcat_output_dir = args.logcat_output_dir + self._logcat_output_file = args.logcat_output_file + self._max_tries = 1 + args.num_retries + self._preferred_abis = None + self._recover_devices = args.recover_devices + self._skip_clear_data = args.skip_clear_data + self._tool_name = args.tool + self._trace_output = None + if hasattr(args, 'trace_output'): + self._trace_output = args.trace_output + self._trace_all = None + if hasattr(args, 'trace_all'): + self._trace_all = args.trace_all + + devil_chromium.Initialize( + output_directory=constants.GetOutDirectory(), + adb_path=args.adb_path) + + # Some things such as Forwarder require ADB to be in the environment path, + # while others like Devil's bundletool.py require Java on the path. + adb_dir = os.path.dirname(adb_wrapper.AdbWrapper.GetAdbPath()) + if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep): + os.environ['PATH'] = os.pathsep.join( + [adb_dir, host_paths.JAVA_PATH, os.environ['PATH']]) + + #override + def SetUp(self): + if self.trace_output and self._trace_all: + to_include = [r"pylib\..*", r"devil\..*", "__main__"] + to_exclude = ["logging"] + instrumentation_tracing.start_instrumenting(self.trace_output, to_include, + to_exclude) + elif self.trace_output: + self.EnableTracing() + + # Must be called before accessing |devices|. + def SetPreferredAbis(self, abis): + assert self._devices is None + self._preferred_abis = abis + + def _InitDevices(self): + device_arg = [] + if self._device_serials: + device_arg = self._device_serials + + self._devices = device_utils.DeviceUtils.HealthyDevices( + self._denylist, + retries=5, + enable_usb_resets=True, + enable_device_files_cache=self._enable_device_cache, + default_retries=self._max_tries - 1, + device_arg=device_arg, + abis=self._preferred_abis) + + if self._logcat_output_file: + self._logcat_output_dir = tempfile.mkdtemp() + + @handle_shard_failures_with(on_failure=self.DenylistDevice) + def prepare_device(d): + d.WaitUntilFullyBooted() + + if self._enable_device_cache: + cache_path = _DeviceCachePath(d) + if os.path.exists(cache_path): + logging.info('Using device cache: %s', cache_path) + with open(cache_path) as f: + d.LoadCacheData(f.read()) + # Delete cached file so that any exceptions cause it to be cleared. 
+ os.unlink(cache_path) + + if self._logcat_output_dir: + logcat_file = os.path.join( + self._logcat_output_dir, + '%s_%s' % (d.adb.GetDeviceSerial(), + datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S'))) + monitor = logcat_monitor.LogcatMonitor( + d.adb, clear=True, output_file=logcat_file) + self._logcat_monitors.append(monitor) + monitor.Start() + + self.parallel_devices.pMap(prepare_device) + + @property + def current_try(self): + return self._current_try + + def IncrementCurrentTry(self): + self._current_try += 1 + + def ResetCurrentTry(self): + self._current_try = 0 + + @property + def denylist(self): + return self._denylist + + @property + def concurrent_adb(self): + return self._concurrent_adb + + @property + def devices(self): + # Initialize lazily so that host-only tests do not fail when no devices are + # attached. + if self._devices is None: + self._InitDevices() + return self._devices + + @property + def max_tries(self): + return self._max_tries + + @property + def parallel_devices(self): + return parallelizer.SyncParallelizer(self.devices) + + @property + def recover_devices(self): + return self._recover_devices + + @property + def skip_clear_data(self): + return self._skip_clear_data + + @property + def tool(self): + return self._tool_name + + @property + def trace_output(self): + return self._trace_output + + #override + def TearDown(self): + if self.trace_output and self._trace_all: + instrumentation_tracing.stop_instrumenting() + elif self.trace_output: + self.DisableTracing() + + # By default, teardown will invoke ADB. When receiving SIGTERM due to a + # timeout, there's a high probability that ADB is non-responsive. In these + # cases, sending an ADB command will potentially take a long time to time + # out. Before this happens, the process will be hard-killed for not + # responding to SIGTERM fast enough. + if self._received_sigterm: + return + + if not self._devices: + return + + @handle_shard_failures_with(on_failure=self.DenylistDevice) + def tear_down_device(d): + # Write the cache even when not using it so that it will be ready the + # first time that it is enabled. Writing it every time is also necessary + # so that an invalid cache can be flushed just by disabling it for one + # run. 
+ cache_path = _DeviceCachePath(d) + if os.path.exists(os.path.dirname(cache_path)): + with open(cache_path, 'w') as f: + f.write(d.DumpCacheData()) + logging.info('Wrote device cache: %s', cache_path) + else: + logging.warning( + 'Unable to write device cache as %s directory does not exist', + os.path.dirname(cache_path)) + + self.parallel_devices.pMap(tear_down_device) + + for m in self._logcat_monitors: + try: + m.Stop() + m.Close() + _, temp_path = tempfile.mkstemp() + with open(m.output_file, 'r') as infile: + with open(temp_path, 'w') as outfile: + for line in infile: + outfile.write('Device(%s) %s' % (m.adb.GetDeviceSerial(), line)) + shutil.move(temp_path, m.output_file) + except base_error.BaseError: + logging.exception('Failed to stop logcat monitor for %s', + m.adb.GetDeviceSerial()) + except IOError: + logging.exception('Failed to locate logcat for device %s', + m.adb.GetDeviceSerial()) + + if self._logcat_output_file: + file_utils.MergeFiles( + self._logcat_output_file, + [m.output_file for m in self._logcat_monitors + if os.path.exists(m.output_file)]) + shutil.rmtree(self._logcat_output_dir) + + def DenylistDevice(self, device, reason='local_device_failure'): + device_serial = device.adb.GetDeviceSerial() + if self._denylist: + self._denylist.Extend([device_serial], reason=reason) + with self._devices_lock: + self._devices = [d for d in self._devices if str(d) != device_serial] + logging.error('Device %s denylisted: %s', device_serial, reason) + if not self._devices: + raise device_errors.NoDevicesError( + 'All devices were denylisted due to errors') + + @staticmethod + def DisableTracing(): + if not trace_event.trace_is_enabled(): + logging.warning('Tracing is not running.') + else: + trace_event.trace_disable() + + def EnableTracing(self): + if trace_event.trace_is_enabled(): + logging.warning('Tracing is already running.') + else: + trace_event.trace_enable(self._trace_output) diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py new file mode 100644 index 0000000000..c81722da6e --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py @@ -0,0 +1,896 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
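A note on the environment module above: handle_shard_failures_with is the error boundary wrapped around every per-device step in these runners. Device errors are logged and swallowed, and the optional callback (typically DenylistDevice) lets the run continue on the remaining devices. A minimal usage sketch, assuming pylib is importable from a Chromium checkout; _on_failure and reboot_device are illustrative stand-ins, not code from this change:

```python
from pylib.local.device import local_device_environment


def _on_failure(device, function_name):
  # Illustrative callback; a real runner would denylist the device here.
  print('device %s failed during %s' % (device, function_name))


@local_device_environment.handle_shard_failures_with(on_failure=_on_failure)
def reboot_device(device):
  # Any devil base_error raised here (e.g. CommandTimeoutError) is logged
  # by the wrapper, _on_failure is invoked, and None is returned instead
  # of the exception killing the whole shard. SystemExit is re-raised.
  device.Reboot()
```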
+ + +import contextlib +import collections +import itertools +import logging +import math +import os +import posixpath +import subprocess +import shutil +import time + +from six.moves import range # pylint: disable=redefined-builtin +from devil import base_error +from devil.android import crash_handler +from devil.android import device_errors +from devil.android import device_temp_file +from devil.android import logcat_monitor +from devil.android import ports +from devil.android.sdk import version_codes +from devil.utils import reraiser_thread +from incremental_install import installer +from pylib import constants +from pylib.base import base_test_result +from pylib.gtest import gtest_test_instance +from pylib.local import local_test_server_spawner +from pylib.local.device import local_device_environment +from pylib.local.device import local_device_test_run +from pylib.utils import google_storage_helper +from pylib.utils import logdog_helper +from py_trace_event import trace_event +from py_utils import contextlib_ext +from py_utils import tempfile_ext +import tombstones + +_MAX_INLINE_FLAGS_LENGTH = 50 # Arbitrarily chosen. +_EXTRA_COMMAND_LINE_FILE = ( + 'org.chromium.native_test.NativeTest.CommandLineFile') +_EXTRA_COMMAND_LINE_FLAGS = ( + 'org.chromium.native_test.NativeTest.CommandLineFlags') +_EXTRA_COVERAGE_DEVICE_FILE = ( + 'org.chromium.native_test.NativeTest.CoverageDeviceFile') +_EXTRA_STDOUT_FILE = ( + 'org.chromium.native_test.NativeTestInstrumentationTestRunner' + '.StdoutFile') +_EXTRA_TEST = ( + 'org.chromium.native_test.NativeTestInstrumentationTestRunner' + '.Test') +_EXTRA_TEST_LIST = ( + 'org.chromium.native_test.NativeTestInstrumentationTestRunner' + '.TestList') + +_SECONDS_TO_NANOS = int(1e9) + +# Tests that use SpawnedTestServer must run the LocalTestServerSpawner on the +# host machine. +# TODO(jbudorick): Move this up to the test instance if the net test server is +# handled outside of the APK for the remote_device environment. +_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [ + 'components_browsertests', 'content_unittests', 'content_browsertests', + 'net_unittests', 'services_unittests', 'unit_tests' +] + +# These are use for code coverage. +_LLVM_PROFDATA_PATH = os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', + 'llvm-build', 'Release+Asserts', 'bin', + 'llvm-profdata') +# Name of the file extension for profraw data files. +_PROFRAW_FILE_EXTENSION = 'profraw' +# Name of the file where profraw data files are merged. +_MERGE_PROFDATA_FILE_NAME = 'coverage_merged.' + _PROFRAW_FILE_EXTENSION + +# No-op context manager. If we used Python 3, we could change this to +# contextlib.ExitStack() +class _NullContextManager(object): + def __enter__(self): + pass + def __exit__(self, *args): + pass + + +def _GenerateSequentialFileNames(filename): + """Infinite generator of names: 'name.ext', 'name_1.ext', 'name_2.ext', ...""" + yield filename + base, ext = os.path.splitext(filename) + for i in itertools.count(1): + yield '%s_%d%s' % (base, i, ext) + + +def _ExtractTestsFromFilter(gtest_filter): + """Returns the list of tests specified by the given filter. + + Returns: + None if the device should be queried for the test list instead. + """ + # Empty means all tests, - means exclude filter. + if not gtest_filter or '-' in gtest_filter: + return None + + patterns = gtest_filter.split(':') + # For a single pattern, allow it even if it has a wildcard so long as the + # wildcard comes at the end and there is at least one . to prove the scope is + # not too large. 
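+  # For example, 'Foo.*' is accepted as-is, while 'Foo*' (no dot before the + # wildcard) falls through to querying the device for the full test list.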
+ # This heuristic is not necessarily faster, but normally is. + if len(patterns) == 1 and patterns[0].endswith('*'): + no_suffix = patterns[0].rstrip('*') + if '*' not in no_suffix and '.' in no_suffix: + return patterns + + if '*' in gtest_filter: + return None + return patterns + + +def _GetDeviceTimeoutMultiplier(): + # Emulated devices typically run 20-150x slower than real-time. + # Give a way to control this through the DEVICE_TIMEOUT_MULTIPLIER + # environment variable. + multiplier = os.getenv("DEVICE_TIMEOUT_MULTIPLIER") + if multiplier: + return int(multiplier) + return 1 + + +def _MergeCoverageFiles(coverage_dir, profdata_dir): + """Merge coverage data files. + + Each instrumentation activity generates a separate profraw data file. This + merges all profraw files in profdata_dir into a single file in + coverage_dir. This happens after each test, rather than waiting until after + all tests are ran to reduce the memory footprint used by all the profraw + files. + + Args: + coverage_dir: The path to the coverage directory. + profdata_dir: The directory where the profraw data file(s) are located. + + Return: + None + """ + # profdata_dir may not exist if pulling coverage files failed. + if not os.path.exists(profdata_dir): + logging.debug('Profraw directory does not exist.') + return + + merge_file = os.path.join(coverage_dir, _MERGE_PROFDATA_FILE_NAME) + profraw_files = [ + os.path.join(profdata_dir, f) for f in os.listdir(profdata_dir) + if f.endswith(_PROFRAW_FILE_EXTENSION) + ] + + try: + logging.debug('Merging target profraw files into merged profraw file.') + subprocess_cmd = [ + _LLVM_PROFDATA_PATH, + 'merge', + '-o', + merge_file, + '-sparse=true', + ] + # Grow the merge file by merging it with itself and the new files. + if os.path.exists(merge_file): + subprocess_cmd.append(merge_file) + subprocess_cmd.extend(profraw_files) + output = subprocess.check_output(subprocess_cmd) + logging.debug('Merge output: %s', output) + except subprocess.CalledProcessError: + # Don't raise error as that will kill the test run. When code coverage + # generates a report, that will raise the error in the report generation. + logging.error( + 'Failed to merge target profdata files to create merged profraw file.') + + # Free up memory space on bot as all data is in the merge file. + for f in profraw_files: + os.remove(f) + + +def _PullCoverageFiles(device, device_coverage_dir, output_dir): + """Pulls coverage files on device to host directory. + + Args: + device: The working device. + device_coverage_dir: The directory to store coverage data on device. + output_dir: The output directory on host. + """ + try: + if not os.path.exists(output_dir): + os.makedirs(output_dir) + device.PullFile(device_coverage_dir, output_dir) + if not os.listdir(os.path.join(output_dir, 'profraw')): + logging.warning('No coverage data was generated for this run') + except (OSError, base_error.BaseError) as e: + logging.warning('Failed to handle coverage data after tests: %s', e) + finally: + device.RemovePath(device_coverage_dir, force=True, recursive=True) + + +def _GetDeviceCoverageDir(device): + """Gets the directory to generate coverage data on device. + + Args: + device: The working device. + + Returns: + The directory path on the device. + """ + return posixpath.join(device.GetExternalStoragePath(), 'chrome', 'test', + 'coverage', 'profraw') + + +def _GetLLVMProfilePath(device_coverage_dir, suite, coverage_index): + """Gets 'LLVM_PROFILE_FILE' environment variable path. 
+ + Dumping data to ONLY 1 file may cause warning and data overwrite in + browsertests, so that pattern "%2m" is used to expand to 2 raw profiles + at runtime. + + Args: + device_coverage_dir: The directory to generate data on device. + suite: Test suite name. + coverage_index: The incremental index for this test suite. + + Returns: + The path pattern for environment variable 'LLVM_PROFILE_FILE'. + """ + return posixpath.join(device_coverage_dir, + '_'.join([suite, + str(coverage_index), '%2m.profraw'])) + + +class _ApkDelegate(object): + def __init__(self, test_instance, tool): + self._activity = test_instance.activity + self._apk_helper = test_instance.apk_helper + self._test_apk_incremental_install_json = ( + test_instance.test_apk_incremental_install_json) + self._package = test_instance.package + self._runner = test_instance.runner + self._permissions = test_instance.permissions + self._suite = test_instance.suite + self._component = '%s/%s' % (self._package, self._runner) + self._extras = test_instance.extras + self._wait_for_java_debugger = test_instance.wait_for_java_debugger + self._tool = tool + self._coverage_dir = test_instance.coverage_dir + self._coverage_index = 0 + self._use_existing_test_data = test_instance.use_existing_test_data + + def GetTestDataRoot(self, device): + # pylint: disable=no-self-use + return posixpath.join(device.GetExternalStoragePath(), + 'chromium_tests_root') + + def Install(self, device): + if self._use_existing_test_data: + return + if self._test_apk_incremental_install_json: + installer.Install(device, self._test_apk_incremental_install_json, + apk=self._apk_helper, permissions=self._permissions) + else: + device.Install( + self._apk_helper, + allow_downgrade=True, + reinstall=True, + permissions=self._permissions) + + def ResultsDirectory(self, device): + return device.GetApplicationDataDirectory(self._package) + + def Run(self, test, device, flags=None, **kwargs): + extras = dict(self._extras) + device_api = device.build_version_sdk + + if self._coverage_dir and device_api >= version_codes.LOLLIPOP: + device_coverage_dir = _GetDeviceCoverageDir(device) + extras[_EXTRA_COVERAGE_DEVICE_FILE] = _GetLLVMProfilePath( + device_coverage_dir, self._suite, self._coverage_index) + self._coverage_index += 1 + + if ('timeout' in kwargs + and gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT not in extras): + # Make sure the instrumentation doesn't kill the test before the + # scripts do. The provided timeout value is in seconds, but the + # instrumentation deals with nanoseconds because that's how Android + # handles time. 
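+      # For example, a 120 second timeout is passed to the instrumentation + # as 120 * 1e9 = 120000000000 nanoseconds.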
+ extras[gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT] = int( + kwargs['timeout'] * _SECONDS_TO_NANOS) + + # pylint: disable=redefined-variable-type + command_line_file = _NullContextManager() + if flags: + if len(flags) > _MAX_INLINE_FLAGS_LENGTH: + command_line_file = device_temp_file.DeviceTempFile(device.adb) + device.WriteFile(command_line_file.name, '_ %s' % flags) + extras[_EXTRA_COMMAND_LINE_FILE] = command_line_file.name + else: + extras[_EXTRA_COMMAND_LINE_FLAGS] = flags + + test_list_file = _NullContextManager() + if test: + if len(test) > 1: + test_list_file = device_temp_file.DeviceTempFile(device.adb) + device.WriteFile(test_list_file.name, '\n'.join(test)) + extras[_EXTRA_TEST_LIST] = test_list_file.name + else: + extras[_EXTRA_TEST] = test[0] + # pylint: enable=redefined-variable-type + + # We need to use GetAppWritablePath here instead of GetExternalStoragePath + # since we will not have yet applied legacy storage permission workarounds + # on R+. + stdout_file = device_temp_file.DeviceTempFile( + device.adb, dir=device.GetAppWritablePath(), suffix='.gtest_out') + extras[_EXTRA_STDOUT_FILE] = stdout_file.name + + if self._wait_for_java_debugger: + cmd = ['am', 'set-debug-app', '-w', self._package] + device.RunShellCommand(cmd, check_return=True) + logging.warning('*' * 80) + logging.warning('Waiting for debugger to attach to process: %s', + self._package) + logging.warning('*' * 80) + + with command_line_file, test_list_file, stdout_file: + try: + device.StartInstrumentation( + self._component, extras=extras, raw=False, **kwargs) + except device_errors.CommandFailedError: + logging.exception('gtest shard failed.') + except device_errors.CommandTimeoutError: + logging.exception('gtest shard timed out.') + except device_errors.DeviceUnreachableError: + logging.exception('gtest shard device unreachable.') + except Exception: + device.ForceStop(self._package) + raise + finally: + if self._coverage_dir and device_api >= version_codes.LOLLIPOP: + if not os.path.isdir(self._coverage_dir): + os.makedirs(self._coverage_dir) + # TODO(crbug.com/1179004) Use _MergeCoverageFiles when llvm-profdata + # not found is fixed. 
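+          # Coverage data from each run is pulled into a numbered + # subdirectory of _coverage_dir so successive runs do not clobber + # each other's profraw files.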
+ _PullCoverageFiles( + device, device_coverage_dir, + os.path.join(self._coverage_dir, str(self._coverage_index))) + + return device.ReadFile(stdout_file.name).splitlines() + + def PullAppFiles(self, device, files, directory): + device_dir = device.GetApplicationDataDirectory(self._package) + host_dir = os.path.join(directory, str(device)) + for f in files: + device_file = posixpath.join(device_dir, f) + host_file = os.path.join(host_dir, *f.split(posixpath.sep)) + for host_file in _GenerateSequentialFileNames(host_file): + if not os.path.exists(host_file): + break + device.PullFile(device_file, host_file) + + def Clear(self, device): + device.ClearApplicationState(self._package, permissions=self._permissions) + + +class _ExeDelegate(object): + + def __init__(self, tr, test_instance, tool): + self._host_dist_dir = test_instance.exe_dist_dir + self._exe_file_name = os.path.basename( + test_instance.exe_dist_dir)[:-len('__dist')] + self._device_dist_dir = posixpath.join( + constants.TEST_EXECUTABLE_DIR, + os.path.basename(test_instance.exe_dist_dir)) + self._test_run = tr + self._tool = tool + self._suite = test_instance.suite + self._coverage_dir = test_instance.coverage_dir + self._coverage_index = 0 + + def GetTestDataRoot(self, device): + # pylint: disable=no-self-use + # pylint: disable=unused-argument + return posixpath.join(constants.TEST_EXECUTABLE_DIR, 'chromium_tests_root') + + def Install(self, device): + # TODO(jbudorick): Look into merging this with normal data deps pushing if + # executables become supported on nonlocal environments. + device.PushChangedFiles([(self._host_dist_dir, self._device_dist_dir)], + delete_device_stale=True) + + def ResultsDirectory(self, device): + # pylint: disable=no-self-use + # pylint: disable=unused-argument + return constants.TEST_EXECUTABLE_DIR + + def Run(self, test, device, flags=None, **kwargs): + tool = self._test_run.GetTool(device).GetTestWrapper() + if tool: + cmd = [tool] + else: + cmd = [] + cmd.append(posixpath.join(self._device_dist_dir, self._exe_file_name)) + + if test: + cmd.append('--gtest_filter=%s' % ':'.join(test)) + if flags: + # TODO(agrieve): This won't work if multiple flags are passed. + cmd.append(flags) + cwd = constants.TEST_EXECUTABLE_DIR + + env = { + 'LD_LIBRARY_PATH': self._device_dist_dir + } + + if self._coverage_dir: + device_coverage_dir = _GetDeviceCoverageDir(device) + env['LLVM_PROFILE_FILE'] = _GetLLVMProfilePath( + device_coverage_dir, self._suite, self._coverage_index) + self._coverage_index += 1 + + if self._tool != 'asan': + env['UBSAN_OPTIONS'] = constants.UBSAN_OPTIONS + + try: + gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP'] + external = device.GetExternalStoragePath() + env['GCOV_PREFIX'] = '%s/gcov' % external + env['GCOV_PREFIX_STRIP'] = gcov_strip_depth + except (device_errors.CommandFailedError, KeyError): + pass + + # Executable tests return a nonzero exit code on test failure, which is + # fine from the test runner's perspective; thus check_return=False. 
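+    # large_output=True routes stdout through a file on the device so that + # long gtest logs are not truncated.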
+ output = device.RunShellCommand( + cmd, cwd=cwd, env=env, check_return=False, large_output=True, **kwargs) + + if self._coverage_dir: + _PullCoverageFiles( + device, device_coverage_dir, + os.path.join(self._coverage_dir, str(self._coverage_index))) + + return output + + def PullAppFiles(self, device, files, directory): + pass + + def Clear(self, device): + device.KillAll(self._exe_file_name, + blocking=True, + timeout=30 * _GetDeviceTimeoutMultiplier(), + quiet=True) + + +class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun): + + def __init__(self, env, test_instance): + assert isinstance(env, local_device_environment.LocalDeviceEnvironment) + assert isinstance(test_instance, gtest_test_instance.GtestTestInstance) + super(LocalDeviceGtestRun, self).__init__(env, test_instance) + + if self._test_instance.apk_helper: + self._installed_packages = [ + self._test_instance.apk_helper.GetPackageName() + ] + + # pylint: disable=redefined-variable-type + if self._test_instance.apk: + self._delegate = _ApkDelegate(self._test_instance, env.tool) + elif self._test_instance.exe_dist_dir: + self._delegate = _ExeDelegate(self, self._test_instance, self._env.tool) + if self._test_instance.isolated_script_test_perf_output: + self._test_perf_output_filenames = _GenerateSequentialFileNames( + self._test_instance.isolated_script_test_perf_output) + else: + self._test_perf_output_filenames = itertools.repeat(None) + # pylint: enable=redefined-variable-type + self._crashes = set() + self._servers = collections.defaultdict(list) + + #override + def TestPackage(self): + return self._test_instance.suite + + #override + def SetUp(self): + @local_device_environment.handle_shard_failures_with( + on_failure=self._env.DenylistDevice) + @trace_event.traced + def individual_device_set_up(device, host_device_tuples): + def install_apk(dev): + # Install test APK. + self._delegate.Install(dev) + + def push_test_data(dev): + if self._test_instance.use_existing_test_data: + return + # Push data dependencies. + device_root = self._delegate.GetTestDataRoot(dev) + host_device_tuples_substituted = [ + (h, local_device_test_run.SubstituteDeviceRoot(d, device_root)) + for h, d in host_device_tuples] + local_device_environment.place_nomedia_on_device(dev, device_root) + dev.PushChangedFiles( + host_device_tuples_substituted, + delete_device_stale=True, + # Some gtest suites, e.g. unit_tests, have data dependencies that + # can take longer than the default timeout to push. See + # crbug.com/791632 for context. + timeout=600 * math.ceil(_GetDeviceTimeoutMultiplier() / 10)) + if not host_device_tuples: + dev.RemovePath(device_root, force=True, recursive=True, rename=True) + dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True) + + def init_tool_and_start_servers(dev): + tool = self.GetTool(dev) + tool.CopyFiles(dev) + tool.SetupEnvironment() + + try: + # See https://crbug.com/1030827. + # This is a hack that may break in the future. We're relying on the + # fact that adb doesn't use ipv6 for it's server, and so doesn't + # listen on ipv6, but ssh remote forwarding does. 5037 is the port + # number adb uses for its server. + if "[::1]:5037" in subprocess.check_output( + "ss -o state listening 'sport = 5037'", shell=True): + logging.error( + 'Test Server cannot be started with a remote-forwarded adb ' + 'server. 
Continuing anyways, but some tests may fail.') + return + except subprocess.CalledProcessError: + pass + + self._servers[str(dev)] = [] + if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER: + self._servers[str(dev)].append( + local_test_server_spawner.LocalTestServerSpawner( + ports.AllocateTestServerPort(), dev, tool)) + + for s in self._servers[str(dev)]: + s.SetUp() + + def bind_crash_handler(step, dev): + return lambda: crash_handler.RetryOnSystemCrash(step, dev) + + # Explicitly enable root to ensure that tests run under deterministic + # conditions. Without this explicit call, EnableRoot() is called from + # push_test_data() when PushChangedFiles() determines that it should use + # _PushChangedFilesZipped(), which is only most of the time. + # Root is required (amongst maybe other reasons) to pull the results file + # from the device, since it lives within the application's data directory + # (via GetApplicationDataDirectory()). + device.EnableRoot() + + steps = [ + bind_crash_handler(s, device) + for s in (install_apk, push_test_data, init_tool_and_start_servers)] + if self._env.concurrent_adb: + reraiser_thread.RunAsync(steps) + else: + for step in steps: + step() + + self._env.parallel_devices.pMap( + individual_device_set_up, + self._test_instance.GetDataDependencies()) + + #override + def _ShouldShard(self): + return True + + #override + def _CreateShards(self, tests): + # _crashes are tests that might crash and make the tests in the same shard + # following the crashed testcase not run. + # Thus we need to create separate shards for each crashed testcase, + # so that other tests can be run. + device_count = len(self._env.devices) + shards = [] + + # Add shards with only one suspect testcase. + shards += [[crash] for crash in self._crashes if crash in tests] + + # Delete suspect testcase from tests. + tests = [test for test in tests if not test in self._crashes] + + max_shard_size = self._test_instance.test_launcher_batch_limit + + shards.extend(self._PartitionTests(tests, device_count, max_shard_size)) + return shards + + #override + def _GetTests(self): + if self._test_instance.extract_test_list_from_filter: + # When the exact list of tests to run is given via command-line (e.g. when + # locally iterating on a specific test), skip querying the device (which + # takes ~3 seconds). + tests = _ExtractTestsFromFilter(self._test_instance.gtest_filter) + if tests: + return tests + + # Even when there's only one device, it still makes sense to retrieve the + # test list so that tests can be split up and run in batches rather than all + # at once (since test output is not streamed). + @local_device_environment.handle_shard_failures_with( + on_failure=self._env.DenylistDevice) + def list_tests(dev): + timeout = 30 * _GetDeviceTimeoutMultiplier() + retries = 1 + if self._test_instance.wait_for_java_debugger: + timeout = None + + flags = [ + f for f in self._test_instance.flags + if f not in ['--wait-for-debugger', '--wait-for-java-debugger'] + ] + flags.append('--gtest_list_tests') + + # TODO(crbug.com/726880): Remove retries when no longer necessary. + for i in range(0, retries+1): + logging.info('flags:') + for f in flags: + logging.info(' %s', f) + + with self._ArchiveLogcat(dev, 'list_tests'): + raw_test_list = crash_handler.RetryOnSystemCrash( + lambda d: self._delegate.Run( + None, d, flags=' '.join(flags), timeout=timeout), + device=dev) + + tests = gtest_test_instance.ParseGTestListTests(raw_test_list) + if not tests: + logging.info('No tests found. 
Output:') + for l in raw_test_list: + logging.info(' %s', l) + if i < retries: + logging.info('Retrying...') + else: + break + return tests + + # Query all devices in case one fails. + test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None) + + # If all devices failed to list tests, raise an exception. + # Check that tl is not None and is not empty. + if all(not tl for tl in test_lists): + raise device_errors.CommandFailedError( + 'Failed to list tests on any device') + tests = list(sorted(set().union(*[set(tl) for tl in test_lists if tl]))) + tests = self._test_instance.FilterTests(tests) + tests = self._ApplyExternalSharding( + tests, self._test_instance.external_shard_index, + self._test_instance.total_external_shards) + return tests + + def _UploadTestArtifacts(self, device, test_artifacts_dir): + # TODO(jbudorick): Reconcile this with the output manager once + # https://codereview.chromium.org/2933993002/ lands. + if test_artifacts_dir: + with tempfile_ext.NamedTemporaryDirectory() as test_artifacts_host_dir: + device.PullFile(test_artifacts_dir.name, test_artifacts_host_dir) + with tempfile_ext.NamedTemporaryDirectory() as temp_zip_dir: + zip_base_name = os.path.join(temp_zip_dir, 'test_artifacts') + test_artifacts_zip = shutil.make_archive( + zip_base_name, 'zip', test_artifacts_host_dir) + link = google_storage_helper.upload( + google_storage_helper.unique_name( + 'test_artifacts', device=device), + test_artifacts_zip, + bucket='%s/test_artifacts' % ( + self._test_instance.gs_test_artifacts_bucket)) + logging.info('Uploading test artifacts to %s.', link) + return link + return None + + def _PullRenderTestOutput(self, device, render_test_output_device_dir): + # We pull the render tests into a temp directory then copy them over + # individually. Otherwise we end up with a temporary directory name + # in the host output directory. + with tempfile_ext.NamedTemporaryDirectory() as tmp_host_dir: + try: + device.PullFile(render_test_output_device_dir, tmp_host_dir) + except device_errors.CommandFailedError: + logging.exception('Failed to pull render test output dir %s', + render_test_output_device_dir) + temp_host_dir = os.path.join( + tmp_host_dir, os.path.basename(render_test_output_device_dir)) + for output_file in os.listdir(temp_host_dir): + src_path = os.path.join(temp_host_dir, output_file) + dst_path = os.path.join(self._test_instance.render_test_output_dir, + output_file) + shutil.move(src_path, dst_path) + + @contextlib.contextmanager + def _ArchiveLogcat(self, device, test): + if isinstance(test, str): + desc = test + else: + desc = hash(tuple(test)) + + stream_name = 'logcat_%s_shard%s_%s_%s' % ( + desc, self._test_instance.external_shard_index, + time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial) + + logcat_file = None + logmon = None + try: + with self._env.output_manager.ArchivedTempfile(stream_name, + 'logcat') as logcat_file: + with logcat_monitor.LogcatMonitor( + device.adb, + filter_specs=local_device_environment.LOGCAT_FILTERS, + output_file=logcat_file.name, + check_error=False) as logmon: + with contextlib_ext.Optional(trace_event.trace(str(test)), + self._env.trace_output): + yield logcat_file + finally: + if logmon: + logmon.Close() + if logcat_file and logcat_file.Link(): + logging.info('Logcat saved to %s', logcat_file.Link()) + + #override + def _RunTest(self, device, test): + # Run the test. 
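+    # The effective timeout scales the per-shard timeout by the tool's + # timeout scale and DEVICE_TIMEOUT_MULTIPLIER, e.g. 120 s * 1 * 4 = 480 s + # on an emulator configured with a multiplier of 4.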
+ timeout = (self._test_instance.shard_timeout * + self.GetTool(device).GetTimeoutScale() * + _GetDeviceTimeoutMultiplier()) + if self._test_instance.wait_for_java_debugger: + timeout = None + if self._test_instance.store_tombstones: + tombstones.ClearAllTombstones(device) + test_perf_output_filename = next(self._test_perf_output_filenames) + + if self._test_instance.isolated_script_test_output: + suffix = '.json' + else: + suffix = '.xml' + + with device_temp_file.DeviceTempFile( + adb=device.adb, + dir=self._delegate.ResultsDirectory(device), + suffix=suffix) as device_tmp_results_file: + with contextlib_ext.Optional( + device_temp_file.NamedDeviceTemporaryDirectory( + adb=device.adb, dir='/sdcard/'), + self._test_instance.gs_test_artifacts_bucket) as test_artifacts_dir: + with (contextlib_ext.Optional( + device_temp_file.DeviceTempFile( + adb=device.adb, dir=self._delegate.ResultsDirectory(device)), + test_perf_output_filename)) as isolated_script_test_perf_output: + with contextlib_ext.Optional( + device_temp_file.NamedDeviceTemporaryDirectory(adb=device.adb, + dir='/sdcard/'), + self._test_instance.render_test_output_dir + ) as render_test_output_dir: + + flags = list(self._test_instance.flags) + if self._test_instance.enable_xml_result_parsing: + flags.append('--gtest_output=xml:%s' % + device_tmp_results_file.name) + + if self._test_instance.gs_test_artifacts_bucket: + flags.append('--test_artifacts_dir=%s' % test_artifacts_dir.name) + + if self._test_instance.isolated_script_test_output: + flags.append('--isolated-script-test-output=%s' % + device_tmp_results_file.name) + + if test_perf_output_filename: + flags.append('--isolated_script_test_perf_output=%s' % + isolated_script_test_perf_output.name) + + if self._test_instance.render_test_output_dir: + flags.append('--render-test-output-dir=%s' % + render_test_output_dir.name) + + logging.info('flags:') + for f in flags: + logging.info(' %s', f) + + with self._ArchiveLogcat(device, test) as logcat_file: + output = self._delegate.Run(test, + device, + flags=' '.join(flags), + timeout=timeout, + retries=0) + + if self._test_instance.enable_xml_result_parsing: + try: + gtest_xml = device.ReadFile(device_tmp_results_file.name) + except device_errors.CommandFailedError: + logging.exception('Failed to pull gtest results XML file %s', + device_tmp_results_file.name) + gtest_xml = None + + if self._test_instance.isolated_script_test_output: + try: + gtest_json = device.ReadFile(device_tmp_results_file.name) + except device_errors.CommandFailedError: + logging.exception('Failed to pull gtest results JSON file %s', + device_tmp_results_file.name) + gtest_json = None + + if test_perf_output_filename: + try: + device.PullFile(isolated_script_test_perf_output.name, + test_perf_output_filename) + except device_errors.CommandFailedError: + logging.exception('Failed to pull chartjson results %s', + isolated_script_test_perf_output.name) + + test_artifacts_url = self._UploadTestArtifacts( + device, test_artifacts_dir) + + if render_test_output_dir: + self._PullRenderTestOutput(device, render_test_output_dir.name) + + for s in self._servers[str(device)]: + s.Reset() + if self._test_instance.app_files: + self._delegate.PullAppFiles(device, self._test_instance.app_files, + self._test_instance.app_file_dir) + if not self._env.skip_clear_data: + self._delegate.Clear(device) + + for l in output: + logging.info(l) + + # Parse the output. + # TODO(jbudorick): Transition test scripts away from parsing stdout. 
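+    # Results arrive through one of three routes: a --gtest_output XML file, + # an isolated-script JSON file, or, as a fallback, parsed stdout.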
+ if self._test_instance.enable_xml_result_parsing: + results = gtest_test_instance.ParseGTestXML(gtest_xml) + elif self._test_instance.isolated_script_test_output: + results = gtest_test_instance.ParseGTestJSON(gtest_json) + else: + results = gtest_test_instance.ParseGTestOutput( + output, self._test_instance.symbolizer, device.product_cpu_abi) + + tombstones_url = None + for r in results: + if logcat_file: + r.SetLink('logcat', logcat_file.Link()) + + if self._test_instance.gs_test_artifacts_bucket: + r.SetLink('test_artifacts', test_artifacts_url) + + if r.GetType() == base_test_result.ResultType.CRASH: + self._crashes.add(r.GetName()) + if self._test_instance.store_tombstones: + if not tombstones_url: + resolved_tombstones = tombstones.ResolveTombstones( + device, + resolve_all_tombstones=True, + include_stack_symbols=False, + wipe_tombstones=True) + stream_name = 'tombstones_%s_%s' % ( + time.strftime('%Y%m%dT%H%M%S', time.localtime()), + device.serial) + tombstones_url = logdog_helper.text( + stream_name, '\n'.join(resolved_tombstones)) + r.SetLink('tombstones', tombstones_url) + + tests_stripped_disabled_prefix = set() + for t in test: + tests_stripped_disabled_prefix.add( + gtest_test_instance.TestNameWithoutDisabledPrefix(t)) + not_run_tests = tests_stripped_disabled_prefix.difference( + set(r.GetName() for r in results)) + return results, list(not_run_tests) if results else None + + #override + def TearDown(self): + # By default, teardown will invoke ADB. When receiving SIGTERM due to a + # timeout, there's a high probability that ADB is non-responsive. In these + # cases, sending an ADB command will potentially take a long time to time + # out. Before this happens, the process will be hard-killed for not + # responding to SIGTERM fast enough. + if self._received_sigterm: + return + + @local_device_environment.handle_shard_failures + @trace_event.traced + def individual_device_tear_down(dev): + for s in self._servers.get(str(dev), []): + s.TearDown() + + tool = self.GetTool(dev) + tool.CleanUpEnvironment() + + self._env.parallel_devices.pMap(individual_device_tear_down) diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py new file mode 100755 index 0000000000..b664d58131 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py @@ -0,0 +1,79 @@ +#!/usr/bin/env vpython3 +# Copyright 2021 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +"""Tests for local_device_gtest_test_run.""" + +# pylint: disable=protected-access + + +import os +import tempfile +import unittest + +from pylib.gtest import gtest_test_instance +from pylib.local.device import local_device_environment +from pylib.local.device import local_device_gtest_run +from py_utils import tempfile_ext + +import mock # pylint: disable=import-error + + +class LocalDeviceGtestRunTest(unittest.TestCase): + def setUp(self): + self._obj = local_device_gtest_run.LocalDeviceGtestRun( + mock.MagicMock(spec=local_device_environment.LocalDeviceEnvironment), + mock.MagicMock(spec=gtest_test_instance.GtestTestInstance)) + + def testExtractTestsFromFilter(self): + # Checks splitting by colons. + self.assertEqual([ + 'b17', + 'm4e3', + 'p51', + ], local_device_gtest_run._ExtractTestsFromFilter('b17:m4e3:p51')) + # Checks the '-' sign. 
+ self.assertIsNone(local_device_gtest_run._ExtractTestsFromFilter('-mk2')) + # Checks more than one asterisk. + self.assertIsNone( + local_device_gtest_run._ExtractTestsFromFilter('.mk2*:.M67*')) + # Checks just an asterisk without a period. + self.assertIsNone(local_device_gtest_run._ExtractTestsFromFilter('M67*')) + # Checks an asterisk at the end with a period. + self.assertEqual(['.M67*'], + local_device_gtest_run._ExtractTestsFromFilter('.M67*')) + + def testGetLLVMProfilePath(self): + path = local_device_gtest_run._GetLLVMProfilePath('test_dir', 'sr71', '5') + self.assertEqual(path, os.path.join('test_dir', 'sr71_5_%2m.profraw')) + + @mock.patch('subprocess.check_output') + def testMergeCoverageFiles(self, mock_sub): + with tempfile_ext.NamedTemporaryDirectory() as cov_tempd: + pro_tempd = os.path.join(cov_tempd, 'profraw') + os.mkdir(pro_tempd) + profdata = tempfile.NamedTemporaryFile( + dir=pro_tempd, + delete=False, + suffix=local_device_gtest_run._PROFRAW_FILE_EXTENSION) + local_device_gtest_run._MergeCoverageFiles(cov_tempd, pro_tempd) + # The input profraw file should be deleted after a successful merge. + self.assertFalse(os.path.exists(profdata.name)) + self.assertTrue(mock_sub.called) + + @mock.patch('pylib.utils.google_storage_helper.upload') + def testUploadTestArtifacts(self, mock_gsh): + link = self._obj._UploadTestArtifacts(mock.MagicMock(), None) + self.assertFalse(mock_gsh.called) + self.assertIsNone(link) + + result = 'A/10/warthog/path' + mock_gsh.return_value = result + with tempfile_ext.NamedTemporaryFile() as temp_f: + link = self._obj._UploadTestArtifacts(mock.MagicMock(), temp_f) + self.assertTrue(mock_gsh.called) + self.assertEqual(result, link) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py new file mode 100644 index 0000000000..54cb92a39c --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py @@ -0,0 +1,1512 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file.
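As a cross-reference for the assertions above, the filter-extraction heuristic can be checked directly; a small sketch, assuming pylib is importable from a Chromium checkout (the concrete filter strings are illustrative):

```python
from pylib.local.device import local_device_gtest_run

# Exclusion filters and unscoped wildcards force a device-side test query.
assert local_device_gtest_run._ExtractTestsFromFilter('-Foo.Bar') is None
assert local_device_gtest_run._ExtractTestsFromFilter('Foo*') is None

# Explicit names, or a single dotted pattern with a trailing wildcard, are
# returned directly and the ~3 second device query is skipped.
assert local_device_gtest_run._ExtractTestsFromFilter(
    'Foo.Bar:Foo.Baz') == ['Foo.Bar', 'Foo.Baz']
assert local_device_gtest_run._ExtractTestsFromFilter('Foo.*') == ['Foo.*']
```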
+ + +import collections +import contextlib +import copy +import hashlib +import json +import logging +import os +import posixpath +import re +import shutil +import sys +import tempfile +import time + +from six.moves import range # pylint: disable=redefined-builtin +from six.moves import zip # pylint: disable=redefined-builtin +from devil import base_error +from devil.android import apk_helper +from devil.android import crash_handler +from devil.android import device_errors +from devil.android import device_temp_file +from devil.android import flag_changer +from devil.android.sdk import shared_prefs +from devil.android import logcat_monitor +from devil.android.tools import system_app +from devil.android.tools import webview_app +from devil.utils import reraiser_thread +from incremental_install import installer +from pylib import constants +from pylib import valgrind_tools +from pylib.base import base_test_result +from pylib.base import output_manager +from pylib.constants import host_paths +from pylib.instrumentation import instrumentation_test_instance +from pylib.local.device import local_device_environment +from pylib.local.device import local_device_test_run +from pylib.output import remote_output_manager +from pylib.utils import chrome_proxy_utils +from pylib.utils import gold_utils +from pylib.utils import instrumentation_tracing +from pylib.utils import shared_preference_utils +from py_trace_event import trace_event +from py_trace_event import trace_time +from py_utils import contextlib_ext +from py_utils import tempfile_ext +import tombstones + +with host_paths.SysPath( + os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'), 0): + import jinja2 # pylint: disable=import-error + import markupsafe # pylint: disable=import-error,unused-import + + +_JINJA_TEMPLATE_DIR = os.path.join( + host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'pylib', 'instrumentation') +_JINJA_TEMPLATE_FILENAME = 'render_test.html.jinja' + +_WPR_GO_LINUX_X86_64_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT, + 'third_party', 'webpagereplay', 'bin', + 'linux', 'x86_64', 'wpr') + +_TAG = 'test_runner_py' + +TIMEOUT_ANNOTATIONS = [ + ('Manual', 10 * 60 * 60), + ('IntegrationTest', 10 * 60), + ('External', 10 * 60), + ('EnormousTest', 5 * 60), + ('LargeTest', 2 * 60), + ('MediumTest', 30), + ('SmallTest', 10), +] + +# Account for Instrumentation and process init overhead. +FIXED_TEST_TIMEOUT_OVERHEAD = 60 + +# 30 minute max timeout for an instrumentation invocation to avoid shard +# timeouts when tests never finish. The shard timeout is currently 60 minutes, +# so this needs to be less than that. +MAX_BATCH_TEST_TIMEOUT = 30 * 60 + +LOGCAT_FILTERS = ['*:e', 'chromium:v', 'cr_*:v', 'DEBUG:I', + 'StrictMode:D', '%s:I' % _TAG] + +EXTRA_SCREENSHOT_FILE = ( + 'org.chromium.base.test.ScreenshotOnFailureStatement.ScreenshotFile') + +EXTRA_UI_CAPTURE_DIR = ( + 'org.chromium.base.test.util.Screenshooter.ScreenshotDir') + +EXTRA_TRACE_FILE = ('org.chromium.base.test.BaseJUnit4ClassRunner.TraceFile') + +_EXTRA_TEST_LIST = ( + 'org.chromium.base.test.BaseChromiumAndroidJUnitRunner.TestList') + +_EXTRA_PACKAGE_UNDER_TEST = ('org.chromium.chrome.test.pagecontroller.rules.' + 'ChromeUiApplicationTestRule.PackageUnderTest') + +FEATURE_ANNOTATION = 'Feature' +RENDER_TEST_FEATURE_ANNOTATION = 'RenderTest' +WPR_ARCHIVE_FILE_PATH_ANNOTATION = 'WPRArchiveDirectory' +WPR_RECORD_REPLAY_TEST_FEATURE_ANNOTATION = 'WPRRecordReplayTest' + +_DEVICE_GOLD_DIR = 'skia_gold' +# A map of Android product models to SDK ints. 
+RENDER_TEST_MODEL_SDK_CONFIGS = { + # Android x86 emulator. + 'Android SDK built for x86': [23], + # We would like this to be supported, but it is currently too prone to + # introducing flakiness due to a combination of Gold and Chromium issues. + # See crbug.com/1233700 and skbug.com/12149 for more information. + # 'Pixel 2': [28], +} + +_BATCH_SUFFIX = '_batch' +_TEST_BATCH_MAX_GROUP_SIZE = 256 + + +@contextlib.contextmanager +def _LogTestEndpoints(device, test_name): + device.RunShellCommand( + ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name], + check_return=True) + try: + yield + finally: + device.RunShellCommand( + ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name], + check_return=True) + + +def DismissCrashDialogs(device): + # Dismiss any error dialogs. Limit the number in case we have an error + # loop or we are failing to dismiss. + packages = set() + try: + for _ in range(10): + package = device.DismissCrashDialogIfNeeded(timeout=10, retries=1) + if not package: + break + packages.add(package) + except device_errors.CommandFailedError: + logging.exception('Error while attempting to dismiss crash dialog.') + return packages + + +_CURRENT_FOCUS_CRASH_RE = re.compile( + r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}') + + +def _GetTargetPackageName(test_apk): + # apk_under_test does not work for smoke tests, where it is set to an + # apk that is not listed as the targetPackage in the test apk's manifest. + return test_apk.GetAllInstrumentations()[0]['android:targetPackage'] + + +class LocalDeviceInstrumentationTestRun( + local_device_test_run.LocalDeviceTestRun): + def __init__(self, env, test_instance): + super(LocalDeviceInstrumentationTestRun, self).__init__( + env, test_instance) + self._chrome_proxy = None + self._context_managers = collections.defaultdict(list) + self._flag_changers = {} + self._render_tests_device_output_dir = None + self._shared_prefs_to_restore = [] + self._skia_gold_session_manager = None + self._skia_gold_work_dir = None + + #override + def TestPackage(self): + return self._test_instance.suite + + #override + def SetUp(self): + target_package = _GetTargetPackageName(self._test_instance.test_apk) + + @local_device_environment.handle_shard_failures_with( + self._env.DenylistDevice) + @trace_event.traced + def individual_device_set_up(device, host_device_tuples): + steps = [] + + if self._test_instance.replace_system_package: + @trace_event.traced + def replace_package(dev): + # We need the context manager to be applied before modifying any + # shared preference files in case the replacement APK needs to be + # set up, and it needs to be applied while the test is running. + # Thus, it needs to be applied early during setup, but must still be + # applied during _RunTest, which isn't possible using 'with' without + # applying the context manager up in test_runner. Instead, we + # manually invoke its __enter__ and __exit__ methods in setup and + # teardown. + system_app_context = system_app.ReplaceSystemApp( + dev, self._test_instance.replace_system_package.package, + self._test_instance.replace_system_package.replacement_apk) + # Pylint is not smart enough to realize that this field has + # an __enter__ method, and will complain loudly. 
+ # pylint: disable=no-member + system_app_context.__enter__() + # pylint: enable=no-member + self._context_managers[str(dev)].append(system_app_context) + + steps.append(replace_package) + + if self._test_instance.system_packages_to_remove: + + @trace_event.traced + def remove_packages(dev): + logging.info('Attempting to remove system packages %s', + self._test_instance.system_packages_to_remove) + system_app.RemoveSystemApps( + dev, self._test_instance.system_packages_to_remove) + logging.info('Done removing system packages') + + # This should be at the front in case we're removing the package to make + # room for another APK installation later on. Since we disallow + # concurrent adb with this option specified, this should be safe. + steps.insert(0, remove_packages) + + if self._test_instance.use_webview_provider: + @trace_event.traced + def use_webview_provider(dev): + # We need the context manager to be applied before modifying any + # shared preference files in case the replacement APK needs to be + # set up, and it needs to be applied while the test is running. + # Thus, it needs to be applied early during setup, but must still be + # applied during _RunTest, which isn't possible using 'with' without + # applying the context manager up in test_runner. Instead, we + # manually invoke its __enter__ and __exit__ methods in setup and + # teardown. + webview_context = webview_app.UseWebViewProvider( + dev, self._test_instance.use_webview_provider) + # Pylint is not smart enough to realize that this field has + # an __enter__ method, and will complain loudly. + # pylint: disable=no-member + webview_context.__enter__() + # pylint: enable=no-member + self._context_managers[str(dev)].append(webview_context) + + steps.append(use_webview_provider) + + def install_helper(apk, + modules=None, + fake_modules=None, + permissions=None, + additional_locales=None): + + @instrumentation_tracing.no_tracing + @trace_event.traced + def install_helper_internal(d, apk_path=None): + # pylint: disable=unused-argument + d.Install(apk, + modules=modules, + fake_modules=fake_modules, + permissions=permissions, + additional_locales=additional_locales) + + return install_helper_internal + + def incremental_install_helper(apk, json_path, permissions): + + @trace_event.traced + def incremental_install_helper_internal(d, apk_path=None): + # pylint: disable=unused-argument + installer.Install(d, json_path, apk=apk, permissions=permissions) + return incremental_install_helper_internal + + permissions = self._test_instance.test_apk.GetPermissions() + if self._test_instance.test_apk_incremental_install_json: + steps.append(incremental_install_helper( + self._test_instance.test_apk, + self._test_instance. + test_apk_incremental_install_json, + permissions)) + else: + steps.append( + install_helper( + self._test_instance.test_apk, permissions=permissions)) + + steps.extend( + install_helper(apk) for apk in self._test_instance.additional_apks) + + # We'll potentially need the package names later for setting app + # compatibility workarounds. + for apk in (self._test_instance.additional_apks + + [self._test_instance.test_apk]): + self._installed_packages.append(apk_helper.GetPackageName(apk)) + + # The apk under test needs to be installed last since installing other + # apks after will unintentionally clear the fake module directory. + # TODO(wnwen): Make this more robust, fix crbug.com/1010954. 
+ if self._test_instance.apk_under_test: + self._installed_packages.append( + apk_helper.GetPackageName(self._test_instance.apk_under_test)) + permissions = self._test_instance.apk_under_test.GetPermissions() + if self._test_instance.apk_under_test_incremental_install_json: + steps.append( + incremental_install_helper( + self._test_instance.apk_under_test, + self._test_instance.apk_under_test_incremental_install_json, + permissions)) + else: + steps.append( + install_helper(self._test_instance.apk_under_test, + self._test_instance.modules, + self._test_instance.fake_modules, permissions, + self._test_instance.additional_locales)) + + @trace_event.traced + def set_debug_app(dev): + # Set debug app in order to enable reading command line flags on user + # builds + cmd = ['am', 'set-debug-app', '--persistent'] + if self._test_instance.wait_for_java_debugger: + cmd.append('-w') + cmd.append(target_package) + dev.RunShellCommand(cmd, check_return=True) + + @trace_event.traced + def edit_shared_prefs(dev): + for setting in self._test_instance.edit_shared_prefs: + shared_pref = shared_prefs.SharedPrefs( + dev, setting['package'], setting['filename'], + use_encrypted_path=setting.get('supports_encrypted_path', False)) + pref_to_restore = copy.copy(shared_pref) + pref_to_restore.Load() + self._shared_prefs_to_restore.append(pref_to_restore) + + shared_preference_utils.ApplySharedPreferenceSetting( + shared_pref, setting) + + @trace_event.traced + def set_vega_permissions(dev): + # Normally, installation of VrCore automatically grants storage + # permissions. However, since VrCore is part of the system image on + # the Vega standalone headset, we don't install the APK as part of test + # setup. Instead, grant the permissions here so that it can take + # screenshots. 
+ if dev.product_name == 'vega': + dev.GrantPermissions('com.google.vr.vrcore', [ + 'android.permission.WRITE_EXTERNAL_STORAGE', + 'android.permission.READ_EXTERNAL_STORAGE' + ]) + + @instrumentation_tracing.no_tracing + def push_test_data(dev): + device_root = posixpath.join(dev.GetExternalStoragePath(), + 'chromium_tests_root') + host_device_tuples_substituted = [ + (h, local_device_test_run.SubstituteDeviceRoot(d, device_root)) + for h, d in host_device_tuples] + logging.info('Pushing data dependencies.') + for h, d in host_device_tuples_substituted: + logging.debug(' %r -> %r', h, d) + local_device_environment.place_nomedia_on_device(dev, device_root) + dev.PushChangedFiles(host_device_tuples_substituted, + delete_device_stale=True) + if not host_device_tuples_substituted: + dev.RunShellCommand(['rm', '-rf', device_root], check_return=True) + dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True) + + @trace_event.traced + def create_flag_changer(dev): + if self._test_instance.flags: + self._CreateFlagChangerIfNeeded(dev) + logging.debug('Attempting to set flags: %r', + self._test_instance.flags) + self._flag_changers[str(dev)].AddFlags(self._test_instance.flags) + + valgrind_tools.SetChromeTimeoutScale( + dev, self._test_instance.timeout_scale) + + steps += [ + set_debug_app, edit_shared_prefs, push_test_data, create_flag_changer, + set_vega_permissions, DismissCrashDialogs + ] + + def bind_crash_handler(step, dev): + return lambda: crash_handler.RetryOnSystemCrash(step, dev) + + steps = [bind_crash_handler(s, device) for s in steps] + + try: + if self._env.concurrent_adb: + reraiser_thread.RunAsync(steps) + else: + for step in steps: + step() + if self._test_instance.store_tombstones: + tombstones.ClearAllTombstones(device) + except device_errors.CommandFailedError: + if not device.IsOnline(): + raise + + # A bugreport can be large and take a while to generate, so only capture + # one if we're using a remote manager. + if isinstance( + self._env.output_manager, + remote_output_manager.RemoteOutputManager): + logging.error( + 'Error when setting up device for tests. Taking a bugreport for ' + 'investigation. This may take a while...') + report_name = '%s.bugreport' % device.serial + with self._env.output_manager.ArchivedTempfile( + report_name, 'bug_reports') as report_file: + device.TakeBugReport(report_file.name) + logging.error('Bug report saved to %s', report_file.Link()) + raise + + self._env.parallel_devices.pMap( + individual_device_set_up, + self._test_instance.GetDataDependencies()) + # Created here instead of on a per-test basis so that the downloaded + # expectations can be re-used between tests, saving a significant amount + # of time. + self._skia_gold_work_dir = tempfile.mkdtemp() + self._skia_gold_session_manager = gold_utils.AndroidSkiaGoldSessionManager( + self._skia_gold_work_dir, self._test_instance.skia_gold_properties) + if self._test_instance.wait_for_java_debugger: + logging.warning('*' * 80) + logging.warning('Waiting for debugger to attach to process: %s', + target_package) + logging.warning('*' * 80) + + #override + def TearDown(self): + shutil.rmtree(self._skia_gold_work_dir) + self._skia_gold_work_dir = None + self._skia_gold_session_manager = None + # By default, teardown will invoke ADB. When receiving SIGTERM due to a + # timeout, there's a high probability that ADB is non-responsive. In these + # cases, sending an ADB command will potentially take a long time to time + # out. 
Before this happens, the process will be hard-killed for not + # responding to SIGTERM fast enough. + if self._received_sigterm: + return + + @local_device_environment.handle_shard_failures_with( + self._env.DenylistDevice) + @trace_event.traced + def individual_device_tear_down(dev): + if str(dev) in self._flag_changers: + self._flag_changers[str(dev)].Restore() + + # Remove package-specific configuration + dev.RunShellCommand(['am', 'clear-debug-app'], check_return=True) + + valgrind_tools.SetChromeTimeoutScale(dev, None) + + # Restore any shared preference files that we stored during setup. + # This should be run sometime before the replace package contextmanager + # gets exited so we don't have to special case restoring files of + # replaced system apps. + for pref_to_restore in self._shared_prefs_to_restore: + pref_to_restore.Commit(force_commit=True) + + # Context manager exit handlers are applied in reverse order + # of the enter handlers. + for context in reversed(self._context_managers[str(dev)]): + # See pylint-related comment above with __enter__() + # pylint: disable=no-member + context.__exit__(*sys.exc_info()) + # pylint: enable=no-member + + self._env.parallel_devices.pMap(individual_device_tear_down) + + def _CreateFlagChangerIfNeeded(self, device): + if str(device) not in self._flag_changers: + cmdline_file = 'test-cmdline-file' + if self._test_instance.use_apk_under_test_flags_file: + if self._test_instance.package_info: + cmdline_file = self._test_instance.package_info.cmdline_file + else: + raise Exception('No PackageInfo found but' + '--use-apk-under-test-flags-file is specified.') + self._flag_changers[str(device)] = flag_changer.FlagChanger( + device, cmdline_file) + + #override + def _CreateShards(self, tests): + return tests + + #override + def _GetTests(self): + if self._test_instance.junit4_runner_supports_listing: + raw_tests = self._GetTestsFromRunner() + tests = self._test_instance.ProcessRawTests(raw_tests) + else: + tests = self._test_instance.GetTests() + tests = self._ApplyExternalSharding( + tests, self._test_instance.external_shard_index, + self._test_instance.total_external_shards) + return tests + + #override + def _GroupTests(self, tests): + batched_tests = dict() + other_tests = [] + for test in tests: + annotations = test['annotations'] + if 'Batch' in annotations and 'RequiresRestart' not in annotations: + batch_name = annotations['Batch']['value'] + if not batch_name: + batch_name = test['class'] + + # Feature flags won't work in instrumentation tests unless the activity + # is restarted. + # Tests with identical features are grouped to minimize restarts. + if 'Features$EnableFeatures' in annotations: + batch_name += '|enabled:' + ','.join( + sorted(annotations['Features$EnableFeatures']['value'])) + if 'Features$DisableFeatures' in annotations: + batch_name += '|disabled:' + ','.join( + sorted(annotations['Features$DisableFeatures']['value'])) + + if not batch_name in batched_tests: + batched_tests[batch_name] = [] + batched_tests[batch_name].append(test) + else: + other_tests.append(test) + + all_tests = [] + for _, tests in list(batched_tests.items()): + tests.sort() # Ensure a consistent ordering across external shards. 
+ all_tests.extend([ + tests[i:i + _TEST_BATCH_MAX_GROUP_SIZE] + for i in range(0, len(tests), _TEST_BATCH_MAX_GROUP_SIZE) + ]) + all_tests.extend(other_tests) + return all_tests + + #override + def _GetUniqueTestName(self, test): + return instrumentation_test_instance.GetUniqueTestName(test) + + #override + def _RunTest(self, device, test): + extras = {} + + # Provide package name under test for apk_under_test. + if self._test_instance.apk_under_test: + package_name = self._test_instance.apk_under_test.GetPackageName() + extras[_EXTRA_PACKAGE_UNDER_TEST] = package_name + + flags_to_add = [] + test_timeout_scale = None + if self._test_instance.coverage_directory: + coverage_basename = '%s' % ('%s_%s_group' % + (test[0]['class'], test[0]['method']) + if isinstance(test, list) else '%s_%s' % + (test['class'], test['method'])) + extras['coverage'] = 'true' + coverage_directory = os.path.join( + device.GetExternalStoragePath(), 'chrome', 'test', 'coverage') + if not device.PathExists(coverage_directory): + device.RunShellCommand(['mkdir', '-p', coverage_directory], + check_return=True) + coverage_device_file = os.path.join(coverage_directory, coverage_basename) + coverage_device_file += '.exec' + extras['coverageFile'] = coverage_device_file + + if self._test_instance.enable_breakpad_dump: + # Use external storage directory so that the breakpad dump can be accessed + # by the test APK in addition to the apk_under_test. + breakpad_dump_directory = os.path.join(device.GetExternalStoragePath(), + 'chromium_dumps') + if device.PathExists(breakpad_dump_directory): + device.RemovePath(breakpad_dump_directory, recursive=True) + flags_to_add.append('--breakpad-dump-location=' + breakpad_dump_directory) + + # Save screenshot if screenshot dir is specified (save locally) or if + # a GS bucket is passed (save in cloud). + screenshot_device_file = device_temp_file.DeviceTempFile( + device.adb, suffix='.png', dir=device.GetExternalStoragePath()) + extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name + + # Set up the screenshot directory. This needs to be done for each test so + # that we only get screenshots created by that test. It has to be on + # external storage since the default location doesn't allow file creation + # from the instrumentation test app on Android L and M. 
+    ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
+        device.adb,
+        dir=device.GetExternalStoragePath())
+    extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name
+
+    if self._env.trace_output:
+      trace_device_file = device_temp_file.DeviceTempFile(
+          device.adb, suffix='.json', dir=device.GetExternalStoragePath())
+      extras[EXTRA_TRACE_FILE] = trace_device_file.name
+
+    target = '%s/%s' % (self._test_instance.test_package,
+                        self._test_instance.junit4_runner_class)
+    if isinstance(test, list):
+
+      def name_and_timeout(t):
+        n = instrumentation_test_instance.GetTestName(t)
+        i = self._GetTimeoutFromAnnotations(t['annotations'], n)
+        return (n, i)
+
+      test_names, timeouts = list(zip(*(name_and_timeout(t) for t in test)))
+
+      test_name = instrumentation_test_instance.GetTestName(
+          test[0]) + _BATCH_SUFFIX
+      extras['class'] = ','.join(test_names)
+      test_display_name = test_name
+      timeout = min(MAX_BATCH_TEST_TIMEOUT,
+                    FIXED_TEST_TIMEOUT_OVERHEAD + sum(timeouts))
+    else:
+      assert test['is_junit4']
+      test_name = instrumentation_test_instance.GetTestName(test)
+      test_display_name = self._GetUniqueTestName(test)
+
+      extras['class'] = test_name
+      if 'flags' in test and test['flags']:
+        flags_to_add.extend(test['flags'])
+      timeout = FIXED_TEST_TIMEOUT_OVERHEAD + self._GetTimeoutFromAnnotations(
+          test['annotations'], test_display_name)
+
+      test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
+          test['annotations'])
+      if test_timeout_scale and test_timeout_scale != 1:
+        valgrind_tools.SetChromeTimeoutScale(
+            device, test_timeout_scale * self._test_instance.timeout_scale)
+
+    if self._test_instance.wait_for_java_debugger:
+      timeout = None
+    logging.info('preparing to run %s: %s', test_display_name, test)
+
+    if _IsRenderTest(test):
+      # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
+      self._render_tests_device_output_dir = posixpath.join(
+          device.GetExternalStoragePath(), 'render_test_output_dir')
+      flags_to_add.append('--render-test-output-dir=%s' %
+                          self._render_tests_device_output_dir)
+
+    if _IsWPRRecordReplayTest(test):
+      wpr_archive_relative_path = _GetWPRArchivePath(test)
+      if not wpr_archive_relative_path:
+        raise RuntimeError('Could not find the WPR archive file path '
+                           'from annotation.')
+      wpr_archive_path = os.path.join(host_paths.DIR_SOURCE_ROOT,
+                                      wpr_archive_relative_path)
+      if not os.path.isdir(wpr_archive_path):
+        raise RuntimeError('WPRArchiveDirectory annotation should point '
+                           'to a directory only. '
+                           '{0} exists: {1}'.format(
+                               wpr_archive_path,
+                               os.path.exists(wpr_archive_path)))
+
+      # Some Linux versions do not like '#' in file names, so replace it
+      # with '__'.
+      archive_path = os.path.join(
+          wpr_archive_path,
+          _ReplaceUncommonChars(self._GetUniqueTestName(test)) + '.wprgo')
+
+      if not os.path.exists(_WPR_GO_LINUX_X86_64_PATH):
+        # If we got to this stage, then we should have
+        # checkout_android set.
+        raise RuntimeError(
+            'WPR Go binary not found at {}'.format(_WPR_GO_LINUX_X86_64_PATH))
+      # Tells the server to use the binaries retrieved from CIPD.
+ chrome_proxy_utils.ChromeProxySession.SetWPRServerBinary( + _WPR_GO_LINUX_X86_64_PATH) + self._chrome_proxy = chrome_proxy_utils.ChromeProxySession() + self._chrome_proxy.wpr_record_mode = self._test_instance.wpr_record_mode + self._chrome_proxy.Start(device, archive_path) + flags_to_add.extend(self._chrome_proxy.GetFlags()) + + if flags_to_add: + self._CreateFlagChangerIfNeeded(device) + self._flag_changers[str(device)].PushFlags(add=flags_to_add) + + time_ms = lambda: int(time.time() * 1e3) + start_ms = time_ms() + + with ui_capture_dir: + with self._ArchiveLogcat(device, test_name) as logcat_file: + output = device.StartInstrumentation( + target, raw=True, extras=extras, timeout=timeout, retries=0) + + duration_ms = time_ms() - start_ms + + with contextlib_ext.Optional( + trace_event.trace('ProcessResults'), + self._env.trace_output): + output = self._test_instance.MaybeDeobfuscateLines(output) + # TODO(jbudorick): Make instrumentation tests output a JSON so this + # doesn't have to parse the output. + result_code, result_bundle, statuses = ( + self._test_instance.ParseAmInstrumentRawOutput(output)) + results = self._test_instance.GenerateTestResults( + result_code, result_bundle, statuses, duration_ms, + device.product_cpu_abi, self._test_instance.symbolizer) + + if self._env.trace_output: + self._SaveTraceData(trace_device_file, device, test['class']) + + + def restore_flags(): + if flags_to_add: + self._flag_changers[str(device)].Restore() + + def restore_timeout_scale(): + if test_timeout_scale: + valgrind_tools.SetChromeTimeoutScale( + device, self._test_instance.timeout_scale) + + def handle_coverage_data(): + if self._test_instance.coverage_directory: + try: + if not os.path.exists(self._test_instance.coverage_directory): + os.makedirs(self._test_instance.coverage_directory) + device.PullFile(coverage_device_file, + self._test_instance.coverage_directory) + device.RemovePath(coverage_device_file, True) + except (OSError, base_error.BaseError) as e: + logging.warning('Failed to handle coverage data after tests: %s', e) + + def handle_render_test_data(): + if _IsRenderTest(test): + # Render tests do not cause test failure by default. So we have to + # check to see if any failure images were generated even if the test + # does not fail. 
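+          # For example (hypothetical names), a passing MyRenderTest can
+          # still leave a MyRenderTest.png and matching .json keys file in
+          # the output directory, so the directory is always processed
+          # before the finally block below removes it.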
+ try: + self._ProcessRenderTestResults(device, results) + finally: + device.RemovePath(self._render_tests_device_output_dir, + recursive=True, + force=True) + self._render_tests_device_output_dir = None + + def pull_ui_screen_captures(): + screenshots = [] + for filename in device.ListDirectory(ui_capture_dir.name): + if filename.endswith('.json'): + screenshots.append(pull_ui_screenshot(filename)) + if screenshots: + json_archive_name = 'ui_capture_%s_%s.json' % ( + test_name.replace('#', '.'), + time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime())) + with self._env.output_manager.ArchivedTempfile( + json_archive_name, 'ui_capture', output_manager.Datatype.JSON + ) as json_archive: + json.dump(screenshots, json_archive) + _SetLinkOnResults(results, test_name, 'ui screenshot', + json_archive.Link()) + + def pull_ui_screenshot(filename): + source_dir = ui_capture_dir.name + json_path = posixpath.join(source_dir, filename) + json_data = json.loads(device.ReadFile(json_path)) + image_file_path = posixpath.join(source_dir, json_data['location']) + with self._env.output_manager.ArchivedTempfile( + json_data['location'], 'ui_capture', output_manager.Datatype.PNG + ) as image_archive: + device.PullFile(image_file_path, image_archive.name) + json_data['image_link'] = image_archive.Link() + return json_data + + def stop_chrome_proxy(): + # Removes the port forwarding + if self._chrome_proxy: + self._chrome_proxy.Stop(device) + if not self._chrome_proxy.wpr_replay_mode: + logging.info('WPR Record test generated archive file %s', + self._chrome_proxy.wpr_archive_path) + self._chrome_proxy = None + + + # While constructing the TestResult objects, we can parallelize several + # steps that involve ADB. These steps should NOT depend on any info in + # the results! Things such as whether the test CRASHED have not yet been + # determined. + post_test_steps = [ + restore_flags, restore_timeout_scale, stop_chrome_proxy, + handle_coverage_data, handle_render_test_data, pull_ui_screen_captures + ] + if self._env.concurrent_adb: + reraiser_thread.RunAsync(post_test_steps) + else: + for step in post_test_steps: + step() + + if logcat_file: + _SetLinkOnResults(results, test_name, 'logcat', logcat_file.Link()) + + # Update the result name if the test used flags. + if flags_to_add: + for r in results: + if r.GetName() == test_name: + r.SetName(test_display_name) + + # Add UNKNOWN results for any missing tests. + iterable_test = test if isinstance(test, list) else [test] + test_names = set(self._GetUniqueTestName(t) for t in iterable_test) + results_names = set(r.GetName() for r in results) + results.extend( + base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN) + for u in test_names.difference(results_names)) + + # Update the result type if we detect a crash. + try: + crashed_packages = DismissCrashDialogs(device) + # Assume test package convention of ".test" suffix + if any(p in self._test_instance.test_package for p in crashed_packages): + for r in results: + if r.GetType() == base_test_result.ResultType.UNKNOWN: + r.SetType(base_test_result.ResultType.CRASH) + elif (crashed_packages and len(results) == 1 + and results[0].GetType() != base_test_result.ResultType.PASS): + # Add log message and set failure reason if: + # 1) The app crash was likely not caused by the test. + # AND + # 2) The app crash possibly caused the test to fail. + # Crashes of the package under test are assumed to be the test's fault. 
+        _AppendToLogForResult(
+            results[0], 'OS displayed error dialogs for {}'.format(
+                ', '.join(crashed_packages)))
+        results[0].SetFailureReason('{} Crashed'.format(
+            ','.join(crashed_packages)))
+      except device_errors.CommandTimeoutError:
+        logging.warning('timed out when detecting/dismissing error dialogs')
+        # Attach screenshot to the test to help with debugging the dialog
+        # boxes.
+        self._SaveScreenshot(device, screenshot_device_file,
+                             test_display_name, results,
+                             'dialog_box_screenshot')
+
+      # The crash result can be set above or in
+      # InstrumentationTestRun.GenerateTestResults. If a test crashes,
+      # subprocesses such as the one used by EmbeddedTestServerRule can be
+      # left alive in a bad state, so kill them now.
+      for r in results:
+        if r.GetType() == base_test_result.ResultType.CRASH:
+          for apk in self._test_instance.additional_apks:
+            device.ForceStop(apk.GetPackageName())
+
+      # Handle failures by:
+      #   - optionally taking a screenshot
+      #   - logging the raw output at INFO level
+      #   - clearing the application state while persisting permissions
+      if any(r.GetType() not in (base_test_result.ResultType.PASS,
+                                 base_test_result.ResultType.SKIP)
+             for r in results):
+        self._SaveScreenshot(device, screenshot_device_file,
+                             test_display_name, results,
+                             'post_test_screenshot')
+
+        logging.info('detected failure in %s. raw output:', test_display_name)
+        for l in output:
+          logging.info('  %s', l)
+        if not self._env.skip_clear_data:
+          if self._test_instance.package_info:
+            permissions = (self._test_instance.apk_under_test.GetPermissions()
+                           if self._test_instance.apk_under_test else None)
+            device.ClearApplicationState(
+                self._test_instance.package_info.package,
+                permissions=permissions)
+          if self._test_instance.enable_breakpad_dump:
+            device.RemovePath(breakpad_dump_directory, recursive=True)
+      else:
+        logging.debug('raw output from %s:', test_display_name)
+        for l in output:
+          logging.debug('  %s', l)
+
+      if self._test_instance.store_tombstones:
+        resolved_tombstones = tombstones.ResolveTombstones(
+            device,
+            resolve_all_tombstones=True,
+            include_stack_symbols=False,
+            wipe_tombstones=True,
+            tombstone_symbolizer=self._test_instance.symbolizer)
+        if resolved_tombstones:
+          tombstone_filename = 'tombstones_%s_%s' % (time.strftime(
+              '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
+          with self._env.output_manager.ArchivedTempfile(
+              tombstone_filename, 'tombstones') as tombstone_file:
+            tombstone_file.write('\n'.join(resolved_tombstones))
+
+          # Associate tombstones with first crashing test.
+          for result in results:
+            if result.GetType() == base_test_result.ResultType.CRASH:
+              result.SetLink('tombstones', tombstone_file.Link())
+              break
+          else:
+            # We don't always detect crashes correctly. In this case,
+            # associate with the first test.
+            results[0].SetLink('tombstones', tombstone_file.Link())
+
+      unknown_tests = set(r.GetName() for r in results
+                          if r.GetType() == base_test_result.ResultType.UNKNOWN)
+
+      # If a test that is batched crashes, the rest of the tests in that batch
+      # won't be run and will have their status left as unknown in results, so
+      # rerun the tests. (see crbug/1127935)
+      # Need to "unbatch" the tests, so that on subsequent tries, the tests
+      # can get run individually. This prevents an unrecognized crash from
+      # preventing the tests in the batch from being run. Running the test as
+      # unbatched does not happen until a retry happens at the
+      # local_device_test_run/environment level.
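+      # For example (hypothetical): if tests A, B and C ran as one batch and
+      # the process died during B, any test that never reported a result is
+      # left UNKNOWN; such tests get their 'Batch' annotation popped below so
+      # they can be retried individually.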
+      tests_to_rerun = []
+      for t in iterable_test:
+        if self._GetUniqueTestName(t) in unknown_tests:
+          prior_attempts = t.get('run_attempts', 0)
+          t['run_attempts'] = prior_attempts + 1
+          # It's possible every test in the batch could crash, so need to
+          # try up to as many times as tests that there are.
+          if prior_attempts < len(results):
+            if t['annotations']:
+              t['annotations'].pop('Batch', None)
+            tests_to_rerun.append(t)
+
+      # If we have a crash that isn't recognized as a crash in a batch, the
+      # tests will be marked as unknown. Sometimes a test failure causes a
+      # crash, but the crash isn't recorded because the failure was detected
+      # first. When the UNKNOWN tests are rerun while unbatched and pass,
+      # they'll have an UNKNOWN, PASS status, so will be improperly marked as
+      # flaky, so change status to NOTRUN and don't try rerunning. They will
+      # get rerun individually at the local_device_test_run/environment level,
+      # as the "Batch" annotation was removed.
+      found_crash_or_fail = False
+      for r in results:
+        if (r.GetType() == base_test_result.ResultType.CRASH
+            or r.GetType() == base_test_result.ResultType.FAIL):
+          found_crash_or_fail = True
+          break
+      if not found_crash_or_fail:
+        # Don't bother rerunning since the unrecognized crashes in
+        # the batch will keep failing.
+        tests_to_rerun = None
+        for r in results:
+          if r.GetType() == base_test_result.ResultType.UNKNOWN:
+            r.SetType(base_test_result.ResultType.NOTRUN)
+
+      return results, tests_to_rerun if tests_to_rerun else None
+
+  def _GetTestsFromRunner(self):
+    test_apk_path = self._test_instance.test_apk.path
+    pickle_path = '%s-runner.pickle' % test_apk_path
+    # For incremental APKs, the code doesn't live in the apk, so instead check
+    # the timestamp of the target's .stamp file.
+    if self._test_instance.test_apk_incremental_install_json:
+      with open(self._test_instance.test_apk_incremental_install_json) as f:
+        data = json.load(f)
+      out_dir = constants.GetOutDirectory()
+      test_mtime = max(
+          os.path.getmtime(os.path.join(out_dir, p))
+          for p in data['dex_files'])
+    else:
+      test_mtime = os.path.getmtime(test_apk_path)
+
+    try:
+      return instrumentation_test_instance.GetTestsFromPickle(
+          pickle_path, test_mtime)
+    except instrumentation_test_instance.TestListPickleException as e:
+      logging.info('Could not get tests from pickle: %s', e)
+    logging.info('Getting tests by having %s list them.',
+                 self._test_instance.junit4_runner_class)
+    def list_tests(d):
+      def _run(dev):
+        # We need to use GetAppWritablePath instead of GetExternalStoragePath
+        # here because we will not have applied legacy storage workarounds on
+        # R+ yet.
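+        # The instrumentation run below is roughly equivalent to the command
+        # (illustrative):
+        #   adb shell am instrument -w -e log true \
+        #       -e notPackage net.bytebuddy <test_package>/<junit4_runner>
+        # i.e. the runner logs the tests it would run, writing the list to
+        # the temp file created here, instead of actually running them.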
+        with device_temp_file.DeviceTempFile(
+            dev.adb, suffix='.json',
+            dir=dev.GetAppWritablePath()) as dev_test_list_json:
+          junit4_runner_class = self._test_instance.junit4_runner_class
+          test_package = self._test_instance.test_package
+          extras = {
+              'log': 'true',
+              # Workaround for https://github.com/mockito/mockito/issues/922
+              'notPackage': 'net.bytebuddy',
+          }
+          extras[_EXTRA_TEST_LIST] = dev_test_list_json.name
+          target = '%s/%s' % (test_package, junit4_runner_class)
+          timeout = 240
+          if self._test_instance.wait_for_java_debugger:
+            timeout = None
+          with self._ArchiveLogcat(dev, 'list_tests'):
+            test_list_run_output = dev.StartInstrumentation(
+                target, extras=extras, retries=0, timeout=timeout)
+          if any(test_list_run_output):
+            logging.error('Unexpected output while listing tests:')
+            for line in test_list_run_output:
+              logging.error('  %s', line)
+          with tempfile_ext.NamedTemporaryDirectory() as host_dir:
+            host_file = os.path.join(host_dir, 'list_tests.json')
+            dev.PullFile(dev_test_list_json.name, host_file)
+            with open(host_file, 'r') as host_file:
+              return json.load(host_file)
+
+      return crash_handler.RetryOnSystemCrash(_run, d)
+
+    raw_test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)
+
+    # If all devices failed to list tests, raise an exception.
+    # Check that tl is not None and is not empty.
+    if all(not tl for tl in raw_test_lists):
+      raise device_errors.CommandFailedError(
+          'Failed to list tests on any device')
+
+    # Get the first viable list of raw tests
+    raw_tests = [tl for tl in raw_test_lists if tl][0]
+
+    instrumentation_test_instance.SaveTestsToPickle(pickle_path, raw_tests)
+    return raw_tests
+
+  @contextlib.contextmanager
+  def _ArchiveLogcat(self, device, test_name):
+    stream_name = 'logcat_%s_shard%s_%s_%s' % (
+        test_name.replace('#', '.'), self._test_instance.external_shard_index,
+        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
+
+    logcat_file = None
+    logmon = None
+    try:
+      with self._env.output_manager.ArchivedTempfile(
+          stream_name, 'logcat') as logcat_file:
+        with logcat_monitor.LogcatMonitor(
+            device.adb,
+            filter_specs=local_device_environment.LOGCAT_FILTERS,
+            output_file=logcat_file.name,
+            transform_func=self._test_instance.MaybeDeobfuscateLines,
+            check_error=False) as logmon:
+          with _LogTestEndpoints(device, test_name):
+            with contextlib_ext.Optional(
+                trace_event.trace(test_name),
+                self._env.trace_output):
+              yield logcat_file
+    finally:
+      if logmon:
+        logmon.Close()
+      if logcat_file and logcat_file.Link():
+        logging.info('Logcat saved to %s', logcat_file.Link())
+
+  def _SaveTraceData(self, trace_device_file, device, test_class):
+    trace_host_file = self._env.trace_output
+
+    if device.FileExists(trace_device_file.name):
+      try:
+        java_trace_json = device.ReadFile(trace_device_file.name)
+      except IOError:
+        raise Exception('error pulling trace file from device')
+      finally:
+        trace_device_file.close()
+
+      process_name = '%s (device %s)' % (test_class, device.serial)
+      # hashlib.md5() requires bytes, so encode the process name (a str
+      # under Python 3) before hashing.
+      process_hash = int(
+          hashlib.md5(process_name.encode('utf-8')).hexdigest()[:6], 16)
+
+      java_trace = json.loads(java_trace_json)
+      java_trace.sort(key=lambda event: event['ts'])
+
+      get_date_command = 'echo $EPOCHREALTIME'
+      device_time = device.RunShellCommand(get_date_command, single_line=True)
+      device_time = float(device_time) * 1e6
+      system_time = trace_time.Now()
+      time_difference = system_time - device_time
+
+      threads_to_add = set()
+      for event in java_trace:
+        # Ensure thread ID and thread name will be linked in the metadata.
+ threads_to_add.add((event['tid'], event['name'])) + + event['pid'] = process_hash + + # Adjust time stamp to align with Python trace times (from + # trace_time.Now()). + event['ts'] += time_difference + + for tid, thread_name in threads_to_add: + thread_name_metadata = {'pid': process_hash, 'tid': tid, + 'ts': 0, 'ph': 'M', 'cat': '__metadata', + 'name': 'thread_name', + 'args': {'name': thread_name}} + java_trace.append(thread_name_metadata) + + process_name_metadata = {'pid': process_hash, 'tid': 0, 'ts': 0, + 'ph': 'M', 'cat': '__metadata', + 'name': 'process_name', + 'args': {'name': process_name}} + java_trace.append(process_name_metadata) + + java_trace_json = json.dumps(java_trace) + java_trace_json = java_trace_json.rstrip(' ]') + + with open(trace_host_file, 'r') as host_handle: + host_contents = host_handle.readline() + + if host_contents: + java_trace_json = ',%s' % java_trace_json.lstrip(' [') + + with open(trace_host_file, 'a') as host_handle: + host_handle.write(java_trace_json) + + def _SaveScreenshot(self, device, screenshot_device_file, test_name, results, + link_name): + screenshot_filename = '%s-%s.png' % ( + test_name, time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime())) + if device.FileExists(screenshot_device_file.name): + with self._env.output_manager.ArchivedTempfile( + screenshot_filename, 'screenshot', + output_manager.Datatype.PNG) as screenshot_host_file: + try: + device.PullFile(screenshot_device_file.name, + screenshot_host_file.name) + finally: + screenshot_device_file.close() + _SetLinkOnResults(results, test_name, link_name, + screenshot_host_file.Link()) + + def _ProcessRenderTestResults(self, device, results): + if not self._render_tests_device_output_dir: + return + self._ProcessSkiaGoldRenderTestResults(device, results) + + def _IsRetryWithoutPatch(self): + """Checks whether this test run is a retry without a patch/CL. + + Returns: + True iff this is being run on a trybot and the current step is a retry + without the patch applied, otherwise False. + """ + is_tryjob = self._test_instance.skia_gold_properties.IsTryjobRun() + # Builders automatically pass in --gtest_repeat, + # --test-launcher-retry-limit, --test-launcher-batch-limit, and + # --gtest_filter when running a step without a CL applied, but not for + # steps with the CL applied. + # TODO(skbug.com/12100): Check this in a less hacky way if a way can be + # found to check the actual step name. Ideally, this would not be necessary + # at all, but will be until Chromium stops doing step retries on trybots + # (extremely unlikely) or Gold is updated to not clobber earlier results + # (more likely, but a ways off). + has_filter = bool(self._test_instance.test_filter) + has_batch_limit = self._test_instance.test_launcher_batch_limit is not None + return is_tryjob and has_filter and has_batch_limit + + def _ProcessSkiaGoldRenderTestResults(self, device, results): + gold_dir = posixpath.join(self._render_tests_device_output_dir, + _DEVICE_GOLD_DIR) + if not device.FileExists(gold_dir): + return + + gold_properties = self._test_instance.skia_gold_properties + with tempfile_ext.NamedTemporaryDirectory() as host_dir: + use_luci = not (gold_properties.local_pixel_tests + or gold_properties.no_luci_auth) + + # Pull everything at once instead of pulling individually, as it's + # slightly faster since each command over adb has some overhead compared + # to doing the same thing locally. 
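+      # e.g. a single 'adb pull <gold_dir>' for the whole directory instead
+      # of one pull per image/JSON pair (command shown for illustration).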
+ host_dir = os.path.join(host_dir, _DEVICE_GOLD_DIR) + device.PullFile(gold_dir, host_dir) + for image_name in os.listdir(host_dir): + if not image_name.endswith('.png'): + continue + + render_name = image_name[:-4] + json_name = render_name + '.json' + json_path = os.path.join(host_dir, json_name) + image_path = os.path.join(host_dir, image_name) + full_test_name = None + if not os.path.exists(json_path): + _FailTestIfNecessary(results, full_test_name) + _AppendToLog( + results, full_test_name, + 'Unable to find corresponding JSON file for image %s ' + 'when doing Skia Gold comparison.' % image_name) + continue + + # Add 'ignore': '1' if a comparison failure would not be surfaced, as + # that implies that we aren't actively maintaining baselines for the + # test. This helps prevent unrelated CLs from getting comments posted to + # them. + should_rewrite = False + with open(json_path) as infile: + # All the key/value pairs in the JSON file are strings, so convert + # to a bool. + json_dict = json.load(infile) + fail_on_unsupported = json_dict.get('fail_on_unsupported_configs', + 'false') + fail_on_unsupported = fail_on_unsupported.lower() == 'true' + # Grab the full test name so we can associate the comparison with a + # particular test, which is necessary if tests are batched together. + # Remove the key/value pair from the JSON since we don't need/want to + # upload it to Gold. + full_test_name = json_dict.get('full_test_name') + if 'full_test_name' in json_dict: + should_rewrite = True + del json_dict['full_test_name'] + + running_on_unsupported = ( + device.build_version_sdk not in RENDER_TEST_MODEL_SDK_CONFIGS.get( + device.product_model, []) and not fail_on_unsupported) + should_ignore_in_gold = running_on_unsupported + # We still want to fail the test even if we're ignoring the image in + # Gold if we're running on a supported configuration, so + # should_ignore_in_gold != should_hide_failure. + should_hide_failure = running_on_unsupported + if should_ignore_in_gold: + should_rewrite = True + json_dict['ignore'] = '1' + if should_rewrite: + with open(json_path, 'w') as outfile: + json.dump(json_dict, outfile) + + gold_session = self._skia_gold_session_manager.GetSkiaGoldSession( + keys_input=json_path) + + try: + status, error = gold_session.RunComparison( + name=render_name, + png_file=image_path, + output_manager=self._env.output_manager, + use_luci=use_luci, + force_dryrun=self._IsRetryWithoutPatch()) + except Exception as e: # pylint: disable=broad-except + _FailTestIfNecessary(results, full_test_name) + _AppendToLog(results, full_test_name, + 'Skia Gold comparison raised exception: %s' % e) + continue + + if not status: + continue + + # Don't fail the test if we ran on an unsupported configuration unless + # the test has explicitly opted in, as it's likely that baselines + # aren't maintained for that configuration. + if should_hide_failure: + if self._test_instance.skia_gold_properties.local_pixel_tests: + _AppendToLog( + results, full_test_name, + 'Gold comparison for %s failed, but model %s with SDK ' + '%d is not a supported configuration. This failure would be ' + 'ignored on the bots, but failing since tests are being run ' + 'locally.' % + (render_name, device.product_model, device.build_version_sdk)) + else: + _AppendToLog( + results, full_test_name, + 'Gold comparison for %s failed, but model %s with SDK ' + '%d is not a supported configuration, so ignoring failure.' 
% + (render_name, device.product_model, device.build_version_sdk)) + continue + + _FailTestIfNecessary(results, full_test_name) + failure_log = ( + 'Skia Gold reported failure for RenderTest %s. See ' + 'RENDER_TESTS.md for how to fix this failure.' % render_name) + status_codes =\ + self._skia_gold_session_manager.GetSessionClass().StatusCodes + if status == status_codes.AUTH_FAILURE: + _AppendToLog(results, full_test_name, + 'Gold authentication failed with output %s' % error) + elif status == status_codes.INIT_FAILURE: + _AppendToLog(results, full_test_name, + 'Gold initialization failed with output %s' % error) + elif status == status_codes.COMPARISON_FAILURE_REMOTE: + public_triage_link, internal_triage_link =\ + gold_session.GetTriageLinks(render_name) + if not public_triage_link: + _AppendToLog( + results, full_test_name, + 'Failed to get triage link for %s, raw output: %s' % + (render_name, error)) + _AppendToLog( + results, full_test_name, 'Reason for no triage link: %s' % + gold_session.GetTriageLinkOmissionReason(render_name)) + continue + if gold_properties.IsTryjobRun(): + _SetLinkOnResults(results, full_test_name, + 'Public Skia Gold triage link for entire CL', + public_triage_link) + _SetLinkOnResults(results, full_test_name, + 'Internal Skia Gold triage link for entire CL', + internal_triage_link) + else: + _SetLinkOnResults( + results, full_test_name, + 'Public Skia Gold triage link for %s' % render_name, + public_triage_link) + _SetLinkOnResults( + results, full_test_name, + 'Internal Skia Gold triage link for %s' % render_name, + internal_triage_link) + _AppendToLog(results, full_test_name, failure_log) + + elif status == status_codes.COMPARISON_FAILURE_LOCAL: + given_link = gold_session.GetGivenImageLink(render_name) + closest_link = gold_session.GetClosestImageLink(render_name) + diff_link = gold_session.GetDiffImageLink(render_name) + + processed_template_output = _GenerateRenderTestHtml( + render_name, given_link, closest_link, diff_link) + with self._env.output_manager.ArchivedTempfile( + '%s.html' % render_name, 'gold_local_diffs', + output_manager.Datatype.HTML) as html_results: + html_results.write(processed_template_output) + _SetLinkOnResults(results, full_test_name, render_name, + html_results.Link()) + _AppendToLog( + results, full_test_name, + 'See %s link for diff image with closest positive.' % render_name) + elif status == status_codes.LOCAL_DIFF_FAILURE: + _AppendToLog(results, full_test_name, + 'Failed to generate diffs from Gold: %s' % error) + else: + logging.error( + 'Given unhandled SkiaGoldSession StatusCode %s with error %s', + status, error) + + #override + def _ShouldRetry(self, test, result): + # We've tried to disable retries in the past with mixed results. + # See crbug.com/619055 for historical context and crbug.com/797002 + # for ongoing efforts. + if 'Batch' in test['annotations'] and test['annotations']['Batch'][ + 'value'] == 'UnitTests': + return False + del test, result + return True + + #override + def _ShouldShard(self): + return True + + @classmethod + def _GetTimeoutScaleFromAnnotations(cls, annotations): + try: + return int(annotations.get('TimeoutScale', {}).get('value', 1)) + except ValueError as e: + logging.warning("Non-integer value of TimeoutScale ignored. 
(%s)", str(e)) + return 1 + + @classmethod + def _GetTimeoutFromAnnotations(cls, annotations, test_name): + for k, v in TIMEOUT_ANNOTATIONS: + if k in annotations: + timeout = v + break + else: + logging.warning('Using default 1 minute timeout for %s', test_name) + timeout = 60 + + timeout *= cls._GetTimeoutScaleFromAnnotations(annotations) + + return timeout + + +def _IsWPRRecordReplayTest(test): + """Determines whether a test or a list of tests is a WPR RecordReplay Test.""" + if not isinstance(test, list): + test = [test] + return any([ + WPR_RECORD_REPLAY_TEST_FEATURE_ANNOTATION in t['annotations'].get( + FEATURE_ANNOTATION, {}).get('value', ()) for t in test + ]) + + +def _GetWPRArchivePath(test): + """Retrieves the archive path from the WPRArchiveDirectory annotation.""" + return test['annotations'].get(WPR_ARCHIVE_FILE_PATH_ANNOTATION, + {}).get('value', ()) + + +def _ReplaceUncommonChars(original): + """Replaces uncommon characters with __.""" + if not original: + raise ValueError('parameter should not be empty') + + uncommon_chars = ['#'] + for char in uncommon_chars: + original = original.replace(char, '__') + return original + + +def _IsRenderTest(test): + """Determines if a test or list of tests has a RenderTest amongst them.""" + if not isinstance(test, list): + test = [test] + return any([RENDER_TEST_FEATURE_ANNOTATION in t['annotations'].get( + FEATURE_ANNOTATION, {}).get('value', ()) for t in test]) + + +def _GenerateRenderTestHtml(image_name, failure_link, golden_link, diff_link): + """Generates a RenderTest results page. + + Displays the generated (failure) image, the golden image, and the diff + between them. + + Args: + image_name: The name of the image whose comparison failed. + failure_link: The URL to the generated/failure image. + golden_link: The URL to the golden image. + diff_link: The URL to the diff image between the failure and golden images. + + Returns: + A string containing the generated HTML. + """ + jinja2_env = jinja2.Environment( + loader=jinja2.FileSystemLoader(_JINJA_TEMPLATE_DIR), trim_blocks=True) + template = jinja2_env.get_template(_JINJA_TEMPLATE_FILENAME) + # pylint: disable=no-member + return template.render( + test_name=image_name, + failure_link=failure_link, + golden_link=golden_link, + diff_link=diff_link) + + +def _FailTestIfNecessary(results, full_test_name): + """Marks the given results as failed if it wasn't already. + + Marks the result types as ResultType.FAIL unless they were already some sort + of failure type, e.g. ResultType.CRASH. + + Args: + results: A list of base_test_result.BaseTestResult objects. + full_test_name: A string containing the full name of the test, e.g. + org.chromium.chrome.SomeTestClass#someTestMethod. + """ + found_matching_test = _MatchingTestInResults(results, full_test_name) + if not found_matching_test and _ShouldReportNoMatchingResult(full_test_name): + logging.error( + 'Could not find result specific to %s, failing all tests in the batch.', + full_test_name) + for result in results: + if found_matching_test and result.GetName() != full_test_name: + continue + if result.GetType() not in [ + base_test_result.ResultType.FAIL, base_test_result.ResultType.CRASH, + base_test_result.ResultType.TIMEOUT, base_test_result.ResultType.UNKNOWN + ]: + result.SetType(base_test_result.ResultType.FAIL) + + +def _AppendToLog(results, full_test_name, line): + """Appends the given line to the end of the logs of the given results. + + Args: + results: A list of base_test_result.BaseTestResult objects. 
+    full_test_name: A string containing the full name of the test, e.g.
+        org.chromium.chrome.SomeTestClass#someTestMethod.
+    line: A string to be appended as a new line to the log of |result|.
+  """
+  found_matching_test = _MatchingTestInResults(results, full_test_name)
+  if not found_matching_test and _ShouldReportNoMatchingResult(full_test_name):
+    logging.error(
+        'Could not find result specific to %s, appending to log of all tests '
+        'in the batch.', full_test_name)
+  for result in results:
+    if found_matching_test and result.GetName() != full_test_name:
+      continue
+    _AppendToLogForResult(result, line)
+
+
+def _AppendToLogForResult(result, line):
+  result.SetLog(result.GetLog() + '\n' + line)
+
+
+def _SetLinkOnResults(results, full_test_name, link_name, link):
+  """Sets the given link on the given results.
+
+  Args:
+    results: A list of base_test_result.BaseTestResult objects.
+    full_test_name: A string containing the full name of the test, e.g.
+        org.chromium.chrome.SomeTestClass#someTestMethod.
+    link_name: A string containing the name of the link being set.
+    link: A string containing the link being set.
+  """
+  found_matching_test = _MatchingTestInResults(results, full_test_name)
+  if not found_matching_test and _ShouldReportNoMatchingResult(full_test_name):
+    logging.error(
+        'Could not find result specific to %s, adding link to results of all '
+        'tests in the batch.', full_test_name)
+  for result in results:
+    if found_matching_test and result.GetName() != full_test_name:
+      continue
+    result.SetLink(link_name, link)
+
+
+def _MatchingTestInResults(results, full_test_name):
+  """Checks if any tests named |full_test_name| are in |results|.
+
+  Args:
+    results: A list of base_test_result.BaseTestResult objects.
+    full_test_name: A string containing the full name of the test, e.g.
+        org.chromium.chrome.SomeTestClass#someTestMethod.
+
+  Returns:
+    True if one of the results in |results| has the same name as
+    |full_test_name|, otherwise False.
+  """
+  return any([r for r in results if r.GetName() == full_test_name])
+
+
+def _ShouldReportNoMatchingResult(full_test_name):
+  """Determines whether a failure to find a matching result is actually bad.
+
+  Args:
+    full_test_name: A string containing the full name of the test, e.g.
+        org.chromium.chrome.SomeTestClass#someTestMethod.
+
+  Returns:
+    False if the failure to find a matching result is expected and should not
+    be reported, otherwise True.
+  """
+  if full_test_name is not None and full_test_name.endswith(_BATCH_SUFFIX):
+    # Handle batched tests, whose reported name is the first test's name +
+    # "_batch".
+    return False
+  return True
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py
new file mode 100755
index 0000000000..948e34c17a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env vpython3
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+ +"""Tests for local_device_instrumentation_test_run.""" + +# pylint: disable=protected-access + + +import unittest + +from pylib.base import base_test_result +from pylib.base import mock_environment +from pylib.base import mock_test_instance +from pylib.local.device import local_device_instrumentation_test_run + + +class LocalDeviceInstrumentationTestRunTest(unittest.TestCase): + + def setUp(self): + super(LocalDeviceInstrumentationTestRunTest, self).setUp() + self._env = mock_environment.MockEnvironment() + self._ti = mock_test_instance.MockTestInstance() + self._obj = ( + local_device_instrumentation_test_run.LocalDeviceInstrumentationTestRun( + self._env, self._ti)) + + # TODO(crbug.com/797002): Decide whether the _ShouldRetry hook is worth + # retaining and remove these tests if not. + + def testShouldRetry_failure(self): + test = { + 'annotations': {}, + 'class': 'SadTest', + 'method': 'testFailure', + 'is_junit4': True, + } + result = base_test_result.BaseTestResult( + 'SadTest.testFailure', base_test_result.ResultType.FAIL) + self.assertTrue(self._obj._ShouldRetry(test, result)) + + def testShouldRetry_retryOnFailure(self): + test = { + 'annotations': {'RetryOnFailure': None}, + 'class': 'SadTest', + 'method': 'testRetryOnFailure', + 'is_junit4': True, + } + result = base_test_result.BaseTestResult( + 'SadTest.testRetryOnFailure', base_test_result.ResultType.FAIL) + self.assertTrue(self._obj._ShouldRetry(test, result)) + + def testShouldRetry_notRun(self): + test = { + 'annotations': {}, + 'class': 'SadTest', + 'method': 'testNotRun', + 'is_junit4': True, + } + result = base_test_result.BaseTestResult( + 'SadTest.testNotRun', base_test_result.ResultType.NOTRUN) + self.assertTrue(self._obj._ShouldRetry(test, result)) + + def testIsWPRRecordReplayTest_matchedWithKey(self): + test = { + 'annotations': { + 'Feature': { + 'value': ['WPRRecordReplayTest', 'dummy'] + } + }, + 'class': 'WPRDummyTest', + 'method': 'testRun', + 'is_junit4': True, + } + self.assertTrue( + local_device_instrumentation_test_run._IsWPRRecordReplayTest(test)) + + def testIsWPRRecordReplayTest_noMatchedKey(self): + test = { + 'annotations': { + 'Feature': { + 'value': ['abc', 'dummy'] + } + }, + 'class': 'WPRDummyTest', + 'method': 'testRun', + 'is_junit4': True, + } + self.assertFalse( + local_device_instrumentation_test_run._IsWPRRecordReplayTest(test)) + + def testGetWPRArchivePath_matchedWithKey(self): + test = { + 'annotations': { + 'WPRArchiveDirectory': { + 'value': 'abc' + } + }, + 'class': 'WPRDummyTest', + 'method': 'testRun', + 'is_junit4': True, + } + self.assertEqual( + local_device_instrumentation_test_run._GetWPRArchivePath(test), 'abc') + + def testGetWPRArchivePath_noMatchedWithKey(self): + test = { + 'annotations': { + 'Feature': { + 'value': 'abc' + } + }, + 'class': 'WPRDummyTest', + 'method': 'testRun', + 'is_junit4': True, + } + self.assertFalse( + local_device_instrumentation_test_run._GetWPRArchivePath(test)) + + def testIsRenderTest_matchedWithKey(self): + test = { + 'annotations': { + 'Feature': { + 'value': ['RenderTest', 'dummy'] + } + }, + 'class': 'DummyTest', + 'method': 'testRun', + 'is_junit4': True, + } + self.assertTrue(local_device_instrumentation_test_run._IsRenderTest(test)) + + def testIsRenderTest_noMatchedKey(self): + test = { + 'annotations': { + 'Feature': { + 'value': ['abc', 'dummy'] + } + }, + 'class': 'DummyTest', + 'method': 'testRun', + 'is_junit4': True, + } + self.assertFalse(local_device_instrumentation_test_run._IsRenderTest(test)) + + def 
testReplaceUncommonChars(self): + original = 'abc#edf' + self.assertEqual( + local_device_instrumentation_test_run._ReplaceUncommonChars(original), + 'abc__edf') + original = 'abc#edf#hhf' + self.assertEqual( + local_device_instrumentation_test_run._ReplaceUncommonChars(original), + 'abc__edf__hhf') + original = 'abcedfhhf' + self.assertEqual( + local_device_instrumentation_test_run._ReplaceUncommonChars(original), + 'abcedfhhf') + original = None + with self.assertRaises(ValueError): + local_device_instrumentation_test_run._ReplaceUncommonChars(original) + original = '' + with self.assertRaises(ValueError): + local_device_instrumentation_test_run._ReplaceUncommonChars(original) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py new file mode 100644 index 0000000000..71dd9bd793 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py @@ -0,0 +1,128 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +import logging + +from six.moves import range # pylint: disable=redefined-builtin +from devil.android import device_errors +from devil.android.sdk import intent +from pylib import constants +from pylib.base import base_test_result +from pylib.local.device import local_device_test_run + + +_CHROME_PACKAGE = constants.PACKAGE_INFO['chrome'].package + +class LocalDeviceMonkeyTestRun(local_device_test_run.LocalDeviceTestRun): + def __init__(self, env, test_instance): + super(LocalDeviceMonkeyTestRun, self).__init__(env, test_instance) + + def TestPackage(self): + return 'monkey' + + #override + def SetUp(self): + pass + + #override + def _RunTest(self, device, test): + device.ClearApplicationState(self._test_instance.package) + + # Chrome crashes are not always caught by Monkey test runner. + # Launch Chrome and verify Chrome has the same PID before and after + # the test. 
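+    # Illustrative outcome (hypothetical PIDs): GetPids returning
+    # {package: ['1234']} before but {package: ['5678']} after the monkey run
+    # indicates a crash-and-restart, which is reported as a failure below.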
+ device.StartActivity( + intent.Intent(package=self._test_instance.package, + activity=self._test_instance.activity, + action='android.intent.action.MAIN'), + blocking=True, force_stop=True) + before_pids = device.GetPids(self._test_instance.package) + + output = '' + if before_pids: + if len(before_pids.get(self._test_instance.package, [])) > 1: + raise Exception( + 'At most one instance of process %s expected but found pids: ' + '%s' % (self._test_instance.package, before_pids)) + output = '\n'.join(self._LaunchMonkeyTest(device)) + after_pids = device.GetPids(self._test_instance.package) + + crashed = True + if not self._test_instance.package in before_pids: + logging.error('Failed to start the process.') + elif not self._test_instance.package in after_pids: + logging.error('Process %s has died.', + before_pids[self._test_instance.package]) + elif (before_pids[self._test_instance.package] != + after_pids[self._test_instance.package]): + logging.error('Detected process restart %s -> %s', + before_pids[self._test_instance.package], + after_pids[self._test_instance.package]) + else: + crashed = False + + success_pattern = 'Events injected: %d' % self._test_instance.event_count + if success_pattern in output and not crashed: + result = base_test_result.BaseTestResult( + test, base_test_result.ResultType.PASS, log=output) + else: + result = base_test_result.BaseTestResult( + test, base_test_result.ResultType.FAIL, log=output) + if 'chrome' in self._test_instance.package: + logging.warning('Starting MinidumpUploadService...') + # TODO(jbudorick): Update this after upstreaming. + minidump_intent = intent.Intent( + action='%s.crash.ACTION_FIND_ALL' % _CHROME_PACKAGE, + package=self._test_instance.package, + activity='%s.crash.MinidumpUploadService' % _CHROME_PACKAGE) + try: + device.RunShellCommand( + ['am', 'startservice'] + minidump_intent.am_args, + as_root=True, check_return=True) + except device_errors.CommandFailedError: + logging.exception('Failed to start MinidumpUploadService') + + return result, None + + #override + def TearDown(self): + pass + + #override + def _CreateShards(self, tests): + return tests + + #override + def _ShouldShard(self): + # TODO(mikecase): Run Monkey test concurrently on each attached device. + return False + + #override + def _GetTests(self): + return ['MonkeyTest'] + + def _LaunchMonkeyTest(self, device): + try: + cmd = ['monkey', + '-p', self._test_instance.package, + '--throttle', str(self._test_instance.throttle), + '-s', str(self._test_instance.seed), + '--monitor-native-crashes', + '--kill-process-after-error'] + for category in self._test_instance.categories: + cmd.extend(['-c', category]) + for _ in range(self._test_instance.verbose_count): + cmd.append('-v') + cmd.append(str(self._test_instance.event_count)) + return device.RunShellCommand( + cmd, timeout=self._test_instance.timeout, check_return=True) + finally: + try: + # Kill the monkey test process on the device. If you manually + # interrupt the test run, this will prevent the monkey test from + # continuing to run. + device.KillAll('com.android.commands.monkey') + except device_errors.CommandFailedError: + pass diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py new file mode 100644 index 0000000000..645d9c7471 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py @@ -0,0 +1,395 @@ +# Copyright 2014 The Chromium Authors. 
All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import logging
+import posixpath
+import signal
+try:
+  import _thread as thread
+except ImportError:
+  import thread
+import threading
+
+from devil import base_error
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android.sdk import version_codes
+from devil.android.tools import device_recovery
+from devil.utils import signal_handler
+from pylib import valgrind_tools
+from pylib.base import base_test_result
+from pylib.base import test_run
+from pylib.base import test_collection
+from pylib.local.device import local_device_environment
+
+
+_SIGTERM_TEST_LOG = (
+    '  Suite execution terminated, probably due to swarming timeout.\n'
+    '  Your test may not have run.')
+
+
+def SubstituteDeviceRoot(device_path, device_root):
+  if not device_path:
+    return device_root
+  elif isinstance(device_path, list):
+    return posixpath.join(*(p if p else device_root for p in device_path))
+  else:
+    return device_path
+
+
+class TestsTerminated(Exception):
+  pass
+
+
+class InvalidShardingSettings(Exception):
+  def __init__(self, shard_index, total_shards):
+    super(InvalidShardingSettings, self).__init__(
+        'Invalid sharding settings. shard_index: %d total_shards: %d'
+        % (shard_index, total_shards))
+
+
+class LocalDeviceTestRun(test_run.TestRun):
+
+  def __init__(self, env, test_instance):
+    super(LocalDeviceTestRun, self).__init__(env, test_instance)
+    self._tools = {}
+    # This is intended to be filled by a child class.
+    self._installed_packages = []
+    env.SetPreferredAbis(test_instance.GetPreferredAbis())
+
+  #override
+  def RunTests(self, results):
+    tests = self._GetTests()
+
+    exit_now = threading.Event()
+
+    @local_device_environment.handle_shard_failures
+    def run_tests_on_device(dev, tests, results):
+      # This is performed here instead of during setup because restarting the
+      # device clears app compatibility flags, which will happen if a device
+      # needs to be recovered.
+      SetAppCompatibilityFlagsIfNecessary(self._installed_packages, dev)
+      consecutive_device_errors = 0
+      for test in tests:
+        if not test:
+          logging.warning('No tests in shard. Continuing.')
+          tests.test_completed()
+          continue
+        if exit_now.isSet():
+          thread.exit()
+
+        result = None
+        rerun = None
+        try:
+          result, rerun = crash_handler.RetryOnSystemCrash(
+              lambda d, t=test: self._RunTest(d, t),
+              device=dev)
+          consecutive_device_errors = 0
+          if isinstance(result, base_test_result.BaseTestResult):
+            results.AddResult(result)
+          elif isinstance(result, list):
+            results.AddResults(result)
+          else:
+            raise Exception(
+                'Unexpected result type: %s' % type(result).__name__)
+        except device_errors.CommandTimeoutError:
+          # Test timeouts don't count as device errors for the purpose
+          # of bad device detection.
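+          # (Genuine device errors are counted below; three in a row cause
+          # this device to be abandoned for the remainder of the run.)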
+ consecutive_device_errors = 0 + + if isinstance(test, list): + results.AddResults( + base_test_result.BaseTestResult( + self._GetUniqueTestName(t), + base_test_result.ResultType.TIMEOUT) for t in test) + else: + results.AddResult( + base_test_result.BaseTestResult( + self._GetUniqueTestName(test), + base_test_result.ResultType.TIMEOUT)) + except Exception as e: # pylint: disable=broad-except + if isinstance(tests, test_collection.TestCollection): + rerun = test + if (isinstance(e, device_errors.DeviceUnreachableError) + or not isinstance(e, base_error.BaseError)): + # If we get a device error but believe the device is still + # reachable, attempt to continue using it. Otherwise, raise + # the exception and terminate this run_tests_on_device call. + raise + + consecutive_device_errors += 1 + if consecutive_device_errors >= 3: + # We believe the device is still reachable and may still be usable, + # but if it fails repeatedly, we shouldn't attempt to keep using + # it. + logging.error('Repeated failures on device %s. Abandoning.', + str(dev)) + raise + + logging.exception( + 'Attempting to continue using device %s despite failure (%d/3).', + str(dev), consecutive_device_errors) + + finally: + if isinstance(tests, test_collection.TestCollection): + if rerun: + tests.add(rerun) + tests.test_completed() + + logging.info('Finished running tests on this device.') + + def stop_tests(_signum, _frame): + logging.critical('Received SIGTERM. Stopping test execution.') + exit_now.set() + raise TestsTerminated() + + try: + with signal_handler.AddSignalHandler(signal.SIGTERM, stop_tests): + self._env.ResetCurrentTry() + while self._env.current_try < self._env.max_tries and tests: + tries = self._env.current_try + grouped_tests = self._GroupTests(tests) + logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries) + if tries > 0 and self._env.recover_devices: + if any(d.build_version_sdk == version_codes.LOLLIPOP_MR1 + for d in self._env.devices): + logging.info( + 'Attempting to recover devices due to known issue on L MR1. ' + 'See crbug.com/787056 for details.') + self._env.parallel_devices.pMap( + device_recovery.RecoverDevice, None) + elif tries + 1 == self._env.max_tries: + logging.info( + 'Attempting to recover devices prior to last test attempt.') + self._env.parallel_devices.pMap( + device_recovery.RecoverDevice, None) + logging.info('Will run %d tests on %d devices: %s', + len(tests), len(self._env.devices), + ', '.join(str(d) for d in self._env.devices)) + for t in tests: + logging.debug(' %s', t) + + try_results = base_test_result.TestRunResults() + test_names = (self._GetUniqueTestName(t) for t in tests) + try_results.AddResults( + base_test_result.BaseTestResult( + t, base_test_result.ResultType.NOTRUN) + for t in test_names if not t.endswith('*')) + + # As soon as we know the names of the tests, we populate |results|. + # The tests in try_results will have their results updated by + # try_results.AddResult() as they are run. 
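+        # Wildcard names (ending in '*') are skipped here because they stand
+        # for multiple concrete tests; _GetTestsToRetry matches them against
+        # results via fnmatch instead.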
+
+  def _GetTestsToRetry(self, tests, try_results):
+
+    def is_failure_result(test_result):
+      if isinstance(test_result, list):
+        return any(is_failure_result(r) for r in test_result)
+      return (
+          test_result is None
+          or test_result.GetType() not in (
+              base_test_result.ResultType.PASS,
+              base_test_result.ResultType.SKIP))
+
+    all_test_results = {r.GetName(): r for r in try_results.GetAll()}
+
+    tests_and_names = ((t, self._GetUniqueTestName(t)) for t in tests)
+
+    tests_and_results = {}
+    for test, name in tests_and_names:
+      if name.endswith('*'):
+        tests_and_results[name] = (test, [
+            r for n, r in all_test_results.items()
+            if fnmatch.fnmatch(n, name)
+        ])
+      else:
+        tests_and_results[name] = (test, all_test_results.get(name))
+
+    failed_tests_and_results = (
+        (test, result) for test, result in tests_and_results.values()
+        if is_failure_result(result))
+
+    return [t for t, r in failed_tests_and_results
+            if self._ShouldRetry(t, r)]
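The wildcard branch in _GetTestsToRetry is plain stdlib fnmatch over the recorded result names, so a requested name like 'TestCase.*' aggregates every matching result. An illustration with made-up names:

import fnmatch

result_names = ['TestCase.Test1', 'TestCase.Test2', 'Other.Test']
matching = [n for n in result_names if fnmatch.fnmatch(n, 'TestCase.*')]
assert matching == ['TestCase.Test1', 'TestCase.Test2']
# The wildcard test is retried iff any matched result is a failure.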
+
+  def _ApplyExternalSharding(self, tests, shard_index, total_shards):
+    logging.info('Using external sharding settings. This is shard %d/%d',
+                 shard_index, total_shards)
+
+    if total_shards < 0 or shard_index < 0 or total_shards <= shard_index:
+      raise InvalidShardingSettings(shard_index, total_shards)
+
+    sharded_tests = []
+
+    # Group tests that should run in the same test invocation - either
+    # unit tests or batched tests.
+    grouped_tests = self._GroupTests(tests)
+
+    # Partition grouped tests approximately evenly across shards.
+    partitioned_tests = self._PartitionTests(grouped_tests, total_shards,
+                                             float('inf'))
+    if len(partitioned_tests) <= shard_index:
+      return []
+    for t in partitioned_tests[shard_index]:
+      if isinstance(t, list):
+        sharded_tests.extend(t)
+      else:
+        sharded_tests.append(t)
+    return sharded_tests
+
+  # Partition tests evenly into |num_desired_partitions| partitions where
+  # possible. However, many constraints make perfect partitioning
+  # impossible. If max_partition_size isn't large enough, extra partitions
+  # may be created (an infinite max size should always return precisely the
+  # desired number of partitions). Even if |max_partition_size| is
+  # technically large enough to hold all of the tests in
+  # |num_desired_partitions| partitions, we attempt to keep test order
+  # relatively stable to minimize flakes, so when tests are grouped
+  # (e.g. batched tests), we cannot perfectly fill all partitions as that
+  # would require breaking up groups.
+  def _PartitionTests(self, tests, num_desired_partitions,
+                      max_partition_size):
+    # pylint: disable=no-self-use
+    partitions = []
+
+    # Sort by hash so we don't put all tests in a slow suite in the same
+    # partition.
+    tests = sorted(
+        tests,
+        key=lambda t: hash(
+            self._GetUniqueTestName(t[0] if isinstance(t, list) else t)))
+
+    def CountTestsIndividually(test):
+      if not isinstance(test, list):
+        return False
+      annotations = test[0]['annotations']
+      # Tests in the 'UnitTests' batch are really fast, so to balance
+      # shards better, count each 'UnitTests' batch as a single test.
+      return ('Batch' not in annotations
+              or annotations['Batch']['value'] != 'UnitTests')
+
+    num_not_yet_allocated = sum(
+        [len(test) - 1 for test in tests if CountTestsIndividually(test)])
+    num_not_yet_allocated += len(tests)
+
+    # Fast linear partition approximation capped by max_partition_size. We
+    # cannot round-robin or otherwise re-order tests dynamically because we
+    # want test order to remain stable.
+    partition_size = min(num_not_yet_allocated // num_desired_partitions,
+                         max_partition_size)
+    partitions.append([])
+    last_partition_size = 0
+    for test in tests:
+      test_count = len(test) if CountTestsIndividually(test) else 1
+      # Make a new shard whenever we would overfill the previous one.
+      # However, if the size of the test group is larger than the max
+      # partition size on its own, just put the group in its own shard
+      # instead of splitting up the group.
+      if (last_partition_size + test_count > partition_size
+          and last_partition_size > 0):
+        num_desired_partitions -= 1
+        if num_desired_partitions <= 0:
+          # Too many tests for the number of partitions; just fill all
+          # partitions beyond num_desired_partitions.
+          partition_size = max_partition_size
+        else:
+          # Re-balance the remaining partitions.
+          partition_size = min(
+              num_not_yet_allocated // num_desired_partitions,
+              max_partition_size)
+        partitions.append([])
+        partitions[-1].append(test)
+        last_partition_size = test_count
+      else:
+        partitions[-1].append(test)
+        last_partition_size += test_count
+
+      num_not_yet_allocated -= test_count
+
+    if not partitions[-1]:
+      partitions.pop()
+    return partitions
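To make the re-balancing in _PartitionTests concrete, here is a simplified worked example (group sizes only, max_partition_size left unbounded, illustrative numbers):

# Greedy fill of group sizes [4, 3, 2, 1] into 2 partitions.
groups = [4, 3, 2, 1]          # each number is a test group's size
desired = 2
remaining = sum(groups)        # 10 tests not yet allocated
target = remaining // desired  # initial partition size: 5
partitions = [[]]
size = 0
for count in groups:
  if size + count > target and size > 0:
    # Opening a new partition: re-balance what is left across the rest.
    desired -= 1
    if desired > 0:
      target = remaining // desired
    partitions.append([])
    size = 0
  partitions[-1].append(count)
  size += count
  remaining -= count

assert partitions == [[4], [3, 2, 1]]  # two shards holding 4 and 6 tests

Without the re-balance step, the group of size 3 would have opened a partition still targeting 5, spilling the final group into a third shard.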
+
+  def GetTool(self, device):
+    if str(device) not in self._tools:
+      self._tools[str(device)] = valgrind_tools.CreateTool(
+          self._env.tool, device)
+    return self._tools[str(device)]
+
+  def _CreateShards(self, tests):
+    raise NotImplementedError
+
+  def _GetUniqueTestName(self, test):
+    # pylint: disable=no-self-use
+    return test
+
+  def _ShouldRetry(self, test, result):
+    # pylint: disable=no-self-use,unused-argument
+    return True
+
+  def _GetTests(self):
+    raise NotImplementedError
+
+  def _GroupTests(self, tests):
+    # pylint: disable=no-self-use
+    return tests
+
+  def _RunTest(self, device, test):
+    raise NotImplementedError
+
+  def _ShouldShard(self):
+    raise NotImplementedError
+
+
+def SetAppCompatibilityFlagsIfNecessary(packages, device):
+  """Sets app compatibility flags on the given packages and device.
+
+  Args:
+    packages: A list of strings containing package names to apply flags to.
+    device: A DeviceUtils instance to apply the flags on.
+  """
+
+  def set_flag_for_packages(flag, enable):
+    enable_str = 'enable' if enable else 'disable'
+    for p in packages:
+      cmd = ['am', 'compat', enable_str, flag, p]
+      device.RunShellCommand(cmd)
+
+  sdk_version = device.build_version_sdk
+  if sdk_version >= version_codes.R:
+    # These flags are necessary to use the legacy storage permissions on
+    # R+. See crbug.com/1173699 for more information.
+    set_flag_for_packages('DEFAULT_SCOPED_STORAGE', False)
+    set_flag_for_packages('FORCE_ENABLE_SCOPED_STORAGE', False)
+
+
+class NoTestsError(Exception):
+  """Error for when no tests are found."""
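On Android R and later, the helper above shells out to `am compat` once per flag and package. A standalone illustration of the exact argument lists it builds (the package name here is hypothetical):

# Prints the two compat commands issued for one package on an R+ device.
packages = ['org.chromium.example.test']  # hypothetical package name
for flag in ('DEFAULT_SCOPED_STORAGE', 'FORCE_ENABLE_SCOPED_STORAGE'):
  for p in packages:
    cmd = ['am', 'compat', 'disable', flag, p]
    print(' '.join(cmd))  # run on the device, e.g. via adb shell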
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py
new file mode 100755
index 0000000000..0f6c9b5421
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env vpython3
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.local.device import local_device_test_run
+
+import mock  # pylint: disable=import-error
+
+
+class SubstituteDeviceRootTest(unittest.TestCase):
+
+  def testNoneDevicePath(self):
+    self.assertEqual(
+        '/fake/device/root',
+        local_device_test_run.SubstituteDeviceRoot(None, '/fake/device/root'))
+
+  def testStringDevicePath(self):
+    self.assertEqual(
+        '/another/fake/device/path',
+        local_device_test_run.SubstituteDeviceRoot('/another/fake/device/path',
+                                                   '/fake/device/root'))
+
+  def testListWithNoneDevicePath(self):
+    self.assertEqual(
+        '/fake/device/root/subpath',
+        local_device_test_run.SubstituteDeviceRoot([None, 'subpath'],
+                                                   '/fake/device/root'))
+
+  def testListWithoutNoneDevicePath(self):
+    self.assertEqual(
+        '/another/fake/device/path',
+        local_device_test_run.SubstituteDeviceRoot(
+            ['/', 'another', 'fake', 'device', 'path'], '/fake/device/root'))
+
+
+class TestLocalDeviceTestRun(local_device_test_run.LocalDeviceTestRun):
+
+  # pylint: disable=abstract-method
+
+  def __init__(self):
+    super(TestLocalDeviceTestRun, self).__init__(
+        mock.MagicMock(), mock.MagicMock())
+
+
+class TestLocalDeviceNonStringTestRun(
+    local_device_test_run.LocalDeviceTestRun):
+
+  # pylint: disable=abstract-method
+
+  def __init__(self):
+    super(TestLocalDeviceNonStringTestRun, self).__init__(
+        mock.MagicMock(), mock.MagicMock())
+
+  def _GetUniqueTestName(self, test):
+    return test['name']
+
+
+class LocalDeviceTestRunTest(unittest.TestCase):
+
+  def testGetTestsToRetry_allTestsPassed(self):
+    results = [
+        base_test_result.BaseTestResult(
+            'Test1', base_test_result.ResultType.PASS),
+        base_test_result.BaseTestResult(
+            'Test2', base_test_result.ResultType.PASS),
+    ]
+
+    tests = [r.GetName() for r in results]
+    try_results = base_test_result.TestRunResults()
+    try_results.AddResults(results)
+
+    test_run = TestLocalDeviceTestRun()
+    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+    self.assertEqual(0, len(tests_to_retry))
+
+  def testGetTestsToRetry_testFailed(self):
+    results = [
+        base_test_result.BaseTestResult(
+            'Test1', base_test_result.ResultType.FAIL),
+        base_test_result.BaseTestResult(
+            'Test2', base_test_result.ResultType.PASS),
+    ]
+
+    tests = [r.GetName() for r in results]
+    try_results = base_test_result.TestRunResults()
+    try_results.AddResults(results)
+
+    test_run = TestLocalDeviceTestRun()
+    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+    self.assertEqual(1, len(tests_to_retry))
+    self.assertIn('Test1', tests_to_retry)
+
+  def testGetTestsToRetry_testUnknown(self):
+    results = [
+        base_test_result.BaseTestResult(
+            'Test2', base_test_result.ResultType.PASS),
+    ]
+
+    tests = ['Test1'] + [r.GetName() for r in results]
+    try_results = base_test_result.TestRunResults()
+    try_results.AddResults(results)
+
+    test_run = TestLocalDeviceTestRun()
+    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+    self.assertEqual(1, len(tests_to_retry))
+    self.assertIn('Test1', tests_to_retry)
+
+  def testGetTestsToRetry_wildcardFilter_allPass(self):
+    results = [
+        base_test_result.BaseTestResult(
+            'TestCase.Test1', base_test_result.ResultType.PASS),
+        base_test_result.BaseTestResult(
+            'TestCase.Test2', base_test_result.ResultType.PASS),
+    ]
+
+    tests = ['TestCase.*']
+    try_results = base_test_result.TestRunResults()
+    try_results.AddResults(results)
+
+    test_run = TestLocalDeviceTestRun()
+    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+    self.assertEqual(0, len(tests_to_retry))
+
+  def testGetTestsToRetry_wildcardFilter_oneFails(self):
+    results = [
+        base_test_result.BaseTestResult(
+            'TestCase.Test1', base_test_result.ResultType.PASS),
+        base_test_result.BaseTestResult(
+            'TestCase.Test2', base_test_result.ResultType.FAIL),
+    ]
+
+    tests = ['TestCase.*']
+    try_results = base_test_result.TestRunResults()
+    try_results.AddResults(results)
+
+    test_run = TestLocalDeviceTestRun()
+    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+    self.assertEqual(1, len(tests_to_retry))
+    self.assertIn('TestCase.*', tests_to_retry)
+
+  def testGetTestsToRetry_nonStringTests(self):
+    results = [
+        base_test_result.BaseTestResult(
+            'TestCase.Test1', base_test_result.ResultType.PASS),
+        base_test_result.BaseTestResult(
+            'TestCase.Test2', base_test_result.ResultType.FAIL),
+    ]
+
+    tests = [
+        {'name': 'TestCase.Test1'},
+        {'name': 'TestCase.Test2'},
+    ]
+    try_results = base_test_result.TestRunResults()
+    try_results.AddResults(results)
+
+    test_run = TestLocalDeviceNonStringTestRun()
+    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
+    self.assertEqual(1, len(tests_to_retry))
+    self.assertIsInstance(tests_to_retry[0], dict)
+    self.assertEqual(tests[1], tests_to_retry[0])
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)