summaryrefslogtreecommitdiffstats
path: root/third_party/libwebrtc/build/android/pylib
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/libwebrtc/build/android/pylib')
-rw-r--r--third_party/libwebrtc/build/android/pylib/__init__.py45
-rw-r--r--third_party/libwebrtc/build/android/pylib/android/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/android/logcat_symbolizer.py99
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/base_test_result.py276
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/base_test_result_unittest.py83
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/environment.py49
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/environment_factory.py34
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/mock_environment.py11
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/mock_test_instance.py11
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/output_manager.py159
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/output_manager_factory.py18
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/output_manager_test_case.py15
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/test_collection.py81
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/test_exception.py8
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/test_instance.py40
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/test_instance_factory.py26
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/test_run.py50
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/test_run_factory.py36
-rw-r--r--third_party/libwebrtc/build/android/pylib/base/test_server.py18
-rw-r--r--third_party/libwebrtc/build/android/pylib/constants/__init__.py288
-rw-r--r--third_party/libwebrtc/build/android/pylib/constants/host_paths.py97
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/constants/host_paths_unittest.py51
-rw-r--r--third_party/libwebrtc/build/android/pylib/content_settings.py80
-rw-r--r--third_party/libwebrtc/build/android/pylib/device/__init__.py0
-rw-r--r--third_party/libwebrtc/build/android/pylib/device/commands/BUILD.gn20
-rw-r--r--third_party/libwebrtc/build/android/pylib/device/commands/java/src/org/chromium/android/commands/unzip/Unzip.java93
-rw-r--r--third_party/libwebrtc/build/android/pylib/device_settings.py201
-rw-r--r--third_party/libwebrtc/build/android/pylib/dex/__init__.py3
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/dex/dex_parser.py551
-rw-r--r--third_party/libwebrtc/build/android/pylib/gtest/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/gtest/filter/OWNERS1
-rw-r--r--third_party/libwebrtc/build/android/pylib/gtest/filter/base_unittests_disabled25
-rw-r--r--third_party/libwebrtc/build/android/pylib/gtest/filter/base_unittests_emulator_additional_disabled10
-rw-r--r--third_party/libwebrtc/build/android/pylib/gtest/filter/breakpad_unittests_disabled9
-rw-r--r--third_party/libwebrtc/build/android/pylib/gtest/filter/content_browsertests_disabled45
-rw-r--r--third_party/libwebrtc/build/android/pylib/gtest/filter/unit_tests_disabled74
-rw-r--r--third_party/libwebrtc/build/android/pylib/gtest/gtest_config.py57
-rw-r--r--third_party/libwebrtc/build/android/pylib/gtest/gtest_test_instance.py627
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/gtest/gtest_test_instance_test.py348
-rw-r--r--third_party/libwebrtc/build/android/pylib/instrumentation/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_parser.py112
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_parser_test.py135
-rw-r--r--third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_test_instance.py1171
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_test_instance_test.py1250
-rw-r--r--third_party/libwebrtc/build/android/pylib/instrumentation/json_perf_parser.py162
-rw-r--r--third_party/libwebrtc/build/android/pylib/instrumentation/render_test.html.jinja40
-rw-r--r--third_party/libwebrtc/build/android/pylib/instrumentation/test_result.py33
-rw-r--r--third_party/libwebrtc/build/android/pylib/junit/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/junit/junit_test_instance.py76
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/device/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py328
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py896
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py79
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py1512
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py169
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py128
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py395
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py171
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/emulator/OWNERS4
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/emulator/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/emulator/avd.py629
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/emulator/ini.py58
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/local/emulator/ini_test.py69
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/emulator/local_emulator_environment.py102
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/emulator/proto/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd.proto75
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd_pb2.py362
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/local_test_server_spawner.py101
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/machine/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/machine/local_machine_environment.py25
-rw-r--r--third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run.py309
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run_test.py89
-rw-r--r--third_party/libwebrtc/build/android/pylib/monkey/__init__.py0
-rw-r--r--third_party/libwebrtc/build/android/pylib/monkey/monkey_test_instance.py73
-rw-r--r--third_party/libwebrtc/build/android/pylib/output/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/output/local_output_manager.py49
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/output/local_output_manager_test.py34
-rw-r--r--third_party/libwebrtc/build/android/pylib/output/noop_output_manager.py42
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/output/noop_output_manager_test.py27
-rw-r--r--third_party/libwebrtc/build/android/pylib/output/remote_output_manager.py89
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/output/remote_output_manager_test.py32
-rw-r--r--third_party/libwebrtc/build/android/pylib/pexpect.py21
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/restart_adbd.sh20
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator.py702
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py213
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/results_uploader.py176
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/json_results.py239
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/results/json_results_test.py311
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/presentation/__init__.py3
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/presentation/javascript/main_html.js193
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/results/presentation/standard_gtest_merge.py173
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/presentation/template/main.html93
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/presentation/template/table.html60
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/results/presentation/test_results_presentation.py549
-rw-r--r--third_party/libwebrtc/build/android/pylib/results/report_results.py136
-rw-r--r--third_party/libwebrtc/build/android/pylib/symbols/__init__.py0
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/symbols/apk_lib_dump.py61
-rw-r--r--third_party/libwebrtc/build/android/pylib/symbols/apk_native_libs.py419
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/symbols/apk_native_libs_unittest.py397
-rw-r--r--third_party/libwebrtc/build/android/pylib/symbols/deobfuscator.py178
-rw-r--r--third_party/libwebrtc/build/android/pylib/symbols/elf_symbolizer.py497
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/symbols/elf_symbolizer_unittest.py196
-rw-r--r--third_party/libwebrtc/build/android/pylib/symbols/mock_addr2line/__init__.py0
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/symbols/mock_addr2line/mock_addr2line81
-rw-r--r--third_party/libwebrtc/build/android/pylib/symbols/stack_symbolizer.py86
-rw-r--r--third_party/libwebrtc/build/android/pylib/symbols/symbol_utils.py813
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/symbols/symbol_utils_unittest.py942
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/__init__.py0
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/app_bundle_utils.py169
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/argparse_utils.py52
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/chrome_proxy_utils.py171
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/utils/chrome_proxy_utils_test.py235
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/decorators.py37
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/utils/decorators_test.py104
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/device_dependencies.py136
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/utils/device_dependencies_test.py52
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/dexdump.py136
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/utils/dexdump_test.py141
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/gold_utils.py78
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/utils/gold_utils_test.py123
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/google_storage_helper.py129
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/instrumentation_tracing.py204
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/local_utils.py19
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/logdog_helper.py96
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/logging_utils.py136
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/utils/maven_downloader.py140
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/proguard.py285
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/utils/proguard_test.py495
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/repo_utils.py22
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/shared_preference_utils.py116
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/simpleperf.py260
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/test_filter.py148
-rwxr-xr-xthird_party/libwebrtc/build/android/pylib/utils/test_filter_test.py247
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/time_profile.py45
-rw-r--r--third_party/libwebrtc/build/android/pylib/utils/xvfb.py58
-rw-r--r--third_party/libwebrtc/build/android/pylib/valgrind_tools.py116
140 files changed, 23013 insertions, 0 deletions
diff --git a/third_party/libwebrtc/build/android/pylib/__init__.py b/third_party/libwebrtc/build/android/pylib/__init__.py
new file mode 100644
index 0000000000..86a3527e2e
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/__init__.py
@@ -0,0 +1,45 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import os
import sys


# Root of the checkout: three directory levels up from this file
# (which lives in build/android/pylib).
_SRC_PATH = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..', '..', '..'))

_THIRD_PARTY_PATH = os.path.join(_SRC_PATH, 'third_party')

_CATAPULT_PATH = os.path.join(_THIRD_PARTY_PATH, 'catapult')

_DEVIL_PATH = os.path.join(_CATAPULT_PATH, 'devil')

_PYTRACE_PATH = os.path.join(_CATAPULT_PATH, 'common', 'py_trace_event')

_PY_UTILS_PATH = os.path.join(_CATAPULT_PATH, 'common', 'py_utils')

_SIX_PATH = os.path.join(_THIRD_PARTY_PATH, 'six', 'src')

_TRACE2HTML_PATH = os.path.join(_CATAPULT_PATH, 'tracing')

_BUILD_UTIL_PATH = os.path.join(_SRC_PATH, 'build', 'util')

# Append catapult/six dependencies in a fixed order so that module
# resolution order stays deterministic across runs.
for _dep_path in (_DEVIL_PATH, _PYTRACE_PATH, _PY_UTILS_PATH,
                  _TRACE2HTML_PATH, _SIX_PATH):
  if _dep_path not in sys.path:
    sys.path.append(_dep_path)

# //build/util is prepended (not appended) so its modules take precedence
# over anything already on sys.path.
if _BUILD_UTIL_PATH not in sys.path:
  sys.path.insert(0, _BUILD_UTIL_PATH)
diff --git a/third_party/libwebrtc/build/android/pylib/android/__init__.py b/third_party/libwebrtc/build/android/pylib/android/__init__.py
new file mode 100644
index 0000000000..a67c3501b2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/android/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/android/logcat_symbolizer.py b/third_party/libwebrtc/build/android/pylib/android/logcat_symbolizer.py
new file mode 100644
index 0000000000..c9f5336184
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/android/logcat_symbolizer.py
@@ -0,0 +1,99 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import re
+
+from devil.android import logcat_monitor
+
# Matches native stack-frame lines of the form '#NN ...' inside a crash dump.
BACKTRACE_LINE_RE = re.compile(r'#\d+')
# Fills LogcatMonitor's THREADTIME_RE_FORMAT placeholders with permissive
# patterns so any threadtime-formatted logcat line matches.
# NOTE(review): .group(7) below is assumed to capture the message payload
# after the date/time/pid/tid/level/tag prefix — confirm against
# devil.android.logcat_monitor.THREADTIME_RE_FORMAT.
THREADTIME_RE = re.compile(
    logcat_monitor.LogcatMonitor.THREADTIME_RE_FORMAT % (
        r' *\S* *', r' *\S* *', r' *\S* *', r' *\S* *', r'.*'))

def SymbolizeLogcat(logcat, dest, symbolizer, abi):
  """Symbolize stack trace in the logcat.

  Symbolize the logcat and write the symbolized logcat to a new file.

  Args:
    logcat: Path to logcat file.
    dest: Path to where to write the symbolized logcat.
    symbolizer: The stack symbolizer to symbolize stack trace in logcat.
    abi: The device's product_cpu_abi. Symbolizer needs it to symbolize.

  A sample logcat that needs to be symbolized, after stripping the prefix,
  such as '08-07 18:39:37.692 28649 28649 E Ion : ', would be:
  Build fingerprint: 'google/shamu/shamu:7.1.1/NMF20B/3370:userdebug/dev-keys'
  Revision: '0'
  ABI: 'arm'
  pid: 28936, tid: 28936, name: chromium.chrome >>> org.chromium.chrome <<<
  signal 6 (SIGABRT), code -6 (SI_TKILL), fault addr --------
  Abort message: '[FATAL:debug_urls.cc(151)] Check failed: false.
  #00 0x63e16c41 /data/app/org.chromium.chrome-1/lib/arm/libchrome.so+0x0006cc4
  #01 0x63f19be3 /data/app/org.chromium.chrome-1/lib/arm/libchrome.so+0x0016fbe
  #02 0x63f19737 /data/app/org.chromium.chrome-1/lib/arm/libchrome.so+0x0016f73
  #03 0x63f18ddf /data/app/org.chromium.chrome-1/lib/arm/libchrome.so+0x0016edd
  #04 0x63f18b79 /data/app/org.chromium.chrome-1/lib/arm/libchrome.so+0x0016eb7
  #05 0xab53f319 /system/lib/libart.so+0x000a3319
  #06
  r0 00000000 r1 00007108 r2 00000006 r3 00000008
  r4 ae60258c r5 00000006 r6 ae602534 r7 0000010c
  r8 bede5cd0 r9 00000030 sl 00000000 fp 9265a800
  ip 0000000b sp bede5c38 lr ac8e5537 pc ac8e7da0 cpsr 600f0010

  backtrace:
  #00 pc 00049da0 /system/lib/libc.so (tgkill+12)
  #01 pc 00047533 /system/lib/libc.so (pthread_kill+34)
  #02 pc 0001d635 /system/lib/libc.so (raise+10)
  #03 pc 00019181 /system/lib/libc.so (__libc_android_abort+34)
  #04 pc 00017048 /system/lib/libc.so (abort+4)
  #05 pc 00948605 /data/app/org.chromium.chrome-1/lib/arm/libchrome.so
  #06 pc 002c9f73 /data/app/org.chromium.chrome-1/lib/arm/libchrome.so
  #07 pc 003ccbe1 /data/app/org.chromium.chrome-1/lib/arm/libchrome.so
  #08 pc 003cc735 /data/app/org.chromium.chrome-1/lib/arm/libchrome.so
  #09 pc 003cbddf /data/app/org.chromium.chrome-1/lib/arm/libchrome.so
  #10 pc 003cbb77 /data/app/org.chromium.chrome-1/lib/arm/libchrome.so
  """

  with open(logcat) as logcat_file:
    with open(dest, 'w') as dest_file:
      # The current stack script will only print out the symbolized stack,
      # and completely ignore logs other than the crash log that is used for
      # symbolization, if any exists. Thus the code here extracts the
      # crash log inside the logcat and pass only the crash log to the script,
      # because we don't want to lose other information in the logcat that,
      # if passed to the stack script, will just be ignored by it.
      # TODO(crbug.com/755225): Rewrite the logic here.
      # State machine: outside crash log -> upper half (registers, raw
      # frames) -> lower half ('backtrace:' section) -> back outside once a
      # non-frame line follows the backtrace.
      outside_of_crash_log = True
      in_lower_half_crash = False
      data_to_symbolize = []

      for line in logcat_file:
        if outside_of_crash_log:
          # Check whether it is the start of crash log.
          if 'Build fingerprint: ' in line:
            outside_of_crash_log = False
            # Only include necessary information for symbolization.
            # The logic here that removes date, time, proc_id etc.
            # should be in sync with _THREADTIME_RE_FORMAT in logcat_monitor.
            # NOTE(review): re.search is assumed to match every line here; a
            # non-threadtime-formatted line would raise AttributeError on
            # .group — confirm input is always threadtime-formatted logcat.
            data_to_symbolize.append(
              re.search(THREADTIME_RE, line).group(7))
          else:
            dest_file.write(line)
        else:
          # Once we have reached the end of the backtrace section,
          # we will start symbolizing.
          if in_lower_half_crash and not bool(BACKTRACE_LINE_RE.search(line)):
            outside_of_crash_log = True
            in_lower_half_crash = False
            symbolized_lines = symbolizer.ExtractAndResolveNativeStackTraces(
                data_to_symbolize, abi)
            dest_file.write('\n'.join(symbolized_lines) + '\n' + line)
            data_to_symbolize = []
          else:
            if not in_lower_half_crash and 'backtrace:' in line:
              in_lower_half_crash = True
            data_to_symbolize.append(
                re.search(THREADTIME_RE, line).group(7))
diff --git a/third_party/libwebrtc/build/android/pylib/base/__init__.py b/third_party/libwebrtc/build/android/pylib/base/__init__.py
new file mode 100644
index 0000000000..96196cffb2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/base/base_test_result.py b/third_party/libwebrtc/build/android/pylib/base/base_test_result.py
new file mode 100644
index 0000000000..1741f132d5
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/base_test_result.py
@@ -0,0 +1,276 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module containing base test results classes."""
+
+
+import functools
+import threading
+
+from lib.results import result_types # pylint: disable=import-error
+
+
class ResultType(object):
  """Enumeration of test result types.

  Thin wrapper over the result types defined in //build/util/lib/results/.
  """
  PASS = result_types.PASS
  SKIP = result_types.SKIP
  FAIL = result_types.FAIL
  CRASH = result_types.CRASH
  TIMEOUT = result_types.TIMEOUT
  UNKNOWN = result_types.UNKNOWN
  NOTRUN = result_types.NOTRUN

  @staticmethod
  def GetTypes():
    """Returns a list of all known test result types."""
    return [
        ResultType.PASS,
        ResultType.SKIP,
        ResultType.FAIL,
        ResultType.CRASH,
        ResultType.TIMEOUT,
        ResultType.UNKNOWN,
        ResultType.NOTRUN,
    ]
+
+
@functools.total_ordering
class BaseTestResult(object):
  """Base class for a single test result.

  Identity (equality and hashing) is defined solely by the test name, so a
  set of results contains at most one entry per test. Ordering is by name,
  with the remaining comparison operators supplied by
  functools.total_ordering.
  """

  def __init__(self, name, test_type, duration=0, log='', failure_reason=None):
    """Construct a BaseTestResult.

    Args:
      name: Name of the test which defines uniqueness.
      test_type: Type of the test result as defined in ResultType.
      duration: Time it took for the test to run in milliseconds.
      log: An optional string listing any errors.
      failure_reason: Optional string with the first failure the test
        encountered, excluding any stack trace.
    """
    assert name
    assert test_type in ResultType.GetTypes()
    self._name = name
    self._test_type = test_type
    self._duration = duration
    self._log = log
    self._failure_reason = failure_reason
    # Maps link names to URLs of additional result data (screenshots, logs).
    self._links = {}

  def __str__(self):
    return self._name

  def __repr__(self):
    return self._name

  def __eq__(self, other):
    return self.GetName() == other.GetName()

  def __lt__(self, other):
    # Order by name so sorted() output (e.g. in log and gtest summaries) is
    # deterministic. This previously compared with '==', which made every
    # ordering comparison derived by functools.total_ordering meaningless
    # and left sort order arbitrary.
    return self.GetName() < other.GetName()

  def __hash__(self):
    return hash(self._name)

  def SetName(self, name):
    """Set the test name.

    Because we're putting this into a set, this should only be used if moving
    this test result into another set.
    """
    self._name = name

  def GetName(self):
    """Get the test name."""
    return self._name

  def SetType(self, test_type):
    """Set the test result type."""
    assert test_type in ResultType.GetTypes()
    self._test_type = test_type

  def GetType(self):
    """Get the test result type."""
    return self._test_type

  def GetDuration(self):
    """Get the test duration in milliseconds."""
    return self._duration

  def SetLog(self, log):
    """Set the test log."""
    self._log = log

  def GetLog(self):
    """Get the test log."""
    return self._log

  def SetFailureReason(self, failure_reason):
    """Set the reason the test failed.

    This should be the first failure the test encounters and exclude any stack
    trace.
    """
    self._failure_reason = failure_reason

  def GetFailureReason(self):
    """Get the reason the test failed.

    Returns None if the test did not fail or if the reason the test failed is
    unknown.
    """
    return self._failure_reason

  def SetLink(self, name, link_url):
    """Set link with test result data."""
    self._links[name] = link_url

  def GetLinks(self):
    """Get dict containing links to test result data."""
    return self._links
+
+
class TestRunResults(object):
  """Set of results for a test run.

  All access to the underlying result set is serialized with a reentrant
  lock, so results may be added from multiple threads.
  """

  def __init__(self):
    self._links = {}
    self._results = set()
    # RLock (not Lock): public methods holding the lock call other public
    # methods that re-acquire it (e.g. GetGtestForm -> GetAll).
    self._results_lock = threading.RLock()

  def SetLink(self, name, link_url):
    """Add link with test run results data."""
    self._links[name] = link_url

  def GetLinks(self):
    """Get dict containing links to test run result data."""
    return self._links

  def GetLogs(self):
    """Get the string representation of all non-passing tests' logs."""
    with self._results_lock:
      s = []
      for test_type in ResultType.GetTypes():
        if test_type != ResultType.PASS:
          for t in sorted(self._GetType(test_type)):
            log = t.GetLog()
            if log:
              s.append('[%s] %s:' % (test_type, t))
              s.append(log)
      return '\n'.join(s)

  def GetGtestForm(self):
    """Get the gtest string representation of this object.

    The output mimics gtest's end-of-run summary banners; downstream tooling
    may parse it, so the literal strings below are a de-facto contract.
    NOTE(review): gtest normally pads banners as '[  PASSED  ]'; the
    single-space form here may be a transcription artifact — confirm against
    upstream before relying on exact spacing.
    """
    with self._results_lock:
      s = []
      # plural(3, 'test', 'tests') -> '3 tests'; plural(1, ...) -> '1 test'.
      plural = lambda n, s, p: '%d %s' % (n, p if n != 1 else s)
      tests = lambda n: plural(n, 'test', 'tests')

      s.append('[==========] %s ran.' % (tests(len(self.GetAll()))))
      s.append('[ PASSED ] %s.' % (tests(len(self.GetPass()))))

      skipped = self.GetSkip()
      if skipped:
        s.append('[ SKIPPED ] Skipped %s, listed below:' % tests(len(skipped)))
        for t in sorted(skipped):
          s.append('[ SKIPPED ] %s' % str(t))

      # Crashes, timeouts and unknown results are all reported as failures.
      all_failures = self.GetFail().union(self.GetCrash(), self.GetTimeout(),
          self.GetUnknown())
      if all_failures:
        s.append('[ FAILED ] %s, listed below:' % tests(len(all_failures)))
        for t in sorted(self.GetFail()):
          s.append('[ FAILED ] %s' % str(t))
        for t in sorted(self.GetCrash()):
          s.append('[ FAILED ] %s (CRASHED)' % str(t))
        for t in sorted(self.GetTimeout()):
          s.append('[ FAILED ] %s (TIMEOUT)' % str(t))
        for t in sorted(self.GetUnknown()):
          s.append('[ FAILED ] %s (UNKNOWN)' % str(t))
        s.append('')
        s.append(plural(len(all_failures), 'FAILED TEST', 'FAILED TESTS'))
      return '\n'.join(s)

  def GetShortForm(self):
    """Get the short string representation of this object."""
    with self._results_lock:
      s = []
      s.append('ALL: %d' % len(self._results))
      for test_type in ResultType.GetTypes():
        s.append('%s: %d' % (test_type, len(self._GetType(test_type))))
      # Column-align each 'LABEL: n' entry to 15 characters.
      return ''.join([x.ljust(15) for x in s])

  def __str__(self):
    return self.GetGtestForm()

  def AddResult(self, result):
    """Add |result| to the set.

    Args:
      result: An instance of BaseTestResult.
    """
    assert isinstance(result, BaseTestResult)
    with self._results_lock:
      # Equality is name-based, so discard-then-add replaces any existing
      # result for the same test with the newer one.
      self._results.discard(result)
      self._results.add(result)

  def AddResults(self, results):
    """Add |results| to the set.

    Args:
      results: An iterable of BaseTestResult objects.
    """
    with self._results_lock:
      for t in results:
        self.AddResult(t)

  def AddTestRunResults(self, results):
    """Add the set of test results from |results|.

    Unlike AddResult, this is a plain set union: existing entries with the
    same name are kept, not replaced.

    Args:
      results: An instance of TestRunResults.
    """
    assert isinstance(results, TestRunResults), (
        'Expected TestRunResult object: %s' % type(results))
    with self._results_lock:
      # pylint: disable=W0212
      self._results.update(results._results)

  def GetAll(self):
    """Get the set of all test results (a copy; safe to mutate)."""
    with self._results_lock:
      return self._results.copy()

  def _GetType(self, test_type):
    """Get the set of test results with the given test type."""
    with self._results_lock:
      return set(t for t in self._results if t.GetType() == test_type)

  def GetPass(self):
    """Get the set of all passed test results."""
    return self._GetType(ResultType.PASS)

  def GetSkip(self):
    """Get the set of all skipped test results."""
    return self._GetType(ResultType.SKIP)

  def GetFail(self):
    """Get the set of all failed test results."""
    return self._GetType(ResultType.FAIL)

  def GetCrash(self):
    """Get the set of all crashed test results."""
    return self._GetType(ResultType.CRASH)

  def GetTimeout(self):
    """Get the set of all timed out test results."""
    return self._GetType(ResultType.TIMEOUT)

  def GetUnknown(self):
    """Get the set of all unknown test results."""
    return self._GetType(ResultType.UNKNOWN)

  def GetNotPass(self):
    """Get the set of all non-passed test results."""
    return self.GetAll() - self.GetPass()

  def DidRunPass(self):
    """Return whether the test run was successful (skips count as passing)."""
    return not self.GetNotPass() - self.GetSkip()
diff --git a/third_party/libwebrtc/build/android/pylib/base/base_test_result_unittest.py b/third_party/libwebrtc/build/android/pylib/base/base_test_result_unittest.py
new file mode 100644
index 0000000000..294467e5fc
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/base_test_result_unittest.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unittests for TestRunResults."""
+
+
+import unittest
+
+from pylib.base.base_test_result import BaseTestResult
+from pylib.base.base_test_result import TestRunResults
+from pylib.base.base_test_result import ResultType
+
+
class TestTestRunResults(unittest.TestCase):
  """Unit tests for TestRunResults aggregation and formatting."""

  def setUp(self):
    self.p1 = BaseTestResult('p1', ResultType.PASS, log='pass1')
    # Same name as p1: result equality is name-based, so adding this after
    # p1 replaces p1's entry in the set (AddResult discards, then adds).
    other_p1 = BaseTestResult('p1', ResultType.PASS)
    self.p2 = BaseTestResult('p2', ResultType.PASS)
    self.f1 = BaseTestResult('f1', ResultType.FAIL, log='failure1')
    self.c1 = BaseTestResult('c1', ResultType.CRASH, log='crash1')
    self.u1 = BaseTestResult('u1', ResultType.UNKNOWN)
    self.tr = TestRunResults()
    self.tr.AddResult(self.p1)
    self.tr.AddResult(other_p1)
    self.tr.AddResult(self.p2)
    self.tr.AddResults(set([self.f1, self.c1, self.u1]))

  def testGetAll(self):
    # symmetric_difference is empty iff the two sets are equal.
    self.assertFalse(
        self.tr.GetAll().symmetric_difference(
            [self.p1, self.p2, self.f1, self.c1, self.u1]))

  def testGetPass(self):
    self.assertFalse(self.tr.GetPass().symmetric_difference(
        [self.p1, self.p2]))

  def testGetNotPass(self):
    self.assertFalse(self.tr.GetNotPass().symmetric_difference(
        [self.f1, self.c1, self.u1]))

  def testGetAddTestRunResults(self):
    # AddTestRunResults is a set union: tr2's existing 'p1' is kept.
    tr2 = TestRunResults()
    other_p1 = BaseTestResult('p1', ResultType.PASS)
    f2 = BaseTestResult('f2', ResultType.FAIL)
    tr2.AddResult(other_p1)
    tr2.AddResult(f2)
    tr2.AddTestRunResults(self.tr)
    self.assertFalse(
        tr2.GetAll().symmetric_difference(
            [self.p1, self.p2, self.f1, self.c1, self.u1, f2]))

  def testGetLogs(self):
    # Only non-PASS results with a non-empty log appear; u1 has no log.
    log_print = ('[FAIL] f1:\n'
                 'failure1\n'
                 '[CRASH] c1:\n'
                 'crash1')
    self.assertEqual(self.tr.GetLogs(), log_print)

  def testGetShortForm(self):
    short_print = ('ALL: 5 PASS: 2 FAIL: 1 '
                   'CRASH: 1 TIMEOUT: 0 UNKNOWN: 1 ')
    self.assertEqual(self.tr.GetShortForm(), short_print)

  def testGetGtestForm(self):
    # Crashes and unknowns are folded into the FAILED section.
    gtest_print = ('[==========] 5 tests ran.\n'
                   '[ PASSED ] 2 tests.\n'
                   '[ FAILED ] 3 tests, listed below:\n'
                   '[ FAILED ] f1\n'
                   '[ FAILED ] c1 (CRASHED)\n'
                   '[ FAILED ] u1 (UNKNOWN)\n'
                   '\n'
                   '3 FAILED TESTS')
    self.assertEqual(gtest_print, self.tr.GetGtestForm())

  def testRunPassed(self):
    self.assertFalse(self.tr.DidRunPass())
    tr2 = TestRunResults()
    self.assertTrue(tr2.DidRunPass())


if __name__ == '__main__':
  unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/base/environment.py b/third_party/libwebrtc/build/android/pylib/base/environment.py
new file mode 100644
index 0000000000..744c392c1b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/environment.py
@@ -0,0 +1,49 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
class Environment(object):
  """Abstract base for the context a set of tests executes in.

  Concrete subclasses own all setup/teardown that applies to a whole
  environment but is independent of the test type, for example:
    - the local device environment, which drives devices attached to the
      local machine.
    - the local machine environment, which runs tests directly on the host.
  """

  def __init__(self, output_manager):
    """Initializes the environment.

    Args:
      output_manager: Instance of |output_manager.OutputManager| used to
          save test output.
    """
    self._output_manager = output_manager
    # Flipped by ReceivedSigterm(); some subclasses tear down differently
    # after a SIGTERM has been observed.
    self._received_sigterm = False

  @property
  def output_manager(self):
    return self._output_manager

  def SetUp(self):
    """Performs environment setup; must be overridden by subclasses."""
    raise NotImplementedError

  def TearDown(self):
    """Performs environment teardown; must be overridden by subclasses."""
    raise NotImplementedError

  def __enter__(self):
    self.SetUp()
    return self

  def __exit__(self, _exc_type, _exc_val, _exc_tb):
    self.TearDown()

  def ReceivedSigterm(self):
    """Records that a SIGTERM was received, for later teardown decisions."""
    self._received_sigterm = True
diff --git a/third_party/libwebrtc/build/android/pylib/base/environment_factory.py b/third_party/libwebrtc/build/android/pylib/base/environment_factory.py
new file mode 100644
index 0000000000..491730ff8f
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/environment_factory.py
@@ -0,0 +1,34 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from pylib import constants
+from pylib.local.device import local_device_environment
+from pylib.local.machine import local_machine_environment
+
+try:
+ # local_emulator_environment depends on //tools.
+ # If a client pulls in the //build subtree but not the //tools
+ # one, fail at emulator environment creation time.
+ from pylib.local.emulator import local_emulator_environment
+except ImportError:
+ local_emulator_environment = None
+
+
+def CreateEnvironment(args, output_manager, error_func):
+
+ if args.environment == 'local':
+ if args.command not in constants.LOCAL_MACHINE_TESTS:
+ if args.avd_config:
+ if not local_emulator_environment:
+ error_func('emulator environment requested but not available.')
+ return local_emulator_environment.LocalEmulatorEnvironment(
+ args, output_manager, error_func)
+ return local_device_environment.LocalDeviceEnvironment(
+ args, output_manager, error_func)
+ else:
+ return local_machine_environment.LocalMachineEnvironment(
+ args, output_manager, error_func)
+
+ error_func('Unable to create %s environment.' % args.environment)
diff --git a/third_party/libwebrtc/build/android/pylib/base/mock_environment.py b/third_party/libwebrtc/build/android/pylib/base/mock_environment.py
new file mode 100644
index 0000000000..02fa5afee3
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/mock_environment.py
@@ -0,0 +1,11 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from pylib.base import environment
+
+import mock # pylint: disable=import-error
+
+
+MockEnvironment = mock.MagicMock(environment.Environment)
diff --git a/third_party/libwebrtc/build/android/pylib/base/mock_test_instance.py b/third_party/libwebrtc/build/android/pylib/base/mock_test_instance.py
new file mode 100644
index 0000000000..57b4e62adb
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/mock_test_instance.py
@@ -0,0 +1,11 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from pylib.base import test_instance
+
+import mock # pylint: disable=import-error
+
+
+MockTestInstance = mock.MagicMock(test_instance.TestInstance)
diff --git a/third_party/libwebrtc/build/android/pylib/base/output_manager.py b/third_party/libwebrtc/build/android/pylib/base/output_manager.py
new file mode 100644
index 0000000000..83af7e268a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/output_manager.py
@@ -0,0 +1,159 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import contextlib
+import logging
+import os
+import tempfile
+
+from devil.utils import reraiser_thread
+
+
+class Datatype(object):
+ HTML = 'text/html'
+ JSON = 'application/json'
+ PNG = 'image/png'
+ TEXT = 'text/plain'
+
+
+class OutputManager(object):
+
+ def __init__(self):
+ """OutputManager Constructor.
+
+ This class provides a simple interface to save test output. Subclasses
+ of this will allow users to save test results in the cloud or locally.
+ """
+ self._allow_upload = False
+ self._thread_group = None
+
+ @contextlib.contextmanager
+ def ArchivedTempfile(
+ self, out_filename, out_subdir, datatype=Datatype.TEXT):
+ """Archive file contents asynchonously and then deletes file.
+
+ Args:
+ out_filename: Name for saved file.
+ out_subdir: Directory to save |out_filename| to.
+ datatype: Datatype of file.
+
+ Returns:
+ An ArchivedFile file. This file will be uploaded async when the context
+ manager exits. AFTER the context manager exits, you can get the link to
+ where the file will be stored using the Link() API. You can use typical
+    file APIs to write and flush the ArchivedFile. You can also use file.name
+ to get the local filepath to where the underlying file exists. If you do
+    this, you are responsible for flushing the file before exiting the context
+ manager.
+ """
+ if not self._allow_upload:
+ raise Exception('Must run |SetUp| before attempting to upload!')
+
+ f = self._CreateArchivedFile(out_filename, out_subdir, datatype)
+ try:
+ yield f
+ finally:
+ f.PrepareArchive()
+
+ def archive():
+ try:
+ f.Archive()
+ finally:
+ f.Delete()
+
+ thread = reraiser_thread.ReraiserThread(func=archive)
+ thread.start()
+ self._thread_group.Add(thread)
+
+ def _CreateArchivedFile(self, out_filename, out_subdir, datatype):
+ """Returns an instance of ArchivedFile."""
+ raise NotImplementedError
+
+ def SetUp(self):
+ self._allow_upload = True
+ self._thread_group = reraiser_thread.ReraiserThreadGroup()
+
+ def TearDown(self):
+ self._allow_upload = False
+ logging.info('Finishing archiving output.')
+ self._thread_group.JoinAll()
+
+ def __enter__(self):
+ self.SetUp()
+ return self
+
+ def __exit__(self, _exc_type, _exc_val, _exc_tb):
+ self.TearDown()
+
+
+class ArchivedFile(object):
+
+ def __init__(self, out_filename, out_subdir, datatype):
+ self._out_filename = out_filename
+ self._out_subdir = out_subdir
+ self._datatype = datatype
+
+ self._f = tempfile.NamedTemporaryFile(delete=False)
+ self._ready_to_archive = False
+
+ @property
+ def name(self):
+ return self._f.name
+
+ def write(self, *args, **kwargs):
+ if self._ready_to_archive:
+ raise Exception('Cannot write to file after archiving has begun!')
+ self._f.write(*args, **kwargs)
+
+ def flush(self, *args, **kwargs):
+ if self._ready_to_archive:
+ raise Exception('Cannot flush file after archiving has begun!')
+ self._f.flush(*args, **kwargs)
+
+ def Link(self):
+ """Returns location of archived file."""
+ if not self._ready_to_archive:
+ raise Exception('Cannot get link to archived file before archiving '
+ 'has begun')
+ return self._Link()
+
+ def _Link(self):
+ """Note for when overriding this function.
+
+ This function will certainly be called before the file
+ has finished being archived. Therefore, this needs to be able to know the
+ exact location of the archived file before it is finished being archived.
+ """
+ raise NotImplementedError
+
+ def PrepareArchive(self):
+ """Meant to be called synchronously to prepare file for async archiving."""
+ self.flush()
+ self._ready_to_archive = True
+ self._PrepareArchive()
+
+ def _PrepareArchive(self):
+ """Note for when overriding this function.
+
+ This function is needed for things such as computing the location of
+ content addressed files. This is called after the file is written but
+ before archiving has begun.
+ """
+ pass
+
+ def Archive(self):
+ """Archives file."""
+ if not self._ready_to_archive:
+ raise Exception('File is not ready to archive. Be sure you are not '
+ 'writing to the file and PrepareArchive has been called')
+ self._Archive()
+
+ def _Archive(self):
+ raise NotImplementedError
+
+ def Delete(self):
+ """Deletes the backing file."""
+ self._f.close()
+ os.remove(self.name)
diff --git a/third_party/libwebrtc/build/android/pylib/base/output_manager_factory.py b/third_party/libwebrtc/build/android/pylib/base/output_manager_factory.py
new file mode 100644
index 0000000000..e5d0692881
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/output_manager_factory.py
@@ -0,0 +1,18 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from pylib import constants
+from pylib.output import local_output_manager
+from pylib.output import remote_output_manager
+from pylib.utils import local_utils
+
+
+def CreateOutputManager(args):
+ if args.local_output or not local_utils.IsOnSwarming():
+ return local_output_manager.LocalOutputManager(
+ output_dir=constants.GetOutDirectory())
+ else:
+ return remote_output_manager.RemoteOutputManager(
+ bucket=args.gs_results_bucket)
diff --git a/third_party/libwebrtc/build/android/pylib/base/output_manager_test_case.py b/third_party/libwebrtc/build/android/pylib/base/output_manager_test_case.py
new file mode 100644
index 0000000000..0d83f082f8
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/output_manager_test_case.py
@@ -0,0 +1,15 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import os.path
+import unittest
+
+
+class OutputManagerTestCase(unittest.TestCase):
+
+ def assertUsableTempFile(self, archived_tempfile):
+ self.assertTrue(bool(archived_tempfile.name))
+ self.assertTrue(os.path.exists(archived_tempfile.name))
+ self.assertTrue(os.path.isfile(archived_tempfile.name))
diff --git a/third_party/libwebrtc/build/android/pylib/base/test_collection.py b/third_party/libwebrtc/build/android/pylib/base/test_collection.py
new file mode 100644
index 0000000000..34f21fe873
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/test_collection.py
@@ -0,0 +1,81 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import threading
+
+class TestCollection(object):
+ """A threadsafe collection of tests.
+
+ Args:
+ tests: List of tests to put in the collection.
+ """
+
+ def __init__(self, tests=None):
+ if not tests:
+ tests = []
+ self._lock = threading.Lock()
+ self._tests = []
+ self._tests_in_progress = 0
+ # Used to signal that an item is available or all items have been handled.
+ self._item_available_or_all_done = threading.Event()
+ for t in tests:
+ self.add(t)
+
+ def _pop(self):
+ """Pop a test from the collection.
+
+ Waits until a test is available or all tests have been handled.
+
+ Returns:
+ A test or None if all tests have been handled.
+ """
+ while True:
+ # Wait for a test to be available or all tests to have been handled.
+ self._item_available_or_all_done.wait()
+ with self._lock:
+ # Check which of the two conditions triggered the signal.
+ if self._tests_in_progress == 0:
+ return None
+ try:
+ return self._tests.pop(0)
+ except IndexError:
+ # Another thread beat us to the available test, wait again.
+ self._item_available_or_all_done.clear()
+
+ def add(self, test):
+ """Add a test to the collection.
+
+ Args:
+ test: A test to add.
+ """
+ with self._lock:
+ self._tests.append(test)
+ self._item_available_or_all_done.set()
+ self._tests_in_progress += 1
+
+ def test_completed(self):
+ """Indicate that a test has been fully handled."""
+ with self._lock:
+ self._tests_in_progress -= 1
+ if self._tests_in_progress == 0:
+ # All tests have been handled, signal all waiting threads.
+ self._item_available_or_all_done.set()
+
+ def __iter__(self):
+ """Iterate through tests in the collection until all have been handled."""
+ while True:
+ r = self._pop()
+ if r is None:
+ break
+ yield r
+
+ def __len__(self):
+ """Return the number of tests currently in the collection."""
+ return len(self._tests)
+
+ def test_names(self):
+ """Return a list of the names of the tests currently in the collection."""
+ with self._lock:
+ return list(t.test for t in self._tests)
diff --git a/third_party/libwebrtc/build/android/pylib/base/test_exception.py b/third_party/libwebrtc/build/android/pylib/base/test_exception.py
new file mode 100644
index 0000000000..c98d2cb73e
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/test_exception.py
@@ -0,0 +1,8 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class TestException(Exception):
+ """Base class for exceptions thrown by the test runner."""
+ pass
diff --git a/third_party/libwebrtc/build/android/pylib/base/test_instance.py b/third_party/libwebrtc/build/android/pylib/base/test_instance.py
new file mode 100644
index 0000000000..7b1099cffa
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/test_instance.py
@@ -0,0 +1,40 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class TestInstance(object):
+ """A type of test.
+
+ This is expected to handle all logic that is test-type specific but
+ independent of the environment or device.
+
+ Examples include:
+ - gtests
+ - instrumentation tests
+ """
+
+ def __init__(self):
+ pass
+
+ def TestType(self):
+ raise NotImplementedError
+
+ # pylint: disable=no-self-use
+ def GetPreferredAbis(self):
+ return None
+
+ # pylint: enable=no-self-use
+
+ def SetUp(self):
+ raise NotImplementedError
+
+ def TearDown(self):
+ raise NotImplementedError
+
+ def __enter__(self):
+ self.SetUp()
+ return self
+
+ def __exit__(self, _exc_type, _exc_val, _exc_tb):
+ self.TearDown()
diff --git a/third_party/libwebrtc/build/android/pylib/base/test_instance_factory.py b/third_party/libwebrtc/build/android/pylib/base/test_instance_factory.py
new file mode 100644
index 0000000000..d276428d71
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/test_instance_factory.py
@@ -0,0 +1,26 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from pylib.gtest import gtest_test_instance
+from pylib.instrumentation import instrumentation_test_instance
+from pylib.junit import junit_test_instance
+from pylib.monkey import monkey_test_instance
+from pylib.utils import device_dependencies
+
+
+def CreateTestInstance(args, error_func):
+
+ if args.command == 'gtest':
+ return gtest_test_instance.GtestTestInstance(
+ args, device_dependencies.GetDataDependencies, error_func)
+ elif args.command == 'instrumentation':
+ return instrumentation_test_instance.InstrumentationTestInstance(
+ args, device_dependencies.GetDataDependencies, error_func)
+ elif args.command == 'junit':
+ return junit_test_instance.JunitTestInstance(args, error_func)
+ elif args.command == 'monkey':
+ return monkey_test_instance.MonkeyTestInstance(args, error_func)
+
+ error_func('Unable to create %s test instance.' % args.command)
diff --git a/third_party/libwebrtc/build/android/pylib/base/test_run.py b/third_party/libwebrtc/build/android/pylib/base/test_run.py
new file mode 100644
index 0000000000..fc72d3a547
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/test_run.py
@@ -0,0 +1,50 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class TestRun(object):
+ """An execution of a particular test on a particular device.
+
+ This is expected to handle all logic that is specific to the combination of
+ environment and test type.
+
+ Examples include:
+ - local gtests
+ - local instrumentation tests
+ """
+
+ def __init__(self, env, test_instance):
+ self._env = env
+ self._test_instance = test_instance
+
+ # Some subclasses have different teardown behavior on receiving SIGTERM.
+ self._received_sigterm = False
+
+ def TestPackage(self):
+ raise NotImplementedError
+
+ def SetUp(self):
+ raise NotImplementedError
+
+ def RunTests(self, results):
+ """Runs Tests and populates |results|.
+
+ Args:
+ results: An array that should be populated with
+ |base_test_result.TestRunResults| objects.
+ """
+ raise NotImplementedError
+
+ def TearDown(self):
+ raise NotImplementedError
+
+ def __enter__(self):
+ self.SetUp()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.TearDown()
+
+ def ReceivedSigterm(self):
+ self._received_sigterm = True
diff --git a/third_party/libwebrtc/build/android/pylib/base/test_run_factory.py b/third_party/libwebrtc/build/android/pylib/base/test_run_factory.py
new file mode 100644
index 0000000000..f62ba77a2e
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/test_run_factory.py
@@ -0,0 +1,36 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from pylib.gtest import gtest_test_instance
+from pylib.instrumentation import instrumentation_test_instance
+from pylib.junit import junit_test_instance
+from pylib.monkey import monkey_test_instance
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_gtest_run
+from pylib.local.device import local_device_instrumentation_test_run
+from pylib.local.device import local_device_monkey_test_run
+from pylib.local.machine import local_machine_environment
+from pylib.local.machine import local_machine_junit_test_run
+
+
+def CreateTestRun(env, test_instance, error_func):
+ if isinstance(env, local_device_environment.LocalDeviceEnvironment):
+ if isinstance(test_instance, gtest_test_instance.GtestTestInstance):
+ return local_device_gtest_run.LocalDeviceGtestRun(env, test_instance)
+ if isinstance(test_instance,
+ instrumentation_test_instance.InstrumentationTestInstance):
+ return (local_device_instrumentation_test_run
+ .LocalDeviceInstrumentationTestRun(env, test_instance))
+ if isinstance(test_instance, monkey_test_instance.MonkeyTestInstance):
+ return (local_device_monkey_test_run
+ .LocalDeviceMonkeyTestRun(env, test_instance))
+
+ if isinstance(env, local_machine_environment.LocalMachineEnvironment):
+ if isinstance(test_instance, junit_test_instance.JunitTestInstance):
+ return (local_machine_junit_test_run
+ .LocalMachineJunitTestRun(env, test_instance))
+
+ error_func('Unable to create test run for %s tests in %s environment'
+ % (str(test_instance), str(env)))
diff --git a/third_party/libwebrtc/build/android/pylib/base/test_server.py b/third_party/libwebrtc/build/android/pylib/base/test_server.py
new file mode 100644
index 0000000000..763e1212c3
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/base/test_server.py
@@ -0,0 +1,18 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class TestServer(object):
+ """Base class for any server that needs to be set up for the tests."""
+
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def SetUp(self):
+ raise NotImplementedError
+
+ def Reset(self):
+ raise NotImplementedError
+
+ def TearDown(self):
+ raise NotImplementedError
diff --git a/third_party/libwebrtc/build/android/pylib/constants/__init__.py b/third_party/libwebrtc/build/android/pylib/constants/__init__.py
new file mode 100644
index 0000000000..e87b8fe67d
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/constants/__init__.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines a set of constants shared by test runners and other scripts."""
+
+# TODO(jbudorick): Split these constants into coherent modules.
+
+# pylint: disable=W0212
+
+
+import collections
+import glob
+import logging
+import os
+import subprocess
+
+import devil.android.sdk.keyevent
+from devil.android.constants import chrome
+from devil.android.sdk import version_codes
+from devil.constants import exit_codes
+
+
+keyevent = devil.android.sdk.keyevent
+
+
+DIR_SOURCE_ROOT = os.environ.get('CHECKOUT_SOURCE_ROOT',
+ os.path.abspath(os.path.join(os.path.dirname(__file__),
+ os.pardir, os.pardir, os.pardir, os.pardir)))
+
+PACKAGE_INFO = dict(chrome.PACKAGE_INFO)
+PACKAGE_INFO.update({
+ 'legacy_browser':
+ chrome.PackageInfo('com.google.android.browser',
+ 'com.android.browser.BrowserActivity', None, None),
+ 'chromecast_shell':
+ chrome.PackageInfo('com.google.android.apps.mediashell',
+ 'com.google.android.apps.mediashell.MediaShellActivity',
+ 'castshell-command-line', None),
+ 'android_webview_shell':
+ chrome.PackageInfo('org.chromium.android_webview.shell',
+ 'org.chromium.android_webview.shell.AwShellActivity',
+ 'android-webview-command-line', None),
+ 'gtest':
+ chrome.PackageInfo('org.chromium.native_test',
+ 'org.chromium.native_test.NativeUnitTestActivity',
+ 'chrome-native-tests-command-line', None),
+ 'android_browsertests':
+ chrome.PackageInfo('org.chromium.android_browsertests_apk',
+ ('org.chromium.android_browsertests_apk' +
+ '.ChromeBrowserTestsActivity'),
+ 'chrome-native-tests-command-line', None),
+ 'components_browsertests':
+ chrome.PackageInfo('org.chromium.components_browsertests_apk',
+ ('org.chromium.components_browsertests_apk' +
+ '.ComponentsBrowserTestsActivity'),
+ 'chrome-native-tests-command-line', None),
+ 'content_browsertests':
+ chrome.PackageInfo(
+ 'org.chromium.content_browsertests_apk',
+ 'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity',
+ 'chrome-native-tests-command-line', None),
+ 'chromedriver_webview_shell':
+ chrome.PackageInfo('org.chromium.chromedriver_webview_shell',
+ 'org.chromium.chromedriver_webview_shell.Main', None,
+ None),
+ 'android_webview_cts':
+ chrome.PackageInfo('com.android.webview',
+ 'com.android.cts.webkit.WebViewStartupCtsActivity',
+ 'webview-command-line', None),
+ 'android_google_webview_cts':
+ chrome.PackageInfo('com.google.android.webview',
+ 'com.android.cts.webkit.WebViewStartupCtsActivity',
+ 'webview-command-line', None),
+ 'android_system_webview_shell':
+ chrome.PackageInfo('org.chromium.webview_shell',
+ 'org.chromium.webview_shell.WebViewBrowserActivity',
+ 'webview-command-line', None),
+ 'android_webview_ui_test':
+ chrome.PackageInfo('org.chromium.webview_ui_test',
+ 'org.chromium.webview_ui_test.WebViewUiTestActivity',
+ 'webview-command-line', None),
+ 'weblayer_browsertests':
+ chrome.PackageInfo(
+ 'org.chromium.weblayer_browsertests_apk',
+ 'org.chromium.weblayer_browsertests_apk.WebLayerBrowserTestsActivity',
+ 'chrome-native-tests-command-line', None),
+})
+
+
+# Ports arrangement for various test servers used in Chrome for Android.
+# Lighttpd server will attempt to use 9000 as default port, if unavailable it
+# will find a free port from 8001 - 8999.
+LIGHTTPD_DEFAULT_PORT = 9000
+LIGHTTPD_RANDOM_PORT_FIRST = 8001
+LIGHTTPD_RANDOM_PORT_LAST = 8999
+TEST_SYNC_SERVER_PORT = 9031
+TEST_SEARCH_BY_IMAGE_SERVER_PORT = 9041
+TEST_POLICY_SERVER_PORT = 9051
+
+
+TEST_EXECUTABLE_DIR = '/data/local/tmp'
+# Directories for common java libraries for SDK build.
+# These constants are defined in build/android/ant/common.xml
+SDK_BUILD_JAVALIB_DIR = 'lib.java'
+SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
+SDK_BUILD_APKS_DIR = 'apks'
+
+ADB_KEYS_FILE = '/data/misc/adb/adb_keys'
+
+PERF_OUTPUT_DIR = os.path.join(DIR_SOURCE_ROOT, 'out', 'step_results')
+# The directory on the device where perf test output gets saved to.
+DEVICE_PERF_OUTPUT_DIR = (
+ '/data/data/' + PACKAGE_INFO['chrome'].package + '/files')
+
+SCREENSHOTS_DIR = os.path.join(DIR_SOURCE_ROOT, 'out_screenshots')
+
+ANDROID_SDK_BUILD_TOOLS_VERSION = '31.0.0'
+ANDROID_SDK_ROOT = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'android_sdk',
+ 'public')
+ANDROID_SDK_TOOLS = os.path.join(ANDROID_SDK_ROOT,
+ 'build-tools', ANDROID_SDK_BUILD_TOOLS_VERSION)
+ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
+ 'third_party', 'android_ndk')
+
+BAD_DEVICES_JSON = os.path.join(DIR_SOURCE_ROOT,
+ os.environ.get('CHROMIUM_OUT_DIR', 'out'),
+ 'bad_devices.json')
+
+UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
+
+# TODO(jbudorick): Remove once unused.
+DEVICE_LOCAL_PROPERTIES_PATH = '/data/local.prop'
+
+# Configure ubsan to print stack traces in the format understood by "stack" so
+# that they will be symbolized, and disable signal handlers because they
+# interfere with the breakpad and sandbox tests.
+# This value is duplicated in
+# base/android/java/src/org/chromium/base/library_loader/LibraryLoader.java
+UBSAN_OPTIONS = (
+ 'print_stacktrace=1 stack_trace_format=\'#%n pc %o %m\' '
+ 'handle_segv=0 handle_sigbus=0 handle_sigfpe=0')
+
+# TODO(jbudorick): Rework this into testing/buildbot/
+PYTHON_UNIT_TEST_SUITES = {
+ 'pylib_py_unittests': {
+ 'path':
+ os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
+ 'test_modules': [
+ 'devil.android.device_utils_test',
+ 'devil.android.md5sum_test',
+ 'devil.utils.cmd_helper_test',
+ 'pylib.results.json_results_test',
+ 'pylib.utils.proguard_test',
+ ]
+ },
+ 'gyp_py_unittests': {
+ 'path':
+ os.path.join(DIR_SOURCE_ROOT, 'build', 'android', 'gyp'),
+ 'test_modules': [
+ 'java_cpp_enum_tests',
+ 'java_cpp_strings_tests',
+ 'java_google_api_keys_tests',
+ 'extract_unwind_tables_tests',
+ ]
+ },
+}
+
+LOCAL_MACHINE_TESTS = ['junit', 'python']
+VALID_ENVIRONMENTS = ['local']
+VALID_TEST_TYPES = ['gtest', 'instrumentation', 'junit', 'linker', 'monkey',
+ 'perf', 'python']
+VALID_DEVICE_TYPES = ['Android', 'iOS']
+
+
+def SetBuildType(build_type):
+ """Set the BUILDTYPE environment variable.
+
+ NOTE: Using this function is deprecated, in favor of SetOutputDirectory(),
+ it is still maintained for a few scripts that typically call it
+ to implement their --release and --debug command-line options.
+
+ When writing a new script, consider supporting an --output-dir or
+ --chromium-output-dir option instead, and calling SetOutputDirectory()
+ instead.
+
+  NOTE: If CHROMIUM_OUTPUT_DIR is defined, or if SetOutputDirectory() was
+ called previously, this will be completely ignored.
+ """
+ chromium_output_dir = os.environ.get('CHROMIUM_OUTPUT_DIR')
+ if chromium_output_dir:
+ logging.warning(
+ 'SetBuildType("%s") ignored since CHROMIUM_OUTPUT_DIR is already '
+ 'defined as (%s)', build_type, chromium_output_dir)
+ os.environ['BUILDTYPE'] = build_type
+
+
+def SetOutputDirectory(output_directory):
+ """Set the Chromium output directory.
+
+ This must be called early by scripts that rely on GetOutDirectory() or
+ CheckOutputDirectory(). Typically by providing an --output-dir or
+ --chromium-output-dir option.
+ """
+ os.environ['CHROMIUM_OUTPUT_DIR'] = output_directory
+
+
+# The message that is printed when the Chromium output directory cannot
+# be found. Note that CHROMIUM_OUT_DIR and BUILDTYPE are not mentioned
+# intentionally to encourage the use of CHROMIUM_OUTPUT_DIR instead.
+_MISSING_OUTPUT_DIR_MESSAGE = '\
+The Chromium output directory could not be found. Please use an option such as \
+--output-directory to provide it (see --help for details). Otherwise, \
+define the CHROMIUM_OUTPUT_DIR environment variable.'
+
+
+def GetOutDirectory():
+ """Returns the Chromium build output directory.
+
+ NOTE: This is determined in the following way:
+ - From a previous call to SetOutputDirectory()
+ - Otherwise, from the CHROMIUM_OUTPUT_DIR env variable, if it is defined.
+ - Otherwise, from the current Chromium source directory, and a previous
+ call to SetBuildType() or the BUILDTYPE env variable, in combination
+ with the optional CHROMIUM_OUT_DIR env variable.
+ """
+ if 'CHROMIUM_OUTPUT_DIR' in os.environ:
+ return os.path.abspath(os.path.join(
+ DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
+
+ build_type = os.environ.get('BUILDTYPE')
+ if not build_type:
+ raise EnvironmentError(_MISSING_OUTPUT_DIR_MESSAGE)
+
+ return os.path.abspath(os.path.join(
+ DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
+ build_type))
+
+
+def CheckOutputDirectory():
+ """Checks that the Chromium output directory is set, or can be found.
+
+ If it is not already set, this will also perform a little auto-detection:
+
+ - If the current directory contains a build.ninja file, use it as
+ the output directory.
+
+ - If CHROME_HEADLESS is defined in the environment (e.g. on a bot),
+ look if there is a single output directory under DIR_SOURCE_ROOT/out/,
+ and if so, use it as the output directory.
+
+ Raises:
+ Exception: If no output directory is detected.
+ """
+ output_dir = os.environ.get('CHROMIUM_OUTPUT_DIR')
+ if output_dir:
+ return
+
+ build_type = os.environ.get('BUILDTYPE')
+ if build_type and len(build_type) > 1:
+ return
+
+ # If CWD is an output directory, then assume it's the desired one.
+ if os.path.exists('build.ninja'):
+ output_dir = os.getcwd()
+ SetOutputDirectory(output_dir)
+ return
+
+ # When running on bots, see if the output directory is obvious.
+ # TODO(http://crbug.com/833808): Get rid of this by ensuring bots always set
+ # CHROMIUM_OUTPUT_DIR correctly.
+ if os.environ.get('CHROME_HEADLESS'):
+ dirs = glob.glob(os.path.join(DIR_SOURCE_ROOT, 'out', '*', 'build.ninja'))
+ if len(dirs) == 1:
+ SetOutputDirectory(dirs[0])
+ return
+
+ raise Exception(
+ 'Chromium output directory not set, and CHROME_HEADLESS detected. ' +
+ 'However, multiple out dirs exist: %r' % dirs)
+
+ raise Exception(_MISSING_OUTPUT_DIR_MESSAGE)
+
+
+# Exit codes
+ERROR_EXIT_CODE = exit_codes.ERROR
+INFRA_EXIT_CODE = exit_codes.INFRA
+WARNING_EXIT_CODE = exit_codes.WARNING
diff --git a/third_party/libwebrtc/build/android/pylib/constants/host_paths.py b/third_party/libwebrtc/build/android/pylib/constants/host_paths.py
new file mode 100644
index 0000000000..aa5907eb15
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/constants/host_paths.py
@@ -0,0 +1,97 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import contextlib
+import os
+import sys
+
+from pylib import constants
+
+DIR_SOURCE_ROOT = os.environ.get(
+ 'CHECKOUT_SOURCE_ROOT',
+ os.path.abspath(os.path.join(os.path.dirname(__file__),
+ os.pardir, os.pardir, os.pardir, os.pardir)))
+
+BUILD_COMMON_PATH = os.path.join(
+ DIR_SOURCE_ROOT, 'build', 'util', 'lib', 'common')
+
+# third-party libraries
+ANDROID_PLATFORM_DEVELOPMENT_SCRIPTS_PATH = os.path.join(
+ DIR_SOURCE_ROOT, 'third_party', 'android_platform', 'development',
+ 'scripts')
+BUILD_PATH = os.path.join(DIR_SOURCE_ROOT, 'build')
+DEVIL_PATH = os.path.join(
+ DIR_SOURCE_ROOT, 'third_party', 'catapult', 'devil')
+JAVA_PATH = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'jdk', 'current',
+ 'bin')
+TRACING_PATH = os.path.join(
+ DIR_SOURCE_ROOT, 'third_party', 'catapult', 'tracing')
+
+@contextlib.contextmanager
+def SysPath(path, position=None):
+ if position is None:
+ sys.path.append(path)
+ else:
+ sys.path.insert(position, path)
+ try:
+ yield
+ finally:
+ if sys.path[-1] == path:
+ sys.path.pop()
+ else:
+ sys.path.remove(path)
+
+
+# Map of CPU architecture name to (toolchain_name, binprefix) pairs.
+# TODO(digit): Use the build_vars.json file generated by gn.
+_TOOL_ARCH_MAP = {
+ 'arm': ('arm-linux-androideabi-4.9', 'arm-linux-androideabi'),
+ 'arm64': ('aarch64-linux-android-4.9', 'aarch64-linux-android'),
+ 'x86': ('x86-4.9', 'i686-linux-android'),
+ 'x86_64': ('x86_64-4.9', 'x86_64-linux-android'),
+ 'x64': ('x86_64-4.9', 'x86_64-linux-android'),
+ 'mips': ('mipsel-linux-android-4.9', 'mipsel-linux-android'),
+}
+
+# Cache used to speed up the results of ToolPath()
+# Maps (arch, tool_name) pairs to fully qualified program paths.
+# Useful because ToolPath() is called repeatedly for demangling C++ symbols.
+_cached_tool_paths = {}
+
+
+def ToolPath(tool, cpu_arch):
+ """Return a fully qualifed path to an arch-specific toolchain program.
+
+ Args:
+ tool: Unprefixed toolchain program name (e.g. 'objdump')
+ cpu_arch: Target CPU architecture (e.g. 'arm64')
+ Returns:
+ Fully qualified path (e.g. ..../aarch64-linux-android-objdump')
+ Raises:
+ Exception if the toolchain could not be found.
+ """
+ tool_path = _cached_tool_paths.get((tool, cpu_arch))
+ if tool_path:
+ return tool_path
+
+ toolchain_source, toolchain_prefix = _TOOL_ARCH_MAP.get(
+ cpu_arch, (None, None))
+ if not toolchain_source:
+ raise Exception('Could not find tool chain for ' + cpu_arch)
+
+ toolchain_subdir = (
+ 'toolchains/%s/prebuilt/linux-x86_64/bin' % toolchain_source)
+
+ tool_path = os.path.join(constants.ANDROID_NDK_ROOT,
+ toolchain_subdir,
+ toolchain_prefix + '-' + tool)
+
+ _cached_tool_paths[(tool, cpu_arch)] = tool_path
+ return tool_path
+
+
+def GetAaptPath():
+ """Returns the path to the 'aapt' executable."""
+ return os.path.join(constants.ANDROID_SDK_TOOLS, 'aapt')
diff --git a/third_party/libwebrtc/build/android/pylib/constants/host_paths_unittest.py b/third_party/libwebrtc/build/android/pylib/constants/host_paths_unittest.py
new file mode 100755
index 0000000000..f64f5c7552
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/constants/host_paths_unittest.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+import os
+import unittest
+
+import six
+import pylib.constants as constants
+import pylib.constants.host_paths as host_paths
+
+# This map corresponds to the binprefix of NDK prebuilt toolchains for various
+# target CPU architectures. Note that 'x86_64' and 'x64' are the same.
+_EXPECTED_NDK_TOOL_SUBDIR_MAP = {
+ 'arm': 'toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/bin/' +
+ 'arm-linux-androideabi-',
+ 'arm64':
+ 'toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/bin/' +
+ 'aarch64-linux-android-',
+ 'x86': 'toolchains/x86-4.9/prebuilt/linux-x86_64/bin/i686-linux-android-',
+ 'x86_64':
+ 'toolchains/x86_64-4.9/prebuilt/linux-x86_64/bin/x86_64-linux-android-',
+ 'x64':
+ 'toolchains/x86_64-4.9/prebuilt/linux-x86_64/bin/x86_64-linux-android-',
+ 'mips':
+ 'toolchains/mipsel-linux-android-4.9/prebuilt/linux-x86_64/bin/' +
+ 'mipsel-linux-android-'
+}
+
+
+class HostPathsTest(unittest.TestCase):
+ def setUp(self):
+ logging.getLogger().setLevel(logging.ERROR)
+
+ def test_GetAaptPath(self):
+ _EXPECTED_AAPT_PATH = os.path.join(constants.ANDROID_SDK_TOOLS, 'aapt')
+ self.assertEqual(host_paths.GetAaptPath(), _EXPECTED_AAPT_PATH)
+ self.assertEqual(host_paths.GetAaptPath(), _EXPECTED_AAPT_PATH)
+
+ def test_ToolPath(self):
+ for cpu_arch, binprefix in six.iteritems(_EXPECTED_NDK_TOOL_SUBDIR_MAP):
+ expected_binprefix = os.path.join(constants.ANDROID_NDK_ROOT, binprefix)
+ expected_path = expected_binprefix + 'foo'
+ self.assertEqual(host_paths.ToolPath('foo', cpu_arch), expected_path)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/content_settings.py b/third_party/libwebrtc/build/android/pylib/content_settings.py
new file mode 100644
index 0000000000..5ea7c525ed
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/content_settings.py
@@ -0,0 +1,80 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ContentSettings(dict):
+
+ """A dict interface to interact with device content settings.
+
+ System properties are key/value pairs as exposed by adb shell content.
+ """
+
+ def __init__(self, table, device):
+ super(ContentSettings, self).__init__()
+ self._table = table
+ self._device = device
+
+ @staticmethod
+ def _GetTypeBinding(value):
+ if isinstance(value, bool):
+ return 'b'
+ if isinstance(value, float):
+ return 'f'
+ if isinstance(value, int):
+ return 'i'
+ if isinstance(value, int):
+ return 'l'
+ if isinstance(value, str):
+ return 's'
+ raise ValueError('Unsupported type %s' % type(value))
+
+ def iteritems(self):
+ # Example row:
+ # 'Row: 0 _id=13, name=logging_id2, value=-1fccbaa546705b05'
+ for row in self._device.RunShellCommand(
+ 'content query --uri content://%s' % self._table, as_root=True):
+ fields = row.split(', ')
+ key = None
+ value = None
+ for field in fields:
+ k, _, v = field.partition('=')
+ if k == 'name':
+ key = v
+ elif k == 'value':
+ value = v
+ if not key:
+ continue
+ if not value:
+ value = ''
+ yield key, value
+
+ def __getitem__(self, key):
+ return self._device.RunShellCommand(
+ 'content query --uri content://%s --where "name=\'%s\'" '
+ '--projection value' % (self._table, key), as_root=True).strip()
+
+ def __setitem__(self, key, value):
+ if key in self:
+ self._device.RunShellCommand(
+ 'content update --uri content://%s '
+ '--bind value:%s:%s --where "name=\'%s\'"' % (
+ self._table,
+ self._GetTypeBinding(value), value, key),
+ as_root=True)
+ else:
+ self._device.RunShellCommand(
+ 'content insert --uri content://%s '
+ '--bind name:%s:%s --bind value:%s:%s' % (
+ self._table,
+ self._GetTypeBinding(key), key,
+ self._GetTypeBinding(value), value),
+ as_root=True)
+
+ def __delitem__(self, key):
+ self._device.RunShellCommand(
+ 'content delete --uri content://%s '
+ '--bind name:%s:%s' % (
+ self._table,
+ self._GetTypeBinding(key), key),
+ as_root=True)
diff --git a/third_party/libwebrtc/build/android/pylib/device/__init__.py b/third_party/libwebrtc/build/android/pylib/device/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/device/__init__.py
diff --git a/third_party/libwebrtc/build/android/pylib/device/commands/BUILD.gn b/third_party/libwebrtc/build/android/pylib/device/commands/BUILD.gn
new file mode 100644
index 0000000000..13b69f618c
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/device/commands/BUILD.gn
@@ -0,0 +1,20 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/rules.gni")
+
+group("commands") {
+ data_deps = [ ":chromium_commands_java" ]
+}
+
+android_library("unzip_java") {
+ jacoco_never_instrument = true
+ sources = [ "java/src/org/chromium/android/commands/unzip/Unzip.java" ]
+}
+
+dist_dex("chromium_commands_java") {
+ deps = [ ":unzip_java" ]
+ output = "$root_build_dir/lib.java/chromium_commands.dex.jar"
+ data = [ output ]
+}
diff --git a/third_party/libwebrtc/build/android/pylib/device/commands/java/src/org/chromium/android/commands/unzip/Unzip.java b/third_party/libwebrtc/build/android/pylib/device/commands/java/src/org/chromium/android/commands/unzip/Unzip.java
new file mode 100644
index 0000000000..cf0ff67af2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/device/commands/java/src/org/chromium/android/commands/unzip/Unzip.java
@@ -0,0 +1,93 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.android.commands.unzip;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
+
+/**
+ * Minimal implementation of the command-line unzip utility for Android.
+ */
+public class Unzip {
+
+ private static final String TAG = "Unzip";
+
+ public static void main(String[] args) {
+ try {
+ (new Unzip()).run(args);
+ } catch (RuntimeException e) {
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ private void showUsage(PrintStream s) {
+ s.println("Usage:");
+ s.println("unzip [zipfile]");
+ }
+
+ @SuppressWarnings("Finally")
+ private void unzip(String[] args) {
+ ZipInputStream zis = null;
+ try {
+ String zipfile = args[0];
+ zis = new ZipInputStream(new BufferedInputStream(new FileInputStream(zipfile)));
+ ZipEntry ze = null;
+
+ byte[] bytes = new byte[1024];
+ while ((ze = zis.getNextEntry()) != null) {
+ File outputFile = new File(ze.getName());
+ if (ze.isDirectory()) {
+ if (!outputFile.exists() && !outputFile.mkdirs()) {
+ throw new RuntimeException(
+ "Failed to create directory: " + outputFile.toString());
+ }
+ } else {
+ File parentDir = outputFile.getParentFile();
+ if (!parentDir.exists() && !parentDir.mkdirs()) {
+ throw new RuntimeException(
+ "Failed to create directory: " + parentDir.toString());
+ }
+ OutputStream out = new BufferedOutputStream(new FileOutputStream(outputFile));
+ int actual_bytes = 0;
+ int total_bytes = 0;
+ while ((actual_bytes = zis.read(bytes)) != -1) {
+ out.write(bytes, 0, actual_bytes);
+ total_bytes += actual_bytes;
+ }
+ out.close();
+ }
+ zis.closeEntry();
+ }
+
+ } catch (IOException e) {
+ throw new RuntimeException("Error while unzipping", e);
+ } finally {
+ try {
+ if (zis != null) zis.close();
+ } catch (IOException e) {
+ throw new RuntimeException("Error while closing zip: " + e.toString());
+ }
+ }
+ }
+
+ public void run(String[] args) {
+ if (args.length != 1) {
+ showUsage(System.err);
+ throw new RuntimeException("Incorrect usage!");
+ }
+
+ unzip(args);
+ }
+}
+
diff --git a/third_party/libwebrtc/build/android/pylib/device_settings.py b/third_party/libwebrtc/build/android/pylib/device_settings.py
new file mode 100644
index 0000000000..c4d2bb3da3
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/device_settings.py
@@ -0,0 +1,201 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+import six
+
+from pylib import content_settings
+
+_LOCK_SCREEN_SETTINGS_PATH = '/data/system/locksettings.db'
+_ALTERNATE_LOCK_SCREEN_SETTINGS_PATH = (
+ '/data/data/com.android.providers.settings/databases/settings.db')
+PASSWORD_QUALITY_UNSPECIFIED = '0'
+_COMPATIBLE_BUILD_TYPES = ['userdebug', 'eng']
+
+
+def ConfigureContentSettings(device, desired_settings):
+ """Configures device content setings from a list.
+
+ Many settings are documented at:
+ http://developer.android.com/reference/android/provider/Settings.Global.html
+ http://developer.android.com/reference/android/provider/Settings.Secure.html
+ http://developer.android.com/reference/android/provider/Settings.System.html
+
+ Many others are undocumented.
+
+ Args:
+ device: A DeviceUtils instance for the device to configure.
+ desired_settings: A list of (table, [(key: value), ...]) for all
+ settings to configure.
+ """
+ for table, key_value in desired_settings:
+ settings = content_settings.ContentSettings(table, device)
+ for key, value in key_value:
+ settings[key] = value
+ logging.info('\n%s %s', table, (80 - len(table)) * '-')
+ for key, value in sorted(six.iteritems(settings)):
+ logging.info('\t%s: %s', key, value)
+
+
+def SetLockScreenSettings(device):
+  """Sets lock screen settings on the device.
+
+  On certain device/Android configurations we need to disable the lock screen in
+  a different database. Additionally, the password type must be set to
+  DevicePolicyManager.PASSWORD_QUALITY_UNSPECIFIED.
+  Lock screen settings are stored in sqlite on the device in:
+  /data/system/locksettings.db
+
+  IMPORTANT: The first column is used as a primary key so that all rows with the
+  same value for that column are removed from the table prior to inserting the
+  new values.
+
+  Args:
+    device: A DeviceUtils instance for the device to configure.
+
+  Raises:
+    Exception if the setting was not properly set.
+  """
+  # sqlite access to the settings databases requires root, which is only
+  # available on userdebug/eng builds.
+  if device.build_type not in _COMPATIBLE_BUILD_TYPES:
+    logging.warning('Unable to disable lockscreen on %s builds.',
+                    device.build_type)
+    return
+
+  def get_lock_settings(table):
+    # Each entry: (table, setting name, value as stored in sqlite).
+    return [(table, 'lockscreen.disabled', '1'),
+            (table, 'lockscreen.password_type', PASSWORD_QUALITY_UNSPECIFIED),
+            (table, 'lockscreen.password_type_alternate',
+             PASSWORD_QUALITY_UNSPECIFIED)]
+
+  # Newer configurations use locksettings.db (with a per-user column);
+  # older ones keep lock settings in the generic settings.db.
+  if device.FileExists(_LOCK_SCREEN_SETTINGS_PATH):
+    db = _LOCK_SCREEN_SETTINGS_PATH
+    locksettings = get_lock_settings('locksettings')
+    columns = ['name', 'user', 'value']
+    generate_values = lambda k, v: [k, '0', v]
+  elif device.FileExists(_ALTERNATE_LOCK_SCREEN_SETTINGS_PATH):
+    db = _ALTERNATE_LOCK_SCREEN_SETTINGS_PATH
+    locksettings = get_lock_settings('secure') + get_lock_settings('system')
+    columns = ['name', 'value']
+    generate_values = lambda k, v: [k, v]
+  else:
+    logging.warning('Unable to find database file to set lock screen settings.')
+    return
+
+  for table, key, value in locksettings:
+    # Set the lockscreen setting for default user '0'
+    values = generate_values(key, value)
+
+    # Delete-then-insert inside one transaction implements an upsert keyed
+    # on the first column (see IMPORTANT note in the docstring).
+    cmd = """begin transaction;
+delete from '%(table)s' where %(primary_key)s='%(primary_value)s';
+insert into '%(table)s' (%(columns)s) values (%(values)s);
+commit transaction;""" % {
+        'table': table,
+        'primary_key': columns[0],
+        'primary_value': values[0],
+        'columns': ', '.join(columns),
+        'values': ', '.join(["'%s'" % value for value in values])
+    }
+    output_msg = device.RunShellCommand('sqlite3 %s "%s"' % (db, cmd),
+                                        as_root=True)
+    # sqlite3 is normally silent on success; surface anything it printed.
+    if output_msg:
+      logging.info(' '.join(output_msg))
+
+
+ENABLE_LOCATION_SETTINGS = [
+ # Note that setting these in this order is required in order for all of
+ # them to take and stick through a reboot.
+ ('com.google.settings/partner', [
+ ('use_location_for_services', 1),
+ ]),
+ ('settings/secure', [
+ # Ensure Geolocation is enabled and allowed for tests.
+ ('location_providers_allowed', 'gps,network'),
+ ]),
+ ('com.google.settings/partner', [
+ ('network_location_opt_in', 1),
+ ])
+]
+
+DISABLE_LOCATION_SETTINGS = [
+ ('com.google.settings/partner', [
+ ('use_location_for_services', 0),
+ ]),
+ ('settings/secure', [
+ # Ensure Geolocation is disabled.
+ ('location_providers_allowed', ''),
+ ]),
+]
+
+ENABLE_MOCK_LOCATION_SETTINGS = [
+ ('settings/secure', [
+ ('mock_location', 1),
+ ]),
+]
+
+DISABLE_MOCK_LOCATION_SETTINGS = [
+ ('settings/secure', [
+ ('mock_location', 0),
+ ]),
+]
+
+DETERMINISTIC_DEVICE_SETTINGS = [
+ ('settings/global', [
+ ('assisted_gps_enabled', 0),
+
+ # Disable "auto time" and "auto time zone" to avoid network-provided time
+ # to overwrite the device's datetime and timezone synchronized from host
+ # when running tests later. See b/6569849.
+ ('auto_time', 0),
+ ('auto_time_zone', 0),
+
+ ('development_settings_enabled', 1),
+
+ # Flag for allowing ActivityManagerService to send ACTION_APP_ERROR intents
+ # on application crashes and ANRs. If this is disabled, the crash/ANR dialog
+ # will never display the "Report" button.
+ # Type: int ( 0 = disallow, 1 = allow )
+ ('send_action_app_error', 0),
+
+ ('stay_on_while_plugged_in', 3),
+
+ ('verifier_verify_adb_installs', 0),
+ ]),
+ ('settings/secure', [
+ ('allowed_geolocation_origins',
+ 'http://www.google.co.uk http://www.google.com'),
+
+ # Ensure that we never get random dialogs like "Unfortunately the process
+ # android.process.acore has stopped", which steal the focus, and make our
+ # automation fail (because the dialog steals the focus then mistakenly
+ # receives the injected user input events).
+ ('anr_show_background', 0),
+
+ ('lockscreen.disabled', 1),
+
+ ('screensaver_enabled', 0),
+
+ ('skip_first_use_hints', 1),
+ ]),
+ ('settings/system', [
+ # Don't want devices to accidentally rotate the screen as that could
+ # affect performance measurements.
+ ('accelerometer_rotation', 0),
+
+ ('lockscreen.disabled', 1),
+
+ # Turn down brightness and disable auto-adjust so that devices run cooler.
+ ('screen_brightness', 5),
+ ('screen_brightness_mode', 0),
+
+ ('user_rotation', 0),
+ ]),
+]
+
+NETWORK_DISABLED_SETTINGS = [
+ ('settings/global', [
+ ('airplane_mode_on', 1),
+ ('wifi_on', 0),
+ ]),
+]
diff --git a/third_party/libwebrtc/build/android/pylib/dex/__init__.py b/third_party/libwebrtc/build/android/pylib/dex/__init__.py
new file mode 100644
index 0000000000..4a12e35c92
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/dex/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/dex/dex_parser.py b/third_party/libwebrtc/build/android/pylib/dex/dex_parser.py
new file mode 100755
index 0000000000..1ff8d25276
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/dex/dex_parser.py
@@ -0,0 +1,551 @@
+#!/usr/bin/env python3
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utilities for optimistically parsing dex files.
+
+This file is not meant to provide a generic tool for analyzing dex files.
+A DexFile class that exposes access to several memory items in the dex format
+is provided, but it does not include error handling or validation.
+"""
+
+
+
+import argparse
+import collections
+import errno
+import os
+import re
+import struct
+import sys
+import zipfile
+
+# https://source.android.com/devices/tech/dalvik/dex-format#header-item
+_DEX_HEADER_FMT = (
+ ('magic', '8s'),
+ ('checksum', 'I'),
+ ('signature', '20s'),
+ ('file_size', 'I'),
+ ('header_size', 'I'),
+ ('endian_tag', 'I'),
+ ('link_size', 'I'),
+ ('link_off', 'I'),
+ ('map_off', 'I'),
+ ('string_ids_size', 'I'),
+ ('string_ids_off', 'I'),
+ ('type_ids_size', 'I'),
+ ('type_ids_off', 'I'),
+ ('proto_ids_size', 'I'),
+ ('proto_ids_off', 'I'),
+ ('field_ids_size', 'I'),
+ ('field_ids_off', 'I'),
+ ('method_ids_size', 'I'),
+ ('method_ids_off', 'I'),
+ ('class_defs_size', 'I'),
+ ('class_defs_off', 'I'),
+ ('data_size', 'I'),
+ ('data_off', 'I'),
+)
+
+DexHeader = collections.namedtuple('DexHeader',
+ ','.join(t[0] for t in _DEX_HEADER_FMT))
+
+# Simple memory items.
+_TypeIdItem = collections.namedtuple('TypeIdItem', 'descriptor_idx')
+_ProtoIdItem = collections.namedtuple(
+ 'ProtoIdItem', 'shorty_idx,return_type_idx,parameters_off')
+_MethodIdItem = collections.namedtuple('MethodIdItem',
+ 'type_idx,proto_idx,name_idx')
+_TypeItem = collections.namedtuple('TypeItem', 'type_idx')
+_StringDataItem = collections.namedtuple('StringItem', 'utf16_size,data')
+_ClassDefItem = collections.namedtuple(
+ 'ClassDefItem',
+ 'class_idx,access_flags,superclass_idx,interfaces_off,source_file_idx,'
+ 'annotations_off,class_data_off,static_values_off')
+
+
+class _MemoryItemList(object):
+ """Base class for repeated memory items."""
+
+ def __init__(self,
+ reader,
+ offset,
+ size,
+ factory,
+ alignment=None,
+ first_item_offset=None):
+ """Creates the item list using the specific item factory.
+
+ Args:
+ reader: _DexReader used for decoding the memory item.
+ offset: Offset from start of the file to the item list, serving as the
+ key for some item types.
+ size: Number of memory items in the list.
+ factory: Function to extract each memory item from a _DexReader.
+ alignment: Optional integer specifying the alignment for the memory
+ section represented by this list.
+ first_item_offset: Optional, specifies a different offset to use for
+ extracting memory items (default is to use offset).
+ """
+ self.offset = offset
+ self.size = size
+ reader.Seek(first_item_offset or offset)
+ self._items = [factory(reader) for _ in range(size)]
+
+ if alignment:
+ reader.AlignUpTo(alignment)
+
+ def __iter__(self):
+ return iter(self._items)
+
+ def __getitem__(self, key):
+ return self._items[key]
+
+ def __len__(self):
+ return len(self._items)
+
+ def __repr__(self):
+ item_type_part = ''
+ if self.size != 0:
+ item_type = type(self._items[0])
+ item_type_part = ', item type={}'.format(item_type.__name__)
+
+ return '{}(offset={:#x}, size={}{})'.format(
+ type(self).__name__, self.offset, self.size, item_type_part)
+
+
+class _TypeIdItemList(_MemoryItemList):
+
+ def __init__(self, reader, offset, size):
+ factory = lambda x: _TypeIdItem(x.ReadUInt())
+ super(_TypeIdItemList, self).__init__(reader, offset, size, factory)
+
+
+class _ProtoIdItemList(_MemoryItemList):
+
+ def __init__(self, reader, offset, size):
+ factory = lambda x: _ProtoIdItem(x.ReadUInt(), x.ReadUInt(), x.ReadUInt())
+ super(_ProtoIdItemList, self).__init__(reader, offset, size, factory)
+
+
+class _MethodIdItemList(_MemoryItemList):
+
+ def __init__(self, reader, offset, size):
+ factory = (
+ lambda x: _MethodIdItem(x.ReadUShort(), x.ReadUShort(), x.ReadUInt()))
+ super(_MethodIdItemList, self).__init__(reader, offset, size, factory)
+
+
+class _StringItemList(_MemoryItemList):
+
+ def __init__(self, reader, offset, size):
+ reader.Seek(offset)
+ string_item_offsets = iter([reader.ReadUInt() for _ in range(size)])
+
+ def factory(x):
+ data_offset = next(string_item_offsets)
+ string = x.ReadString(data_offset)
+ return _StringDataItem(len(string), string)
+
+ super(_StringItemList, self).__init__(reader, offset, size, factory)
+
+
+class _TypeListItem(_MemoryItemList):
+
+ def __init__(self, reader):
+ offset = reader.Tell()
+ size = reader.ReadUInt()
+ factory = lambda x: _TypeItem(x.ReadUShort())
+ # This is necessary because we need to extract the size of the type list
+ # (in other cases the list size is provided in the header).
+ first_item_offset = reader.Tell()
+ super(_TypeListItem, self).__init__(
+ reader,
+ offset,
+ size,
+ factory,
+ alignment=4,
+ first_item_offset=first_item_offset)
+
+
+class _TypeListItemList(_MemoryItemList):
+
+ def __init__(self, reader, offset, size):
+ super(_TypeListItemList, self).__init__(reader, offset, size, _TypeListItem)
+
+
+class _ClassDefItemList(_MemoryItemList):
+
+ def __init__(self, reader, offset, size):
+ reader.Seek(offset)
+
+ def factory(x):
+ return _ClassDefItem(*(x.ReadUInt()
+ for _ in range(len(_ClassDefItem._fields))))
+
+ super(_ClassDefItemList, self).__init__(reader, offset, size, factory)
+
+
+class _DexMapItem(object):
+
+ def __init__(self, reader):
+ self.type = reader.ReadUShort()
+ reader.ReadUShort()
+ self.size = reader.ReadUInt()
+ self.offset = reader.ReadUInt()
+
+ def __repr__(self):
+ return '_DexMapItem(type={}, size={}, offset={:#x})'.format(
+ self.type, self.size, self.offset)
+
+
+class _DexMapList(object):
+ # Full list of type codes:
+ # https://source.android.com/devices/tech/dalvik/dex-format#type-codes
+ TYPE_TYPE_LIST = 0x1001
+
+ def __init__(self, reader, offset):
+ self._map = {}
+ reader.Seek(offset)
+ self._size = reader.ReadUInt()
+ for _ in range(self._size):
+ item = _DexMapItem(reader)
+ self._map[item.type] = item
+
+ def __getitem__(self, key):
+ return self._map[key]
+
+ def __contains__(self, key):
+ return key in self._map
+
+ def __repr__(self):
+ return '_DexMapList(size={}, items={})'.format(self._size, self._map)
+
+
+class _DexReader(object):
+
+ def __init__(self, data):
+ self._data = data
+ self._pos = 0
+
+ def Seek(self, offset):
+ self._pos = offset
+
+ def Tell(self):
+ return self._pos
+
+ def ReadUByte(self):
+ return self._ReadData('<B')
+
+ def ReadUShort(self):
+ return self._ReadData('<H')
+
+ def ReadUInt(self):
+ return self._ReadData('<I')
+
+ def ReadString(self, data_offset):
+ string_length, string_offset = self._ReadULeb128(data_offset)
+ string_data_offset = string_offset + data_offset
+ return self._DecodeMUtf8(string_length, string_data_offset)
+
+ def AlignUpTo(self, align_unit):
+ off_by = self._pos % align_unit
+ if off_by:
+ self.Seek(self._pos + align_unit - off_by)
+
+ def ReadHeader(self):
+ header_fmt = '<' + ''.join(t[1] for t in _DEX_HEADER_FMT)
+ return DexHeader._make(struct.unpack_from(header_fmt, self._data))
+
+ def _ReadData(self, fmt):
+ ret = struct.unpack_from(fmt, self._data, self._pos)[0]
+ self._pos += struct.calcsize(fmt)
+ return ret
+
+ def _ReadULeb128(self, data_offset):
+ """Returns a tuple of (uleb128 value, number of bytes occupied).
+
+ From DWARF3 spec: http://dwarfstd.org/doc/Dwarf3.pdf
+
+ Args:
+ data_offset: Location of the unsigned LEB128.
+ """
+ value = 0
+ shift = 0
+ cur_offset = data_offset
+ while True:
+ byte = self._data[cur_offset]
+ cur_offset += 1
+ value |= (byte & 0b01111111) << shift
+ if (byte & 0b10000000) == 0:
+ break
+ shift += 7
+
+ return value, cur_offset - data_offset
+
+ def _DecodeMUtf8(self, string_length, offset):
+ """Returns the string located at the specified offset.
+
+ See https://source.android.com/devices/tech/dalvik/dex-format#mutf-8
+
+ Ported from the Android Java implementation:
+ https://android.googlesource.com/platform/dalvik/+/fe107fb6e3f308ac5174ebdc5a794ee880c741d9/dx/src/com/android/dex/Mutf8.java#34
+
+ Args:
+ string_length: The length of the decoded string.
+ offset: Offset to the beginning of the string.
+ """
+ self.Seek(offset)
+ ret = ''
+
+ for _ in range(string_length):
+ a = self.ReadUByte()
+ if a == 0:
+ raise _MUTf8DecodeError('Early string termination encountered',
+ string_length, offset)
+ if (a & 0x80) == 0x00:
+ code = a
+ elif (a & 0xe0) == 0xc0:
+ b = self.ReadUByte()
+ if (b & 0xc0) != 0x80:
+ raise _MUTf8DecodeError('Error in byte 2', string_length, offset)
+ code = ((a & 0x1f) << 6) | (b & 0x3f)
+ elif (a & 0xf0) == 0xe0:
+ b = self.ReadUByte()
+ c = self.ReadUByte()
+ if (b & 0xc0) != 0x80 or (c & 0xc0) != 0x80:
+ raise _MUTf8DecodeError('Error in byte 3 or 4', string_length, offset)
+ code = ((a & 0x0f) << 12) | ((b & 0x3f) << 6) | (c & 0x3f)
+ else:
+ raise _MUTf8DecodeError('Bad byte', string_length, offset)
+
+ try:
+ ret += unichr(code)
+ except NameError:
+ ret += chr(code)
+
+ if self.ReadUByte() != 0x00:
+ raise _MUTf8DecodeError('Expected string termination', string_length,
+ offset)
+
+ return ret
+
+
+class _MUTf8DecodeError(Exception):
+
+ def __init__(self, message, length, offset):
+ message += ' (decoded string length: {}, string data offset: {:#x})'.format(
+ length, offset)
+ super(_MUTf8DecodeError, self).__init__(message)
+
+
+class DexFile(object):
+ """Represents a single dex file.
+
+ Parses and exposes access to dex file structure and contents, as described
+ at https://source.android.com/devices/tech/dalvik/dex-format
+
+ Fields:
+ reader: _DexReader object used to decode dex file contents.
+ header: DexHeader for this dex file.
+ map_list: _DexMapList object containing list of dex file contents.
+ type_item_list: _TypeIdItemList containing type_id_items.
+ proto_item_list: _ProtoIdItemList containing proto_id_items.
+ method_item_list: _MethodIdItemList containing method_id_items.
+ string_item_list: _StringItemList containing string_data_items that are
+ referenced by index in other sections.
+ type_list_item_list: _TypeListItemList containing _TypeListItems.
+ _TypeListItems are referenced by their offsets from other dex items.
+ class_def_item_list: _ClassDefItemList containing _ClassDefItems.
+ """
+ _CLASS_ACCESS_FLAGS = {
+ 0x1: 'public',
+ 0x2: 'private',
+ 0x4: 'protected',
+ 0x8: 'static',
+ 0x10: 'final',
+ 0x200: 'interface',
+ 0x400: 'abstract',
+ 0x1000: 'synthetic',
+ 0x2000: 'annotation',
+ 0x4000: 'enum',
+ }
+
+ def __init__(self, data):
+ """Decodes dex file memory sections.
+
+ Args:
+ data: bytearray containing the contents of a dex file.
+ """
+ self.reader = _DexReader(data)
+ self.header = self.reader.ReadHeader()
+ self.map_list = _DexMapList(self.reader, self.header.map_off)
+ self.type_item_list = _TypeIdItemList(self.reader, self.header.type_ids_off,
+ self.header.type_ids_size)
+ self.proto_item_list = _ProtoIdItemList(
+ self.reader, self.header.proto_ids_off, self.header.proto_ids_size)
+ self.method_item_list = _MethodIdItemList(
+ self.reader, self.header.method_ids_off, self.header.method_ids_size)
+ self.string_item_list = _StringItemList(
+ self.reader, self.header.string_ids_off, self.header.string_ids_size)
+ self.class_def_item_list = _ClassDefItemList(
+ self.reader, self.header.class_defs_off, self.header.class_defs_size)
+
+ type_list_key = _DexMapList.TYPE_TYPE_LIST
+ if type_list_key in self.map_list:
+ map_list_item = self.map_list[type_list_key]
+ self.type_list_item_list = _TypeListItemList(
+ self.reader, map_list_item.offset, map_list_item.size)
+ else:
+ self.type_list_item_list = _TypeListItemList(self.reader, 0, 0)
+ self._type_lists_by_offset = {
+ type_list.offset: type_list
+ for type_list in self.type_list_item_list
+ }
+
+ def GetString(self, string_item_idx):
+ string_item = self.string_item_list[string_item_idx]
+ return string_item.data
+
+ def GetTypeString(self, type_item_idx):
+ type_item = self.type_item_list[type_item_idx]
+ return self.GetString(type_item.descriptor_idx)
+
+ def GetTypeListStringsByOffset(self, offset):
+ if not offset:
+ return ()
+ type_list = self._type_lists_by_offset[offset]
+ return tuple(self.GetTypeString(item.type_idx) for item in type_list)
+
+ @staticmethod
+ def ResolveClassAccessFlags(access_flags):
+ return tuple(flag_string
+ for flag, flag_string in DexFile._CLASS_ACCESS_FLAGS.items()
+ if flag & access_flags)
+
+ def IterMethodSignatureParts(self):
+ """Yields the string components of dex methods in a dex file.
+
+ Yields:
+ Tuples that look like:
+ (class name, return type, method name, (parameter type, ...)).
+ """
+ for method_item in self.method_item_list:
+ class_name_string = self.GetTypeString(method_item.type_idx)
+ method_name_string = self.GetString(method_item.name_idx)
+ proto_item = self.proto_item_list[method_item.proto_idx]
+ return_type_string = self.GetTypeString(proto_item.return_type_idx)
+ parameter_types = self.GetTypeListStringsByOffset(
+ proto_item.parameters_off)
+ yield (class_name_string, return_type_string, method_name_string,
+ parameter_types)
+
+ def __repr__(self):
+ items = [
+ self.header,
+ self.map_list,
+ self.type_item_list,
+ self.proto_item_list,
+ self.method_item_list,
+ self.string_item_list,
+ self.type_list_item_list,
+ self.class_def_item_list,
+ ]
+ return '\n'.join(str(item) for item in items)
+
+
+class _DumpCommand(object):
+
+ def __init__(self, dexfile):
+ self._dexfile = dexfile
+
+ def Run(self):
+ raise NotImplementedError()
+
+
+class _DumpMethods(_DumpCommand):
+
+ def Run(self):
+ for parts in self._dexfile.IterMethodSignatureParts():
+ class_type, return_type, method_name, parameter_types = parts
+ print('{} {} (return type={}, parameters={})'.format(
+ class_type, method_name, return_type, parameter_types))
+
+
+class _DumpStrings(_DumpCommand):
+
+  def Run(self):
+    for string_item in self._dexfile.string_item_list:
+      # Some strings are likely to be non-ascii (vs. methods/classes).
+      # NOTE(review): under Python 3, print() of a bytes object emits the
+      # b'...' repr rather than the raw text — confirm whether that output
+      # format is intended by downstream consumers.
+      print(string_item.data.encode('utf-8'))
+
+
+class _DumpClasses(_DumpCommand):
+
+ def Run(self):
+ for class_item in self._dexfile.class_def_item_list:
+ class_string = self._dexfile.GetTypeString(class_item.class_idx)
+ superclass_string = self._dexfile.GetTypeString(class_item.superclass_idx)
+ interfaces = self._dexfile.GetTypeListStringsByOffset(
+ class_item.interfaces_off)
+ access_flags = DexFile.ResolveClassAccessFlags(class_item.access_flags)
+ print('{} (superclass={}, interfaces={}, access_flags={})'.format(
+ class_string, superclass_string, interfaces, access_flags))
+
+
+class _DumpSummary(_DumpCommand):
+
+ def Run(self):
+ print(self._dexfile)
+
+
+def _DumpDexItems(dexfile_data, name, item):
+ dexfile = DexFile(bytearray(dexfile_data))
+ print('dex_parser: Dumping {} for {}'.format(item, name))
+ cmds = {
+ 'summary': _DumpSummary,
+ 'methods': _DumpMethods,
+ 'strings': _DumpStrings,
+ 'classes': _DumpClasses,
+ }
+ try:
+ cmds[item](dexfile).Run()
+ except IOError as e:
+ if e.errno == errno.EPIPE:
+ # Assume we're piping to "less", do nothing.
+ pass
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Dump dex contents to stdout.')
+ parser.add_argument(
+ 'input', help='Input (.dex, .jar, .zip, .aab, .apk) file path.')
+ parser.add_argument(
+ 'item',
+ choices=('methods', 'strings', 'classes', 'summary'),
+ help='Item to dump',
+ nargs='?',
+ default='summary')
+ args = parser.parse_args()
+
+ if os.path.splitext(args.input)[1] in ('.apk', '.jar', '.zip', '.aab'):
+ with zipfile.ZipFile(args.input) as z:
+ dex_file_paths = [
+ f for f in z.namelist() if re.match(r'.*classes[0-9]*\.dex$', f)
+ ]
+ if not dex_file_paths:
+ print('Error: {} does not contain any classes.dex files'.format(
+ args.input))
+ sys.exit(1)
+
+ for path in dex_file_paths:
+ _DumpDexItems(z.read(path), path, args.item)
+
+ else:
+ with open(args.input) as f:
+ _DumpDexItems(f.read(), args.input, args.item)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/__init__.py b/third_party/libwebrtc/build/android/pylib/gtest/__init__.py
new file mode 100644
index 0000000000..96196cffb2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/filter/OWNERS b/third_party/libwebrtc/build/android/pylib/gtest/filter/OWNERS
new file mode 100644
index 0000000000..72e8ffc0db
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/filter/OWNERS
@@ -0,0 +1 @@
+*
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/filter/base_unittests_disabled b/third_party/libwebrtc/build/android/pylib/gtest/filter/base_unittests_disabled
new file mode 100644
index 0000000000..533d3e167b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/filter/base_unittests_disabled
@@ -0,0 +1,25 @@
+# List of suppressions
+
+# Android will not support StackTrace.
+StackTrace.*
+#
+# Sometimes this is automatically generated by run_tests.py
+VerifyPathControlledByUserTest.Symlinks
+
+# http://crbug.com/138845
+MessagePumpLibeventTest.TestWatchingFromBadThread
+
+StringPrintfTest.StringPrintfMisc
+StringPrintfTest.StringAppendfString
+StringPrintfTest.StringAppendfInt
+StringPrintfTest.StringPrintfBounds
+# TODO(jrg): Fails on bots. Works locally. Figure out why. 2/6/12
+FieldTrialTest.*
+# Flaky?
+ScopedJavaRefTest.RefCounts
+FileTest.MemoryCorruption
+MessagePumpLibeventTest.QuitOutsideOfRun
+ScopedFD.ScopedFDCrashesOnCloseFailure
+
+# http://crbug.com/245043
+StackContainer.BufferAlignment
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/filter/base_unittests_emulator_additional_disabled b/third_party/libwebrtc/build/android/pylib/gtest/filter/base_unittests_emulator_additional_disabled
new file mode 100644
index 0000000000..6bec7d015b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/filter/base_unittests_emulator_additional_disabled
@@ -0,0 +1,10 @@
+# Additional list of suppressions from emulator
+#
+# Automatically generated by run_tests.py
+PathServiceTest.Get
+SharedMemoryTest.OpenClose
+StringPrintfTest.StringAppendfInt
+StringPrintfTest.StringAppendfString
+StringPrintfTest.StringPrintfBounds
+StringPrintfTest.StringPrintfMisc
+VerifyPathControlledByUserTest.Symlinks
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/filter/breakpad_unittests_disabled b/third_party/libwebrtc/build/android/pylib/gtest/filter/breakpad_unittests_disabled
new file mode 100644
index 0000000000..cefc64fd5e
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/filter/breakpad_unittests_disabled
@@ -0,0 +1,9 @@
+FileIDStripTest.StripSelf
+# crbug.com/303960
+ExceptionHandlerTest.InstructionPointerMemoryNullPointer
+# crbug.com/171419
+MinidumpWriterTest.MappingInfoContained
+# crbug.com/310088
+MinidumpWriterTest.MinidumpSizeLimit
+# crbug.com/375838
+ElfCoreDumpTest.ValidCoreFile
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/filter/content_browsertests_disabled b/third_party/libwebrtc/build/android/pylib/gtest/filter/content_browsertests_disabled
new file mode 100644
index 0000000000..9c891214de
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/filter/content_browsertests_disabled
@@ -0,0 +1,45 @@
+# List of suppressions
+# Timeouts
+DatabaseTest.*
+
+# Crashes
+RenderFrameHostManagerTest.IgnoreRendererDebugURLsWhenCrashed
+
+# Plugins are not supported.
+BrowserPluginThreadedCompositorPixelTest.*
+BrowserPluginHostTest.*
+BrowserPluginTest.*
+PluginTest.*
+
+# http://crbug.com/463740
+CrossPlatformAccessibilityBrowserTest.SelectedEditableTextAccessibility
+
+# http://crbug.com/297230
+RenderAccessibilityImplTest.DetachAccessibilityObject
+
+# http://crbug.com/187500
+RenderViewImplTest.ImeComposition
+RenderViewImplTest.InsertCharacters
+RenderViewImplTest.OnHandleKeyboardEvent
+RenderViewImplTest.OnNavStateChanged
+# ZoomLevel is not used on Android
+RenderFrameImplTest.ZoomLimit
+RendererAccessibilityTest.SendFullAccessibilityTreeOnReload
+RendererAccessibilityTest.HideAccessibilityObject
+RendererAccessibilityTest.ShowAccessibilityObject
+RendererAccessibilityTest.TextSelectionShouldSendRoot
+
+# http://crbug.com/386227
+IndexedDBBrowserTest.VersionChangeCrashResilience
+
+# http://crbug.com/233118
+IndexedDBBrowserTest.NullKeyPathPersistence
+
+# http://crbug.com/338421
+GinBrowserTest.GinAndGarbageCollection
+
+# http://crbug.com/343604
+MSE_ClearKey/EncryptedMediaTest.ConfigChangeVideo/0
+
+# http://crbug.com/1039450
+ProprietaryCodec/WebRtcMediaRecorderTest.*
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/filter/unit_tests_disabled b/third_party/libwebrtc/build/android/pylib/gtest/filter/unit_tests_disabled
new file mode 100644
index 0000000000..97811c83a4
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/filter/unit_tests_disabled
@@ -0,0 +1,74 @@
+# List of suppressions
+
+# The UDP related tests currently do not work on Android because
+# we lack a UDP forwarder tool.
+NetworkStatsTestUDP.*
+
+# Missing test resource of 16MB.
+HistoryProfileTest.TypicalProfileVersion
+
+# crbug.com/139408
+SQLitePersistentCookieStoreTest.TestDontLoadOldSessionCookies
+SQLitePersistentCookieStoreTest.PersistIsPersistent
+
+# crbug.com/139433
+AutofillTableTest.AutofillProfile*
+AutofillTableTest.UpdateAutofillProfile
+
+# crbug.com/139400
+AutofillProfileTest.*
+CreditCardTest.SetInfoExpirationMonth
+
+# Tests crashing in the APK
+# l10n_util.cc(655)] Check failed: std::string::npos != pos
+DownloadItemModelTest.InterruptStatus
+# l10n_util.cc(655)] Check failed: std::string::npos != pos
+PageInfoTest.OnSiteDataAccessed
+
+# crbug.com/139423
+ValueStoreFrontendTest.GetExistingData
+
+# crbug.com/139421
+ChromeSelectFilePolicyTest.ExpectAsynchronousListenerCall
+
+# http://crbug.com/139033
+ChromeDownloadManagerDelegateTest.StartDownload_PromptAlways
+
+# crbug.com/139411
+AutocompleteProviderTest.*
+HistoryContentsProviderBodyOnlyTest.*
+HistoryContentsProviderTest.*
+HQPOrderingTest.*
+SearchProviderTest.*
+
+ProtocolHandlerRegistryTest.TestOSRegistrationFailure
+
+# crbug.com/139418
+SQLiteServerBoundCertStoreTest.TestUpgradeV1
+SQLiteServerBoundCertStoreTest.TestUpgradeV2
+
+ProfileSyncComponentsFactoryImplTest.*
+PermissionsTest.GetWarningMessages_Plugins
+ImageOperations.ResizeShouldAverageColors
+
+# crbug.com/139643
+VariationsUtilTest.DisableAfterInitialization
+VariationsUtilTest.AssociateGoogleVariationID
+VariationsUtilTest.NoAssociation
+
+# crbug.com/141473
+AutofillManagerTest.UpdatePasswordSyncState
+AutofillManagerTest.UpdatePasswordGenerationState
+
+# crbug.com/145843
+EntropyProviderTest.UseOneTimeRandomizationSHA1
+EntropyProviderTest.UseOneTimeRandomizationPermuted
+
+# crbug.com/147500
+ManifestTest.RestrictedKeys
+
+# crbug.com/256259
+DiagnosticsModelTest.RunAll
+
+# Death tests are not supported with apks.
+*DeathTest*
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/gtest_config.py b/third_party/libwebrtc/build/android/pylib/gtest/gtest_config.py
new file mode 100644
index 0000000000..3ac195586c
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/gtest_config.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Configuration file for android gtest suites."""
+
+# Add new suites here before upgrading them to the stable list below.
+EXPERIMENTAL_TEST_SUITES = [
+ 'components_browsertests',
+ 'heap_profiler_unittests',
+ 'devtools_bridge_tests',
+]
+
+TELEMETRY_EXPERIMENTAL_TEST_SUITES = [
+ 'telemetry_unittests',
+]
+
+# Do not modify this list without approval of an android owner.
+# This list determines which suites are run by default, both for local
+# testing and on android trybots running on commit-queue.
+STABLE_TEST_SUITES = [
+ 'android_webview_unittests',
+ 'base_unittests',
+ 'blink_unittests',
+ 'breakpad_unittests',
+ 'cc_unittests',
+ 'components_unittests',
+ 'content_browsertests',
+ 'content_unittests',
+ 'events_unittests',
+ 'gl_tests',
+ 'gl_unittests',
+ 'gpu_unittests',
+ 'ipc_tests',
+ 'media_unittests',
+ 'midi_unittests',
+ 'net_unittests',
+ 'sandbox_linux_unittests',
+ 'skia_unittests',
+ 'sql_unittests',
+ 'storage_unittests',
+ 'ui_android_unittests',
+ 'ui_base_unittests',
+ 'ui_touch_selection_unittests',
+ 'unit_tests_apk',
+]
+
+# Tests fail in component=shared_library build, which is required for ASan.
+# http://crbug.com/344868
+ASAN_EXCLUDED_TEST_SUITES = [
+ 'breakpad_unittests',
+ 'sandbox_linux_unittests',
+
+ # The internal ASAN recipe cannot run step "unit_tests_apk", this is the
+ # only internal recipe affected. See http://crbug.com/607850
+ 'unit_tests_apk',
+]
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/gtest_test_instance.py b/third_party/libwebrtc/build/android/pylib/gtest/gtest_test_instance.py
new file mode 100644
index 0000000000..5800992b64
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/gtest_test_instance.py
@@ -0,0 +1,627 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+
+import json
+import logging
+import os
+import re
+import tempfile
+import threading
+import xml.etree.ElementTree
+
+import six
+from devil.android import apk_helper
+from pylib import constants
+from pylib.constants import host_paths
+from pylib.base import base_test_result
+from pylib.base import test_instance
+from pylib.symbols import stack_symbolizer
+from pylib.utils import test_filter
+
+
+with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
+ import unittest_util # pylint: disable=import-error
+
+
# Suites that run browser tests; they get one test per shard and a scaled-up
# shard timeout (see GtestTestInstance.__init__).
BROWSER_TEST_SUITES = [
    'android_browsertests',
    'android_sync_integration_tests',
    'components_browsertests',
    'content_browsertests',
    'hybrid_browsertest',
    'weblayer_browsertests',
]

# The max number of tests to run on a shard during the test run.
MAX_SHARDS = 256

RUN_IN_SUB_THREAD_TEST_SUITES = [
    # Multiprocess tests should be run outside of the main thread.
    'base_unittests',  # file_locking_unittest.cc uses a child process.
    'gwp_asan_unittests',
    'ipc_perftests',
    'ipc_tests',
    'mojo_perftests',
    'mojo_unittests',
    'net_unittests'
]


# Used for filtering large data deps at a finer grain than what's allowed in
# isolate files since pushing deps to devices is expensive.
# Wildcards are allowed.
_DEPS_EXCLUSION_LIST = [
    'chrome/test/data/extensions/api_test',
    'chrome/test/data/extensions/secure_shell',
    'chrome/test/data/firefox*',
    'chrome/test/data/gpu',
    'chrome/test/data/image_decoding',
    'chrome/test/data/import',
    'chrome/test/data/page_cycler',
    'chrome/test/data/perf',
    'chrome/test/data/pyauto_private',
    'chrome/test/data/safari_import',
    'chrome/test/data/scroll',
    'chrome/test/data/third_party',
    'third_party/hunspell_dictionaries/*.dic',
    # crbug.com/258690
    'webkit/data/bmp_decoder',
    'webkit/data/ico_decoder',
]


# Instrumentation extras understood by NativeTestInstrumentationTestRunner.
_EXTRA_NATIVE_TEST_ACTIVITY = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner.'
    'NativeTestActivity')
_EXTRA_RUN_IN_SUB_THREAD = (
    'org.chromium.native_test.NativeTest.RunInSubThread')
EXTRA_SHARD_NANO_TIMEOUT = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner.'
    'ShardNanoTimeout')
_EXTRA_SHARD_SIZE_LIMIT = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner.'
    'ShardSizeLimit')

# TODO(jbudorick): Remove these once we're no longer parsing stdout to generate
# results.
_RE_TEST_STATUS = re.compile(
    # Test state.
    r'\[ +((?:RUN)|(?:FAILED)|(?:OK)|(?:CRASHED)|(?:SKIPPED)) +\] ?'
    # Test name.
    r'([^ ]+)?'
    # Optional parameters.
    r'(?:, where'
    # Type parameter
    r'(?: TypeParam = [^()]*(?: and)?)?'
    # Value parameter
    r'(?: GetParam\(\) = [^()]*)?'
    # End of optional parameters.
    ')?'
    # Optional test execution time.
    r'(?: \((\d+) ms\))?$')
# Crash detection constants.
_RE_TEST_ERROR = re.compile(r'FAILURES!!! Tests run: \d+,'
                            r' Failures: \d+, Errors: 1')
_RE_TEST_CURRENTLY_RUNNING = re.compile(
    r'\[ERROR:.*?\] Currently running: (.*)')
_RE_TEST_DCHECK_FATAL = re.compile(r'\[.*:FATAL:.*\] (.*)')
_RE_DISABLED = re.compile(r'DISABLED_')
_RE_FLAKY = re.compile(r'FLAKY_')

# Detect stack line in stdout.
_STACK_LINE_RE = re.compile(r'\s*#\d+')
+
def ParseGTestListTests(raw_list):
  """Parses a raw test list as provided by --gtest_list_tests.

  Args:
    raw_list: The raw test listing with the following format:

    IPCChannelTest.
      SendMessageInChannelConnected
    IPCSyncChannelTest.
      Simple
      DISABLED_SendWithTimeoutMixedOKAndTimeout

  Returns:
    A list of all tests. For the above raw listing:

    [IPCChannelTest.SendMessageInChannelConnected, IPCSyncChannelTest.Simple,
    IPCSyncChannelTest.DISABLED_SendWithTimeoutMixedOKAndTimeout]
  """
  ret = []
  current = ''
  for test in raw_list:
    if not test:
      continue
    if not test.startswith(' '):
      # Unindented line: a test-case name, possibly followed by a
      # '# TypeParam = ...' annotation that split() discards.
      test_case = test.split()[0]
      if test_case.endswith('.'):
        current = test_case
    else:
      # Indented line: a test belonging to the current test case. Skip
      # gtest's 'YOU HAVE n DISABLED TESTS' trailer.
      test = test.strip()
      if test and 'YOU HAVE' not in test:
        # split() drops a trailing '# GetParam() = ...' annotation.
        test_name = test.split()[0]
        ret.append(current + test_name)
  return ret
+
+
def ParseGTestOutput(output, symbolizer, device_abi):
  """Parses raw gtest output and returns a list of results.

  Args:
    output: A list of output lines.
    symbolizer: The symbolizer used to symbolize stack.
    device_abi: Device abi that is needed for symbolization.
  Returns:
    A list of base_test_result.BaseTestResults.
  """
  duration = 0
  # Recorded when a '[ CRASHED ]' status is seen; used for tests that never
  # produce a definitive OK/FAILED/SKIPPED line.
  fallback_result_type = None
  log = []
  stack = []
  result_type = None
  results = []
  test_name = None

  # Joins the accumulated log lines with the (symbolized) stack lines.
  def symbolize_stack_and_merge_with_log():
    log_string = '\n'.join(log or [])
    if not stack:
      stack_string = ''
    else:
      stack_string = '\n'.join(
          symbolizer.ExtractAndResolveNativeStackTraces(
              stack, device_abi))
    return '%s\n%s' % (log_string, stack_string)

  # Emits a result for a test that started but never reported a status.
  def handle_possibly_unknown_test():
    if test_name is not None:
      results.append(
          base_test_result.BaseTestResult(
              TestNameWithoutDisabledPrefix(test_name),
              # If we get here, that means we started a test, but it did not
              # produce a definitive test status output, so assume it crashed.
              # crbug/1191716
              fallback_result_type or base_test_result.ResultType.CRASH,
              duration,
              log=symbolize_stack_and_merge_with_log()))

  for l in output:
    matcher = _RE_TEST_STATUS.match(l)
    if matcher:
      if matcher.group(1) == 'RUN':
        # A new test is starting: flush any previous test that never
        # reported a status, then reset the per-test state.
        handle_possibly_unknown_test()
        duration = 0
        fallback_result_type = None
        log = []
        stack = []
        result_type = None
      elif matcher.group(1) == 'OK':
        result_type = base_test_result.ResultType.PASS
      elif matcher.group(1) == 'SKIPPED':
        result_type = base_test_result.ResultType.SKIP
      elif matcher.group(1) == 'FAILED':
        result_type = base_test_result.ResultType.FAIL
      elif matcher.group(1) == 'CRASHED':
        fallback_result_type = base_test_result.ResultType.CRASH
      # Be aware that test name and status might not appear on same line.
      test_name = matcher.group(2) if matcher.group(2) else test_name
      duration = int(matcher.group(3)) if matcher.group(3) else 0

    else:
      # Can possibly add more matchers, such as different results from DCHECK.
      currently_running_matcher = _RE_TEST_CURRENTLY_RUNNING.match(l)
      dcheck_matcher = _RE_TEST_DCHECK_FATAL.match(l)

      if currently_running_matcher:
        test_name = currently_running_matcher.group(1)
        result_type = base_test_result.ResultType.CRASH
        duration = None  # Don't know. Not using 0 as this is unknown vs 0.
      elif dcheck_matcher:
        result_type = base_test_result.ResultType.CRASH
        duration = None  # Don't know. Not using 0 as this is unknown vs 0.

    # NOTE(review): |log| is reassigned to a fresh list but never set to
    # None above, so this guard always holds — kept as-is for safety.
    if log is not None:
      # Stack frames are collected separately so they can be symbolized.
      if not matcher and _STACK_LINE_RE.match(l):
        stack.append(l)
      else:
        log.append(l)

    if result_type and test_name:
      # Don't bother symbolizing output if the test passed.
      if result_type == base_test_result.ResultType.PASS:
        stack = []
      results.append(base_test_result.BaseTestResult(
          TestNameWithoutDisabledPrefix(test_name), result_type, duration,
          log=symbolize_stack_and_merge_with_log()))
      test_name = None

  # Flush the final test if it never produced a status line.
  handle_possibly_unknown_test()

  return results
+
+
def ParseGTestXML(xml_content):
  """Parses gtest XML results.

  Args:
    xml_content: A string with the XML output of a gtest run.
  Returns:
    A list of base_test_result.BaseTestResult.
  """
  results = []
  if not xml_content:
    return results

  # six.moves.html_parser.HTMLParser().unescape was used here before, but the
  # HTMLParser.unescape method was removed in Python 3.9; html.unescape is the
  # supported replacement.
  from html import unescape

  testsuites = xml.etree.ElementTree.fromstring(xml_content)
  for testsuite in testsuites:
    suite_name = testsuite.attrib['name']
    for testcase in testsuite:
      case_name = testcase.attrib['name']
      result_type = base_test_result.ResultType.PASS
      log = []
      # Any <failure> child marks the test as failed.
      for failure in testcase:
        result_type = base_test_result.ResultType.FAIL
        log.append(unescape(failure.attrib['message']))

      results.append(base_test_result.BaseTestResult(
          '%s.%s' % (suite_name, TestNameWithoutDisabledPrefix(case_name)),
          result_type,
          int(float(testcase.attrib['time']) * 1000),
          log=('\n'.join(log) if log else '')))

  return results
+
+
def ParseGTestJSON(json_content):
  """Parses results in the JSON Test Results format.

  Args:
    json_content: A string with JSON Test Results output.
  Returns:
    A list of base_test_result.BaseTestResult.
  """
  results = []
  if not json_content:
    return results

  json_data = json.loads(json_content)

  # 'tests' is a trie keyed on test-name components; walk it iteratively.
  openstack = list(json_data['tests'].items())

  while openstack:
    name, value = openstack.pop()

    if 'expected' in value and 'actual' in value:
      # Leaf node: map the 'actual' status string onto a ResultType.
      if value['actual'] == 'PASS':
        result_type = base_test_result.ResultType.PASS
      elif value['actual'] == 'SKIP':
        result_type = base_test_result.ResultType.SKIP
      elif value['actual'] == 'CRASH':
        result_type = base_test_result.ResultType.CRASH
      elif value['actual'] == 'TIMEOUT':
        result_type = base_test_result.ResultType.TIMEOUT
      else:
        result_type = base_test_result.ResultType.FAIL
      results.append(base_test_result.BaseTestResult(name, result_type))
    else:
      # Interior node: queue its children under dotted names.
      # dict.items() replaces six.iteritems; this file runs under vpython3.
      openstack += [('%s.%s' % (name, k), v) for k, v in value.items()]

  return results
+
+
def TestNameWithoutDisabledPrefix(test_name):
  """Strips the 'DISABLED_' and 'FLAKY_' markers from a test name.

  Args:
    test_name: The name of a test.
  Returns:
    The test name with every 'DISABLED_' and 'FLAKY_' occurrence removed.
  """
  stripped = test_name
  for prefix_pattern in (_RE_DISABLED, _RE_FLAKY):
    stripped = prefix_pattern.sub('', stripped)
  return stripped
+
class GtestTestInstance(test_instance.TestInstance):
  """Describes a single gtest suite and how to run it on Android.

  Built from parsed command-line args; locates either the suite's APK or its
  executable dist dir, prepares instrumentation extras, gtest filters, and
  data-dependency info for the test runner.
  """

  def __init__(self, args, data_deps_delegate, error_func):
    """Initializes the instance from parsed command-line |args|.

    Args:
      args: An argparse namespace of parsed command-line arguments.
      data_deps_delegate: Callable that maps a runtime-deps path to a list of
        (host_path, device_path) data dependencies (see SetUp).
      error_func: Called with a message if neither an APK nor an executable
        can be found for the suite.
    """
    super(GtestTestInstance, self).__init__()
    # TODO(jbudorick): Support multiple test suites.
    if len(args.suite_name) > 1:
      raise ValueError('Platform mode currently supports only 1 gtest suite')
    self._coverage_dir = args.coverage_dir
    self._exe_dist_dir = None
    self._external_shard_index = args.test_launcher_shard_index
    self._extract_test_list_from_filter = args.extract_test_list_from_filter
    self._filter_tests_lock = threading.Lock()
    self._gs_test_artifacts_bucket = args.gs_test_artifacts_bucket
    self._isolated_script_test_output = args.isolated_script_test_output
    self._isolated_script_test_perf_output = (
        args.isolated_script_test_perf_output)
    self._render_test_output_dir = args.render_test_output_dir
    self._shard_timeout = args.shard_timeout
    self._store_tombstones = args.store_tombstones
    self._suite = args.suite_name[0]
    self._symbolizer = stack_symbolizer.Symbolizer(None)
    self._total_external_shards = args.test_launcher_total_shards
    self._wait_for_java_debugger = args.wait_for_java_debugger
    self._use_existing_test_data = args.use_existing_test_data

    # GYP:
    if args.executable_dist_dir:
      self._exe_dist_dir = os.path.abspath(args.executable_dist_dir)
    else:
      # TODO(agrieve): Remove auto-detection once recipes pass flag explicitly.
      exe_dist_dir = os.path.join(constants.GetOutDirectory(),
                                  '%s__dist' % self._suite)

      if os.path.exists(exe_dist_dir):
        self._exe_dist_dir = exe_dist_dir

    incremental_part = ''
    if args.test_apk_incremental_install_json:
      incremental_part = '_incremental'

    self._test_launcher_batch_limit = MAX_SHARDS
    if (args.test_launcher_batch_limit
        and 0 < args.test_launcher_batch_limit < MAX_SHARDS):
      self._test_launcher_batch_limit = args.test_launcher_batch_limit

    apk_path = os.path.join(
        constants.GetOutDirectory(), '%s_apk' % self._suite,
        '%s-debug%s.apk' % (self._suite, incremental_part))
    self._test_apk_incremental_install_json = (
        args.test_apk_incremental_install_json)
    if not os.path.exists(apk_path):
      self._apk_helper = None
    else:
      self._apk_helper = apk_helper.ApkHelper(apk_path)
      self._extras = {
          _EXTRA_NATIVE_TEST_ACTIVITY: self._apk_helper.GetActivityName(),
      }
      if self._suite in RUN_IN_SUB_THREAD_TEST_SUITES:
        self._extras[_EXTRA_RUN_IN_SUB_THREAD] = 1
      if self._suite in BROWSER_TEST_SUITES:
        # Browser tests run one test per shard (_EXTRA_SHARD_SIZE_LIMIT = 1)
        # and get a 10x shard timeout.
        self._extras[_EXTRA_SHARD_SIZE_LIMIT] = 1
        self._extras[EXTRA_SHARD_NANO_TIMEOUT] = int(1e9 * self._shard_timeout)
        self._shard_timeout = 10 * self._shard_timeout
      if args.wait_for_java_debugger:
        self._extras[EXTRA_SHARD_NANO_TIMEOUT] = int(1e15)  # Forever

    if not self._apk_helper and not self._exe_dist_dir:
      error_func('Could not find apk or executable for %s' % self._suite)

    self._data_deps = []
    self._gtest_filter = test_filter.InitializeFilterFromArgs(args)
    self._run_disabled = args.run_disabled

    self._data_deps_delegate = data_deps_delegate
    self._runtime_deps_path = args.runtime_deps_path
    if not self._runtime_deps_path:
      logging.warning('No data dependencies will be pushed.')

    if args.app_data_files:
      self._app_data_files = args.app_data_files
      if args.app_data_file_dir:
        self._app_data_file_dir = args.app_data_file_dir
      else:
        self._app_data_file_dir = tempfile.mkdtemp()
        logging.critical('Saving app files to %s', self._app_data_file_dir)
    else:
      self._app_data_files = None
      self._app_data_file_dir = None

    self._flags = None
    self._initializeCommandLineFlags(args)

    # TODO(jbudorick): Remove this once it's deployed.
    self._enable_xml_result_parsing = args.enable_xml_result_parsing

  def _initializeCommandLineFlags(self, args):
    """Builds self._flags from explicit flags, a flags file, and run_disabled."""
    self._flags = []
    if args.command_line_flags:
      self._flags.extend(args.command_line_flags)
    if args.device_flags_file:
      with open(args.device_flags_file) as f:
        stripped_lines = (l.strip() for l in f)
        self._flags.extend(flag for flag in stripped_lines if flag)
    if args.run_disabled:
      self._flags.append('--gtest_also_run_disabled_tests')

  @property
  def activity(self):
    return self._apk_helper and self._apk_helper.GetActivityName()

  @property
  def apk(self):
    return self._apk_helper and self._apk_helper.path

  @property
  def apk_helper(self):
    return self._apk_helper

  @property
  def app_file_dir(self):
    return self._app_data_file_dir

  @property
  def app_files(self):
    return self._app_data_files

  @property
  def coverage_dir(self):
    return self._coverage_dir

  @property
  def enable_xml_result_parsing(self):
    return self._enable_xml_result_parsing

  @property
  def exe_dist_dir(self):
    return self._exe_dist_dir

  @property
  def external_shard_index(self):
    return self._external_shard_index

  @property
  def extract_test_list_from_filter(self):
    return self._extract_test_list_from_filter

  @property
  def extras(self):
    return self._extras

  @property
  def flags(self):
    return self._flags

  @property
  def gs_test_artifacts_bucket(self):
    return self._gs_test_artifacts_bucket

  @property
  def gtest_filter(self):
    return self._gtest_filter

  @property
  def isolated_script_test_output(self):
    return self._isolated_script_test_output

  @property
  def isolated_script_test_perf_output(self):
    return self._isolated_script_test_perf_output

  @property
  def render_test_output_dir(self):
    return self._render_test_output_dir

  @property
  def package(self):
    return self._apk_helper and self._apk_helper.GetPackageName()

  @property
  def permissions(self):
    return self._apk_helper and self._apk_helper.GetPermissions()

  @property
  def runner(self):
    return self._apk_helper and self._apk_helper.GetInstrumentationName()

  @property
  def shard_timeout(self):
    return self._shard_timeout

  @property
  def store_tombstones(self):
    return self._store_tombstones

  @property
  def suite(self):
    return self._suite

  @property
  def symbolizer(self):
    return self._symbolizer

  @property
  def test_apk_incremental_install_json(self):
    return self._test_apk_incremental_install_json

  @property
  def test_launcher_batch_limit(self):
    return self._test_launcher_batch_limit

  @property
  def total_external_shards(self):
    return self._total_external_shards

  @property
  def wait_for_java_debugger(self):
    return self._wait_for_java_debugger

  @property
  def use_existing_test_data(self):
    return self._use_existing_test_data

  #override
  def TestType(self):
    return 'gtest'

  #override
  def GetPreferredAbis(self):
    if not self._apk_helper:
      return None
    return self._apk_helper.GetAbis()

  #override
  def SetUp(self):
    """Map data dependencies via isolate."""
    self._data_deps.extend(
        self._data_deps_delegate(self._runtime_deps_path))

  def GetDataDependencies(self):
    """Returns the test suite's data dependencies.

    Returns:
      A list of (host_path, device_path) tuples to push. If device_path is
      None, the client is responsible for determining where to push the file.
    """
    return self._data_deps

  def FilterTests(self, test_list, disabled_prefixes=None):
    """Filters |test_list| based on prefixes and, if present, a filter string.

    Args:
      test_list: The list of tests to filter.
      disabled_prefixes: A list of test prefixes to filter. Defaults to
        DISABLED_, FLAKY_, FAILS_, PRE_, and MANUAL_
    Returns:
      A filtered list of tests to run.
    """
    gtest_filter_strings = [
        self._GenerateDisabledFilterString(disabled_prefixes)]
    if self._gtest_filter:
      gtest_filter_strings.append(self._gtest_filter)

    filtered_test_list = test_list
    # This lock is required because on older versions of Python
    # |unittest_util.FilterTestNames| use of |fnmatch| is not threadsafe.
    with self._filter_tests_lock:
      for gtest_filter_string in gtest_filter_strings:
        logging.debug('Filtering tests using: %s', gtest_filter_string)
        filtered_test_list = unittest_util.FilterTestNames(
            filtered_test_list, gtest_filter_string)

      if self._run_disabled and self._gtest_filter:
        # Re-add disabled tests whose undisabled name matches the filter.
        out_filtered_test_list = list(set(test_list)-set(filtered_test_list))
        for test in out_filtered_test_list:
          test_name_no_disabled = TestNameWithoutDisabledPrefix(test)
          if test_name_no_disabled != test and unittest_util.FilterTestNames(
              [test_name_no_disabled], self._gtest_filter):
            filtered_test_list.append(test)
    return filtered_test_list

  def _GenerateDisabledFilterString(self, disabled_prefixes):
    """Builds a negative gtest filter string from prefixes and filter files."""
    disabled_filter_items = []

    if disabled_prefixes is None:
      disabled_prefixes = ['FAILS_', 'PRE_']
    if '--run-manual' not in self._flags:
      disabled_prefixes += ['MANUAL_']
    if not self._run_disabled:
      disabled_prefixes += ['DISABLED_', 'FLAKY_']

    disabled_filter_items += ['%s*' % dp for dp in disabled_prefixes]
    disabled_filter_items += ['*.%s*' % dp for dp in disabled_prefixes]

    # Per-suite filter files (e.g. pylib/gtest/filter/<suite>_disabled) add
    # more suppressions; '#' lines are comments.
    disabled_tests_file_path = os.path.join(
        host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'pylib', 'gtest',
        'filter', '%s_disabled' % self._suite)
    if disabled_tests_file_path and os.path.exists(disabled_tests_file_path):
      with open(disabled_tests_file_path) as disabled_tests_file:
        disabled_filter_items += [
            '%s' % l for l in (line.strip() for line in disabled_tests_file)
            if l and not l.startswith('#')]

    return '*-%s' % ':'.join(disabled_filter_items)

  #override
  def TearDown(self):
    """Do nothing."""
    pass
diff --git a/third_party/libwebrtc/build/android/pylib/gtest/gtest_test_instance_test.py b/third_party/libwebrtc/build/android/pylib/gtest/gtest_test_instance_test.py
new file mode 100755
index 0000000000..993e2c78c4
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/gtest/gtest_test_instance_test.py
@@ -0,0 +1,348 @@
+#!/usr/bin/env vpython3
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.gtest import gtest_test_instance
+
+
+class GtestTestInstanceTests(unittest.TestCase):
+
+ def testParseGTestListTests_simple(self):
+ raw_output = [
+ 'TestCaseOne.',
+ ' testOne',
+ ' testTwo',
+ 'TestCaseTwo.',
+ ' testThree',
+ ' testFour',
+ ]
+ actual = gtest_test_instance.ParseGTestListTests(raw_output)
+ expected = [
+ 'TestCaseOne.testOne',
+ 'TestCaseOne.testTwo',
+ 'TestCaseTwo.testThree',
+ 'TestCaseTwo.testFour',
+ ]
+ self.assertEqual(expected, actual)
+
+ def testParseGTestListTests_typeParameterized_old(self):
+ raw_output = [
+ 'TPTestCase/WithTypeParam/0.',
+ ' testOne',
+ ' testTwo',
+ ]
+ actual = gtest_test_instance.ParseGTestListTests(raw_output)
+ expected = [
+ 'TPTestCase/WithTypeParam/0.testOne',
+ 'TPTestCase/WithTypeParam/0.testTwo',
+ ]
+ self.assertEqual(expected, actual)
+
+ def testParseGTestListTests_typeParameterized_new(self):
+ raw_output = [
+ 'TPTestCase/WithTypeParam/0. # TypeParam = TypeParam0',
+ ' testOne',
+ ' testTwo',
+ ]
+ actual = gtest_test_instance.ParseGTestListTests(raw_output)
+ expected = [
+ 'TPTestCase/WithTypeParam/0.testOne',
+ 'TPTestCase/WithTypeParam/0.testTwo',
+ ]
+ self.assertEqual(expected, actual)
+
+ def testParseGTestListTests_valueParameterized_old(self):
+ raw_output = [
+ 'VPTestCase.',
+ ' testWithValueParam/0',
+ ' testWithValueParam/1',
+ ]
+ actual = gtest_test_instance.ParseGTestListTests(raw_output)
+ expected = [
+ 'VPTestCase.testWithValueParam/0',
+ 'VPTestCase.testWithValueParam/1',
+ ]
+ self.assertEqual(expected, actual)
+
+ def testParseGTestListTests_valueParameterized_new(self):
+ raw_output = [
+ 'VPTestCase.',
+ ' testWithValueParam/0 # GetParam() = 0',
+ ' testWithValueParam/1 # GetParam() = 1',
+ ]
+ actual = gtest_test_instance.ParseGTestListTests(raw_output)
+ expected = [
+ 'VPTestCase.testWithValueParam/0',
+ 'VPTestCase.testWithValueParam/1',
+ ]
+ self.assertEqual(expected, actual)
+
+ def testParseGTestListTests_emptyTestName(self):
+ raw_output = [
+ 'TestCase.',
+ ' ',
+ ' nonEmptyTestName',
+ ]
+ actual = gtest_test_instance.ParseGTestListTests(raw_output)
+ expected = [
+ 'TestCase.nonEmptyTestName',
+ ]
+ self.assertEqual(expected, actual)
+
+ def testParseGTestOutput_pass(self):
+ raw_output = [
+ '[ RUN ] FooTest.Bar',
+ '[ OK ] FooTest.Bar (1 ms)',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('FooTest.Bar', actual[0].GetName())
+ self.assertEqual(1, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.PASS, actual[0].GetType())
+
+ def testParseGTestOutput_fail(self):
+ raw_output = [
+ '[ RUN ] FooTest.Bar',
+ '[ FAILED ] FooTest.Bar (1 ms)',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('FooTest.Bar', actual[0].GetName())
+ self.assertEqual(1, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.FAIL, actual[0].GetType())
+
+ def testParseGTestOutput_crash(self):
+ raw_output = [
+ '[ RUN ] FooTest.Bar',
+ '[ CRASHED ] FooTest.Bar (1 ms)',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('FooTest.Bar', actual[0].GetName())
+ self.assertEqual(1, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.CRASH, actual[0].GetType())
+
+ def testParseGTestOutput_errorCrash(self):
+ raw_output = [
+ '[ RUN ] FooTest.Bar',
+ '[ERROR:blah] Currently running: FooTest.Bar',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('FooTest.Bar', actual[0].GetName())
+ self.assertIsNone(actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.CRASH, actual[0].GetType())
+
+ def testParseGTestOutput_fatalDcheck(self):
+ raw_output = [
+ '[ RUN ] FooTest.Bar',
+ '[0324/183029.116334:FATAL:test_timeouts.cc(103)] Check failed: !init',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('FooTest.Bar', actual[0].GetName())
+ self.assertIsNone(actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.CRASH, actual[0].GetType())
+
+ def testParseGTestOutput_unknown(self):
+ raw_output = [
+ '[ RUN ] FooTest.Bar',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('FooTest.Bar', actual[0].GetName())
+ self.assertEqual(0, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.CRASH, actual[0].GetType())
+
+ def testParseGTestOutput_nonterminalUnknown(self):
+ raw_output = [
+ '[ RUN ] FooTest.Bar',
+ '[ RUN ] FooTest.Baz',
+ '[ OK ] FooTest.Baz (1 ms)',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(2, len(actual))
+
+ self.assertEqual('FooTest.Bar', actual[0].GetName())
+ self.assertEqual(0, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.CRASH, actual[0].GetType())
+
+ self.assertEqual('FooTest.Baz', actual[1].GetName())
+ self.assertEqual(1, actual[1].GetDuration())
+ self.assertEqual(base_test_result.ResultType.PASS, actual[1].GetType())
+
+ def testParseGTestOutput_deathTestCrashOk(self):
+ raw_output = [
+ '[ RUN ] FooTest.Bar',
+ '[ CRASHED ]',
+ '[ OK ] FooTest.Bar (1 ms)',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+
+ self.assertEqual('FooTest.Bar', actual[0].GetName())
+ self.assertEqual(1, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.PASS, actual[0].GetType())
+
+ def testParseGTestOutput_typeParameterized(self):
+ raw_output = [
+ '[ RUN ] Baz/FooTest.Bar/0',
+ '[ FAILED ] Baz/FooTest.Bar/0, where TypeParam = (1 ms)',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('Baz/FooTest.Bar/0', actual[0].GetName())
+ self.assertEqual(1, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.FAIL, actual[0].GetType())
+
+ def testParseGTestOutput_valueParameterized(self):
+ raw_output = [
+ '[ RUN ] Baz/FooTest.Bar/0',
+ '[ FAILED ] Baz/FooTest.Bar/0,' +
+ ' where GetParam() = 4-byte object <00-00 00-00> (1 ms)',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('Baz/FooTest.Bar/0', actual[0].GetName())
+ self.assertEqual(1, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.FAIL, actual[0].GetType())
+
+ def testParseGTestOutput_typeAndValueParameterized(self):
+ raw_output = [
+ '[ RUN ] Baz/FooTest.Bar/0',
+ '[ FAILED ] Baz/FooTest.Bar/0,' +
+ ' where TypeParam = and GetParam() = (1 ms)',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('Baz/FooTest.Bar/0', actual[0].GetName())
+ self.assertEqual(1, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.FAIL, actual[0].GetType())
+
+ def testParseGTestOutput_skippedTest(self):
+ raw_output = [
+ '[ RUN ] FooTest.Bar',
+ '[ SKIPPED ] FooTest.Bar (1 ms)',
+ ]
+ actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('FooTest.Bar', actual[0].GetName())
+ self.assertEqual(1, actual[0].GetDuration())
+ self.assertEqual(base_test_result.ResultType.SKIP, actual[0].GetType())
+
+ def testParseGTestXML_none(self):
+ actual = gtest_test_instance.ParseGTestXML(None)
+ self.assertEqual([], actual)
+
+ def testParseGTestJSON_none(self):
+ actual = gtest_test_instance.ParseGTestJSON(None)
+ self.assertEqual([], actual)
+
+ def testParseGTestJSON_example(self):
+ raw_json = """
+ {
+ "tests": {
+ "mojom_tests": {
+ "parse": {
+ "ast_unittest": {
+ "ASTTest": {
+ "testNodeBase": {
+ "expected": "PASS",
+ "actual": "PASS",
+ "artifacts": {
+ "screenshot": ["screenshots/page.png"]
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "interrupted": false,
+ "path_delimiter": ".",
+ "version": 3,
+ "seconds_since_epoch": 1406662283.764424,
+ "num_failures_by_type": {
+ "FAIL": 0,
+ "PASS": 1
+ },
+ "artifact_types": {
+ "screenshot": "image/png"
+ }
+ }"""
+ actual = gtest_test_instance.ParseGTestJSON(raw_json)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('mojom_tests.parse.ast_unittest.ASTTest.testNodeBase',
+ actual[0].GetName())
+ self.assertEqual(base_test_result.ResultType.PASS, actual[0].GetType())
+
+ def testParseGTestJSON_skippedTest_example(self):
+ raw_json = """
+ {
+ "tests": {
+ "mojom_tests": {
+ "parse": {
+ "ast_unittest": {
+ "ASTTest": {
+ "testNodeBase": {
+ "expected": "SKIP",
+ "actual": "SKIP"
+ }
+ }
+ }
+ }
+ }
+ },
+ "interrupted": false,
+ "path_delimiter": ".",
+ "version": 3,
+ "seconds_since_epoch": 1406662283.764424,
+ "num_failures_by_type": {
+ "SKIP": 1
+ }
+ }"""
+ actual = gtest_test_instance.ParseGTestJSON(raw_json)
+ self.assertEqual(1, len(actual))
+ self.assertEqual('mojom_tests.parse.ast_unittest.ASTTest.testNodeBase',
+ actual[0].GetName())
+ self.assertEqual(base_test_result.ResultType.SKIP, actual[0].GetType())
+
+ def testTestNameWithoutDisabledPrefix_disabled(self):
+ test_name_list = [
+ 'A.DISABLED_B',
+ 'DISABLED_A.B',
+ 'DISABLED_A.DISABLED_B',
+ ]
+ for test_name in test_name_list:
+ actual = gtest_test_instance \
+ .TestNameWithoutDisabledPrefix(test_name)
+ expected = 'A.B'
+ self.assertEqual(expected, actual)
+
+ def testTestNameWithoutDisabledPrefix_flaky(self):
+ test_name_list = [
+ 'A.FLAKY_B',
+ 'FLAKY_A.B',
+ 'FLAKY_A.FLAKY_B',
+ ]
+ for test_name in test_name_list:
+ actual = gtest_test_instance \
+ .TestNameWithoutDisabledPrefix(test_name)
+ expected = 'A.B'
+ self.assertEqual(expected, actual)
+
+ def testTestNameWithoutDisabledPrefix_notDisabledOrFlaky(self):
+ test_name = 'A.B'
+ actual = gtest_test_instance \
+ .TestNameWithoutDisabledPrefix(test_name)
+ expected = 'A.B'
+ self.assertEqual(expected, actual)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/instrumentation/__init__.py b/third_party/libwebrtc/build/android/pylib/instrumentation/__init__.py
new file mode 100644
index 0000000000..96196cffb2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/instrumentation/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_parser.py b/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_parser.py
new file mode 100644
index 0000000000..d22fd48f4b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_parser.py
@@ -0,0 +1,112 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+import re
+
+# http://developer.android.com/reference/android/test/InstrumentationTestRunner.html
+STATUS_CODE_START = 1
+STATUS_CODE_OK = 0
+STATUS_CODE_ERROR = -1
+STATUS_CODE_FAILURE = -2
+
+# AndroidJUnitRunner outputs status code -3 to indicate a test is skipped
+STATUS_CODE_SKIP = -3
+
+# AndroidJUnitRunner outputs -4 to indicate a failed assumption
+# "A test for which an assumption fails should not generate a test
+# case failure"
+# http://junit.org/junit4/javadoc/4.12/org/junit/AssumptionViolatedException.html
+STATUS_CODE_ASSUMPTION_FAILURE = -4
+
+STATUS_CODE_TEST_DURATION = 1337
+
+# When a test batch fails due to post-test Assertion failures (e.g.
+# LifetimeAssert).
+STATUS_CODE_BATCH_FAILURE = 1338
+
+# http://developer.android.com/reference/android/app/Activity.html
+RESULT_CODE_OK = -1
+RESULT_CODE_CANCELED = 0
+
+_INSTR_LINE_RE = re.compile(r'^\s*INSTRUMENTATION_([A-Z_]+): (.*)$')
+
+
+class InstrumentationParser(object):
+
+ def __init__(self, stream):
+ """An incremental parser for the output of Android instrumentation tests.
+
+ Example:
+
+ stream = adb.IterShell('am instrument -r ...')
+ parser = InstrumentationParser(stream)
+
+ for code, bundle in parser.IterStatus():
+ # do something with each instrumentation status
+ print('status:', code, bundle)
+
+ # do something with the final instrumentation result
+ code, bundle = parser.GetResult()
+ print('result:', code, bundle)
+
+ Args:
+ stream: a sequence of lines as produced by the raw output of an
+ instrumentation test (e.g. by |am instrument -r|).
+ """
+ self._stream = stream
+ self._code = None
+ self._bundle = None
+
+ def IterStatus(self):
+ """Iterate over statuses as they are produced by the instrumentation test.
+
+ Yields:
+ A tuple (code, bundle) for each instrumentation status found in the
+ output.
+ """
+ def join_bundle_values(bundle):
+ for key in bundle:
+ bundle[key] = '\n'.join(bundle[key])
+ return bundle
+
+ bundle = {'STATUS': {}, 'RESULT': {}}
+ header = None
+ key = None
+ for line in self._stream:
+ m = _INSTR_LINE_RE.match(line)
+ if m:
+ header, value = m.groups()
+ key = None
+ if header in ['STATUS', 'RESULT'] and '=' in value:
+ key, value = value.split('=', 1)
+ bundle[header][key] = [value]
+ elif header == 'STATUS_CODE':
+ yield int(value), join_bundle_values(bundle['STATUS'])
+ bundle['STATUS'] = {}
+ elif header == 'CODE':
+ self._code = int(value)
+ else:
+ logging.warning('Unknown INSTRUMENTATION_%s line: %s', header, value)
+ elif key is not None:
+ bundle[header][key].append(line)
+
+ self._bundle = join_bundle_values(bundle['RESULT'])
+
+ def GetResult(self):
+ """Return the final instrumentation result.
+
+ Returns:
+ A pair (code, bundle) with the final instrumentation result. The |code|
+ may be None if no instrumentation result was found in the output.
+
+ Raises:
+ AssertionError if attempting to get the instrumentation result before
+ exhausting |IterStatus| first.
+ """
+ assert self._bundle is not None, (
+ 'The IterStatus generator must be exhausted before reading the final'
+ ' instrumentation result.')
+ return self._code, self._bundle
diff --git a/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_parser_test.py b/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_parser_test.py
new file mode 100755
index 0000000000..0cd163d038
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_parser_test.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env vpython3
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""Unit tests for instrumentation.InstrumentationParser."""
+
+
+import unittest
+
+from pylib.instrumentation import instrumentation_parser
+
+
+class InstrumentationParserTest(unittest.TestCase):
+
+ def testInstrumentationParser_nothing(self):
+ parser = instrumentation_parser.InstrumentationParser([''])
+ statuses = list(parser.IterStatus())
+ code, bundle = parser.GetResult()
+ self.assertEqual(None, code)
+ self.assertEqual({}, bundle)
+ self.assertEqual([], statuses)
+
+ def testInstrumentationParser_noMatchingStarts(self):
+ raw_output = [
+ '',
+ 'this.is.a.test.package.TestClass:.',
+ 'Test result for =.',
+ 'Time: 1.234',
+ '',
+ 'OK (1 test)',
+ ]
+
+ parser = instrumentation_parser.InstrumentationParser(raw_output)
+ statuses = list(parser.IterStatus())
+ code, bundle = parser.GetResult()
+ self.assertEqual(None, code)
+ self.assertEqual({}, bundle)
+ self.assertEqual([], statuses)
+
+ def testInstrumentationParser_resultAndCode(self):
+ raw_output = [
+ 'INSTRUMENTATION_RESULT: shortMsg=foo bar',
+ 'INSTRUMENTATION_RESULT: longMsg=a foo',
+ 'walked into',
+ 'a bar',
+ 'INSTRUMENTATION_CODE: -1',
+ ]
+
+ parser = instrumentation_parser.InstrumentationParser(raw_output)
+ statuses = list(parser.IterStatus())
+ code, bundle = parser.GetResult()
+ self.assertEqual(-1, code)
+ self.assertEqual(
+ {'shortMsg': 'foo bar', 'longMsg': 'a foo\nwalked into\na bar'}, bundle)
+ self.assertEqual([], statuses)
+
+ def testInstrumentationParser_oneStatus(self):
+ raw_output = [
+ 'INSTRUMENTATION_STATUS: foo=1',
+ 'INSTRUMENTATION_STATUS: bar=hello',
+ 'INSTRUMENTATION_STATUS: world=false',
+ 'INSTRUMENTATION_STATUS: class=this.is.a.test.package.TestClass',
+ 'INSTRUMENTATION_STATUS: test=testMethod',
+ 'INSTRUMENTATION_STATUS_CODE: 0',
+ ]
+
+ parser = instrumentation_parser.InstrumentationParser(raw_output)
+ statuses = list(parser.IterStatus())
+
+ expected = [
+ (0, {
+ 'foo': '1',
+ 'bar': 'hello',
+ 'world': 'false',
+ 'class': 'this.is.a.test.package.TestClass',
+ 'test': 'testMethod',
+ })
+ ]
+ self.assertEqual(expected, statuses)
+
+ def testInstrumentationParser_multiStatus(self):
+ raw_output = [
+ 'INSTRUMENTATION_STATUS: class=foo',
+ 'INSTRUMENTATION_STATUS: test=bar',
+ 'INSTRUMENTATION_STATUS_CODE: 1',
+ 'INSTRUMENTATION_STATUS: test_skipped=true',
+ 'INSTRUMENTATION_STATUS_CODE: 0',
+ 'INSTRUMENTATION_STATUS: class=hello',
+ 'INSTRUMENTATION_STATUS: test=world',
+ 'INSTRUMENTATION_STATUS: stack=',
+ 'foo/bar.py (27)',
+ 'hello/world.py (42)',
+ 'test/file.py (1)',
+ 'INSTRUMENTATION_STATUS_CODE: -1',
+ ]
+
+ parser = instrumentation_parser.InstrumentationParser(raw_output)
+ statuses = list(parser.IterStatus())
+
+ expected = [
+ (1, {'class': 'foo', 'test': 'bar',}),
+ (0, {'test_skipped': 'true'}),
+ (-1, {
+ 'class': 'hello',
+ 'test': 'world',
+ 'stack': '\nfoo/bar.py (27)\nhello/world.py (42)\ntest/file.py (1)',
+ }),
+ ]
+ self.assertEqual(expected, statuses)
+
+ def testInstrumentationParser_statusResultAndCode(self):
+ raw_output = [
+ 'INSTRUMENTATION_STATUS: class=foo',
+ 'INSTRUMENTATION_STATUS: test=bar',
+ 'INSTRUMENTATION_STATUS_CODE: 1',
+ 'INSTRUMENTATION_RESULT: result=hello',
+ 'world',
+ '',
+ '',
+ 'INSTRUMENTATION_CODE: 0',
+ ]
+
+ parser = instrumentation_parser.InstrumentationParser(raw_output)
+ statuses = list(parser.IterStatus())
+ code, bundle = parser.GetResult()
+
+ self.assertEqual(0, code)
+ self.assertEqual({'result': 'hello\nworld\n\n'}, bundle)
+ self.assertEqual([(1, {'class': 'foo', 'test': 'bar'})], statuses)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_test_instance.py b/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_test_instance.py
new file mode 100644
index 0000000000..b4a13c9031
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_test_instance.py
@@ -0,0 +1,1171 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import copy
+import logging
+import os
+import pickle
+import re
+
+import six
+from devil.android import apk_helper
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.base import test_exception
+from pylib.base import test_instance
+from pylib.constants import host_paths
+from pylib.instrumentation import test_result
+from pylib.instrumentation import instrumentation_parser
+from pylib.symbols import deobfuscator
+from pylib.symbols import stack_symbolizer
+from pylib.utils import dexdump
+from pylib.utils import gold_utils
+from pylib.utils import instrumentation_tracing
+from pylib.utils import proguard
+from pylib.utils import shared_preference_utils
+from pylib.utils import test_filter
+
+
+with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
+ import unittest_util # pylint: disable=import-error
+
+# Ref: http://developer.android.com/reference/android/app/Activity.html
+_ACTIVITY_RESULT_CANCELED = 0
+_ACTIVITY_RESULT_OK = -1
+
+_COMMAND_LINE_PARAMETER = 'cmdlinearg-parameter'
+_DEFAULT_ANNOTATIONS = [
+ 'SmallTest', 'MediumTest', 'LargeTest', 'EnormousTest', 'IntegrationTest']
+_EXCLUDE_UNLESS_REQUESTED_ANNOTATIONS = [
+ 'DisabledTest', 'FlakyTest', 'Manual']
+_VALID_ANNOTATIONS = set(_DEFAULT_ANNOTATIONS +
+ _EXCLUDE_UNLESS_REQUESTED_ANNOTATIONS)
+
+_TEST_LIST_JUNIT4_RUNNERS = [
+ 'org.chromium.base.test.BaseChromiumAndroidJUnitRunner']
+
+_SKIP_PARAMETERIZATION = 'SkipCommandLineParameterization'
+_PARAMETERIZED_COMMAND_LINE_FLAGS = 'ParameterizedCommandLineFlags'
+_PARAMETERIZED_COMMAND_LINE_FLAGS_SWITCHES = (
+ 'ParameterizedCommandLineFlags$Switches')
+_NATIVE_CRASH_RE = re.compile('(process|native) crash', re.IGNORECASE)
+_PICKLE_FORMAT_VERSION = 12
+
+# The ID of the bundle value Instrumentation uses to report which test index the
+# results are for in a collection of tests. Note that this index is 1-based.
+_BUNDLE_CURRENT_ID = 'current'
+# The ID of the bundle value Instrumentation uses to report the test class.
+_BUNDLE_CLASS_ID = 'class'
+# The ID of the bundle value Instrumentation uses to report the test name.
+_BUNDLE_TEST_ID = 'test'
+# The ID of the bundle value Instrumentation uses to report if a test was
+# skipped.
+_BUNDLE_SKIPPED_ID = 'test_skipped'
+# The ID of the bundle value Instrumentation uses to report the crash stack, if
+# the test crashed.
+_BUNDLE_STACK_ID = 'stack'
+
+# The ID of the bundle value Chrome uses to report the test duration.
+_BUNDLE_DURATION_ID = 'duration_ms'
+
+class MissingSizeAnnotationError(test_exception.TestException):
+ def __init__(self, class_name):
+ super(MissingSizeAnnotationError, self).__init__(class_name +
+ ': Test method is missing required size annotation. Add one of: ' +
+ ', '.join('@' + a for a in _VALID_ANNOTATIONS))
+
+
+class CommandLineParameterizationException(test_exception.TestException):
+
+ def __init__(self, msg):
+ super(CommandLineParameterizationException, self).__init__(msg)
+
+
+class TestListPickleException(test_exception.TestException):
+ pass
+
+
+# TODO(jbudorick): Make these private class methods of
+# InstrumentationTestInstance once the instrumentation junit3_runner_class is
+# deprecated.
+def ParseAmInstrumentRawOutput(raw_output):
+ """Parses the output of an |am instrument -r| call.
+
+ Args:
+ raw_output: the output of an |am instrument -r| call as a list of lines
+ Returns:
+ A 3-tuple containing:
+ - the instrumentation code as an integer
+ - the instrumentation result as a list of lines
+ - the instrumentation statuses received as a list of 2-tuples
+ containing:
+ - the status code as an integer
+ - the bundle dump as a dict mapping string keys to a list of
+ strings, one for each line.
+ """
+ parser = instrumentation_parser.InstrumentationParser(raw_output)
+ statuses = list(parser.IterStatus())
+ code, bundle = parser.GetResult()
+ return (code, bundle, statuses)
+
+
+def GenerateTestResults(result_code, result_bundle, statuses, duration_ms,
+ device_abi, symbolizer):
+ """Generate test results from |statuses|.
+
+ Args:
+ result_code: The overall status code as an integer.
+ result_bundle: The summary bundle dump as a dict.
+ statuses: A list of 2-tuples containing:
+ - the status code as an integer
+ - the bundle dump as a dict mapping string keys to string values
+ Note that this is the same as the third item in the 3-tuple returned by
+      |ParseAmInstrumentRawOutput|.
+ duration_ms: The duration of the test in milliseconds.
+ device_abi: The device_abi, which is needed for symbolization.
+ symbolizer: The symbolizer used to symbolize stack.
+
+ Returns:
+ A list containing an instance of InstrumentationTestResult for each test
+ parsed.
+ """
+
+ results = []
+
+ current_result = None
+ cumulative_duration = 0
+
+ for status_code, bundle in statuses:
+ # If the last test was a failure already, don't override that failure with
+ # post-test failures that could be caused by the original failure.
+ if (status_code == instrumentation_parser.STATUS_CODE_BATCH_FAILURE
+ and current_result.GetType() != base_test_result.ResultType.FAIL):
+ current_result.SetType(base_test_result.ResultType.FAIL)
+ _MaybeSetLog(bundle, current_result, symbolizer, device_abi)
+ continue
+
+ if status_code == instrumentation_parser.STATUS_CODE_TEST_DURATION:
+ # For the first result, duration will be set below to the difference
+ # between the reported and actual durations to account for overhead like
+ # starting instrumentation.
+ if results:
+ current_duration = int(bundle.get(_BUNDLE_DURATION_ID, duration_ms))
+ current_result.SetDuration(current_duration)
+ cumulative_duration += current_duration
+ continue
+
+ test_class = bundle.get(_BUNDLE_CLASS_ID, '')
+ test_method = bundle.get(_BUNDLE_TEST_ID, '')
+ if test_class and test_method:
+ test_name = '%s#%s' % (test_class, test_method)
+ else:
+ continue
+
+ if status_code == instrumentation_parser.STATUS_CODE_START:
+ if current_result:
+ results.append(current_result)
+ current_result = test_result.InstrumentationTestResult(
+ test_name, base_test_result.ResultType.UNKNOWN, duration_ms)
+ else:
+ if status_code == instrumentation_parser.STATUS_CODE_OK:
+ if bundle.get(_BUNDLE_SKIPPED_ID, '').lower() in ('true', '1', 'yes'):
+ current_result.SetType(base_test_result.ResultType.SKIP)
+ elif current_result.GetType() == base_test_result.ResultType.UNKNOWN:
+ current_result.SetType(base_test_result.ResultType.PASS)
+ elif status_code == instrumentation_parser.STATUS_CODE_SKIP:
+ current_result.SetType(base_test_result.ResultType.SKIP)
+ elif status_code == instrumentation_parser.STATUS_CODE_ASSUMPTION_FAILURE:
+ current_result.SetType(base_test_result.ResultType.SKIP)
+ else:
+ if status_code not in (instrumentation_parser.STATUS_CODE_ERROR,
+ instrumentation_parser.STATUS_CODE_FAILURE):
+ logging.error('Unrecognized status code %d. Handling as an error.',
+ status_code)
+ current_result.SetType(base_test_result.ResultType.FAIL)
+ _MaybeSetLog(bundle, current_result, symbolizer, device_abi)
+
+ if current_result:
+ if current_result.GetType() == base_test_result.ResultType.UNKNOWN:
+ crashed = (result_code == _ACTIVITY_RESULT_CANCELED and any(
+ _NATIVE_CRASH_RE.search(l) for l in six.itervalues(result_bundle)))
+ if crashed:
+ current_result.SetType(base_test_result.ResultType.CRASH)
+
+ results.append(current_result)
+
+ if results:
+ logging.info('Adding cumulative overhead to test %s: %dms',
+ results[0].GetName(), duration_ms - cumulative_duration)
+ results[0].SetDuration(duration_ms - cumulative_duration)
+
+ return results
+
+
+def _MaybeSetLog(bundle, current_result, symbolizer, device_abi):
+ if _BUNDLE_STACK_ID in bundle:
+ stack = bundle[_BUNDLE_STACK_ID]
+ if symbolizer and device_abi:
+ current_result.SetLog('%s\n%s' % (stack, '\n'.join(
+ symbolizer.ExtractAndResolveNativeStackTraces(stack, device_abi))))
+ else:
+ current_result.SetLog(stack)
+
+ current_result.SetFailureReason(_ParseExceptionMessage(stack))
+
+
+def _ParseExceptionMessage(stack):
+ """Extracts the exception message from the given stack trace.
+ """
+ # This interprets stack traces reported via InstrumentationResultPrinter:
+ # https://source.chromium.org/chromium/chromium/src/+/main:third_party/android_support_test_runner/runner/src/main/java/android/support/test/internal/runner/listener/InstrumentationResultPrinter.java;l=181?q=InstrumentationResultPrinter&type=cs
+ # This is a standard Java stack trace, of the form:
+ # <Result of Exception.toString()>
+ # at SomeClass.SomeMethod(...)
+ # at ...
+ lines = stack.split('\n')
+ for i, line in enumerate(lines):
+ if line.startswith('\tat'):
+ return '\n'.join(lines[0:i])
+ # No call stack found, so assume everything is the exception message.
+ return stack
+
+
+def FilterTests(tests, filter_str=None, annotations=None,
+ excluded_annotations=None):
+ """Filter a list of tests
+
+ Args:
+ tests: a list of tests. e.g. [
+ {'annotations": {}, 'class': 'com.example.TestA', 'method':'test1'},
+ {'annotations": {}, 'class': 'com.example.TestB', 'method':'test2'}]
+ filter_str: googletest-style filter string.
+ annotations: a dict of wanted annotations for test methods.
+ excluded_annotations: a dict of annotations to exclude.
+
+ Return:
+ A list of filtered tests
+ """
+
+ def test_names_from_pattern(combined_pattern, test_names):
+ patterns = combined_pattern.split(':')
+
+ hashable_patterns = set()
+ filename_patterns = []
+ for pattern in patterns:
+ if ('*' in pattern or '?' in pattern or '[' in pattern):
+ filename_patterns.append(pattern)
+ else:
+ hashable_patterns.add(pattern)
+
+ filter_test_names = set(
+ unittest_util.FilterTestNames(test_names, ':'.join(
+ filename_patterns))) if len(filename_patterns) > 0 else set()
+
+ for test_name in test_names:
+ if test_name in hashable_patterns:
+ filter_test_names.add(test_name)
+
+ return filter_test_names
+
+ def get_test_names(test):
+ test_names = set()
+ # Allow fully-qualified name as well as an omitted package.
+ unqualified_class_test = {
+ 'class': test['class'].split('.')[-1],
+ 'method': test['method']
+ }
+
+ test_name = GetTestName(test, sep='.')
+ test_names.add(test_name)
+
+ unqualified_class_test_name = GetTestName(unqualified_class_test, sep='.')
+ test_names.add(unqualified_class_test_name)
+
+ unique_test_name = GetUniqueTestName(test, sep='.')
+ test_names.add(unique_test_name)
+
+ if test['is_junit4']:
+ junit4_test_name = GetTestNameWithoutParameterPostfix(test, sep='.')
+ test_names.add(junit4_test_name)
+
+ unqualified_junit4_test_name = \
+ GetTestNameWithoutParameterPostfix(unqualified_class_test, sep='.')
+ test_names.add(unqualified_junit4_test_name)
+ return test_names
+
+ def get_tests_from_names(tests, test_names, tests_to_names):
+ ''' Returns the tests for which the given names apply
+
+ Args:
+ tests: a list of tests. e.g. [
+ {'annotations": {}, 'class': 'com.example.TestA', 'method':'test1'},
+ {'annotations": {}, 'class': 'com.example.TestB', 'method':'test2'}]
+ test_names: a collection of names determining tests to return.
+
+ Return:
+ A list of tests that match the given test names
+ '''
+ filtered_tests = []
+ for t in tests:
+ current_test_names = tests_to_names[id(t)]
+
+ for current_test_name in current_test_names:
+ if current_test_name in test_names:
+ filtered_tests.append(t)
+ break
+
+ return filtered_tests
+
+  def remove_tests_from_names(tests, remove_test_names, tests_to_names):
+    ''' Returns the tests from the given list with given names removed
+
+    Args:
+      tests: a list of tests. e.g. [
+          {'annotations": {}, 'class': 'com.example.TestA', 'method':'test1'},
+          {'annotations": {}, 'class': 'com.example.TestB', 'method':'test2'}]
+      remove_test_names: a collection of names determining tests to remove.
+      tests_to_names: a dictionary of test ids to a collection of applicable
+          names for that test
+
+    Return:
+      A list of tests that don't match the given test names
+    '''
+    filtered_tests = []
+
+    for t in tests:
+      for name in tests_to_names[id(t)]:
+        if name in remove_test_names:
+          break
+      else:  # no break: none of this test's names were in the removal set
+        filtered_tests.append(t)
+    return filtered_tests
+
+ def gtests_filter(tests, combined_filter):
+ ''' Returns the tests after the filter_str has been applied
+
+ Args:
+ tests: a list of tests. e.g. [
+ {'annotations": {}, 'class': 'com.example.TestA', 'method':'test1'},
+ {'annotations": {}, 'class': 'com.example.TestB', 'method':'test2'}]
+ combined_filter: the filter string representing tests to exclude
+
+ Return:
+ A list of tests that should still be included after the filter_str is
+ applied to their names
+ '''
+
+ if not combined_filter:
+ return tests
+
+ # Collect all test names
+ all_test_names = set()
+ tests_to_names = {}
+ for t in tests:
+ tests_to_names[id(t)] = get_test_names(t)
+ for name in tests_to_names[id(t)]:
+ all_test_names.add(name)
+
+ pattern_groups = filter_str.split('-')
+ negative_pattern = pattern_groups[1] if len(pattern_groups) > 1 else None
+ positive_pattern = pattern_groups[0]
+
+ if positive_pattern:
+ # Only use the test names that match the positive pattern
+ positive_test_names = test_names_from_pattern(positive_pattern,
+ all_test_names)
+ tests = get_tests_from_names(tests, positive_test_names, tests_to_names)
+
+ if negative_pattern:
+ # Remove any test the negative filter matches
+ remove_names = test_names_from_pattern(negative_pattern, all_test_names)
+ tests = remove_tests_from_names(tests, remove_names, tests_to_names)
+
+ return tests
+
+ def annotation_filter(all_annotations):
+ if not annotations:
+ return True
+ return any_annotation_matches(annotations, all_annotations)
+
+ def excluded_annotation_filter(all_annotations):
+ if not excluded_annotations:
+ return True
+ return not any_annotation_matches(excluded_annotations,
+ all_annotations)
+
+ def any_annotation_matches(filter_annotations, all_annotations):
+ return any(
+ ak in all_annotations
+ and annotation_value_matches(av, all_annotations[ak])
+ for ak, av in filter_annotations)
+
+ def annotation_value_matches(filter_av, av):
+ if filter_av is None:
+ return True
+ elif isinstance(av, dict):
+ tav_from_dict = av['value']
+ # If tav_from_dict is an int, the 'in' operator breaks, so convert
+ # filter_av and manually compare. See https://crbug.com/1019707
+ if isinstance(tav_from_dict, int):
+ return int(filter_av) == tav_from_dict
+ else:
+ return filter_av in tav_from_dict
+ elif isinstance(av, list):
+ return filter_av in av
+ return filter_av == av
+
+  return_tests = []
+  for t in gtests_filter(tests, filter_str):
+    # Enforce that all tests declare their size.
+    if not any(a in _VALID_ANNOTATIONS for a in t['annotations']):
+      raise MissingSizeAnnotationError(GetTestName(t))
+
+    # Keep only tests that match the requested annotations and are not
+    # ruled out by the excluded annotations.
+    if (not annotation_filter(t['annotations'])
+        or not excluded_annotation_filter(t['annotations'])):
+      continue
+    return_tests.append(t)
+
+  return return_tests
+
+# TODO(yolandyan): remove this once the tests are converted to junit4
+def GetAllTestsFromJar(test_jar):
+ pickle_path = '%s-proguard.pickle' % test_jar
+ try:
+ tests = GetTestsFromPickle(pickle_path, os.path.getmtime(test_jar))
+ except TestListPickleException as e:
+ logging.info('Could not get tests from pickle: %s', e)
+ logging.info('Getting tests from JAR via proguard.')
+ tests = _GetTestsFromProguard(test_jar)
+ SaveTestsToPickle(pickle_path, tests)
+ return tests
+
+
+def GetAllTestsFromApk(test_apk):
+ pickle_path = '%s-dexdump.pickle' % test_apk
+ try:
+ tests = GetTestsFromPickle(pickle_path, os.path.getmtime(test_apk))
+ except TestListPickleException as e:
+ logging.info('Could not get tests from pickle: %s', e)
+ logging.info('Getting tests from dex via dexdump.')
+ tests = _GetTestsFromDexdump(test_apk)
+ SaveTestsToPickle(pickle_path, tests)
+ return tests
+
+def GetTestsFromPickle(pickle_path, test_mtime):
+ if not os.path.exists(pickle_path):
+ raise TestListPickleException('%s does not exist.' % pickle_path)
+ if os.path.getmtime(pickle_path) <= test_mtime:
+ raise TestListPickleException('File is stale: %s' % pickle_path)
+
+ with open(pickle_path, 'r') as f:
+ pickle_data = pickle.load(f)
+ if pickle_data['VERSION'] != _PICKLE_FORMAT_VERSION:
+ raise TestListPickleException('PICKLE_FORMAT_VERSION has changed.')
+ return pickle_data['TEST_METHODS']
+
+
+# TODO(yolandyan): remove this once the test listing from java runner lands
+@instrumentation_tracing.no_tracing
+def _GetTestsFromProguard(jar_path):
+  # Derives the test list from a proguard dump of |jar_path|: classes whose
+  # name ends in 'Test' are test classes, and their methods starting with
+  # 'test' are test methods. Class annotations are merged down from any
+  # superclass found in the same dump.
+  p = proguard.Dump(jar_path)
+  class_lookup = dict((c['class'], c) for c in p['classes'])
+
+  def is_test_class(c):
+    return c['class'].endswith('Test')
+
+  def is_test_method(m):
+    return m['method'].startswith('test')
+
+  def recursive_class_annotations(c):
+    # Walks up the superclass chain first so that a subclass's annotations
+    # override those inherited from its ancestors.
+    s = c['superclass']
+    if s in class_lookup:
+      a = recursive_class_annotations(class_lookup[s])
+    else:
+      a = {}
+    a.update(c['annotations'])
+    return a
+
+  def stripped_test_class(c):
+    return {
+        'class': c['class'],
+        'annotations': recursive_class_annotations(c),
+        'methods': [m for m in c['methods'] if is_test_method(m)],
+        'superclass': c['superclass'],
+    }
+
+  return [stripped_test_class(c) for c in p['classes']
+          if is_test_class(c)]
+
+
+def _GetTestsFromDexdump(test_apk):
+ dex_dumps = dexdump.Dump(test_apk)
+ tests = []
+
+ def get_test_methods(methods):
+ return [
+ {
+ 'method': m,
+ # No annotation info is available from dexdump.
+ # Set MediumTest annotation for default.
+ 'annotations': {'MediumTest': None},
+ } for m in methods if m.startswith('test')]
+
+ for dump in dex_dumps:
+ for package_name, package_info in six.iteritems(dump):
+ for class_name, class_info in six.iteritems(package_info['classes']):
+ if class_name.endswith('Test'):
+ tests.append({
+ 'class': '%s.%s' % (package_name, class_name),
+ 'annotations': {},
+ 'methods': get_test_methods(class_info['methods']),
+ 'superclass': class_info['superclass'],
+ })
+ return tests
+
+def SaveTestsToPickle(pickle_path, tests):
+ pickle_data = {
+ 'VERSION': _PICKLE_FORMAT_VERSION,
+ 'TEST_METHODS': tests,
+ }
+ with open(pickle_path, 'wb') as pickle_file:
+ pickle.dump(pickle_data, pickle_file)
+
+
+class MissingJUnit4RunnerException(test_exception.TestException):
+ """Raised when JUnit4 runner is not provided or specified in apk manifest"""
+
+ def __init__(self):
+ super(MissingJUnit4RunnerException, self).__init__(
+ 'JUnit4 runner is not provided or specified in test apk manifest.')
+
+
+def GetTestName(test, sep='#'):
+ """Gets the name of the given test.
+
+ Note that this may return the same name for more than one test, e.g. if a
+ test is being run multiple times with different parameters.
+
+ Args:
+ test: the instrumentation test dict.
+ sep: the character(s) that should join the class name and the method name.
+ Returns:
+ The test name as a string.
+ """
+ test_name = '%s%s%s' % (test['class'], sep, test['method'])
+ assert ' *-:' not in test_name, (
+ 'The test name must not contain any of the characters in " *-:". See '
+ 'https://crbug.com/912199')
+ return test_name
+
+
+def GetTestNameWithoutParameterPostfix(
+ test, sep='#', parameterization_sep='__'):
+ """Gets the name of the given JUnit4 test without parameter postfix.
+
+ For most WebView JUnit4 javatests, each test is parameterizatized with
+ "__sandboxed_mode" to run in both non-sandboxed mode and sandboxed mode.
+
+ This function returns the name of the test without parameterization
+ so test filters can match both parameterized and non-parameterized tests.
+
+ Args:
+ test: the instrumentation test dict.
+ sep: the character(s) that should join the class name and the method name.
+ parameterization_sep: the character(s) that seperate method name and method
+ parameterization postfix.
+ Returns:
+ The test name without parameter postfix as a string.
+ """
+ name = GetTestName(test, sep=sep)
+ return name.split(parameterization_sep)[0]
+
+
+def GetUniqueTestName(test, sep='#'):
+ """Gets the unique name of the given test.
+
+ This will include text to disambiguate between tests for which GetTestName
+ would return the same name.
+
+ Args:
+ test: the instrumentation test dict.
+ sep: the character(s) that should join the class name and the method name.
+ Returns:
+ The unique test name as a string.
+ """
+ display_name = GetTestName(test, sep=sep)
+ if test.get('flags', [None])[0]:
+ sanitized_flags = [x.replace('-', '_') for x in test['flags']]
+ display_name = '%s_with_%s' % (display_name, '_'.join(sanitized_flags))
+
+ assert ' *-:' not in display_name, (
+ 'The test name must not contain any of the characters in " *-:". See '
+ 'https://crbug.com/912199')
+
+ return display_name
+
+
+class InstrumentationTestInstance(test_instance.TestInstance):
+
+  def __init__(self, args, data_deps_delegate, error_func):
+    """Parses |args| into the attributes this test instance exposes.
+
+    Each attribute group is defaulted first and then populated by its
+    dedicated _initialize* helper; the helpers run in dependency order
+    (e.g. the test filter must exist before annotations are chosen).
+
+    Args:
+      args: the parsed command-line argument namespace.
+      data_deps_delegate: callable mapping the runtime deps path to a list
+          of data dependencies; invoked lazily in SetUp().
+      error_func: called with a message on fatal configuration errors.
+    """
+    super(InstrumentationTestInstance, self).__init__()
+
+    self._additional_apks = []
+    self._apk_under_test = None
+    self._apk_under_test_incremental_install_json = None
+    self._modules = None
+    self._fake_modules = None
+    self._additional_locales = None
+    self._package_info = None
+    self._suite = None
+    self._test_apk = None
+    self._test_apk_incremental_install_json = None
+    self._test_jar = None
+    self._test_package = None
+    self._junit3_runner_class = None
+    self._junit4_runner_class = None
+    self._junit4_runner_supports_listing = None
+    self._test_support_apk = None
+    self._initializeApkAttributes(args, error_func)
+
+    self._data_deps = None
+    self._data_deps_delegate = None
+    self._runtime_deps_path = None
+    self._initializeDataDependencyAttributes(args, data_deps_delegate)
+
+    self._annotations = None
+    self._excluded_annotations = None
+    self._test_filter = None
+    self._initializeTestFilterAttributes(args)
+
+    self._flags = None
+    self._use_apk_under_test_flags_file = False
+    self._initializeFlagAttributes(args)
+
+    self._screenshot_dir = None
+    self._timeout_scale = None
+    self._wait_for_java_debugger = None
+    self._initializeTestControlAttributes(args)
+
+    self._coverage_directory = None
+    self._initializeTestCoverageAttributes(args)
+
+    self._store_tombstones = False
+    self._symbolizer = None
+    self._enable_breakpad_dump = False
+    self._enable_java_deobfuscation = False
+    self._deobfuscator = None
+    self._initializeLogAttributes(args)
+
+    self._edit_shared_prefs = []
+    self._initializeEditPrefsAttributes(args)
+
+    self._replace_system_package = None
+    self._initializeReplaceSystemPackageAttributes(args)
+
+    self._system_packages_to_remove = None
+    self._initializeSystemPackagesToRemoveAttributes(args)
+
+    self._use_webview_provider = None
+    self._initializeUseWebviewProviderAttributes(args)
+
+    self._skia_gold_properties = None
+    self._initializeSkiaGoldAttributes(args)
+
+    self._test_launcher_batch_limit = None
+    self._initializeTestLauncherAttributes(args)
+
+    self._wpr_enable_record = args.wpr_enable_record
+
+    self._external_shard_index = args.test_launcher_shard_index
+    self._total_external_shards = args.test_launcher_total_shards
+
+ def _initializeApkAttributes(self, args, error_func):
+ if args.apk_under_test:
+ apk_under_test_path = args.apk_under_test
+ if (not args.apk_under_test.endswith('.apk')
+ and not args.apk_under_test.endswith('.apks')):
+ apk_under_test_path = os.path.join(
+ constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
+ '%s.apk' % args.apk_under_test)
+
+ # TODO(jbudorick): Move the realpath up to the argument parser once
+ # APK-by-name is no longer supported.
+ apk_under_test_path = os.path.realpath(apk_under_test_path)
+
+ if not os.path.exists(apk_under_test_path):
+ error_func('Unable to find APK under test: %s' % apk_under_test_path)
+
+ self._apk_under_test = apk_helper.ToHelper(apk_under_test_path)
+
+ test_apk_path = args.test_apk
+ if not os.path.exists(test_apk_path):
+ test_apk_path = os.path.join(
+ constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
+ '%s.apk' % args.test_apk)
+ # TODO(jbudorick): Move the realpath up to the argument parser once
+ # APK-by-name is no longer supported.
+ test_apk_path = os.path.realpath(test_apk_path)
+
+ if not os.path.exists(test_apk_path):
+ error_func('Unable to find test APK: %s' % test_apk_path)
+
+ self._test_apk = apk_helper.ToHelper(test_apk_path)
+ self._suite = os.path.splitext(os.path.basename(args.test_apk))[0]
+
+ self._apk_under_test_incremental_install_json = (
+ args.apk_under_test_incremental_install_json)
+ self._test_apk_incremental_install_json = (
+ args.test_apk_incremental_install_json)
+
+ if self._test_apk_incremental_install_json:
+ assert self._suite.endswith('_incremental')
+ self._suite = self._suite[:-len('_incremental')]
+
+ self._modules = args.modules
+ self._fake_modules = args.fake_modules
+ self._additional_locales = args.additional_locales
+
+ self._test_jar = args.test_jar
+ self._test_support_apk = apk_helper.ToHelper(os.path.join(
+ constants.GetOutDirectory(), constants.SDK_BUILD_TEST_JAVALIB_DIR,
+ '%sSupport.apk' % self._suite))
+
+ if not self._test_jar:
+ logging.warning('Test jar not specified. Test runner will not have '
+ 'Java annotation info available. May not handle test '
+ 'timeouts correctly.')
+ elif not os.path.exists(self._test_jar):
+ error_func('Unable to find test JAR: %s' % self._test_jar)
+
+ self._test_package = self._test_apk.GetPackageName()
+ all_instrumentations = self._test_apk.GetAllInstrumentations()
+ all_junit3_runner_classes = [
+ x for x in all_instrumentations if ('0xffffffff' in x.get(
+ 'chromium-junit3', ''))]
+ all_junit4_runner_classes = [
+ x for x in all_instrumentations if ('0xffffffff' not in x.get(
+ 'chromium-junit3', ''))]
+
+ if len(all_junit3_runner_classes) > 1:
+ logging.warning('This test apk has more than one JUnit3 instrumentation')
+ if len(all_junit4_runner_classes) > 1:
+ logging.warning('This test apk has more than one JUnit4 instrumentation')
+
+ self._junit3_runner_class = (
+ all_junit3_runner_classes[0]['android:name']
+ if all_junit3_runner_classes else self.test_apk.GetInstrumentationName())
+
+ self._junit4_runner_class = (
+ all_junit4_runner_classes[0]['android:name']
+ if all_junit4_runner_classes else None)
+
+ if self._junit4_runner_class:
+ if self._test_apk_incremental_install_json:
+ self._junit4_runner_supports_listing = next(
+ (True for x in self._test_apk.GetAllMetadata()
+ if 'real-instr' in x[0] and x[1] in _TEST_LIST_JUNIT4_RUNNERS),
+ False)
+ else:
+ self._junit4_runner_supports_listing = (
+ self._junit4_runner_class in _TEST_LIST_JUNIT4_RUNNERS)
+
+ self._package_info = None
+ if self._apk_under_test:
+ package_under_test = self._apk_under_test.GetPackageName()
+ for package_info in six.itervalues(constants.PACKAGE_INFO):
+ if package_under_test == package_info.package:
+ self._package_info = package_info
+ break
+ if not self._package_info:
+ logging.warning(("Unable to find package info for %s. " +
+ "(This may just mean that the test package is " +
+ "currently being installed.)"),
+ self._test_package)
+
+ for apk in args.additional_apks:
+ if not os.path.exists(apk):
+ error_func('Unable to find additional APK: %s' % apk)
+ self._additional_apks = (
+ [apk_helper.ToHelper(x) for x in args.additional_apks])
+
+ def _initializeDataDependencyAttributes(self, args, data_deps_delegate):
+ self._data_deps = []
+ self._data_deps_delegate = data_deps_delegate
+ self._runtime_deps_path = args.runtime_deps_path
+
+ if not self._runtime_deps_path:
+ logging.warning('No data dependencies will be pushed.')
+
+ def _initializeTestFilterAttributes(self, args):
+ self._test_filter = test_filter.InitializeFilterFromArgs(args)
+
+ def annotation_element(a):
+ a = a.split('=', 1)
+ return (a[0], a[1] if len(a) == 2 else None)
+
+ if args.annotation_str:
+ self._annotations = [
+ annotation_element(a) for a in args.annotation_str.split(',')]
+ elif not self._test_filter:
+ self._annotations = [
+ annotation_element(a) for a in _DEFAULT_ANNOTATIONS]
+ else:
+ self._annotations = []
+
+ if args.exclude_annotation_str:
+ self._excluded_annotations = [
+ annotation_element(a) for a in args.exclude_annotation_str.split(',')]
+ else:
+ self._excluded_annotations = []
+
+ requested_annotations = set(a[0] for a in self._annotations)
+ if not args.run_disabled:
+ self._excluded_annotations.extend(
+ annotation_element(a) for a in _EXCLUDE_UNLESS_REQUESTED_ANNOTATIONS
+ if a not in requested_annotations)
+
+ def _initializeFlagAttributes(self, args):
+ self._use_apk_under_test_flags_file = args.use_apk_under_test_flags_file
+ self._flags = ['--enable-test-intents']
+ if args.command_line_flags:
+ self._flags.extend(args.command_line_flags)
+ if args.device_flags_file:
+ with open(args.device_flags_file) as device_flags_file:
+ stripped_lines = (l.strip() for l in device_flags_file)
+ self._flags.extend(flag for flag in stripped_lines if flag)
+ if args.strict_mode and args.strict_mode != 'off' and (
+ # TODO(yliuyliu): Turn on strict mode for coverage once
+ # crbug/1006397 is fixed.
+ not args.coverage_dir):
+ self._flags.append('--strict-mode=' + args.strict_mode)
+
+ def _initializeTestControlAttributes(self, args):
+ self._screenshot_dir = args.screenshot_dir
+ self._timeout_scale = args.timeout_scale or 1
+ self._wait_for_java_debugger = args.wait_for_java_debugger
+
+ def _initializeTestCoverageAttributes(self, args):
+ self._coverage_directory = args.coverage_dir
+
+ def _initializeLogAttributes(self, args):
+ self._enable_breakpad_dump = args.enable_breakpad_dump
+ self._enable_java_deobfuscation = args.enable_java_deobfuscation
+ self._store_tombstones = args.store_tombstones
+ self._symbolizer = stack_symbolizer.Symbolizer(
+ self.apk_under_test.path if self.apk_under_test else None)
+
+ def _initializeEditPrefsAttributes(self, args):
+ if not hasattr(args, 'shared_prefs_file') or not args.shared_prefs_file:
+ return
+ if not isinstance(args.shared_prefs_file, str):
+ logging.warning("Given non-string for a filepath")
+ return
+ self._edit_shared_prefs = shared_preference_utils.ExtractSettingsFromJson(
+ args.shared_prefs_file)
+
+ def _initializeReplaceSystemPackageAttributes(self, args):
+ if (not hasattr(args, 'replace_system_package')
+ or not args.replace_system_package):
+ return
+ self._replace_system_package = args.replace_system_package
+
+ def _initializeSystemPackagesToRemoveAttributes(self, args):
+ if (not hasattr(args, 'system_packages_to_remove')
+ or not args.system_packages_to_remove):
+ return
+ self._system_packages_to_remove = args.system_packages_to_remove
+
+ def _initializeUseWebviewProviderAttributes(self, args):
+ if (not hasattr(args, 'use_webview_provider')
+ or not args.use_webview_provider):
+ return
+ self._use_webview_provider = args.use_webview_provider
+
+ def _initializeSkiaGoldAttributes(self, args):
+ self._skia_gold_properties = gold_utils.AndroidSkiaGoldProperties(args)
+
+  def _initializeTestLauncherAttributes(self, args):
+    # Only some entry points define this argument, hence the hasattr check.
+    if hasattr(args, 'test_launcher_batch_limit'):
+      self._test_launcher_batch_limit = args.test_launcher_batch_limit
+
+  # Read-only accessors over the attributes parsed from |args| in __init__.
+  @property
+  def additional_apks(self):
+    return self._additional_apks
+
+  @property
+  def apk_under_test(self):
+    return self._apk_under_test
+
+  @property
+  def apk_under_test_incremental_install_json(self):
+    return self._apk_under_test_incremental_install_json
+
+  @property
+  def modules(self):
+    return self._modules
+
+  @property
+  def fake_modules(self):
+    return self._fake_modules
+
+  @property
+  def additional_locales(self):
+    return self._additional_locales
+
+  @property
+  def coverage_directory(self):
+    return self._coverage_directory
+
+  @property
+  def edit_shared_prefs(self):
+    return self._edit_shared_prefs
+
+  @property
+  def enable_breakpad_dump(self):
+    return self._enable_breakpad_dump
+
+  @property
+  def external_shard_index(self):
+    return self._external_shard_index
+
+  @property
+  def flags(self):
+    return self._flags
+
+  @property
+  def junit3_runner_class(self):
+    return self._junit3_runner_class
+
+  @property
+  def junit4_runner_class(self):
+    return self._junit4_runner_class
+
+  @property
+  def junit4_runner_supports_listing(self):
+    return self._junit4_runner_supports_listing
+
+  @property
+  def package_info(self):
+    return self._package_info
+
+  @property
+  def replace_system_package(self):
+    return self._replace_system_package
+
+  @property
+  def use_webview_provider(self):
+    return self._use_webview_provider
+
+  @property
+  def screenshot_dir(self):
+    return self._screenshot_dir
+
+  @property
+  def skia_gold_properties(self):
+    return self._skia_gold_properties
+
+  @property
+  def store_tombstones(self):
+    return self._store_tombstones
+
+  @property
+  def suite(self):
+    return self._suite
+
+  @property
+  def symbolizer(self):
+    return self._symbolizer
+
+  @property
+  def system_packages_to_remove(self):
+    return self._system_packages_to_remove
+
+  @property
+  def test_apk(self):
+    return self._test_apk
+
+  @property
+  def test_apk_incremental_install_json(self):
+    return self._test_apk_incremental_install_json
+
+  @property
+  def test_filter(self):
+    return self._test_filter
+
+  @property
+  def test_jar(self):
+    return self._test_jar
+
+  @property
+  def test_launcher_batch_limit(self):
+    return self._test_launcher_batch_limit
+
+  @property
+  def test_support_apk(self):
+    return self._test_support_apk
+
+  @property
+  def test_package(self):
+    return self._test_package
+
+  @property
+  def timeout_scale(self):
+    return self._timeout_scale
+
+  @property
+  def total_external_shards(self):
+    return self._total_external_shards
+
+  @property
+  def use_apk_under_test_flags_file(self):
+    return self._use_apk_under_test_flags_file
+
+  @property
+  def wait_for_java_debugger(self):
+    return self._wait_for_java_debugger
+
+  @property
+  def wpr_record_mode(self):
+    return self._wpr_enable_record
+
+  @property
+  def wpr_replay_mode(self):
+    # Replay is simply the absence of record mode.
+    return not self._wpr_enable_record
+
+  #override
+  def TestType(self):
+    # Identifies this instance type to the test-run machinery.
+    return 'instrumentation'
+
+ #override
+ def GetPreferredAbis(self):
+ # We could alternatively take the intersection of what they all support,
+ # but it should never be the case that they support different things.
+ apks = [self._test_apk, self._apk_under_test] + self._additional_apks
+ for apk in apks:
+ if apk:
+ ret = apk.GetAbis()
+ if ret:
+ return ret
+ return []
+
+  #override
+  def SetUp(self):
+    # Resolve data dependencies now via the delegate provided at __init__.
+    self._data_deps.extend(
+        self._data_deps_delegate(self._runtime_deps_path))
+    if self._enable_java_deobfuscation:
+      # The .mapping file is expected alongside the test APK.
+      self._deobfuscator = deobfuscator.DeobfuscatorPool(
+          self.test_apk.path + '.mapping')
+
+  def GetDataDependencies(self):
+    # Populated by SetUp() via the data-deps delegate.
+    return self._data_deps
+
+ def GetTests(self):
+ if self.test_jar:
+ raw_tests = GetAllTestsFromJar(self.test_jar)
+ else:
+ raw_tests = GetAllTestsFromApk(self.test_apk.path)
+ return self.ProcessRawTests(raw_tests)
+
+ def MaybeDeobfuscateLines(self, lines):
+ if not self._deobfuscator:
+ return lines
+ return self._deobfuscator.TransformLines(lines)
+
+  def ProcessRawTests(self, raw_tests):
+    """Inflates, parameterizes, and filters |raw_tests|.
+
+    Args:
+      raw_tests: class-level test dicts, as produced by GetAllTestsFromJar
+          or GetAllTestsFromApk.
+
+    Returns:
+      The final list of per-method test dicts to run.
+
+    Raises:
+      MissingJUnit4RunnerException: if a JUnit4 test exists but no JUnit4
+          runner class was found in the manifest.
+    """
+    inflated_tests = self._ParameterizeTestsWithFlags(
+        self._InflateTests(raw_tests))
+    if self._junit4_runner_class is None and any(
+        t['is_junit4'] for t in inflated_tests):
+      raise MissingJUnit4RunnerException()
+    filtered_tests = FilterTests(
+        inflated_tests, self._test_filter, self._annotations,
+        self._excluded_annotations)
+    if self._test_filter and not filtered_tests:
+      # Help diagnose filters that matched nothing.
+      for t in inflated_tests:
+        logging.debug(' %s', GetUniqueTestName(t))
+      logging.warning('Unmatched Filter: %s', self._test_filter)
+    return filtered_tests
+
+ # pylint: disable=no-self-use
+ def _InflateTests(self, tests):
+ inflated_tests = []
+ for c in tests:
+ for m in c['methods']:
+ a = dict(c['annotations'])
+ a.update(m['annotations'])
+ inflated_tests.append({
+ 'class': c['class'],
+ 'method': m['method'],
+ 'annotations': a,
+ # TODO(https://crbug.com/1084729): Remove is_junit4.
+ 'is_junit4': True
+ })
+ return inflated_tests
+
+  def _ParameterizeTestsWithFlags(self, tests):
+    """Expands tests with command-line-flag parameterization annotations.
+
+    Each switch set found in a test's annotations yields one run of the
+    test: the first set is applied to the original test dict and each
+    further set produces a shallow copy with its own 'flags' entry.
+    """
+
+    def _checkParameterization(annotations):
+      # The two parameterization annotation types are mutually exclusive.
+      types = [
+          _PARAMETERIZED_COMMAND_LINE_FLAGS_SWITCHES,
+          _PARAMETERIZED_COMMAND_LINE_FLAGS,
+      ]
+      if types[0] in annotations and types[1] in annotations:
+        raise CommandLineParameterizationException(
+            'Multiple command-line parameterization types: {}.'.format(
+                ', '.join(types)))
+
+    def _switchesToFlags(switches):
+      # Drop empty entries and prepend '--'.
+      return ['--{}'.format(s) for s in switches if s]
+
+    def _annotationToSwitches(clazz, methods):
+      # Returns a list of switch lists, one per parameterization.
+      if clazz == _PARAMETERIZED_COMMAND_LINE_FLAGS_SWITCHES:
+        return [methods['value']]
+      elif clazz == _PARAMETERIZED_COMMAND_LINE_FLAGS:
+        list_of_switches = []
+        # The loop variables deliberately shadow the parameters here; each
+        # nested annotation is recursed into.
+        for annotation in methods['value']:
+          for clazz, methods in six.iteritems(annotation):
+            list_of_switches += _annotationToSwitches(clazz, methods)
+        return list_of_switches
+      else:
+        return []
+
+    def _setTestFlags(test, flags):
+      if flags:
+        test['flags'] = flags
+      elif 'flags' in test:
+        del test['flags']
+
+    new_tests = []
+    for t in tests:
+      annotations = t['annotations']
+      list_of_switches = []
+      _checkParameterization(annotations)
+      if _SKIP_PARAMETERIZATION not in annotations:
+        for clazz, methods in six.iteritems(annotations):
+          list_of_switches += _annotationToSwitches(clazz, methods)
+      if list_of_switches:
+        # First switch set mutates the original test; the rest get shallow
+        # copies (copy.copy), so nested dicts remain shared.
+        _setTestFlags(t, _switchesToFlags(list_of_switches[0]))
+        for p in list_of_switches[1:]:
+          parameterized_t = copy.copy(t)
+          _setTestFlags(parameterized_t, _switchesToFlags(p))
+          new_tests.append(parameterized_t)
+    return tests + new_tests
+
+  @staticmethod
+  def ParseAmInstrumentRawOutput(raw_output):
+    # Thin static wrapper over the module-level parser of the same name.
+    return ParseAmInstrumentRawOutput(raw_output)
+
+  @staticmethod
+  def GenerateTestResults(result_code, result_bundle, statuses, duration_ms,
+                          device_abi, symbolizer):
+    # Thin static wrapper over the module-level function of the same name.
+    return GenerateTestResults(result_code, result_bundle, statuses,
+                               duration_ms, device_abi, symbolizer)
+
+  #override
+  def TearDown(self):
+    # Release the symbolizer and, if SetUp created one, the deobfuscator.
+    self.symbolizer.CleanUp()
+    if self._deobfuscator:
+      self._deobfuscator.Close()
+      self._deobfuscator = None
diff --git a/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_test_instance_test.py b/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_test_instance_test.py
new file mode 100755
index 0000000000..f3b0d4f6e6
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/instrumentation/instrumentation_test_instance_test.py
@@ -0,0 +1,1250 @@
+#!/usr/bin/env vpython3
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for instrumentation_test_instance."""
+
+# pylint: disable=protected-access
+
+
+import collections
+import tempfile
+import unittest
+
+from six.moves import range # pylint: disable=redefined-builtin
+from pylib.base import base_test_result
+from pylib.instrumentation import instrumentation_test_instance
+
+import mock # pylint: disable=import-error
+
+_INSTRUMENTATION_TEST_INSTANCE_PATH = (
+ 'pylib.instrumentation.instrumentation_test_instance.%s')
+
+class InstrumentationTestInstanceTest(unittest.TestCase):
+
+ def setUp(self):
+ options = mock.Mock()
+ options.tool = ''
+
+ @staticmethod
+ def createTestInstance():
+ c = _INSTRUMENTATION_TEST_INSTANCE_PATH % 'InstrumentationTestInstance'
+ # yapf: disable
+ with mock.patch('%s._initializeApkAttributes' % c), (
+ mock.patch('%s._initializeDataDependencyAttributes' % c)), (
+ mock.patch('%s._initializeTestFilterAttributes' %c)), (
+ mock.patch('%s._initializeFlagAttributes' % c)), (
+ mock.patch('%s._initializeTestControlAttributes' % c)), (
+ mock.patch('%s._initializeTestCoverageAttributes' % c)), (
+ mock.patch('%s._initializeSkiaGoldAttributes' % c)):
+ # yapf: enable
+ return instrumentation_test_instance.InstrumentationTestInstance(
+ mock.MagicMock(), mock.MagicMock(), lambda s: None)
+
+ _FlagAttributesArgs = collections.namedtuple('_FlagAttributesArgs', [
+ 'command_line_flags', 'device_flags_file', 'strict_mode',
+ 'use_apk_under_test_flags_file', 'coverage_dir'
+ ])
+
+ def createFlagAttributesArgs(self,
+ command_line_flags=None,
+ device_flags_file=None,
+ strict_mode=None,
+ use_apk_under_test_flags_file=False,
+ coverage_dir=None):
+ return self._FlagAttributesArgs(command_line_flags, device_flags_file,
+ strict_mode, use_apk_under_test_flags_file,
+ coverage_dir)
+
+ def test_initializeFlagAttributes_commandLineFlags(self):
+ o = self.createTestInstance()
+ args = self.createFlagAttributesArgs(command_line_flags=['--foo', '--bar'])
+ o._initializeFlagAttributes(args)
+ self.assertEqual(o._flags, ['--enable-test-intents', '--foo', '--bar'])
+
+ def test_initializeFlagAttributes_deviceFlagsFile(self):
+ o = self.createTestInstance()
+ with tempfile.NamedTemporaryFile(mode='w') as flags_file:
+ flags_file.write('\n'.join(['--foo', '--bar']))
+ flags_file.flush()
+
+ args = self.createFlagAttributesArgs(device_flags_file=flags_file.name)
+ o._initializeFlagAttributes(args)
+ self.assertEqual(o._flags, ['--enable-test-intents', '--foo', '--bar'])
+
+ def test_initializeFlagAttributes_strictModeOn(self):
+ o = self.createTestInstance()
+ args = self.createFlagAttributesArgs(strict_mode='on')
+ o._initializeFlagAttributes(args)
+ self.assertEqual(o._flags, ['--enable-test-intents', '--strict-mode=on'])
+
+ def test_initializeFlagAttributes_strictModeOn_coverageOn(self):
+ o = self.createTestInstance()
+ args = self.createFlagAttributesArgs(
+ strict_mode='on', coverage_dir='/coverage/dir')
+ o._initializeFlagAttributes(args)
+ self.assertEqual(o._flags, ['--enable-test-intents'])
+
+ def test_initializeFlagAttributes_strictModeOff(self):
+ o = self.createTestInstance()
+ args = self.createFlagAttributesArgs(strict_mode='off')
+ o._initializeFlagAttributes(args)
+ self.assertEqual(o._flags, ['--enable-test-intents'])
+
+ def testGetTests_noFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {'MediumTest': None},
+ 'method': 'testMethod2',
+ },
+ ],
+ },
+ {
+ 'annotations': {'Feature': {'value': ['Bar']}},
+ 'class': 'org.chromium.test.SampleTest2',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Foo']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'method': 'testMethod1',
+ 'is_junit4': True,
+ },
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Foo']},
+ 'MediumTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'method': 'testMethod2',
+ 'is_junit4': True,
+ },
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Bar']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest2',
+ 'method': 'testMethod1',
+ 'is_junit4': True,
+ },
+ ]
+
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_simpleGtestFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {'MediumTest': None},
+ 'method': 'testMethod2',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Foo']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ ]
+
+ o._test_filter = 'org.chromium.test.SampleTest.testMethod1'
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_simpleGtestPositiveAndNegativeFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [{
+ 'annotations': {
+ 'Feature': {
+ 'value': ['Foo']
+ }
+ },
+ 'class':
+ 'org.chromium.test.SampleTest',
+ 'superclass':
+ 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {
+ 'SmallTest': None
+ },
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {
+ 'MediumTest': None
+ },
+ 'method': 'testMethod2',
+ },
+ ],
+ }, {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['Foo']
+ }
+ },
+ 'class':
+ 'org.chromium.test.SampleTest2',
+ 'superclass':
+ 'java.lang.Object',
+ 'methods': [{
+ 'annotations': {
+ 'SmallTest': None
+ },
+ 'method': 'testMethod1',
+ }],
+ }]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['Foo']
+ },
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ ]
+
+ o._test_filter = \
+ 'org.chromium.test.SampleTest.*-org.chromium.test.SampleTest.testMethod2'
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_simpleGtestUnqualifiedNameFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {'MediumTest': None},
+ 'method': 'testMethod2',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Foo']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ ]
+
+ o._test_filter = 'SampleTest.testMethod1'
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_parameterizedTestGtestFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1__sandboxed_mode',
+ },
+ ],
+ },
+ {
+ 'annotations': {'Feature': {'value': ['Bar']}},
+ 'class': 'org.chromium.test.SampleTest2',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Foo']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'method': 'testMethod1',
+ 'is_junit4': True,
+ },
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Foo']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'method': 'testMethod1__sandboxed_mode',
+ 'is_junit4': True,
+ },
+ ]
+
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ o._test_filter = 'org.chromium.test.SampleTest.testMethod1'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_wildcardGtestFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {'MediumTest': None},
+ 'method': 'testMethod2',
+ },
+ ],
+ },
+ {
+ 'annotations': {'Feature': {'value': ['Bar']}},
+ 'class': 'org.chromium.test.SampleTest2',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Bar']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest2',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ ]
+
+ o._test_filter = 'org.chromium.test.SampleTest2.*'
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_negativeGtestFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {'MediumTest': None},
+ 'method': 'testMethod2',
+ },
+ ],
+ },
+ {
+ 'annotations': {'Feature': {'value': ['Bar']}},
+ 'class': 'org.chromium.test.SampleTest2',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Foo']},
+ 'MediumTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod2',
+ },
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Bar']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest2',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ ]
+
+ o._test_filter = '*-org.chromium.test.SampleTest.testMethod1'
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_annotationFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {'MediumTest': None},
+ 'method': 'testMethod2',
+ },
+ ],
+ },
+ {
+ 'annotations': {'Feature': {'value': ['Bar']}},
+ 'class': 'org.chromium.test.SampleTest2',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Foo']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Bar']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest2',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ ]
+
+ o._annotations = [('SmallTest', None)]
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_excludedAnnotationFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'junit.framework.TestCase',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {'MediumTest': None},
+ 'method': 'testMethod2',
+ },
+ ],
+ },
+ {
+ 'annotations': {'Feature': {'value': ['Bar']}},
+ 'class': 'org.chromium.test.SampleTest2',
+ 'superclass': 'junit.framework.TestCase',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['Foo']
+ },
+ 'MediumTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod2',
+ },
+ ]
+
+ o._excluded_annotations = [('SmallTest', None)]
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_annotationSimpleValueFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'junit.framework.TestCase',
+ 'methods': [
+ {
+ 'annotations': {
+ 'SmallTest': None,
+ 'TestValue': '1',
+ },
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {
+ 'MediumTest': None,
+ 'TestValue': '2',
+ },
+ 'method': 'testMethod2',
+ },
+ ],
+ },
+ {
+ 'annotations': {'Feature': {'value': ['Bar']}},
+ 'class': 'org.chromium.test.SampleTest2',
+ 'superclass': 'junit.framework.TestCase',
+ 'methods': [
+ {
+ 'annotations': {
+ 'SmallTest': None,
+ 'TestValue': '3',
+ },
+ 'method': 'testMethod1',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['Foo']
+ },
+ 'SmallTest': None,
+ 'TestValue': '1',
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ ]
+
+ o._annotations = [('TestValue', '1')]
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTests_annotationDictValueFilter(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {'MediumTest': None},
+ 'method': 'testMethod2',
+ },
+ ],
+ },
+ {
+ 'annotations': {'Feature': {'value': ['Bar']}},
+ 'class': 'org.chromium.test.SampleTest2',
+ 'superclass': 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Bar']},
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest2',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ ]
+
+ o._annotations = [('Feature', 'Bar')]
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGetTestName(self):
+ test = {
+ 'annotations': {
+ 'RunWith': {'value': 'class J4Runner'},
+ 'SmallTest': {},
+ 'Test': {'expected': 'class org.junit.Test$None',
+ 'timeout': '0'},
+ 'UiThreadTest': {}},
+ 'class': 'org.chromium.TestA',
+ 'is_junit4': True,
+ 'method': 'testSimple'}
+ unqualified_class_test = {
+ 'class': test['class'].split('.')[-1],
+ 'method': test['method']
+ }
+
+ self.assertEqual(instrumentation_test_instance.GetTestName(test, sep='.'),
+ 'org.chromium.TestA.testSimple')
+ self.assertEqual(
+ instrumentation_test_instance.GetTestName(unqualified_class_test,
+ sep='.'), 'TestA.testSimple')
+
+ def testGetUniqueTestName(self):
+ test = {
+ 'annotations': {
+ 'RunWith': {'value': 'class J4Runner'},
+ 'SmallTest': {},
+ 'Test': {'expected': 'class org.junit.Test$None', 'timeout': '0'},
+ 'UiThreadTest': {}},
+ 'class': 'org.chromium.TestA',
+ 'flags': ['enable_features=abc'],
+ 'is_junit4': True,
+ 'method': 'testSimple'}
+ self.assertEqual(
+ instrumentation_test_instance.GetUniqueTestName(test, sep='.'),
+ 'org.chromium.TestA.testSimple_with_enable_features=abc')
+
+ def testGetTestNameWithoutParameterPostfix(self):
+ test = {
+ 'annotations': {
+ 'RunWith': {'value': 'class J4Runner'},
+ 'SmallTest': {},
+ 'Test': {'expected': 'class org.junit.Test$None', 'timeout': '0'},
+ 'UiThreadTest': {}},
+ 'class': 'org.chromium.TestA__sandbox_mode',
+ 'flags': 'enable_features=abc',
+ 'is_junit4': True,
+ 'method': 'testSimple'}
+ unqualified_class_test = {
+ 'class': test['class'].split('.')[-1],
+ 'method': test['method']
+ }
+ self.assertEqual(
+ instrumentation_test_instance.GetTestNameWithoutParameterPostfix(
+ test, sep='.'), 'org.chromium.TestA')
+ self.assertEqual(
+ instrumentation_test_instance.GetTestNameWithoutParameterPostfix(
+ unqualified_class_test, sep='.'), 'TestA')
+
+ def testGetTests_multipleAnnotationValuesRequested(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {'Feature': {'value': ['Foo']}},
+ 'class': 'org.chromium.test.SampleTest',
+ 'superclass': 'junit.framework.TestCase',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {
+ 'Feature': {'value': ['Baz']},
+ 'MediumTest': None,
+ },
+ 'method': 'testMethod2',
+ },
+ ],
+ },
+ {
+ 'annotations': {'Feature': {'value': ['Bar']}},
+ 'class': 'org.chromium.test.SampleTest2',
+ 'superclass': 'junit.framework.TestCase',
+ 'methods': [
+ {
+ 'annotations': {'SmallTest': None},
+ 'method': 'testMethod1',
+ },
+ ],
+ }
+ ]
+
+ expected_tests = [
+ {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['Baz']
+ },
+ 'MediumTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod2',
+ },
+ {
+ 'annotations': {
+ 'Feature': {
+ 'value': ['Bar']
+ },
+ 'SmallTest': None,
+ },
+ 'class': 'org.chromium.test.SampleTest2',
+ 'is_junit4': True,
+ 'method': 'testMethod1',
+ },
+ ]
+
+ o._annotations = [('Feature', 'Bar'), ('Feature', 'Baz')]
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testGenerateTestResults_noStatus(self):
+ results = instrumentation_test_instance.GenerateTestResults(
+ None, None, [], 1000, None, None)
+ self.assertEqual([], results)
+
+ def testGenerateTestResults_testPassed(self):
+ statuses = [
+ (1, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ (0, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ ]
+ results = instrumentation_test_instance.GenerateTestResults(
+ None, None, statuses, 1000, None, None)
+ self.assertEqual(1, len(results))
+ self.assertEqual(base_test_result.ResultType.PASS, results[0].GetType())
+
+ def testGenerateTestResults_testSkipped_true(self):
+ statuses = [
+ (1, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ (0, {
+ 'test_skipped': 'true',
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ (0, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ ]
+ results = instrumentation_test_instance.GenerateTestResults(
+ None, None, statuses, 1000, None, None)
+ self.assertEqual(1, len(results))
+ self.assertEqual(base_test_result.ResultType.SKIP, results[0].GetType())
+
+ def testGenerateTestResults_testSkipped_false(self):
+ statuses = [
+ (1, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ (0, {
+ 'test_skipped': 'false',
+ }),
+ (0, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ ]
+ results = instrumentation_test_instance.GenerateTestResults(
+ None, None, statuses, 1000, None, None)
+ self.assertEqual(1, len(results))
+ self.assertEqual(base_test_result.ResultType.PASS, results[0].GetType())
+
+ def testGenerateTestResults_testFailed(self):
+ statuses = [
+ (1, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ (-2, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ ]
+ results = instrumentation_test_instance.GenerateTestResults(
+ None, None, statuses, 1000, None, None)
+ self.assertEqual(1, len(results))
+ self.assertEqual(base_test_result.ResultType.FAIL, results[0].GetType())
+
+ def testGenerateTestResults_testUnknownException(self):
+ stacktrace = 'long\nstacktrace'
+ statuses = [
+ (1, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ (-1, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ 'stack': stacktrace,
+ }),
+ ]
+ results = instrumentation_test_instance.GenerateTestResults(
+ None, None, statuses, 1000, None, None)
+ self.assertEqual(1, len(results))
+ self.assertEqual(base_test_result.ResultType.FAIL, results[0].GetType())
+ self.assertEqual(stacktrace, results[0].GetLog())
+
+ def testGenerateJUnitTestResults_testSkipped_true(self):
+ statuses = [
+ (1, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ (-3, {
+ 'class': 'test.package.TestClass',
+ 'test': 'testMethod',
+ }),
+ ]
+ results = instrumentation_test_instance.GenerateTestResults(
+ None, None, statuses, 1000, None, None)
+ self.assertEqual(1, len(results))
+ self.assertEqual(base_test_result.ResultType.SKIP, results[0].GetType())
+
+ def testParameterizedCommandLineFlagsSwitches(self):
+ o = self.createTestInstance()
+ raw_tests = [{
+ 'annotations': {
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': ['enable-features=abc', 'enable-features=def']
+ }
+ },
+ 'class':
+ 'org.chromium.test.SampleTest',
+ 'superclass':
+ 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {
+ 'SmallTest': None
+ },
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {
+ 'MediumTest': None,
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': ['enable-features=ghi', 'enable-features=jkl']
+ },
+ },
+ 'method': 'testMethod2',
+ },
+ {
+ 'annotations': {
+ 'MediumTest': None,
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': []
+ },
+ },
+ 'method': 'testMethod3',
+ },
+ {
+ 'annotations': {
+ 'MediumTest': None,
+ 'SkipCommandLineParameterization': None,
+ },
+ 'method': 'testMethod4',
+ },
+ ],
+ }]
+
+ expected_tests = [
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'flags': ['--enable-features=abc', '--enable-features=def'],
+ 'is_junit4': True,
+ 'method': 'testMethod1'
+ },
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'flags': ['--enable-features=ghi', '--enable-features=jkl'],
+ 'is_junit4': True,
+ 'method': 'testMethod2'
+ },
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod3'
+ },
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod4'
+ },
+ ]
+ for i in range(4):
+ expected_tests[i]['annotations'].update(raw_tests[0]['annotations'])
+ expected_tests[i]['annotations'].update(
+ raw_tests[0]['methods'][i]['annotations'])
+
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testParameterizedCommandLineFlags(self):
+ o = self.createTestInstance()
+ raw_tests = [{
+ 'annotations': {
+ 'ParameterizedCommandLineFlags': {
+ 'value': [
+ {
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': [
+ 'enable-features=abc',
+ 'force-fieldtrials=trial/group'
+ ],
+ }
+ },
+ {
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': [
+ 'enable-features=abc2',
+ 'force-fieldtrials=trial/group2'
+ ],
+ }
+ },
+ ],
+ },
+ },
+ 'class':
+ 'org.chromium.test.SampleTest',
+ 'superclass':
+ 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {
+ 'SmallTest': None
+ },
+ 'method': 'testMethod1',
+ },
+ {
+ 'annotations': {
+ 'MediumTest': None,
+ 'ParameterizedCommandLineFlags': {
+ 'value': [{
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': ['enable-features=def']
+ }
+ }],
+ },
+ },
+ 'method': 'testMethod2',
+ },
+ {
+ 'annotations': {
+ 'MediumTest': None,
+ 'ParameterizedCommandLineFlags': {
+ 'value': [],
+ },
+ },
+ 'method': 'testMethod3',
+ },
+ {
+ 'annotations': {
+ 'MediumTest': None,
+ 'SkipCommandLineParameterization': None,
+ },
+ 'method': 'testMethod4',
+ },
+ ],
+ }]
+
+ expected_tests = [
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'flags':
+ ['--enable-features=abc', '--force-fieldtrials=trial/group'],
+ 'is_junit4': True,
+ 'method': 'testMethod1'
+ },
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'flags': ['--enable-features=def'],
+ 'is_junit4': True,
+ 'method': 'testMethod2'
+ },
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod3'
+ },
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'is_junit4': True,
+ 'method': 'testMethod4'
+ },
+ {
+ 'annotations': {},
+ 'class':
+ 'org.chromium.test.SampleTest',
+ 'flags': [
+ '--enable-features=abc2',
+ '--force-fieldtrials=trial/group2',
+ ],
+ 'is_junit4':
+ True,
+ 'method':
+ 'testMethod1'
+ },
+ ]
+ for i in range(4):
+ expected_tests[i]['annotations'].update(raw_tests[0]['annotations'])
+ expected_tests[i]['annotations'].update(
+ raw_tests[0]['methods'][i]['annotations'])
+ expected_tests[4]['annotations'].update(raw_tests[0]['annotations'])
+ expected_tests[4]['annotations'].update(
+ raw_tests[0]['methods'][0]['annotations'])
+
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testDifferentCommandLineParameterizations(self):
+ o = self.createTestInstance()
+ raw_tests = [{
+ 'annotations': {},
+ 'class':
+ 'org.chromium.test.SampleTest',
+ 'superclass':
+ 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {
+ 'SmallTest': None,
+ 'ParameterizedCommandLineFlags': {
+ 'value': [
+ {
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': ['a1', 'a2'],
+ }
+ },
+ ],
+ },
+ },
+ 'method': 'testMethod2',
+ },
+ {
+ 'annotations': {
+ 'SmallTest': None,
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': ['b1', 'b2'],
+ },
+ },
+ 'method': 'testMethod3',
+ },
+ ],
+ }]
+
+ expected_tests = [
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'flags': ['--a1', '--a2'],
+ 'is_junit4': True,
+ 'method': 'testMethod2'
+ },
+ {
+ 'annotations': {},
+ 'class': 'org.chromium.test.SampleTest',
+ 'flags': ['--b1', '--b2'],
+ 'is_junit4': True,
+ 'method': 'testMethod3'
+ },
+ ]
+ for i in range(2):
+ expected_tests[i]['annotations'].update(
+ raw_tests[0]['methods'][i]['annotations'])
+
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ actual_tests = o.ProcessRawTests(raw_tests)
+ self.assertEqual(actual_tests, expected_tests)
+
+ def testMultipleCommandLineParameterizations_raises(self):
+ o = self.createTestInstance()
+ raw_tests = [
+ {
+ 'annotations': {
+ 'ParameterizedCommandLineFlags': {
+ 'value': [
+ {
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': [
+ 'enable-features=abc',
+ 'force-fieldtrials=trial/group',
+ ],
+ }
+ },
+ ],
+ },
+ },
+ 'class':
+ 'org.chromium.test.SampleTest',
+ 'superclass':
+ 'java.lang.Object',
+ 'methods': [
+ {
+ 'annotations': {
+ 'SmallTest': None,
+ 'ParameterizedCommandLineFlags$Switches': {
+ 'value': [
+ 'enable-features=abc',
+ 'force-fieldtrials=trial/group',
+ ],
+ },
+ },
+ 'method': 'testMethod1',
+ },
+ ],
+ },
+ ]
+
+ o._test_jar = 'path/to/test.jar'
+ o._junit4_runner_class = 'J4Runner'
+ self.assertRaises(
+ instrumentation_test_instance.CommandLineParameterizationException,
+ o.ProcessRawTests, [raw_tests[0]])
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/instrumentation/json_perf_parser.py b/third_party/libwebrtc/build/android/pylib/instrumentation/json_perf_parser.py
new file mode 100644
index 0000000000..8f53a2ffb3
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/instrumentation/json_perf_parser.py
@@ -0,0 +1,162 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""A helper module for parsing JSON objects from perf tests results."""
+
+
+import json
+
+
+def GetAverageRunInfo(json_data, name):
+ """Summarizes TraceEvent JSON data for performance metrics.
+
+ Example JSON Inputs (More tags can be added but these are required):
+ Measuring Duration:
+ [
+ { "cat": "Java",
+ "ts": 10000000000,
+ "ph": "S",
+ "name": "TestTrace"
+ },
+ { "cat": "Java",
+ "ts": 10000004000,
+ "ph": "F",
+ "name": "TestTrace"
+ },
+ ...
+ ]
+
+ Measuring Call Frequency (FPS):
+ [
+ { "cat": "Java",
+ "ts": 10000000000,
+ "ph": "I",
+ "name": "TestTraceFPS"
+ },
+ { "cat": "Java",
+ "ts": 10000004000,
+ "ph": "I",
+ "name": "TestTraceFPS"
+ },
+ ...
+ ]
+
+ Args:
+    json_data: A list of dictionaries each representing a JSON object.
+ name: The 'name' tag to filter on in the JSON file.
+
+ Returns:
+ A dictionary of result data with the following tags:
+ min: The minimum value tracked.
+ max: The maximum value tracked.
+ average: The average of all the values tracked.
+ count: The number of times the category/name pair was tracked.
+ type: The type of tracking ('Instant' for instant tags and 'Span' for
+        begin/end tags).
+ category: The passed in category filter.
+ name: The passed in name filter.
+ data_points: A list of all of the times used to generate this data.
+ units: The units for the values being reported.
+
+ Raises:
+ Exception: if entry contains invalid data.
+ """
+
+ def EntryFilter(entry):
+ return entry['cat'] == 'Java' and entry['name'] == name
+ filtered_entries = [j for j in json_data if EntryFilter(j)]
+
+ result = {}
+
+ result['min'] = -1
+ result['max'] = -1
+ result['average'] = 0
+ result['count'] = 0
+ result['type'] = 'Unknown'
+ result['category'] = 'Java'
+ result['name'] = name
+ result['data_points'] = []
+ result['units'] = ''
+
+ total_sum = 0
+
+ last_val = 0
+ val_type = None
+ for entry in filtered_entries:
+ if not val_type:
+ if 'mem' in entry:
+ val_type = 'mem'
+
+ def GetVal(entry):
+ return entry['mem']
+
+ result['units'] = 'kb'
+ elif 'ts' in entry:
+ val_type = 'ts'
+
+ def GetVal(entry):
+ return float(entry['ts']) / 1000.0
+
+ result['units'] = 'ms'
+ else:
+ raise Exception('Entry did not contain valid value info: %s' % entry)
+
+ if not val_type in entry:
+ raise Exception('Entry did not contain expected value type "%s" '
+ 'information: %s' % (val_type, entry))
+ val = GetVal(entry)
+ if (entry['ph'] == 'S' and
+ (result['type'] == 'Unknown' or result['type'] == 'Span')):
+ result['type'] = 'Span'
+ last_val = val
+ elif ((entry['ph'] == 'F' and result['type'] == 'Span') or
+ (entry['ph'] == 'I' and (result['type'] == 'Unknown' or
+ result['type'] == 'Instant'))):
+ if last_val > 0:
+ delta = val - last_val
+ if result['min'] == -1 or result['min'] > delta:
+ result['min'] = delta
+ if result['max'] == -1 or result['max'] < delta:
+ result['max'] = delta
+ total_sum += delta
+ result['count'] += 1
+ result['data_points'].append(delta)
+ if entry['ph'] == 'I':
+ result['type'] = 'Instant'
+ last_val = val
+ if result['count'] > 0:
+ result['average'] = total_sum / result['count']
+
+ return result
+
+
+def GetAverageRunInfoFromJSONString(json_string, name):
+ """Returns the results from GetAverageRunInfo using a JSON string.
+
+ Args:
+ json_string: The string containing JSON.
+ name: The 'name' tag to filter on in the JSON file.
+
+ Returns:
+ See GetAverageRunInfo Returns section.
+ """
+ return GetAverageRunInfo(json.loads(json_string), name)
+
+
+def GetAverageRunInfoFromFile(json_file, name):
+ """Returns the results from GetAverageRunInfo using a JSON file.
+
+ Args:
+ json_file: The path to a JSON file.
+ name: The 'name' tag to filter on in the JSON file.
+
+ Returns:
+ See GetAverageRunInfo Returns section.
+ """
+ with open(json_file, 'r') as f:
+ data = f.read()
+ perf = json.loads(data)
+
+ return GetAverageRunInfo(perf, name)
diff --git a/third_party/libwebrtc/build/android/pylib/instrumentation/render_test.html.jinja b/third_party/libwebrtc/build/android/pylib/instrumentation/render_test.html.jinja
new file mode 100644
index 0000000000..81b85b78e3
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/instrumentation/render_test.html.jinja
@@ -0,0 +1,40 @@
+<html>
+<head>
+ <title>{{ test_name }}</title>
+ <script>
+ function toggleZoom() {
+ for (const img of document.getElementsByTagName("img")) {
+ if (img.hasAttribute('style')) {
+ img.removeAttribute('style');
+ } else {
+ img.style.width = '100%';
+ }
+ }
+ }
+ </script>
+</head>
+<body>
+ <a href="https://cs.chromium.org/search/?q={{ test_name }}&m=100&type=cs">Link to Golden (in repo)</a><br />
+ <a download="{{ test_name }}" href="{{ failure_link }}">Download Failure Image (right click and 'Save link as')</a>
+ <table>
+ <thead>
+ <tr>
+ <th>Failure</th>
+ <th>Golden</th>
+ <th>Diff</th>
+ </tr>
+ </thead>
+ <tbody style="vertical-align: top">
+ <tr onclick="toggleZoom()">
+ <td><img src="{{ failure_link }}" style="width: 100%" /></td>
+ {% if golden_link %}
+ <td><img src="{{ golden_link }}" style="width: 100%" /></td>
+ <td><img src="{{ diff_link }}" style="width: 100%" /></td>
+ {% else %}
+ <td>No Golden Image.</td>
+ {% endif %}
+ </tr>
+ </tbody>
+ </table>
+</body>
+</html>
diff --git a/third_party/libwebrtc/build/android/pylib/instrumentation/test_result.py b/third_party/libwebrtc/build/android/pylib/instrumentation/test_result.py
new file mode 100644
index 0000000000..766dad8a5d
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/instrumentation/test_result.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from pylib.base import base_test_result
+
+
+class InstrumentationTestResult(base_test_result.BaseTestResult):
+ """Result information for a single instrumentation test."""
+
+ def __init__(self, full_name, test_type, dur, log=''):
+ """Construct an InstrumentationTestResult object.
+
+ Args:
+ full_name: Full name of the test.
+ test_type: Type of the test result as defined in ResultType.
+ dur: Duration of the test run in milliseconds.
+ log: A string listing any errors.
+ """
+ super(InstrumentationTestResult, self).__init__(
+ full_name, test_type, dur, log)
+ name_pieces = full_name.rsplit('#')
+ if len(name_pieces) > 1:
+ self._test_name = name_pieces[1]
+ self._class_name = name_pieces[0]
+ else:
+ self._class_name = full_name
+ self._test_name = full_name
+
+ def SetDuration(self, duration):
+ """Set the test duration."""
+ self._duration = duration
diff --git a/third_party/libwebrtc/build/android/pylib/junit/__init__.py b/third_party/libwebrtc/build/android/pylib/junit/__init__.py
new file mode 100644
index 0000000000..4d6aabb953
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/junit/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/junit/junit_test_instance.py b/third_party/libwebrtc/build/android/pylib/junit/junit_test_instance.py
new file mode 100644
index 0000000000..8a26f98b38
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/junit/junit_test_instance.py
@@ -0,0 +1,76 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from pylib.base import test_instance
+from pylib.utils import test_filter
+
+
+class JunitTestInstance(test_instance.TestInstance):
+
+ def __init__(self, args, _):
+ super(JunitTestInstance, self).__init__()
+
+ self._coverage_dir = args.coverage_dir
+ self._debug_socket = args.debug_socket
+ self._coverage_on_the_fly = args.coverage_on_the_fly
+ self._package_filter = args.package_filter
+ self._resource_apk = args.resource_apk
+ self._robolectric_runtime_deps_dir = args.robolectric_runtime_deps_dir
+ self._runner_filter = args.runner_filter
+ self._shards = args.shards
+ self._test_filter = test_filter.InitializeFilterFromArgs(args)
+ self._test_suite = args.test_suite
+
+ #override
+ def TestType(self):
+ return 'junit'
+
+ #override
+ def SetUp(self):
+ pass
+
+ #override
+ def TearDown(self):
+ pass
+
+ @property
+ def coverage_dir(self):
+ return self._coverage_dir
+
+ @property
+ def coverage_on_the_fly(self):
+ return self._coverage_on_the_fly
+
+ @property
+ def debug_socket(self):
+ return self._debug_socket
+
+ @property
+ def package_filter(self):
+ return self._package_filter
+
+ @property
+ def resource_apk(self):
+ return self._resource_apk
+
+ @property
+ def robolectric_runtime_deps_dir(self):
+ return self._robolectric_runtime_deps_dir
+
+ @property
+ def runner_filter(self):
+ return self._runner_filter
+
+ @property
+ def test_filter(self):
+ return self._test_filter
+
+ @property
+ def shards(self):
+ return self._shards
+
+ @property
+ def suite(self):
+ return self._test_suite
diff --git a/third_party/libwebrtc/build/android/pylib/local/__init__.py b/third_party/libwebrtc/build/android/pylib/local/__init__.py
new file mode 100644
index 0000000000..4d6aabb953
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/__init__.py b/third_party/libwebrtc/build/android/pylib/local/device/__init__.py
new file mode 100644
index 0000000000..4d6aabb953
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py
new file mode 100644
index 0000000000..c254d2e8ca
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_environment.py
@@ -0,0 +1,328 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import datetime
+import functools
+import logging
+import os
+import shutil
+import tempfile
+import threading
+
+import devil_chromium
+from devil import base_error
+from devil.android import device_denylist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android import logcat_monitor
+from devil.android.sdk import adb_wrapper
+from devil.utils import file_utils
+from devil.utils import parallelizer
+from pylib import constants
+from pylib.constants import host_paths
+from pylib.base import environment
+from pylib.utils import instrumentation_tracing
+from py_trace_event import trace_event
+
+
# Logcat tag:priority filter specs applied when capturing device logcat.
LOGCAT_FILTERS = [
    'chromium:v',
    'cr_*:v',
    'DEBUG:I',
    'StrictMode:D',
]
+
+
def _DeviceCachePath(device):
  """Returns the host-side path of the JSON cache file for |device|."""
  serial = device.adb.GetDeviceSerial()
  cache_file_name = 'device_cache_%s.json' % serial
  return os.path.join(constants.GetOutDirectory(), cache_file_name)
+
+
def handle_shard_failures(f):
  """A decorator that handles device failures for per-device functions.

  Equivalent to handle_shard_failures_with(None), i.e. no failure callback
  is invoked.

  Args:
    f: the function being decorated. The function must take at least one
      argument, and that argument must be the device.
  """
  return handle_shard_failures_with(None)(f)
+
+
# TODO(jbudorick): Refactor this to work as a decorator or context manager.
def handle_shard_failures_with(on_failure):
  """Returns a decorator that handles device failures for per-device functions.

  The decorated function's device-level failures (timeouts, unreachable
  devices, other devil base errors) are logged and swallowed; on_failure is
  then invoked. SystemExit is logged but re-raised.

  Args:
    on_failure: A binary function to call on failure; it receives the device
      and the name of the failed function. May be None to skip the callback.
  """
  def decorator(f):
    @functools.wraps(f)
    def wrapper(dev, *args, **kwargs):
      try:
        return f(dev, *args, **kwargs)
      except device_errors.CommandTimeoutError:
        logging.exception('Shard timed out: %s(%s)', f.__name__, str(dev))
      except device_errors.DeviceUnreachableError:
        logging.exception('Shard died: %s(%s)', f.__name__, str(dev))
      except base_error.BaseError:
        logging.exception('Shard failed: %s(%s)', f.__name__, str(dev))
      except SystemExit:
        # Re-raised, so on_failure is never invoked for SystemExit.
        logging.exception('Shard killed: %s(%s)', f.__name__, str(dev))
        raise
      if on_failure:
        on_failure(dev, f.__name__)
      # None signals to callers that the shard did not complete normally.
      return None

    return wrapper

  return decorator
+
+
def place_nomedia_on_device(dev, device_root):
  """Places .nomedia file in test data root.

  This helps to prevent system from scanning media files inside test data.

  Args:
    dev: Device to place .nomedia file.
    device_root: Base path on device to place .nomedia file.
  """
  marker_path = '%s/.nomedia' % device_root
  # The directory must exist before the marker file can be written into it.
  dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)
  dev.WriteFile(marker_path, 'https://crbug.com/796640')
+
+
class LocalDeviceEnvironment(environment.Environment):
  """Environment for running tests on locally-attached Android devices.

  Owns device discovery (with optional denylisting), per-device logcat
  monitors, and optional tracing of the test runner itself.
  """

  def __init__(self, args, output_manager, _error_func):
    """Initializes the environment from parsed command-line args.

    Args:
      args: Parsed command-line arguments (argparse namespace).
      output_manager: Output manager forwarded to the base Environment.
      _error_func: Unused; kept for constructor-signature compatibility.
    """
    super(LocalDeviceEnvironment, self).__init__(output_manager)
    self._current_try = 0
    self._denylist = (device_denylist.Denylist(args.denylist_file)
                      if args.denylist_file else None)
    self._device_serials = args.test_devices
    self._devices_lock = threading.Lock()
    # Populated lazily by _InitDevices() the first time |devices| is read.
    self._devices = None
    self._concurrent_adb = args.enable_concurrent_adb
    self._enable_device_cache = args.enable_device_cache
    self._logcat_monitors = []
    self._logcat_output_dir = args.logcat_output_dir
    self._logcat_output_file = args.logcat_output_file
    self._max_tries = 1 + args.num_retries
    self._preferred_abis = None
    self._recover_devices = args.recover_devices
    self._skip_clear_data = args.skip_clear_data
    self._tool_name = args.tool
    # Tracing args are optional: not every test type's arg parser defines them.
    self._trace_output = None
    if hasattr(args, 'trace_output'):
      self._trace_output = args.trace_output
    self._trace_all = None
    if hasattr(args, 'trace_all'):
      self._trace_all = args.trace_all

    devil_chromium.Initialize(
        output_directory=constants.GetOutDirectory(),
        adb_path=args.adb_path)

    # Some things such as Forwarder require ADB to be in the environment path,
    # while others like Devil's bundletool.py require Java on the path.
    adb_dir = os.path.dirname(adb_wrapper.AdbWrapper.GetAdbPath())
    if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
      os.environ['PATH'] = os.pathsep.join(
          [adb_dir, host_paths.JAVA_PATH, os.environ['PATH']])

  #override
  def SetUp(self):
    """Starts runner-side tracing if requested on the command line."""
    if self.trace_output and self._trace_all:
      to_include = [r"pylib\..*", r"devil\..*", "__main__"]
      to_exclude = ["logging"]
      instrumentation_tracing.start_instrumenting(self.trace_output, to_include,
                                                  to_exclude)
    elif self.trace_output:
      self.EnableTracing()

  # Must be called before accessing |devices|.
  def SetPreferredAbis(self, abis):
    assert self._devices is None
    self._preferred_abis = abis

  def _InitDevices(self):
    """Finds healthy devices and attaches a logcat monitor to each one."""
    device_arg = []
    if self._device_serials:
      device_arg = self._device_serials

    self._devices = device_utils.DeviceUtils.HealthyDevices(
        self._denylist,
        retries=5,
        enable_usb_resets=True,
        enable_device_files_cache=self._enable_device_cache,
        default_retries=self._max_tries - 1,
        device_arg=device_arg,
        abis=self._preferred_abis)

    # When a single merged output file is requested, capture per-device
    # logcats into a temp dir first; they are merged in TearDown().
    if self._logcat_output_file:
      self._logcat_output_dir = tempfile.mkdtemp()

    @handle_shard_failures_with(on_failure=self.DenylistDevice)
    def prepare_device(d):
      d.WaitUntilFullyBooted()

      if self._enable_device_cache:
        cache_path = _DeviceCachePath(d)
        if os.path.exists(cache_path):
          logging.info('Using device cache: %s', cache_path)
          with open(cache_path) as f:
            d.LoadCacheData(f.read())
          # Delete cached file so that any exceptions cause it to be cleared.
          os.unlink(cache_path)

      if self._logcat_output_dir:
        logcat_file = os.path.join(
            self._logcat_output_dir,
            '%s_%s' % (d.adb.GetDeviceSerial(),
                       datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S')))
        monitor = logcat_monitor.LogcatMonitor(
            d.adb, clear=True, output_file=logcat_file)
        self._logcat_monitors.append(monitor)
        monitor.Start()

    self.parallel_devices.pMap(prepare_device)

  @property
  def current_try(self):
    return self._current_try

  def IncrementCurrentTry(self):
    self._current_try += 1

  def ResetCurrentTry(self):
    self._current_try = 0

  @property
  def denylist(self):
    return self._denylist

  @property
  def concurrent_adb(self):
    return self._concurrent_adb

  @property
  def devices(self):
    # Initialize lazily so that host-only tests do not fail when no devices are
    # attached.
    if self._devices is None:
      self._InitDevices()
    return self._devices

  @property
  def max_tries(self):
    return self._max_tries

  @property
  def parallel_devices(self):
    return parallelizer.SyncParallelizer(self.devices)

  @property
  def recover_devices(self):
    return self._recover_devices

  @property
  def skip_clear_data(self):
    return self._skip_clear_data

  @property
  def tool(self):
    return self._tool_name

  @property
  def trace_output(self):
    return self._trace_output

  #override
  def TearDown(self):
    """Stops tracing, persists device caches, and closes logcat monitors."""
    if self.trace_output and self._trace_all:
      instrumentation_tracing.stop_instrumenting()
    elif self.trace_output:
      self.DisableTracing()

    # By default, teardown will invoke ADB. When receiving SIGTERM due to a
    # timeout, there's a high probability that ADB is non-responsive. In these
    # cases, sending an ADB command will potentially take a long time to time
    # out. Before this happens, the process will be hard-killed for not
    # responding to SIGTERM fast enough.
    # NOTE(review): _received_sigterm is presumably set by the base
    # environment.Environment's signal handling — confirm.
    if self._received_sigterm:
      return

    if not self._devices:
      return

    @handle_shard_failures_with(on_failure=self.DenylistDevice)
    def tear_down_device(d):
      # Write the cache even when not using it so that it will be ready the
      # first time that it is enabled. Writing it every time is also necessary
      # so that an invalid cache can be flushed just by disabling it for one
      # run.
      cache_path = _DeviceCachePath(d)
      if os.path.exists(os.path.dirname(cache_path)):
        with open(cache_path, 'w') as f:
          f.write(d.DumpCacheData())
        logging.info('Wrote device cache: %s', cache_path)
      else:
        logging.warning(
            'Unable to write device cache as %s directory does not exist',
            os.path.dirname(cache_path))

    self.parallel_devices.pMap(tear_down_device)

    for m in self._logcat_monitors:
      try:
        m.Stop()
        m.Close()
        # Prefix every line with the device serial so merged logcat output
        # remains attributable to a device.
        _, temp_path = tempfile.mkstemp()
        with open(m.output_file, 'r') as infile:
          with open(temp_path, 'w') as outfile:
            for line in infile:
              outfile.write('Device(%s) %s' % (m.adb.GetDeviceSerial(), line))
        shutil.move(temp_path, m.output_file)
      except base_error.BaseError:
        logging.exception('Failed to stop logcat monitor for %s',
                          m.adb.GetDeviceSerial())
      except IOError:
        logging.exception('Failed to locate logcat for device %s',
                          m.adb.GetDeviceSerial())

    if self._logcat_output_file:
      file_utils.MergeFiles(
          self._logcat_output_file,
          [m.output_file for m in self._logcat_monitors
           if os.path.exists(m.output_file)])
      shutil.rmtree(self._logcat_output_dir)

  def DenylistDevice(self, device, reason='local_device_failure'):
    """Adds |device| to the denylist and drops it from the active device list.

    Raises:
      device_errors.NoDevicesError: if no usable devices remain afterwards.
    """
    device_serial = device.adb.GetDeviceSerial()
    if self._denylist:
      self._denylist.Extend([device_serial], reason=reason)
    with self._devices_lock:
      self._devices = [d for d in self._devices if str(d) != device_serial]
    logging.error('Device %s denylisted: %s', device_serial, reason)
    if not self._devices:
      raise device_errors.NoDevicesError(
          'All devices were denylisted due to errors')

  @staticmethod
  def DisableTracing():
    """Stops py_trace_event tracing if it is running."""
    if not trace_event.trace_is_enabled():
      logging.warning('Tracing is not running.')
    else:
      trace_event.trace_disable()

  def EnableTracing(self):
    """Starts py_trace_event tracing into the configured trace output file."""
    if trace_event.trace_is_enabled():
      logging.warning('Tracing is already running.')
    else:
      trace_event.trace_enable(self._trace_output)
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py
new file mode 100644
index 0000000000..c81722da6e
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run.py
@@ -0,0 +1,896 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import contextlib
+import collections
+import itertools
+import logging
+import math
+import os
+import posixpath
+import subprocess
+import shutil
+import time
+
+from six.moves import range # pylint: disable=redefined-builtin
+from devil import base_error
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android import device_temp_file
+from devil.android import logcat_monitor
+from devil.android import ports
+from devil.android.sdk import version_codes
+from devil.utils import reraiser_thread
+from incremental_install import installer
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.gtest import gtest_test_instance
+from pylib.local import local_test_server_spawner
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_test_run
+from pylib.utils import google_storage_helper
+from pylib.utils import logdog_helper
+from py_trace_event import trace_event
+from py_utils import contextlib_ext
+from py_utils import tempfile_ext
+import tombstones
+
# Flag strings longer than this are passed to the device via a file rather
# than inline in the instrumentation extras. Arbitrarily chosen.
_MAX_INLINE_FLAGS_LENGTH = 50
# Intent extra keys consumed by the native test instrumentation runner.
_EXTRA_COMMAND_LINE_FILE = (
    'org.chromium.native_test.NativeTest.CommandLineFile')
_EXTRA_COMMAND_LINE_FLAGS = (
    'org.chromium.native_test.NativeTest.CommandLineFlags')
_EXTRA_COVERAGE_DEVICE_FILE = (
    'org.chromium.native_test.NativeTest.CoverageDeviceFile')
_EXTRA_STDOUT_FILE = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner'
    '.StdoutFile')
_EXTRA_TEST = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner'
    '.Test')
_EXTRA_TEST_LIST = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner'
    '.TestList')

# Conversion factor for second-based timeouts to the nanosecond values the
# instrumentation shard timeout extra expects.
_SECONDS_TO_NANOS = int(1e9)

# Tests that use SpawnedTestServer must run the LocalTestServerSpawner on the
# host machine.
# TODO(jbudorick): Move this up to the test instance if the net test server is
# handled outside of the APK for the remote_device environment.
_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
    'components_browsertests', 'content_unittests', 'content_browsertests',
    'net_unittests', 'services_unittests', 'unit_tests'
]

# These are used for code coverage.
_LLVM_PROFDATA_PATH = os.path.join(constants.DIR_SOURCE_ROOT, 'third_party',
                                   'llvm-build', 'Release+Asserts', 'bin',
                                   'llvm-profdata')
# Name of the file extension for profraw data files.
_PROFRAW_FILE_EXTENSION = 'profraw'
# Name of the file where profraw data files are merged.
_MERGE_PROFDATA_FILE_NAME = 'coverage_merged.' + _PROFRAW_FILE_EXTENSION
+
+# No-op context manager. If we used Python 3, we could change this to
+# contextlib.ExitStack()
+class _NullContextManager(object):
+ def __enter__(self):
+ pass
+ def __exit__(self, *args):
+ pass
+
+
+def _GenerateSequentialFileNames(filename):
+ """Infinite generator of names: 'name.ext', 'name_1.ext', 'name_2.ext', ..."""
+ yield filename
+ base, ext = os.path.splitext(filename)
+ for i in itertools.count(1):
+ yield '%s_%d%s' % (base, i, ext)
+
+
+def _ExtractTestsFromFilter(gtest_filter):
+ """Returns the list of tests specified by the given filter.
+
+ Returns:
+ None if the device should be queried for the test list instead.
+ """
+ # Empty means all tests, - means exclude filter.
+ if not gtest_filter or '-' in gtest_filter:
+ return None
+
+ patterns = gtest_filter.split(':')
+ # For a single pattern, allow it even if it has a wildcard so long as the
+ # wildcard comes at the end and there is at least one . to prove the scope is
+ # not too large.
+ # This heuristic is not necessarily faster, but normally is.
+ if len(patterns) == 1 and patterns[0].endswith('*'):
+ no_suffix = patterns[0].rstrip('*')
+ if '*' not in no_suffix and '.' in no_suffix:
+ return patterns
+
+ if '*' in gtest_filter:
+ return None
+ return patterns
+
+
+def _GetDeviceTimeoutMultiplier():
+ # Emulated devices typically run 20-150x slower than real-time.
+ # Give a way to control this through the DEVICE_TIMEOUT_MULTIPLIER
+ # environment variable.
+ multiplier = os.getenv("DEVICE_TIMEOUT_MULTIPLIER")
+ if multiplier:
+ return int(multiplier)
+ return 1
+
+
+def _MergeCoverageFiles(coverage_dir, profdata_dir):
+ """Merge coverage data files.
+
+ Each instrumentation activity generates a separate profraw data file. This
+ merges all profraw files in profdata_dir into a single file in
+ coverage_dir. This happens after each test, rather than waiting until after
+ all tests are ran to reduce the memory footprint used by all the profraw
+ files.
+
+ Args:
+ coverage_dir: The path to the coverage directory.
+ profdata_dir: The directory where the profraw data file(s) are located.
+
+ Return:
+ None
+ """
+ # profdata_dir may not exist if pulling coverage files failed.
+ if not os.path.exists(profdata_dir):
+ logging.debug('Profraw directory does not exist.')
+ return
+
+ merge_file = os.path.join(coverage_dir, _MERGE_PROFDATA_FILE_NAME)
+ profraw_files = [
+ os.path.join(profdata_dir, f) for f in os.listdir(profdata_dir)
+ if f.endswith(_PROFRAW_FILE_EXTENSION)
+ ]
+
+ try:
+ logging.debug('Merging target profraw files into merged profraw file.')
+ subprocess_cmd = [
+ _LLVM_PROFDATA_PATH,
+ 'merge',
+ '-o',
+ merge_file,
+ '-sparse=true',
+ ]
+ # Grow the merge file by merging it with itself and the new files.
+ if os.path.exists(merge_file):
+ subprocess_cmd.append(merge_file)
+ subprocess_cmd.extend(profraw_files)
+ output = subprocess.check_output(subprocess_cmd)
+ logging.debug('Merge output: %s', output)
+ except subprocess.CalledProcessError:
+ # Don't raise error as that will kill the test run. When code coverage
+ # generates a report, that will raise the error in the report generation.
+ logging.error(
+ 'Failed to merge target profdata files to create merged profraw file.')
+
+ # Free up memory space on bot as all data is in the merge file.
+ for f in profraw_files:
+ os.remove(f)
+
+
def _PullCoverageFiles(device, device_coverage_dir, output_dir):
  """Pulls coverage files on device to host directory.

  Always removes the on-device coverage directory afterwards, even when the
  pull fails.

  Args:
    device: The working device.
    device_coverage_dir: The directory to store coverage data on device.
    output_dir: The output directory on host.
  """
  try:
    if not os.path.exists(output_dir):
      os.makedirs(output_dir)
    device.PullFile(device_coverage_dir, output_dir)
    if not os.listdir(os.path.join(output_dir, 'profraw')):
      logging.warning('No coverage data was generated for this run')
  except (OSError, base_error.BaseError) as e:
    # Coverage handling is best-effort; never fail the test run over it.
    logging.warning('Failed to handle coverage data after tests: %s', e)
  finally:
    device.RemovePath(device_coverage_dir, force=True, recursive=True)
+
+
+def _GetDeviceCoverageDir(device):
+ """Gets the directory to generate coverage data on device.
+
+ Args:
+ device: The working device.
+
+ Returns:
+ The directory path on the device.
+ """
+ return posixpath.join(device.GetExternalStoragePath(), 'chrome', 'test',
+ 'coverage', 'profraw')
+
+
+def _GetLLVMProfilePath(device_coverage_dir, suite, coverage_index):
+ """Gets 'LLVM_PROFILE_FILE' environment variable path.
+
+ Dumping data to ONLY 1 file may cause warning and data overwrite in
+ browsertests, so that pattern "%2m" is used to expand to 2 raw profiles
+ at runtime.
+
+ Args:
+ device_coverage_dir: The directory to generate data on device.
+ suite: Test suite name.
+ coverage_index: The incremental index for this test suite.
+
+ Returns:
+ The path pattern for environment variable 'LLVM_PROFILE_FILE'.
+ """
+ return posixpath.join(device_coverage_dir,
+ '_'.join([suite,
+ str(coverage_index), '%2m.profraw']))
+
+
class _ApkDelegate(object):
  """Runs APK-packaged gtests on a device via am instrument."""

  def __init__(self, test_instance, tool):
    self._activity = test_instance.activity
    self._apk_helper = test_instance.apk_helper
    self._test_apk_incremental_install_json = (
        test_instance.test_apk_incremental_install_json)
    self._package = test_instance.package
    self._runner = test_instance.runner
    self._permissions = test_instance.permissions
    self._suite = test_instance.suite
    # Instrumentation component: "<package>/<runner>".
    self._component = '%s/%s' % (self._package, self._runner)
    self._extras = test_instance.extras
    self._wait_for_java_debugger = test_instance.wait_for_java_debugger
    self._tool = tool
    self._coverage_dir = test_instance.coverage_dir
    # Incremented per Run() so each run writes coverage to a fresh path.
    self._coverage_index = 0
    self._use_existing_test_data = test_instance.use_existing_test_data

  def GetTestDataRoot(self, device):
    """Returns the on-device directory that holds test data dependencies."""
    # pylint: disable=no-self-use
    return posixpath.join(device.GetExternalStoragePath(),
                          'chromium_tests_root')

  def Install(self, device):
    """Installs the test APK (incrementally when configured)."""
    if self._use_existing_test_data:
      return
    if self._test_apk_incremental_install_json:
      installer.Install(device, self._test_apk_incremental_install_json,
                        apk=self._apk_helper, permissions=self._permissions)
    else:
      device.Install(
          self._apk_helper,
          allow_downgrade=True,
          reinstall=True,
          permissions=self._permissions)

  def ResultsDirectory(self, device):
    """Returns the app's data directory, used for result files on |device|."""
    return device.GetApplicationDataDirectory(self._package)

  def Run(self, test, device, flags=None, **kwargs):
    """Runs |test| (a list of gtest names) on |device| via instrumentation.

    Args:
      test: List of gtest names to run, or None/empty to run everything.
      device: The target device.
      flags: Optional command-line flag string for the native test.
      **kwargs: Forwarded to StartInstrumentation (e.g. 'timeout').

    Returns:
      The test's stdout, as a list of lines.
    """
    extras = dict(self._extras)
    device_api = device.build_version_sdk

    if self._coverage_dir and device_api >= version_codes.LOLLIPOP:
      device_coverage_dir = _GetDeviceCoverageDir(device)
      extras[_EXTRA_COVERAGE_DEVICE_FILE] = _GetLLVMProfilePath(
          device_coverage_dir, self._suite, self._coverage_index)
      self._coverage_index += 1

    if ('timeout' in kwargs
        and gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT not in extras):
      # Make sure the instrumentation doesn't kill the test before the
      # scripts do. The provided timeout value is in seconds, but the
      # instrumentation deals with nanoseconds because that's how Android
      # handles time.
      extras[gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT] = int(
          kwargs['timeout'] * _SECONDS_TO_NANOS)

    # pylint: disable=redefined-variable-type
    # Long flag strings go through a device file to sidestep command-line
    # length limits.
    command_line_file = _NullContextManager()
    if flags:
      if len(flags) > _MAX_INLINE_FLAGS_LENGTH:
        command_line_file = device_temp_file.DeviceTempFile(device.adb)
        device.WriteFile(command_line_file.name, '_ %s' % flags)
        extras[_EXTRA_COMMAND_LINE_FILE] = command_line_file.name
      else:
        extras[_EXTRA_COMMAND_LINE_FLAGS] = flags

    # Multiple tests are likewise listed in a device file.
    test_list_file = _NullContextManager()
    if test:
      if len(test) > 1:
        test_list_file = device_temp_file.DeviceTempFile(device.adb)
        device.WriteFile(test_list_file.name, '\n'.join(test))
        extras[_EXTRA_TEST_LIST] = test_list_file.name
      else:
        extras[_EXTRA_TEST] = test[0]
    # pylint: enable=redefined-variable-type

    # We need to use GetAppWritablePath here instead of GetExternalStoragePath
    # since we will not have yet applied legacy storage permission workarounds
    # on R+.
    stdout_file = device_temp_file.DeviceTempFile(
        device.adb, dir=device.GetAppWritablePath(), suffix='.gtest_out')
    extras[_EXTRA_STDOUT_FILE] = stdout_file.name

    if self._wait_for_java_debugger:
      cmd = ['am', 'set-debug-app', '-w', self._package]
      device.RunShellCommand(cmd, check_return=True)
      logging.warning('*' * 80)
      logging.warning('Waiting for debugger to attach to process: %s',
                      self._package)
      logging.warning('*' * 80)

    with command_line_file, test_list_file, stdout_file:
      try:
        device.StartInstrumentation(
            self._component, extras=extras, raw=False, **kwargs)
      except device_errors.CommandFailedError:
        logging.exception('gtest shard failed.')
      except device_errors.CommandTimeoutError:
        logging.exception('gtest shard timed out.')
      except device_errors.DeviceUnreachableError:
        logging.exception('gtest shard device unreachable.')
      except Exception:
        device.ForceStop(self._package)
        raise
      finally:
        # Pull coverage even on failure so partial data is not lost.
        if self._coverage_dir and device_api >= version_codes.LOLLIPOP:
          if not os.path.isdir(self._coverage_dir):
            os.makedirs(self._coverage_dir)
          # TODO(crbug.com/1179004) Use _MergeCoverageFiles when llvm-profdata
          # not found is fixed.
          _PullCoverageFiles(
              device, device_coverage_dir,
              os.path.join(self._coverage_dir, str(self._coverage_index)))

    return device.ReadFile(stdout_file.name).splitlines()

  def PullAppFiles(self, device, files, directory):
    """Pulls |files| from the app data dir into a per-device host directory."""
    device_dir = device.GetApplicationDataDirectory(self._package)
    host_dir = os.path.join(directory, str(device))
    for f in files:
      device_file = posixpath.join(device_dir, f)
      host_file = os.path.join(host_dir, *f.split(posixpath.sep))
      # Never overwrite an earlier pull; use the first free sequential name.
      for host_file in _GenerateSequentialFileNames(host_file):
        if not os.path.exists(host_file):
          break
      device.PullFile(device_file, host_file)

  def Clear(self, device):
    """Clears the test package's application state on |device|."""
    device.ClearApplicationState(self._package, permissions=self._permissions)
+
+
class _ExeDelegate(object):
  """Runs gtest executables pushed directly to the device."""

  def __init__(self, tr, test_instance, tool):
    self._host_dist_dir = test_instance.exe_dist_dir
    # The dist dir is named '<exe>__dist'; strip the suffix for the binary.
    self._exe_file_name = os.path.basename(
        test_instance.exe_dist_dir)[:-len('__dist')]
    self._device_dist_dir = posixpath.join(
        constants.TEST_EXECUTABLE_DIR,
        os.path.basename(test_instance.exe_dist_dir))
    self._test_run = tr
    self._tool = tool
    self._suite = test_instance.suite
    self._coverage_dir = test_instance.coverage_dir
    # Incremented per Run() so each run writes coverage to a fresh path.
    self._coverage_index = 0

  def GetTestDataRoot(self, device):
    """Returns the on-device directory that holds test data dependencies."""
    # pylint: disable=no-self-use
    # pylint: disable=unused-argument
    return posixpath.join(constants.TEST_EXECUTABLE_DIR, 'chromium_tests_root')

  def Install(self, device):
    """Pushes the executable dist directory to the device."""
    # TODO(jbudorick): Look into merging this with normal data deps pushing if
    # executables become supported on nonlocal environments.
    device.PushChangedFiles([(self._host_dist_dir, self._device_dist_dir)],
                            delete_device_stale=True)

  def ResultsDirectory(self, device):
    """Returns the on-device directory used for result files."""
    # pylint: disable=no-self-use
    # pylint: disable=unused-argument
    return constants.TEST_EXECUTABLE_DIR

  def Run(self, test, device, flags=None, **kwargs):
    """Runs |test| (a list of gtest names) on |device| as a shell command.

    Returns:
      The command's output, as returned by RunShellCommand.
    """
    tool = self._test_run.GetTool(device).GetTestWrapper()
    if tool:
      cmd = [tool]
    else:
      cmd = []
    cmd.append(posixpath.join(self._device_dist_dir, self._exe_file_name))

    if test:
      cmd.append('--gtest_filter=%s' % ':'.join(test))
    if flags:
      # TODO(agrieve): This won't work if multiple flags are passed.
      cmd.append(flags)
    cwd = constants.TEST_EXECUTABLE_DIR

    env = {
        'LD_LIBRARY_PATH': self._device_dist_dir
    }

    if self._coverage_dir:
      device_coverage_dir = _GetDeviceCoverageDir(device)
      env['LLVM_PROFILE_FILE'] = _GetLLVMProfilePath(
          device_coverage_dir, self._suite, self._coverage_index)
      self._coverage_index += 1

    if self._tool != 'asan':
      env['UBSAN_OPTIONS'] = constants.UBSAN_OPTIONS

    # gcov setup is best-effort: a missing env var or inaccessible storage
    # simply leaves gcov disabled.
    try:
      gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
      external = device.GetExternalStoragePath()
      env['GCOV_PREFIX'] = '%s/gcov' % external
      env['GCOV_PREFIX_STRIP'] = gcov_strip_depth
    except (device_errors.CommandFailedError, KeyError):
      pass

    # Executable tests return a nonzero exit code on test failure, which is
    # fine from the test runner's perspective; thus check_return=False.
    output = device.RunShellCommand(
        cmd, cwd=cwd, env=env, check_return=False, large_output=True, **kwargs)

    if self._coverage_dir:
      _PullCoverageFiles(
          device, device_coverage_dir,
          os.path.join(self._coverage_dir, str(self._coverage_index)))

    return output

  def PullAppFiles(self, device, files, directory):
    """No-op: executables have no application data directory to pull from."""
    pass

  def Clear(self, device):
    """Kills any running instances of the test executable on |device|."""
    device.KillAll(self._exe_file_name,
                   blocking=True,
                   timeout=30 * _GetDeviceTimeoutMultiplier(),
                   quiet=True)
+
+
+class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
+
  def __init__(self, env, test_instance):
    """Initializes the run and selects an APK or executable delegate.

    Args:
      env: A LocalDeviceEnvironment.
      test_instance: A GtestTestInstance.
    """
    assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
    assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
    super(LocalDeviceGtestRun, self).__init__(env, test_instance)

    # NOTE(review): only assigned when an apk_helper exists — readers of
    # self._installed_packages presumably only run for APK-based tests.
    if self._test_instance.apk_helper:
      self._installed_packages = [
          self._test_instance.apk_helper.GetPackageName()
      ]

    # pylint: disable=redefined-variable-type
    if self._test_instance.apk:
      self._delegate = _ApkDelegate(self._test_instance, env.tool)
    elif self._test_instance.exe_dist_dir:
      self._delegate = _ExeDelegate(self, self._test_instance, self._env.tool)
    if self._test_instance.isolated_script_test_perf_output:
      self._test_perf_output_filenames = _GenerateSequentialFileNames(
          self._test_instance.isolated_script_test_perf_output)
    else:
      self._test_perf_output_filenames = itertools.repeat(None)
    # pylint: enable=redefined-variable-type
    # Tests suspected of crashing; each gets its own shard (see _CreateShards).
    self._crashes = set()
    # Maps device serial -> list of test servers started for that device.
    self._servers = collections.defaultdict(list)
+
  #override
  def TestPackage(self):
    """Returns the name of the gtest suite being run."""
    return self._test_instance.suite
+
  #override
  def SetUp(self):
    """Installs the test, pushes data deps, and starts tools/servers per device."""
    @local_device_environment.handle_shard_failures_with(
        on_failure=self._env.DenylistDevice)
    @trace_event.traced
    def individual_device_set_up(device, host_device_tuples):
      def install_apk(dev):
        # Install test APK.
        self._delegate.Install(dev)

      def push_test_data(dev):
        if self._test_instance.use_existing_test_data:
          return
        # Push data dependencies.
        device_root = self._delegate.GetTestDataRoot(dev)
        host_device_tuples_substituted = [
            (h, local_device_test_run.SubstituteDeviceRoot(d, device_root))
            for h, d in host_device_tuples]
        local_device_environment.place_nomedia_on_device(dev, device_root)
        dev.PushChangedFiles(
            host_device_tuples_substituted,
            delete_device_stale=True,
            # Some gtest suites, e.g. unit_tests, have data dependencies that
            # can take longer than the default timeout to push. See
            # crbug.com/791632 for context.
            timeout=600 * math.ceil(_GetDeviceTimeoutMultiplier() / 10))
        if not host_device_tuples:
          dev.RemovePath(device_root, force=True, recursive=True, rename=True)
          dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)

      def init_tool_and_start_servers(dev):
        tool = self.GetTool(dev)
        tool.CopyFiles(dev)
        tool.SetupEnvironment()

        try:
          # See https://crbug.com/1030827.
          # This is a hack that may break in the future. We're relying on the
          # fact that adb doesn't use ipv6 for it's server, and so doesn't
          # listen on ipv6, but ssh remote forwarding does. 5037 is the port
          # number adb uses for its server.
          if "[::1]:5037" in subprocess.check_output(
              "ss -o state listening 'sport = 5037'", shell=True):
            logging.error(
                'Test Server cannot be started with a remote-forwarded adb '
                'server. Continuing anyways, but some tests may fail.')
            return
        except subprocess.CalledProcessError:
          pass

        self._servers[str(dev)] = []
        if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
          self._servers[str(dev)].append(
              local_test_server_spawner.LocalTestServerSpawner(
                  ports.AllocateTestServerPort(), dev, tool))

        for s in self._servers[str(dev)]:
          s.SetUp()

      def bind_crash_handler(step, dev):
        # Late-bind |step| so each closure retries its own step on crash.
        return lambda: crash_handler.RetryOnSystemCrash(step, dev)

      # Explicitly enable root to ensure that tests run under deterministic
      # conditions. Without this explicit call, EnableRoot() is called from
      # push_test_data() when PushChangedFiles() determines that it should use
      # _PushChangedFilesZipped(), which is only most of the time.
      # Root is required (amongst maybe other reasons) to pull the results file
      # from the device, since it lives within the application's data directory
      # (via GetApplicationDataDirectory()).
      device.EnableRoot()

      steps = [
          bind_crash_handler(s, device)
          for s in (install_apk, push_test_data, init_tool_and_start_servers)]
      if self._env.concurrent_adb:
        reraiser_thread.RunAsync(steps)
      else:
        for step in steps:
          step()

    self._env.parallel_devices.pMap(
        individual_device_set_up,
        self._test_instance.GetDataDependencies())
+
  #override
  def _ShouldShard(self):
    """Returns True: gtest runs are always split into shards."""
    return True
+
  #override
  def _CreateShards(self, tests):
    """Partitions |tests| into shards, isolating previously-crashed tests."""
    # _crashes are tests that might crash and make the tests in the same shard
    # following the crashed testcase not run.
    # Thus we need to create separate shards for each crashed testcase,
    # so that other tests can be run.
    device_count = len(self._env.devices)
    shards = []

    # Add shards with only one suspect testcase.
    shards += [[crash] for crash in self._crashes if crash in tests]

    # Delete suspect testcase from tests.
    tests = [test for test in tests if not test in self._crashes]

    max_shard_size = self._test_instance.test_launcher_batch_limit

    shards.extend(self._PartitionTests(tests, device_count, max_shard_size))
    return shards
+
  #override
  def _GetTests(self):
    """Returns the filtered, externally-sharded list of tests to run.

    Extracts the list from the gtest filter when possible; otherwise asks
    every device for its test list in parallel.

    Raises:
      device_errors.CommandFailedError: if no device could list tests.
    """
    if self._test_instance.extract_test_list_from_filter:
      # When the exact list of tests to run is given via command-line (e.g. when
      # locally iterating on a specific test), skip querying the device (which
      # takes ~3 seconds).
      tests = _ExtractTestsFromFilter(self._test_instance.gtest_filter)
      if tests:
        return tests

    # Even when there's only one device, it still makes sense to retrieve the
    # test list so that tests can be split up and run in batches rather than all
    # at once (since test output is not streamed).
    @local_device_environment.handle_shard_failures_with(
        on_failure=self._env.DenylistDevice)
    def list_tests(dev):
      timeout = 30 * _GetDeviceTimeoutMultiplier()
      retries = 1
      if self._test_instance.wait_for_java_debugger:
        timeout = None

      # Debugger flags would make the listing invocation block.
      flags = [
          f for f in self._test_instance.flags
          if f not in ['--wait-for-debugger', '--wait-for-java-debugger']
      ]
      flags.append('--gtest_list_tests')

      # TODO(crbug.com/726880): Remove retries when no longer necessary.
      for i in range(0, retries+1):
        logging.info('flags:')
        for f in flags:
          logging.info('  %s', f)

        with self._ArchiveLogcat(dev, 'list_tests'):
          raw_test_list = crash_handler.RetryOnSystemCrash(
              lambda d: self._delegate.Run(
                  None, d, flags=' '.join(flags), timeout=timeout),
              device=dev)

        tests = gtest_test_instance.ParseGTestListTests(raw_test_list)
        if not tests:
          logging.info('No tests found. Output:')
          for l in raw_test_list:
            logging.info('  %s', l)
          if i < retries:
            logging.info('Retrying...')
        else:
          break
      return tests

    # Query all devices in case one fails.
    test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)

    # If all devices failed to list tests, raise an exception.
    # Check that tl is not None and is not empty.
    if all(not tl for tl in test_lists):
      raise device_errors.CommandFailedError(
          'Failed to list tests on any device')
    tests = list(sorted(set().union(*[set(tl) for tl in test_lists if tl])))
    tests = self._test_instance.FilterTests(tests)
    tests = self._ApplyExternalSharding(
        tests, self._test_instance.external_shard_index,
        self._test_instance.total_external_shards)
    return tests
+
+ def _UploadTestArtifacts(self, device, test_artifacts_dir):
+ # TODO(jbudorick): Reconcile this with the output manager once
+ # https://codereview.chromium.org/2933993002/ lands.
+ if test_artifacts_dir:
+ with tempfile_ext.NamedTemporaryDirectory() as test_artifacts_host_dir:
+ device.PullFile(test_artifacts_dir.name, test_artifacts_host_dir)
+ with tempfile_ext.NamedTemporaryDirectory() as temp_zip_dir:
+ zip_base_name = os.path.join(temp_zip_dir, 'test_artifacts')
+ test_artifacts_zip = shutil.make_archive(
+ zip_base_name, 'zip', test_artifacts_host_dir)
+ link = google_storage_helper.upload(
+ google_storage_helper.unique_name(
+ 'test_artifacts', device=device),
+ test_artifacts_zip,
+ bucket='%s/test_artifacts' % (
+ self._test_instance.gs_test_artifacts_bucket))
+ logging.info('Uploading test artifacts to %s.', link)
+ return link
+ return None
+
+ def _PullRenderTestOutput(self, device, render_test_output_device_dir):
+ # We pull the render tests into a temp directory then copy them over
+ # individually. Otherwise we end up with a temporary directory name
+ # in the host output directory.
+ with tempfile_ext.NamedTemporaryDirectory() as tmp_host_dir:
+ try:
+ device.PullFile(render_test_output_device_dir, tmp_host_dir)
+ except device_errors.CommandFailedError:
+ logging.exception('Failed to pull render test output dir %s',
+ render_test_output_device_dir)
+ temp_host_dir = os.path.join(
+ tmp_host_dir, os.path.basename(render_test_output_device_dir))
+ for output_file in os.listdir(temp_host_dir):
+ src_path = os.path.join(temp_host_dir, output_file)
+ dst_path = os.path.join(self._test_instance.render_test_output_dir,
+ output_file)
+ shutil.move(src_path, dst_path)
+
+ @contextlib.contextmanager
+ def _ArchiveLogcat(self, device, test):
+ if isinstance(test, str):
+ desc = test
+ else:
+ desc = hash(tuple(test))
+
+ stream_name = 'logcat_%s_shard%s_%s_%s' % (
+ desc, self._test_instance.external_shard_index,
+ time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
+
+ logcat_file = None
+ logmon = None
+ try:
+ with self._env.output_manager.ArchivedTempfile(stream_name,
+ 'logcat') as logcat_file:
+ with logcat_monitor.LogcatMonitor(
+ device.adb,
+ filter_specs=local_device_environment.LOGCAT_FILTERS,
+ output_file=logcat_file.name,
+ check_error=False) as logmon:
+ with contextlib_ext.Optional(trace_event.trace(str(test)),
+ self._env.trace_output):
+ yield logcat_file
+ finally:
+ if logmon:
+ logmon.Close()
+ if logcat_file and logcat_file.Link():
+ logging.info('Logcat saved to %s', logcat_file.Link())
+
+ #override
+ def _RunTest(self, device, test):
+ # Run the test.
+ timeout = (self._test_instance.shard_timeout *
+ self.GetTool(device).GetTimeoutScale() *
+ _GetDeviceTimeoutMultiplier())
+ if self._test_instance.wait_for_java_debugger:
+ timeout = None
+ if self._test_instance.store_tombstones:
+ tombstones.ClearAllTombstones(device)
+ test_perf_output_filename = next(self._test_perf_output_filenames)
+
+ if self._test_instance.isolated_script_test_output:
+ suffix = '.json'
+ else:
+ suffix = '.xml'
+
+ with device_temp_file.DeviceTempFile(
+ adb=device.adb,
+ dir=self._delegate.ResultsDirectory(device),
+ suffix=suffix) as device_tmp_results_file:
+ with contextlib_ext.Optional(
+ device_temp_file.NamedDeviceTemporaryDirectory(
+ adb=device.adb, dir='/sdcard/'),
+ self._test_instance.gs_test_artifacts_bucket) as test_artifacts_dir:
+ with (contextlib_ext.Optional(
+ device_temp_file.DeviceTempFile(
+ adb=device.adb, dir=self._delegate.ResultsDirectory(device)),
+ test_perf_output_filename)) as isolated_script_test_perf_output:
+ with contextlib_ext.Optional(
+ device_temp_file.NamedDeviceTemporaryDirectory(adb=device.adb,
+ dir='/sdcard/'),
+ self._test_instance.render_test_output_dir
+ ) as render_test_output_dir:
+
+ flags = list(self._test_instance.flags)
+ if self._test_instance.enable_xml_result_parsing:
+ flags.append('--gtest_output=xml:%s' %
+ device_tmp_results_file.name)
+
+ if self._test_instance.gs_test_artifacts_bucket:
+ flags.append('--test_artifacts_dir=%s' % test_artifacts_dir.name)
+
+ if self._test_instance.isolated_script_test_output:
+ flags.append('--isolated-script-test-output=%s' %
+ device_tmp_results_file.name)
+
+ if test_perf_output_filename:
+ flags.append('--isolated_script_test_perf_output=%s' %
+ isolated_script_test_perf_output.name)
+
+ if self._test_instance.render_test_output_dir:
+ flags.append('--render-test-output-dir=%s' %
+ render_test_output_dir.name)
+
+ logging.info('flags:')
+ for f in flags:
+ logging.info(' %s', f)
+
+ with self._ArchiveLogcat(device, test) as logcat_file:
+ output = self._delegate.Run(test,
+ device,
+ flags=' '.join(flags),
+ timeout=timeout,
+ retries=0)
+
+ if self._test_instance.enable_xml_result_parsing:
+ try:
+ gtest_xml = device.ReadFile(device_tmp_results_file.name)
+ except device_errors.CommandFailedError:
+ logging.exception('Failed to pull gtest results XML file %s',
+ device_tmp_results_file.name)
+ gtest_xml = None
+
+ if self._test_instance.isolated_script_test_output:
+ try:
+ gtest_json = device.ReadFile(device_tmp_results_file.name)
+ except device_errors.CommandFailedError:
+ logging.exception('Failed to pull gtest results JSON file %s',
+ device_tmp_results_file.name)
+ gtest_json = None
+
+ if test_perf_output_filename:
+ try:
+ device.PullFile(isolated_script_test_perf_output.name,
+ test_perf_output_filename)
+ except device_errors.CommandFailedError:
+ logging.exception('Failed to pull chartjson results %s',
+ isolated_script_test_perf_output.name)
+
+ test_artifacts_url = self._UploadTestArtifacts(
+ device, test_artifacts_dir)
+
+ if render_test_output_dir:
+ self._PullRenderTestOutput(device, render_test_output_dir.name)
+
+ for s in self._servers[str(device)]:
+ s.Reset()
+ if self._test_instance.app_files:
+ self._delegate.PullAppFiles(device, self._test_instance.app_files,
+ self._test_instance.app_file_dir)
+ if not self._env.skip_clear_data:
+ self._delegate.Clear(device)
+
+ for l in output:
+ logging.info(l)
+
+ # Parse the output.
+ # TODO(jbudorick): Transition test scripts away from parsing stdout.
+ if self._test_instance.enable_xml_result_parsing:
+ results = gtest_test_instance.ParseGTestXML(gtest_xml)
+ elif self._test_instance.isolated_script_test_output:
+ results = gtest_test_instance.ParseGTestJSON(gtest_json)
+ else:
+ results = gtest_test_instance.ParseGTestOutput(
+ output, self._test_instance.symbolizer, device.product_cpu_abi)
+
+ tombstones_url = None
+ for r in results:
+ if logcat_file:
+ r.SetLink('logcat', logcat_file.Link())
+
+ if self._test_instance.gs_test_artifacts_bucket:
+ r.SetLink('test_artifacts', test_artifacts_url)
+
+ if r.GetType() == base_test_result.ResultType.CRASH:
+ self._crashes.add(r.GetName())
+ if self._test_instance.store_tombstones:
+ if not tombstones_url:
+ resolved_tombstones = tombstones.ResolveTombstones(
+ device,
+ resolve_all_tombstones=True,
+ include_stack_symbols=False,
+ wipe_tombstones=True)
+ stream_name = 'tombstones_%s_%s' % (
+ time.strftime('%Y%m%dT%H%M%S', time.localtime()),
+ device.serial)
+ tombstones_url = logdog_helper.text(
+ stream_name, '\n'.join(resolved_tombstones))
+ r.SetLink('tombstones', tombstones_url)
+
+ tests_stripped_disabled_prefix = set()
+ for t in test:
+ tests_stripped_disabled_prefix.add(
+ gtest_test_instance.TestNameWithoutDisabledPrefix(t))
+ not_run_tests = tests_stripped_disabled_prefix.difference(
+ set(r.GetName() for r in results))
+ return results, list(not_run_tests) if results else None
+
+ #override
+ def TearDown(self):
+ # By default, teardown will invoke ADB. When receiving SIGTERM due to a
+ # timeout, there's a high probability that ADB is non-responsive. In these
+ # cases, sending an ADB command will potentially take a long time to time
+ # out. Before this happens, the process will be hard-killed for not
+ # responding to SIGTERM fast enough.
+ if self._received_sigterm:
+ return
+
+ @local_device_environment.handle_shard_failures
+ @trace_event.traced
+ def individual_device_tear_down(dev):
+ for s in self._servers.get(str(dev), []):
+ s.TearDown()
+
+ tool = self.GetTool(dev)
+ tool.CleanUpEnvironment()
+
+ self._env.parallel_devices.pMap(individual_device_tear_down)
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py
new file mode 100755
index 0000000000..b664d58131
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_gtest_run_test.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env vpython3
+# Copyright 2021 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Tests for local_device_gtest_test_run."""
+
+# pylint: disable=protected-access
+
+
+import os
+import tempfile
+import unittest
+
+from pylib.gtest import gtest_test_instance
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_gtest_run
+from py_utils import tempfile_ext
+
+import mock # pylint: disable=import-error
+
+
+class LocalDeviceGtestRunTest(unittest.TestCase):
+ def setUp(self):
+ self._obj = local_device_gtest_run.LocalDeviceGtestRun(
+ mock.MagicMock(spec=local_device_environment.LocalDeviceEnvironment),
+ mock.MagicMock(spec=gtest_test_instance.GtestTestInstance))
+
+ def testExtractTestsFromFilter(self):
+ # Checks splitting by colons.
+ self.assertEqual([
+ 'b17',
+ 'm4e3',
+ 'p51',
+ ], local_device_gtest_run._ExtractTestsFromFilter('b17:m4e3:p51'))
+ # Checks the '-' sign.
+ self.assertIsNone(local_device_gtest_run._ExtractTestsFromFilter('-mk2'))
+ # Checks more than one asterisk.
+ self.assertIsNone(
+ local_device_gtest_run._ExtractTestsFromFilter('.mk2*:.M67*'))
+ # Checks just an asterisk without a period.
+ self.assertIsNone(local_device_gtest_run._ExtractTestsFromFilter('M67*'))
+ # Checks an asterisk at the end with a period.
+ self.assertEqual(['.M67*'],
+ local_device_gtest_run._ExtractTestsFromFilter('.M67*'))
+
+ def testGetLLVMProfilePath(self):
+ path = local_device_gtest_run._GetLLVMProfilePath('test_dir', 'sr71', '5')
+ self.assertEqual(path, os.path.join('test_dir', 'sr71_5_%2m.profraw'))
+
+ @mock.patch('subprocess.check_output')
+ def testMergeCoverageFiles(self, mock_sub):
+ with tempfile_ext.NamedTemporaryDirectory() as cov_tempd:
+ pro_tempd = os.path.join(cov_tempd, 'profraw')
+ os.mkdir(pro_tempd)
+ profdata = tempfile.NamedTemporaryFile(
+ dir=pro_tempd,
+ delete=False,
+ suffix=local_device_gtest_run._PROFRAW_FILE_EXTENSION)
+ local_device_gtest_run._MergeCoverageFiles(cov_tempd, pro_tempd)
+ # The input .profraw file should be deleted after merging.
+ self.assertFalse(os.path.exists(profdata.name))
+ self.assertTrue(mock_sub.called)
+
+ @mock.patch('pylib.utils.google_storage_helper.upload')
+ def testUploadTestArtifacts(self, mock_gsh):
+ link = self._obj._UploadTestArtifacts(mock.MagicMock(), None)
+ self.assertFalse(mock_gsh.called)
+ self.assertIsNone(link)
+
+ result = 'A/10/warthog/path'
+ mock_gsh.return_value = result
+ with tempfile_ext.NamedTemporaryFile() as temp_f:
+ link = self._obj._UploadTestArtifacts(mock.MagicMock(), temp_f)
+ self.assertTrue(mock_gsh.called)
+ self.assertEqual(result, link)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py
new file mode 100644
index 0000000000..54cb92a39c
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run.py
@@ -0,0 +1,1512 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import collections
+import contextlib
+import copy
+import hashlib
+import json
+import logging
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import time
+
+from six.moves import range # pylint: disable=redefined-builtin
+from six.moves import zip # pylint: disable=redefined-builtin
+from devil import base_error
+from devil.android import apk_helper
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android import device_temp_file
+from devil.android import flag_changer
+from devil.android.sdk import shared_prefs
+from devil.android import logcat_monitor
+from devil.android.tools import system_app
+from devil.android.tools import webview_app
+from devil.utils import reraiser_thread
+from incremental_install import installer
+from pylib import constants
+from pylib import valgrind_tools
+from pylib.base import base_test_result
+from pylib.base import output_manager
+from pylib.constants import host_paths
+from pylib.instrumentation import instrumentation_test_instance
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_test_run
+from pylib.output import remote_output_manager
+from pylib.utils import chrome_proxy_utils
+from pylib.utils import gold_utils
+from pylib.utils import instrumentation_tracing
+from pylib.utils import shared_preference_utils
+from py_trace_event import trace_event
+from py_trace_event import trace_time
+from py_utils import contextlib_ext
+from py_utils import tempfile_ext
+import tombstones
+
+with host_paths.SysPath(
+ os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'), 0):
+ import jinja2 # pylint: disable=import-error
+ import markupsafe # pylint: disable=import-error,unused-import
+
+
+_JINJA_TEMPLATE_DIR = os.path.join(
+ host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'pylib', 'instrumentation')
+_JINJA_TEMPLATE_FILENAME = 'render_test.html.jinja'
+
+_WPR_GO_LINUX_X86_64_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT,
+ 'third_party', 'webpagereplay', 'bin',
+ 'linux', 'x86_64', 'wpr')
+
+_TAG = 'test_runner_py'
+
+TIMEOUT_ANNOTATIONS = [
+ ('Manual', 10 * 60 * 60),
+ ('IntegrationTest', 10 * 60),
+ ('External', 10 * 60),
+ ('EnormousTest', 5 * 60),
+ ('LargeTest', 2 * 60),
+ ('MediumTest', 30),
+ ('SmallTest', 10),
+]
+
+# Account for Instrumentation and process init overhead.
+FIXED_TEST_TIMEOUT_OVERHEAD = 60
+
+# 30 minute max timeout for an instrumentation invocation to avoid shard
+# timeouts when tests never finish. The shard timeout is currently 60 minutes,
+# so this needs to be less than that.
+MAX_BATCH_TEST_TIMEOUT = 30 * 60
+
+LOGCAT_FILTERS = ['*:e', 'chromium:v', 'cr_*:v', 'DEBUG:I',
+ 'StrictMode:D', '%s:I' % _TAG]
+
+EXTRA_SCREENSHOT_FILE = (
+ 'org.chromium.base.test.ScreenshotOnFailureStatement.ScreenshotFile')
+
+EXTRA_UI_CAPTURE_DIR = (
+ 'org.chromium.base.test.util.Screenshooter.ScreenshotDir')
+
+EXTRA_TRACE_FILE = ('org.chromium.base.test.BaseJUnit4ClassRunner.TraceFile')
+
+_EXTRA_TEST_LIST = (
+ 'org.chromium.base.test.BaseChromiumAndroidJUnitRunner.TestList')
+
+_EXTRA_PACKAGE_UNDER_TEST = ('org.chromium.chrome.test.pagecontroller.rules.'
+ 'ChromeUiApplicationTestRule.PackageUnderTest')
+
+FEATURE_ANNOTATION = 'Feature'
+RENDER_TEST_FEATURE_ANNOTATION = 'RenderTest'
+WPR_ARCHIVE_FILE_PATH_ANNOTATION = 'WPRArchiveDirectory'
+WPR_RECORD_REPLAY_TEST_FEATURE_ANNOTATION = 'WPRRecordReplayTest'
+
+_DEVICE_GOLD_DIR = 'skia_gold'
+# A map of Android product models to SDK ints.
+RENDER_TEST_MODEL_SDK_CONFIGS = {
+ # Android x86 emulator.
+ 'Android SDK built for x86': [23],
+ # We would like this to be supported, but it is currently too prone to
+ # introducing flakiness due to a combination of Gold and Chromium issues.
+ # See crbug.com/1233700 and skbug.com/12149 for more information.
+ # 'Pixel 2': [28],
+}
+
+_BATCH_SUFFIX = '_batch'
+_TEST_BATCH_MAX_GROUP_SIZE = 256
+
+
+@contextlib.contextmanager
+def _LogTestEndpoints(device, test_name):
+ device.RunShellCommand(
+ ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name],
+ check_return=True)
+ try:
+ yield
+ finally:
+ device.RunShellCommand(
+ ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name],
+ check_return=True)
+
+
+def DismissCrashDialogs(device):
+ # Dismiss any error dialogs. Limit the number in case we have an error
+ # loop or we are failing to dismiss.
+ packages = set()
+ try:
+ for _ in range(10):
+ package = device.DismissCrashDialogIfNeeded(timeout=10, retries=1)
+ if not package:
+ break
+ packages.add(package)
+ except device_errors.CommandFailedError:
+ logging.exception('Error while attempting to dismiss crash dialog.')
+ return packages
+
+
+_CURRENT_FOCUS_CRASH_RE = re.compile(
+ r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
+
+
+def _GetTargetPackageName(test_apk):
+ # apk_under_test does not work for smoke tests, where it is set to an
+ # apk that is not listed as the targetPackage in the test apk's manifest.
+ return test_apk.GetAllInstrumentations()[0]['android:targetPackage']
+
+
+class LocalDeviceInstrumentationTestRun(
+ local_device_test_run.LocalDeviceTestRun):
+ def __init__(self, env, test_instance):
+ super(LocalDeviceInstrumentationTestRun, self).__init__(
+ env, test_instance)
+ self._chrome_proxy = None
+ self._context_managers = collections.defaultdict(list)
+ self._flag_changers = {}
+ self._render_tests_device_output_dir = None
+ self._shared_prefs_to_restore = []
+ self._skia_gold_session_manager = None
+ self._skia_gold_work_dir = None
+
+ #override
+ def TestPackage(self):
+ return self._test_instance.suite
+
+ #override
+ def SetUp(self):
+ target_package = _GetTargetPackageName(self._test_instance.test_apk)
+
+ @local_device_environment.handle_shard_failures_with(
+ self._env.DenylistDevice)
+ @trace_event.traced
+ def individual_device_set_up(device, host_device_tuples):
+ steps = []
+
+ if self._test_instance.replace_system_package:
+ @trace_event.traced
+ def replace_package(dev):
+ # We need the context manager to be applied before modifying any
+ # shared preference files in case the replacement APK needs to be
+ # set up, and it needs to be applied while the test is running.
+ # Thus, it needs to be applied early during setup, but must still be
+ # applied during _RunTest, which isn't possible using 'with' without
+ # applying the context manager up in test_runner. Instead, we
+ # manually invoke its __enter__ and __exit__ methods in setup and
+ # teardown.
+ system_app_context = system_app.ReplaceSystemApp(
+ dev, self._test_instance.replace_system_package.package,
+ self._test_instance.replace_system_package.replacement_apk)
+ # Pylint is not smart enough to realize that this field has
+ # an __enter__ method, and will complain loudly.
+ # pylint: disable=no-member
+ system_app_context.__enter__()
+ # pylint: enable=no-member
+ self._context_managers[str(dev)].append(system_app_context)
+
+ steps.append(replace_package)
+
+ if self._test_instance.system_packages_to_remove:
+
+ @trace_event.traced
+ def remove_packages(dev):
+ logging.info('Attempting to remove system packages %s',
+ self._test_instance.system_packages_to_remove)
+ system_app.RemoveSystemApps(
+ dev, self._test_instance.system_packages_to_remove)
+ logging.info('Done removing system packages')
+
+ # This should be at the front in case we're removing the package to make
+ # room for another APK installation later on. Since we disallow
+ # concurrent adb with this option specified, this should be safe.
+ steps.insert(0, remove_packages)
+
+ if self._test_instance.use_webview_provider:
+ @trace_event.traced
+ def use_webview_provider(dev):
+ # We need the context manager to be applied before modifying any
+ # shared preference files in case the replacement APK needs to be
+ # set up, and it needs to be applied while the test is running.
+ # Thus, it needs to be applied early during setup, but must still be
+ # applied during _RunTest, which isn't possible using 'with' without
+ # applying the context manager up in test_runner. Instead, we
+ # manually invoke its __enter__ and __exit__ methods in setup and
+ # teardown.
+ webview_context = webview_app.UseWebViewProvider(
+ dev, self._test_instance.use_webview_provider)
+ # Pylint is not smart enough to realize that this field has
+ # an __enter__ method, and will complain loudly.
+ # pylint: disable=no-member
+ webview_context.__enter__()
+ # pylint: enable=no-member
+ self._context_managers[str(dev)].append(webview_context)
+
+ steps.append(use_webview_provider)
+
+ def install_helper(apk,
+ modules=None,
+ fake_modules=None,
+ permissions=None,
+ additional_locales=None):
+
+ @instrumentation_tracing.no_tracing
+ @trace_event.traced
+ def install_helper_internal(d, apk_path=None):
+ # pylint: disable=unused-argument
+ d.Install(apk,
+ modules=modules,
+ fake_modules=fake_modules,
+ permissions=permissions,
+ additional_locales=additional_locales)
+
+ return install_helper_internal
+
+ def incremental_install_helper(apk, json_path, permissions):
+
+ @trace_event.traced
+ def incremental_install_helper_internal(d, apk_path=None):
+ # pylint: disable=unused-argument
+ installer.Install(d, json_path, apk=apk, permissions=permissions)
+ return incremental_install_helper_internal
+
+ permissions = self._test_instance.test_apk.GetPermissions()
+ if self._test_instance.test_apk_incremental_install_json:
+ steps.append(incremental_install_helper(
+ self._test_instance.test_apk,
+ self._test_instance.
+ test_apk_incremental_install_json,
+ permissions))
+ else:
+ steps.append(
+ install_helper(
+ self._test_instance.test_apk, permissions=permissions))
+
+ steps.extend(
+ install_helper(apk) for apk in self._test_instance.additional_apks)
+
+ # We'll potentially need the package names later for setting app
+ # compatibility workarounds.
+ for apk in (self._test_instance.additional_apks +
+ [self._test_instance.test_apk]):
+ self._installed_packages.append(apk_helper.GetPackageName(apk))
+
+ # The apk under test needs to be installed last since installing other
+ # apks after will unintentionally clear the fake module directory.
+ # TODO(wnwen): Make this more robust, fix crbug.com/1010954.
+ if self._test_instance.apk_under_test:
+ self._installed_packages.append(
+ apk_helper.GetPackageName(self._test_instance.apk_under_test))
+ permissions = self._test_instance.apk_under_test.GetPermissions()
+ if self._test_instance.apk_under_test_incremental_install_json:
+ steps.append(
+ incremental_install_helper(
+ self._test_instance.apk_under_test,
+ self._test_instance.apk_under_test_incremental_install_json,
+ permissions))
+ else:
+ steps.append(
+ install_helper(self._test_instance.apk_under_test,
+ self._test_instance.modules,
+ self._test_instance.fake_modules, permissions,
+ self._test_instance.additional_locales))
+
+ @trace_event.traced
+ def set_debug_app(dev):
+ # Set debug app in order to enable reading command line flags on user
+ # builds
+ cmd = ['am', 'set-debug-app', '--persistent']
+ if self._test_instance.wait_for_java_debugger:
+ cmd.append('-w')
+ cmd.append(target_package)
+ dev.RunShellCommand(cmd, check_return=True)
+
+ @trace_event.traced
+ def edit_shared_prefs(dev):
+ for setting in self._test_instance.edit_shared_prefs:
+ shared_pref = shared_prefs.SharedPrefs(
+ dev, setting['package'], setting['filename'],
+ use_encrypted_path=setting.get('supports_encrypted_path', False))
+ pref_to_restore = copy.copy(shared_pref)
+ pref_to_restore.Load()
+ self._shared_prefs_to_restore.append(pref_to_restore)
+
+ shared_preference_utils.ApplySharedPreferenceSetting(
+ shared_pref, setting)
+
+ @trace_event.traced
+ def set_vega_permissions(dev):
+ # Normally, installation of VrCore automatically grants storage
+ # permissions. However, since VrCore is part of the system image on
+ # the Vega standalone headset, we don't install the APK as part of test
+ # setup. Instead, grant the permissions here so that it can take
+ # screenshots.
+ if dev.product_name == 'vega':
+ dev.GrantPermissions('com.google.vr.vrcore', [
+ 'android.permission.WRITE_EXTERNAL_STORAGE',
+ 'android.permission.READ_EXTERNAL_STORAGE'
+ ])
+
+ @instrumentation_tracing.no_tracing
+ def push_test_data(dev):
+ device_root = posixpath.join(dev.GetExternalStoragePath(),
+ 'chromium_tests_root')
+ host_device_tuples_substituted = [
+ (h, local_device_test_run.SubstituteDeviceRoot(d, device_root))
+ for h, d in host_device_tuples]
+ logging.info('Pushing data dependencies.')
+ for h, d in host_device_tuples_substituted:
+ logging.debug(' %r -> %r', h, d)
+ local_device_environment.place_nomedia_on_device(dev, device_root)
+ dev.PushChangedFiles(host_device_tuples_substituted,
+ delete_device_stale=True)
+ if not host_device_tuples_substituted:
+ dev.RunShellCommand(['rm', '-rf', device_root], check_return=True)
+ dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)
+
+ @trace_event.traced
+ def create_flag_changer(dev):
+ if self._test_instance.flags:
+ self._CreateFlagChangerIfNeeded(dev)
+ logging.debug('Attempting to set flags: %r',
+ self._test_instance.flags)
+ self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)
+
+ valgrind_tools.SetChromeTimeoutScale(
+ dev, self._test_instance.timeout_scale)
+
+ steps += [
+ set_debug_app, edit_shared_prefs, push_test_data, create_flag_changer,
+ set_vega_permissions, DismissCrashDialogs
+ ]
+
+ def bind_crash_handler(step, dev):
+ return lambda: crash_handler.RetryOnSystemCrash(step, dev)
+
+ steps = [bind_crash_handler(s, device) for s in steps]
+
+ try:
+ if self._env.concurrent_adb:
+ reraiser_thread.RunAsync(steps)
+ else:
+ for step in steps:
+ step()
+ if self._test_instance.store_tombstones:
+ tombstones.ClearAllTombstones(device)
+ except device_errors.CommandFailedError:
+ if not device.IsOnline():
+ raise
+
+ # A bugreport can be large and take a while to generate, so only capture
+ # one if we're using a remote manager.
+ if isinstance(
+ self._env.output_manager,
+ remote_output_manager.RemoteOutputManager):
+ logging.error(
+ 'Error when setting up device for tests. Taking a bugreport for '
+ 'investigation. This may take a while...')
+ report_name = '%s.bugreport' % device.serial
+ with self._env.output_manager.ArchivedTempfile(
+ report_name, 'bug_reports') as report_file:
+ device.TakeBugReport(report_file.name)
+ logging.error('Bug report saved to %s', report_file.Link())
+ raise
+
+ self._env.parallel_devices.pMap(
+ individual_device_set_up,
+ self._test_instance.GetDataDependencies())
+ # Created here instead of on a per-test basis so that the downloaded
+ # expectations can be re-used between tests, saving a significant amount
+ # of time.
+ self._skia_gold_work_dir = tempfile.mkdtemp()
+ self._skia_gold_session_manager = gold_utils.AndroidSkiaGoldSessionManager(
+ self._skia_gold_work_dir, self._test_instance.skia_gold_properties)
+ if self._test_instance.wait_for_java_debugger:
+ logging.warning('*' * 80)
+ logging.warning('Waiting for debugger to attach to process: %s',
+ target_package)
+ logging.warning('*' * 80)
+
+ #override
+ def TearDown(self):
+ shutil.rmtree(self._skia_gold_work_dir)
+ self._skia_gold_work_dir = None
+ self._skia_gold_session_manager = None
+ # By default, teardown will invoke ADB. When receiving SIGTERM due to a
+ # timeout, there's a high probability that ADB is non-responsive. In these
+ # cases, sending an ADB command will potentially take a long time to time
+ # out. Before this happens, the process will be hard-killed for not
+ # responding to SIGTERM fast enough.
+ if self._received_sigterm:
+ return
+
+ @local_device_environment.handle_shard_failures_with(
+ self._env.DenylistDevice)
+ @trace_event.traced
+ def individual_device_tear_down(dev):
+ if str(dev) in self._flag_changers:
+ self._flag_changers[str(dev)].Restore()
+
+ # Remove package-specific configuration
+ dev.RunShellCommand(['am', 'clear-debug-app'], check_return=True)
+
+ valgrind_tools.SetChromeTimeoutScale(dev, None)
+
+ # Restore any shared preference files that we stored during setup.
+ # This should be run sometime before the replace package contextmanager
+ # gets exited so we don't have to special case restoring files of
+ # replaced system apps.
+ for pref_to_restore in self._shared_prefs_to_restore:
+ pref_to_restore.Commit(force_commit=True)
+
+ # Context manager exit handlers are applied in reverse order
+ # of the enter handlers.
+ for context in reversed(self._context_managers[str(dev)]):
+ # See pylint-related comment above with __enter__()
+ # pylint: disable=no-member
+ context.__exit__(*sys.exc_info())
+ # pylint: enable=no-member
+
+ self._env.parallel_devices.pMap(individual_device_tear_down)
+
+ def _CreateFlagChangerIfNeeded(self, device):
+ if str(device) not in self._flag_changers:
+ cmdline_file = 'test-cmdline-file'
+ if self._test_instance.use_apk_under_test_flags_file:
+ if self._test_instance.package_info:
+ cmdline_file = self._test_instance.package_info.cmdline_file
+ else:
+ raise Exception('No PackageInfo found but'
+ '--use-apk-under-test-flags-file is specified.')
+ self._flag_changers[str(device)] = flag_changer.FlagChanger(
+ device, cmdline_file)
+
+ #override
+ def _CreateShards(self, tests):
+ return tests
+
+ #override
+ def _GetTests(self):
+ if self._test_instance.junit4_runner_supports_listing:
+ raw_tests = self._GetTestsFromRunner()
+ tests = self._test_instance.ProcessRawTests(raw_tests)
+ else:
+ tests = self._test_instance.GetTests()
+ tests = self._ApplyExternalSharding(
+ tests, self._test_instance.external_shard_index,
+ self._test_instance.total_external_shards)
+ return tests
+
+ #override
+ def _GroupTests(self, tests):
+ batched_tests = dict()
+ other_tests = []
+ for test in tests:
+ annotations = test['annotations']
+ if 'Batch' in annotations and 'RequiresRestart' not in annotations:
+ batch_name = annotations['Batch']['value']
+ if not batch_name:
+ batch_name = test['class']
+
+ # Feature flags won't work in instrumentation tests unless the activity
+ # is restarted.
+ # Tests with identical features are grouped to minimize restarts.
+ if 'Features$EnableFeatures' in annotations:
+ batch_name += '|enabled:' + ','.join(
+ sorted(annotations['Features$EnableFeatures']['value']))
+ if 'Features$DisableFeatures' in annotations:
+ batch_name += '|disabled:' + ','.join(
+ sorted(annotations['Features$DisableFeatures']['value']))
+
+ if not batch_name in batched_tests:
+ batched_tests[batch_name] = []
+ batched_tests[batch_name].append(test)
+ else:
+ other_tests.append(test)
+
+ all_tests = []
+ for _, tests in list(batched_tests.items()):
+ tests.sort() # Ensure a consistent ordering across external shards.
+ all_tests.extend([
+ tests[i:i + _TEST_BATCH_MAX_GROUP_SIZE]
+ for i in range(0, len(tests), _TEST_BATCH_MAX_GROUP_SIZE)
+ ])
+ all_tests.extend(other_tests)
+ return all_tests
+
  #override
  def _GetUniqueTestName(self, test):
    # Delegate to the shared helper so names here match the names used in
    # generated results.
    return instrumentation_test_instance.GetUniqueTestName(test)
+
  #override
  def _RunTest(self, device, test):
    """Runs |test| (a single test dict, or a list forming one batch) on device.

    Builds the instrumentation extras (coverage, screenshots, tracing, WPR),
    pushes any required flags, runs the instrumentation, parses its raw
    output into results, then performs post-test cleanup and result
    enrichment (logcat/screenshot/tombstone links, crash detection).

    Returns:
      A 2-tuple (results, tests_to_rerun): results is a list of
      base_test_result.BaseTestResult; tests_to_rerun is a list of tests to
      retry unbatched, or None.
    """
    extras = {}

    # Provide package name under test for apk_under_test.
    if self._test_instance.apk_under_test:
      package_name = self._test_instance.apk_under_test.GetPackageName()
      extras[_EXTRA_PACKAGE_UNDER_TEST] = package_name

    flags_to_add = []
    test_timeout_scale = None
    if self._test_instance.coverage_directory:
      # Batches get a single combined coverage file named after the first
      # test in the batch.
      coverage_basename = '%s' % ('%s_%s_group' %
                                  (test[0]['class'], test[0]['method'])
                                  if isinstance(test, list) else '%s_%s' %
                                  (test['class'], test['method']))
      extras['coverage'] = 'true'
      coverage_directory = os.path.join(
          device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
      if not device.PathExists(coverage_directory):
        device.RunShellCommand(['mkdir', '-p', coverage_directory],
                               check_return=True)
      coverage_device_file = os.path.join(coverage_directory, coverage_basename)
      coverage_device_file += '.exec'
      extras['coverageFile'] = coverage_device_file

    if self._test_instance.enable_breakpad_dump:
      # Use external storage directory so that the breakpad dump can be accessed
      # by the test APK in addition to the apk_under_test.
      breakpad_dump_directory = os.path.join(device.GetExternalStoragePath(),
                                             'chromium_dumps')
      if device.PathExists(breakpad_dump_directory):
        device.RemovePath(breakpad_dump_directory, recursive=True)
      flags_to_add.append('--breakpad-dump-location=' + breakpad_dump_directory)

    # Save screenshot if screenshot dir is specified (save locally) or if
    # a GS bucket is passed (save in cloud).
    screenshot_device_file = device_temp_file.DeviceTempFile(
        device.adb, suffix='.png', dir=device.GetExternalStoragePath())
    extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

    # Set up the screenshot directory. This needs to be done for each test so
    # that we only get screenshots created by that test. It has to be on
    # external storage since the default location doesn't allow file creation
    # from the instrumentation test app on Android L and M.
    ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
        device.adb,
        dir=device.GetExternalStoragePath())
    extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name

    if self._env.trace_output:
      trace_device_file = device_temp_file.DeviceTempFile(
          device.adb, suffix='.json', dir=device.GetExternalStoragePath())
      extras[EXTRA_TRACE_FILE] = trace_device_file.name

    target = '%s/%s' % (self._test_instance.test_package,
                        self._test_instance.junit4_runner_class)
    if isinstance(test, list):
      # Batch: run all tests in one instrumentation invocation with a
      # combined timeout.

      def name_and_timeout(t):
        n = instrumentation_test_instance.GetTestName(t)
        i = self._GetTimeoutFromAnnotations(t['annotations'], n)
        return (n, i)

      test_names, timeouts = list(zip(*(name_and_timeout(t) for t in test)))

      test_name = instrumentation_test_instance.GetTestName(
          test[0]) + _BATCH_SUFFIX
      extras['class'] = ','.join(test_names)
      test_display_name = test_name
      timeout = min(MAX_BATCH_TEST_TIMEOUT,
                    FIXED_TEST_TIMEOUT_OVERHEAD + sum(timeouts))
    else:
      assert test['is_junit4']
      test_name = instrumentation_test_instance.GetTestName(test)
      test_display_name = self._GetUniqueTestName(test)

      extras['class'] = test_name
      if 'flags' in test and test['flags']:
        flags_to_add.extend(test['flags'])
      timeout = FIXED_TEST_TIMEOUT_OVERHEAD + self._GetTimeoutFromAnnotations(
          test['annotations'], test_display_name)

      test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
          test['annotations'])
      if test_timeout_scale and test_timeout_scale != 1:
        valgrind_tools.SetChromeTimeoutScale(
            device, test_timeout_scale * self._test_instance.timeout_scale)

    if self._test_instance.wait_for_java_debugger:
      timeout = None
    logging.info('preparing to run %s: %s', test_display_name, test)

    if _IsRenderTest(test):
      # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
      self._render_tests_device_output_dir = posixpath.join(
          device.GetExternalStoragePath(), 'render_test_output_dir')
      flags_to_add.append('--render-test-output-dir=%s' %
                          self._render_tests_device_output_dir)

    if _IsWPRRecordReplayTest(test):
      wpr_archive_relative_path = _GetWPRArchivePath(test)
      if not wpr_archive_relative_path:
        raise RuntimeError('Could not find the WPR archive file path '
                           'from annotation.')
      wpr_archive_path = os.path.join(host_paths.DIR_SOURCE_ROOT,
                                      wpr_archive_relative_path)
      if not os.path.isdir(wpr_archive_path):
        raise RuntimeError('WPRArchiveDirectory annotation should point '
                           'to a directory only. '
                           '{0} exist: {1}'.format(
                               wpr_archive_path,
                               os.path.exists(wpr_archive_path)))

      # Some linux version does not like # in the name. Replaces it with __.
      archive_path = os.path.join(
          wpr_archive_path,
          _ReplaceUncommonChars(self._GetUniqueTestName(test)) + '.wprgo')

      if not os.path.exists(_WPR_GO_LINUX_X86_64_PATH):
        # If we got to this stage, then we should have
        # checkout_android set.
        raise RuntimeError(
            'WPR Go binary not found at {}'.format(_WPR_GO_LINUX_X86_64_PATH))
      # Tells the server to use the binaries retrieved from CIPD.
      chrome_proxy_utils.ChromeProxySession.SetWPRServerBinary(
          _WPR_GO_LINUX_X86_64_PATH)
      self._chrome_proxy = chrome_proxy_utils.ChromeProxySession()
      self._chrome_proxy.wpr_record_mode = self._test_instance.wpr_record_mode
      self._chrome_proxy.Start(device, archive_path)
      flags_to_add.extend(self._chrome_proxy.GetFlags())

    if flags_to_add:
      self._CreateFlagChangerIfNeeded(device)
      self._flag_changers[str(device)].PushFlags(add=flags_to_add)

    time_ms = lambda: int(time.time() * 1e3)
    start_ms = time_ms()

    with ui_capture_dir:
      with self._ArchiveLogcat(device, test_name) as logcat_file:
        output = device.StartInstrumentation(
            target, raw=True, extras=extras, timeout=timeout, retries=0)

      duration_ms = time_ms() - start_ms

      with contextlib_ext.Optional(
          trace_event.trace('ProcessResults'),
          self._env.trace_output):
        output = self._test_instance.MaybeDeobfuscateLines(output)
        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, duration_ms,
            device.product_cpu_abi, self._test_instance.symbolizer)

      if self._env.trace_output:
        self._SaveTraceData(trace_device_file, device, test['class'])


      def restore_flags():
        if flags_to_add:
          self._flag_changers[str(device)].Restore()

      def restore_timeout_scale():
        if test_timeout_scale:
          valgrind_tools.SetChromeTimeoutScale(
              device, self._test_instance.timeout_scale)

      def handle_coverage_data():
        if self._test_instance.coverage_directory:
          try:
            if not os.path.exists(self._test_instance.coverage_directory):
              os.makedirs(self._test_instance.coverage_directory)
            device.PullFile(coverage_device_file,
                            self._test_instance.coverage_directory)
            device.RemovePath(coverage_device_file, True)
          except (OSError, base_error.BaseError) as e:
            logging.warning('Failed to handle coverage data after tests: %s', e)

      def handle_render_test_data():
        if _IsRenderTest(test):
          # Render tests do not cause test failure by default. So we have to
          # check to see if any failure images were generated even if the test
          # does not fail.
          try:
            self._ProcessRenderTestResults(device, results)
          finally:
            device.RemovePath(self._render_tests_device_output_dir,
                              recursive=True,
                              force=True)
            self._render_tests_device_output_dir = None

      def pull_ui_screen_captures():
        screenshots = []
        for filename in device.ListDirectory(ui_capture_dir.name):
          if filename.endswith('.json'):
            screenshots.append(pull_ui_screenshot(filename))
        if screenshots:
          json_archive_name = 'ui_capture_%s_%s.json' % (
              test_name.replace('#', '.'),
              time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
          with self._env.output_manager.ArchivedTempfile(
              json_archive_name, 'ui_capture', output_manager.Datatype.JSON
          ) as json_archive:
            json.dump(screenshots, json_archive)
          _SetLinkOnResults(results, test_name, 'ui screenshot',
                            json_archive.Link())

      def pull_ui_screenshot(filename):
        source_dir = ui_capture_dir.name
        json_path = posixpath.join(source_dir, filename)
        json_data = json.loads(device.ReadFile(json_path))
        image_file_path = posixpath.join(source_dir, json_data['location'])
        with self._env.output_manager.ArchivedTempfile(
            json_data['location'], 'ui_capture', output_manager.Datatype.PNG
        ) as image_archive:
          device.PullFile(image_file_path, image_archive.name)
        json_data['image_link'] = image_archive.Link()
        return json_data

      def stop_chrome_proxy():
        # Removes the port forwarding
        if self._chrome_proxy:
          self._chrome_proxy.Stop(device)
          if not self._chrome_proxy.wpr_replay_mode:
            logging.info('WPR Record test generated archive file %s',
                         self._chrome_proxy.wpr_archive_path)
          self._chrome_proxy = None


      # While constructing the TestResult objects, we can parallelize several
      # steps that involve ADB. These steps should NOT depend on any info in
      # the results! Things such as whether the test CRASHED have not yet been
      # determined.
      post_test_steps = [
          restore_flags, restore_timeout_scale, stop_chrome_proxy,
          handle_coverage_data, handle_render_test_data, pull_ui_screen_captures
      ]
      if self._env.concurrent_adb:
        reraiser_thread.RunAsync(post_test_steps)
      else:
        for step in post_test_steps:
          step()

      if logcat_file:
        _SetLinkOnResults(results, test_name, 'logcat', logcat_file.Link())

      # Update the result name if the test used flags.
      if flags_to_add:
        for r in results:
          if r.GetName() == test_name:
            r.SetName(test_display_name)

      # Add UNKNOWN results for any missing tests.
      iterable_test = test if isinstance(test, list) else [test]
      test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
      results_names = set(r.GetName() for r in results)
      results.extend(
          base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
          for u in test_names.difference(results_names))

      # Update the result type if we detect a crash.
      try:
        crashed_packages = DismissCrashDialogs(device)
        # Assume test package convention of ".test" suffix
        if any(p in self._test_instance.test_package for p in crashed_packages):
          for r in results:
            if r.GetType() == base_test_result.ResultType.UNKNOWN:
              r.SetType(base_test_result.ResultType.CRASH)
        elif (crashed_packages and len(results) == 1
              and results[0].GetType() != base_test_result.ResultType.PASS):
          # Add log message and set failure reason if:
          #   1) The app crash was likely not caused by the test.
          #   AND
          #   2) The app crash possibly caused the test to fail.
          # Crashes of the package under test are assumed to be the test's
          # fault.
          _AppendToLogForResult(
              results[0], 'OS displayed error dialogs for {}'.format(
                  ', '.join(crashed_packages)))
          results[0].SetFailureReason('{} Crashed'.format(
              ','.join(crashed_packages)))
      except device_errors.CommandTimeoutError:
        logging.warning('timed out when detecting/dismissing error dialogs')
        # Attach screenshot to the test to help with debugging the dialog
        # boxes.
        self._SaveScreenshot(device, screenshot_device_file, test_display_name,
                             results, 'dialog_box_screenshot')

      # The crash result can be set above or in
      # InstrumentationTestRun.GenerateTestResults. If a test crashes,
      # subprocesses such as the one used by EmbeddedTestServerRule can be left
      # alive in a bad state, so kill them now.
      for r in results:
        if r.GetType() == base_test_result.ResultType.CRASH:
          for apk in self._test_instance.additional_apks:
            device.ForceStop(apk.GetPackageName())

      # Handle failures by:
      #   - optionally taking a screenshot
      #   - logging the raw output at INFO level
      #   - clearing the application state while persisting permissions
      if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                 base_test_result.ResultType.SKIP)
             for r in results):
        self._SaveScreenshot(device, screenshot_device_file, test_display_name,
                             results, 'post_test_screenshot')

        logging.info('detected failure in %s. raw output:', test_display_name)
        for l in output:
          logging.info('  %s', l)
        if not self._env.skip_clear_data:
          if self._test_instance.package_info:
            permissions = (self._test_instance.apk_under_test.GetPermissions()
                           if self._test_instance.apk_under_test else None)
            device.ClearApplicationState(
                self._test_instance.package_info.package,
                permissions=permissions)
          if self._test_instance.enable_breakpad_dump:
            device.RemovePath(breakpad_dump_directory, recursive=True)
      else:
        logging.debug('raw output from %s:', test_display_name)
        for l in output:
          logging.debug('  %s', l)

      if self._test_instance.store_tombstones:
        resolved_tombstones = tombstones.ResolveTombstones(
            device,
            resolve_all_tombstones=True,
            include_stack_symbols=False,
            wipe_tombstones=True,
            tombstone_symbolizer=self._test_instance.symbolizer)
        if resolved_tombstones:
          tombstone_filename = 'tombstones_%s_%s' % (time.strftime(
              '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
          with self._env.output_manager.ArchivedTempfile(
              tombstone_filename, 'tombstones') as tombstone_file:
            tombstone_file.write('\n'.join(resolved_tombstones))

          # Associate tombstones with first crashing test.
          for result in results:
            if result.GetType() == base_test_result.ResultType.CRASH:
              result.SetLink('tombstones', tombstone_file.Link())
              break
          else:
            # We don't always detect crashes correctly. In this case,
            # associate with the first test.
            results[0].SetLink('tombstones', tombstone_file.Link())

    unknown_tests = set(r.GetName() for r in results
                        if r.GetType() == base_test_result.ResultType.UNKNOWN)

    # If a test that is batched crashes, the rest of the tests in that batch
    # won't be ran and will have their status left as unknown in results,
    # so rerun the tests. (see crbug/1127935)
    # Need to "unbatch" the tests, so that on subsequent tries, the tests can
    # get ran individually. This prevents an unrecognized crash from preventing
    # the tests in the batch from being ran. Running the test as unbatched does
    # not happen until a retry happens at the local_device_test_run/environment
    # level.
    tests_to_rerun = []
    for t in iterable_test:
      if self._GetUniqueTestName(t) in unknown_tests:
        prior_attempts = t.get('run_attempts', 0)
        t['run_attempts'] = prior_attempts + 1
        # It's possible every test in the batch could crash, so need to
        # try up to as many times as tests that there are.
        if prior_attempts < len(results):
          if t['annotations']:
            t['annotations'].pop('Batch', None)
          tests_to_rerun.append(t)

    # If we have a crash that isn't recognized as a crash in a batch, the tests
    # will be marked as unknown. Sometimes a test failure causes a crash, but
    # the crash isn't recorded because the failure was detected first.
    # When the UNKNOWN tests are reran while unbatched and pass,
    # they'll have an UNKNOWN, PASS status, so will be improperly marked as
    # flaky, so change status to NOTRUN and don't try rerunning. They will
    # get rerun individually at the local_device_test_run/environment level.
    # as the "Batch" annotation was removed.
    found_crash_or_fail = False
    for r in results:
      if (r.GetType() == base_test_result.ResultType.CRASH
          or r.GetType() == base_test_result.ResultType.FAIL):
        found_crash_or_fail = True
        break
    if not found_crash_or_fail:
      # Don't bother rerunning since the unrecognized crashes in
      # the batch will keep failing.
      tests_to_rerun = None
      for r in results:
        if r.GetType() == base_test_result.ResultType.UNKNOWN:
          r.SetType(base_test_result.ResultType.NOTRUN)

    return results, tests_to_rerun if tests_to_rerun else None
+
  def _GetTestsFromRunner(self):
    """Lists tests by asking the on-device junit4 runner, with pickle caching.

    A successful listing is cached in a pickle file next to the test APK and
    reused as long as the APK (or, for incremental installs, its dex files)
    has not been rebuilt since.

    Returns:
      The raw test list loaded from the runner's JSON output.

    Raises:
      device_errors.CommandFailedError: if every device fails to produce a
        test list.
    """
    test_apk_path = self._test_instance.test_apk.path
    pickle_path = '%s-runner.pickle' % test_apk_path
    # For incremental APKs, the code doesn't live in the apk, so instead check
    # the timestamp of the target's .stamp file.
    if self._test_instance.test_apk_incremental_install_json:
      with open(self._test_instance.test_apk_incremental_install_json) as f:
        data = json.load(f)
      out_dir = constants.GetOutDirectory()
      test_mtime = max(
          os.path.getmtime(os.path.join(out_dir, p)) for p in data['dex_files'])
    else:
      test_mtime = os.path.getmtime(test_apk_path)

    try:
      return instrumentation_test_instance.GetTestsFromPickle(
          pickle_path, test_mtime)
    except instrumentation_test_instance.TestListPickleException as e:
      logging.info('Could not get tests from pickle: %s', e)
    # Cache miss (or stale): fall through to a live on-device listing.
    logging.info('Getting tests by having %s list them.',
                 self._test_instance.junit4_runner_class)
    def list_tests(d):
      def _run(dev):
        # We need to use GetAppWritablePath instead of GetExternalStoragePath
        # here because we will not have applied legacy storage workarounds on R+
        # yet.
        with device_temp_file.DeviceTempFile(
            dev.adb, suffix='.json',
            dir=dev.GetAppWritablePath()) as dev_test_list_json:
          junit4_runner_class = self._test_instance.junit4_runner_class
          test_package = self._test_instance.test_package
          extras = {
            'log': 'true',
            # Workaround for https://github.com/mockito/mockito/issues/922
            'notPackage': 'net.bytebuddy',
          }
          extras[_EXTRA_TEST_LIST] = dev_test_list_json.name
          target = '%s/%s' % (test_package, junit4_runner_class)
          timeout = 240
          if self._test_instance.wait_for_java_debugger:
            timeout = None
          with self._ArchiveLogcat(dev, 'list_tests'):
            test_list_run_output = dev.StartInstrumentation(
                target, extras=extras, retries=0, timeout=timeout)
          if any(test_list_run_output):
            logging.error('Unexpected output while listing tests:')
            for line in test_list_run_output:
              logging.error('  %s', line)
          with tempfile_ext.NamedTemporaryDirectory() as host_dir:
            host_file = os.path.join(host_dir, 'list_tests.json')
            dev.PullFile(dev_test_list_json.name, host_file)
            with open(host_file, 'r') as host_file:
              return json.load(host_file)

      return crash_handler.RetryOnSystemCrash(_run, d)

    raw_test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)

    # If all devices failed to list tests, raise an exception.
    # Check that tl is not None and is not empty.
    if all(not tl for tl in raw_test_lists):
      raise device_errors.CommandFailedError(
          'Failed to list tests on any device')

    # Get the first viable list of raw tests
    raw_tests = [tl for tl in raw_test_lists if tl][0]

    instrumentation_test_instance.SaveTestsToPickle(pickle_path, raw_tests)
    return raw_tests
+
  @contextlib.contextmanager
  def _ArchiveLogcat(self, device, test_name):
    """Context manager that records and archives logcat while a test runs.

    Yields:
      The archived logcat file object. Its Link() becomes available once the
      archive is finalized (on exit from the ArchivedTempfile block).
    """
    stream_name = 'logcat_%s_shard%s_%s_%s' % (
        test_name.replace('#', '.'), self._test_instance.external_shard_index,
        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)

    logcat_file = None
    logmon = None
    try:
      with self._env.output_manager.ArchivedTempfile(
          stream_name, 'logcat') as logcat_file:
        with logcat_monitor.LogcatMonitor(
            device.adb,
            filter_specs=local_device_environment.LOGCAT_FILTERS,
            output_file=logcat_file.name,
            transform_func=self._test_instance.MaybeDeobfuscateLines,
            check_error=False) as logmon:
          with _LogTestEndpoints(device, test_name):
            with contextlib_ext.Optional(
                trace_event.trace(test_name),
                self._env.trace_output):
              yield logcat_file
    finally:
      # Close the monitor even if the body raised, then advertise where the
      # logcat went (the Link() only exists if archiving succeeded).
      if logmon:
        logmon.Close()
      if logcat_file and logcat_file.Link():
        logging.info('Logcat saved to %s', logcat_file.Link())
+
+ def _SaveTraceData(self, trace_device_file, device, test_class):
+ trace_host_file = self._env.trace_output
+
+ if device.FileExists(trace_device_file.name):
+ try:
+ java_trace_json = device.ReadFile(trace_device_file.name)
+ except IOError:
+ raise Exception('error pulling trace file from device')
+ finally:
+ trace_device_file.close()
+
+ process_name = '%s (device %s)' % (test_class, device.serial)
+ process_hash = int(hashlib.md5(process_name).hexdigest()[:6], 16)
+
+ java_trace = json.loads(java_trace_json)
+ java_trace.sort(key=lambda event: event['ts'])
+
+ get_date_command = 'echo $EPOCHREALTIME'
+ device_time = device.RunShellCommand(get_date_command, single_line=True)
+ device_time = float(device_time) * 1e6
+ system_time = trace_time.Now()
+ time_difference = system_time - device_time
+
+ threads_to_add = set()
+ for event in java_trace:
+ # Ensure thread ID and thread name will be linked in the metadata.
+ threads_to_add.add((event['tid'], event['name']))
+
+ event['pid'] = process_hash
+
+ # Adjust time stamp to align with Python trace times (from
+ # trace_time.Now()).
+ event['ts'] += time_difference
+
+ for tid, thread_name in threads_to_add:
+ thread_name_metadata = {'pid': process_hash, 'tid': tid,
+ 'ts': 0, 'ph': 'M', 'cat': '__metadata',
+ 'name': 'thread_name',
+ 'args': {'name': thread_name}}
+ java_trace.append(thread_name_metadata)
+
+ process_name_metadata = {'pid': process_hash, 'tid': 0, 'ts': 0,
+ 'ph': 'M', 'cat': '__metadata',
+ 'name': 'process_name',
+ 'args': {'name': process_name}}
+ java_trace.append(process_name_metadata)
+
+ java_trace_json = json.dumps(java_trace)
+ java_trace_json = java_trace_json.rstrip(' ]')
+
+ with open(trace_host_file, 'r') as host_handle:
+ host_contents = host_handle.readline()
+
+ if host_contents:
+ java_trace_json = ',%s' % java_trace_json.lstrip(' [')
+
+ with open(trace_host_file, 'a') as host_handle:
+ host_handle.write(java_trace_json)
+
+ def _SaveScreenshot(self, device, screenshot_device_file, test_name, results,
+ link_name):
+ screenshot_filename = '%s-%s.png' % (
+ test_name, time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
+ if device.FileExists(screenshot_device_file.name):
+ with self._env.output_manager.ArchivedTempfile(
+ screenshot_filename, 'screenshot',
+ output_manager.Datatype.PNG) as screenshot_host_file:
+ try:
+ device.PullFile(screenshot_device_file.name,
+ screenshot_host_file.name)
+ finally:
+ screenshot_device_file.close()
+ _SetLinkOnResults(results, test_name, link_name,
+ screenshot_host_file.Link())
+
+ def _ProcessRenderTestResults(self, device, results):
+ if not self._render_tests_device_output_dir:
+ return
+ self._ProcessSkiaGoldRenderTestResults(device, results)
+
+ def _IsRetryWithoutPatch(self):
+ """Checks whether this test run is a retry without a patch/CL.
+
+ Returns:
+ True iff this is being run on a trybot and the current step is a retry
+ without the patch applied, otherwise False.
+ """
+ is_tryjob = self._test_instance.skia_gold_properties.IsTryjobRun()
+ # Builders automatically pass in --gtest_repeat,
+ # --test-launcher-retry-limit, --test-launcher-batch-limit, and
+ # --gtest_filter when running a step without a CL applied, but not for
+ # steps with the CL applied.
+ # TODO(skbug.com/12100): Check this in a less hacky way if a way can be
+ # found to check the actual step name. Ideally, this would not be necessary
+ # at all, but will be until Chromium stops doing step retries on trybots
+ # (extremely unlikely) or Gold is updated to not clobber earlier results
+ # (more likely, but a ways off).
+ has_filter = bool(self._test_instance.test_filter)
+ has_batch_limit = self._test_instance.test_launcher_batch_limit is not None
+ return is_tryjob and has_filter and has_batch_limit
+
  def _ProcessSkiaGoldRenderTestResults(self, device, results):
    """Compares pulled RenderTest images against Skia Gold baselines.

    Pulls the device's Gold output directory, runs a Gold comparison for each
    .png found (keyed by its sibling .json file), and attaches failure logs
    and triage/diff links to the matching entries in |results|.
    """
    gold_dir = posixpath.join(self._render_tests_device_output_dir,
                              _DEVICE_GOLD_DIR)
    if not device.FileExists(gold_dir):
      return

    gold_properties = self._test_instance.skia_gold_properties
    with tempfile_ext.NamedTemporaryDirectory() as host_dir:
      use_luci = not (gold_properties.local_pixel_tests
                      or gold_properties.no_luci_auth)

      # Pull everything at once instead of pulling individually, as it's
      # slightly faster since each command over adb has some overhead compared
      # to doing the same thing locally.
      host_dir = os.path.join(host_dir, _DEVICE_GOLD_DIR)
      device.PullFile(gold_dir, host_dir)
      for image_name in os.listdir(host_dir):
        if not image_name.endswith('.png'):
          continue

        render_name = image_name[:-4]
        json_name = render_name + '.json'
        json_path = os.path.join(host_dir, json_name)
        image_path = os.path.join(host_dir, image_name)
        full_test_name = None
        if not os.path.exists(json_path):
          _FailTestIfNecessary(results, full_test_name)
          _AppendToLog(
              results, full_test_name,
              'Unable to find corresponding JSON file for image %s '
              'when doing Skia Gold comparison.' % image_name)
          continue

        # Add 'ignore': '1' if a comparison failure would not be surfaced, as
        # that implies that we aren't actively maintaining baselines for the
        # test. This helps prevent unrelated CLs from getting comments posted to
        # them.
        should_rewrite = False
        with open(json_path) as infile:
          # All the key/value pairs in the JSON file are strings, so convert
          # to a bool.
          json_dict = json.load(infile)
          fail_on_unsupported = json_dict.get('fail_on_unsupported_configs',
                                              'false')
          fail_on_unsupported = fail_on_unsupported.lower() == 'true'
          # Grab the full test name so we can associate the comparison with a
          # particular test, which is necessary if tests are batched together.
          # Remove the key/value pair from the JSON since we don't need/want to
          # upload it to Gold.
          full_test_name = json_dict.get('full_test_name')
          if 'full_test_name' in json_dict:
            should_rewrite = True
            del json_dict['full_test_name']

        running_on_unsupported = (
            device.build_version_sdk not in RENDER_TEST_MODEL_SDK_CONFIGS.get(
                device.product_model, []) and not fail_on_unsupported)
        should_ignore_in_gold = running_on_unsupported
        # We still want to fail the test even if we're ignoring the image in
        # Gold if we're running on a supported configuration, so
        # should_ignore_in_gold != should_hide_failure.
        should_hide_failure = running_on_unsupported
        if should_ignore_in_gold:
          should_rewrite = True
          json_dict['ignore'] = '1'
        if should_rewrite:
          with open(json_path, 'w') as outfile:
            json.dump(json_dict, outfile)

        gold_session = self._skia_gold_session_manager.GetSkiaGoldSession(
            keys_input=json_path)

        try:
          status, error = gold_session.RunComparison(
              name=render_name,
              png_file=image_path,
              output_manager=self._env.output_manager,
              use_luci=use_luci,
              force_dryrun=self._IsRetryWithoutPatch())
        except Exception as e:  # pylint: disable=broad-except
          _FailTestIfNecessary(results, full_test_name)
          _AppendToLog(results, full_test_name,
                       'Skia Gold comparison raised exception: %s' % e)
          continue

        if not status:
          continue

        # Don't fail the test if we ran on an unsupported configuration unless
        # the test has explicitly opted in, as it's likely that baselines
        # aren't maintained for that configuration.
        if should_hide_failure:
          if self._test_instance.skia_gold_properties.local_pixel_tests:
            _AppendToLog(
                results, full_test_name,
                'Gold comparison for %s failed, but model %s with SDK '
                '%d is not a supported configuration. This failure would be '
                'ignored on the bots, but failing since tests are being run '
                'locally.' %
                (render_name, device.product_model, device.build_version_sdk))
          else:
            _AppendToLog(
                results, full_test_name,
                'Gold comparison for %s failed, but model %s with SDK '
                '%d is not a supported configuration, so ignoring failure.' %
                (render_name, device.product_model, device.build_version_sdk))
            continue

        _FailTestIfNecessary(results, full_test_name)
        failure_log = (
            'Skia Gold reported failure for RenderTest %s. See '
            'RENDER_TESTS.md for how to fix this failure.' % render_name)
        status_codes =\
            self._skia_gold_session_manager.GetSessionClass().StatusCodes
        if status == status_codes.AUTH_FAILURE:
          _AppendToLog(results, full_test_name,
                       'Gold authentication failed with output %s' % error)
        elif status == status_codes.INIT_FAILURE:
          _AppendToLog(results, full_test_name,
                       'Gold initialization failed with output %s' % error)
        elif status == status_codes.COMPARISON_FAILURE_REMOTE:
          public_triage_link, internal_triage_link =\
              gold_session.GetTriageLinks(render_name)
          if not public_triage_link:
            _AppendToLog(
                results, full_test_name,
                'Failed to get triage link for %s, raw output: %s' %
                (render_name, error))
            _AppendToLog(
                results, full_test_name, 'Reason for no triage link: %s' %
                gold_session.GetTriageLinkOmissionReason(render_name))
            continue
          if gold_properties.IsTryjobRun():
            _SetLinkOnResults(results, full_test_name,
                              'Public Skia Gold triage link for entire CL',
                              public_triage_link)
            _SetLinkOnResults(results, full_test_name,
                              'Internal Skia Gold triage link for entire CL',
                              internal_triage_link)
          else:
            _SetLinkOnResults(
                results, full_test_name,
                'Public Skia Gold triage link for %s' % render_name,
                public_triage_link)
            _SetLinkOnResults(
                results, full_test_name,
                'Internal Skia Gold triage link for %s' % render_name,
                internal_triage_link)
          _AppendToLog(results, full_test_name, failure_log)

        elif status == status_codes.COMPARISON_FAILURE_LOCAL:
          given_link = gold_session.GetGivenImageLink(render_name)
          closest_link = gold_session.GetClosestImageLink(render_name)
          diff_link = gold_session.GetDiffImageLink(render_name)

          processed_template_output = _GenerateRenderTestHtml(
              render_name, given_link, closest_link, diff_link)
          with self._env.output_manager.ArchivedTempfile(
              '%s.html' % render_name, 'gold_local_diffs',
              output_manager.Datatype.HTML) as html_results:
            html_results.write(processed_template_output)
          _SetLinkOnResults(results, full_test_name, render_name,
                            html_results.Link())
          _AppendToLog(
              results, full_test_name,
              'See %s link for diff image with closest positive.' % render_name)
        elif status == status_codes.LOCAL_DIFF_FAILURE:
          _AppendToLog(results, full_test_name,
                       'Failed to generate diffs from Gold: %s' % error)
        else:
          logging.error(
              'Given unhandled SkiaGoldSession StatusCode %s with error %s',
              status, error)
+
+ #override
+ def _ShouldRetry(self, test, result):
+ # We've tried to disable retries in the past with mixed results.
+ # See crbug.com/619055 for historical context and crbug.com/797002
+ # for ongoing efforts.
+ if 'Batch' in test['annotations'] and test['annotations']['Batch'][
+ 'value'] == 'UnitTests':
+ return False
+ del test, result
+ return True
+
  #override
  def _ShouldShard(self):
    # Instrumentation tests can always be distributed across devices.
    return True
+
+ @classmethod
+ def _GetTimeoutScaleFromAnnotations(cls, annotations):
+ try:
+ return int(annotations.get('TimeoutScale', {}).get('value', 1))
+ except ValueError as e:
+ logging.warning("Non-integer value of TimeoutScale ignored. (%s)", str(e))
+ return 1
+
+ @classmethod
+ def _GetTimeoutFromAnnotations(cls, annotations, test_name):
+ for k, v in TIMEOUT_ANNOTATIONS:
+ if k in annotations:
+ timeout = v
+ break
+ else:
+ logging.warning('Using default 1 minute timeout for %s', test_name)
+ timeout = 60
+
+ timeout *= cls._GetTimeoutScaleFromAnnotations(annotations)
+
+ return timeout
+
+
def _IsWPRRecordReplayTest(test):
  """Determines whether a test or a list of tests is a WPR RecordReplay Test."""
  tests = test if isinstance(test, list) else [test]
  # True if any test advertises the WPR record/replay feature annotation.
  return any(
      WPR_RECORD_REPLAY_TEST_FEATURE_ANNOTATION in t['annotations'].get(
          FEATURE_ANNOTATION, {}).get('value', ()) for t in tests)
+
+
def _GetWPRArchivePath(test):
  """Retrieves the archive path from the WPRArchiveDirectory annotation.

  Args:
    test: A single test dict with an 'annotations' mapping.

  Returns:
    The annotation's 'value' if present, otherwise an empty tuple — a falsy
    default that callers treat as "no archive path configured".
  """
  return test['annotations'].get(WPR_ARCHIVE_FILE_PATH_ANNOTATION,
                                 {}).get('value', ())
+
+
+def _ReplaceUncommonChars(original):
+ """Replaces uncommon characters with __."""
+ if not original:
+ raise ValueError('parameter should not be empty')
+
+ uncommon_chars = ['#']
+ for char in uncommon_chars:
+ original = original.replace(char, '__')
+ return original
+
+
def _IsRenderTest(test):
  """Determines if a test or list of tests has a RenderTest amongst them."""
  tests = test if isinstance(test, list) else [test]
  return any(RENDER_TEST_FEATURE_ANNOTATION in t['annotations'].get(
      FEATURE_ANNOTATION, {}).get('value', ()) for t in tests)
+
+
def _GenerateRenderTestHtml(image_name, failure_link, golden_link, diff_link):
  """Generates a RenderTest results page.

  The page displays the generated (failure) image, the golden image, and the
  diff between them.

  Args:
    image_name: The name of the image whose comparison failed.
    failure_link: The URL to the generated/failure image.
    golden_link: The URL to the golden image.
    diff_link: The URL to the diff image between the failure and golden images.

  Returns:
    A string containing the generated HTML.
  """
  loader = jinja2.FileSystemLoader(_JINJA_TEMPLATE_DIR)
  jinja2_env = jinja2.Environment(loader=loader, trim_blocks=True)
  template = jinja2_env.get_template(_JINJA_TEMPLATE_FILENAME)
  # pylint: disable=no-member
  return template.render(
      test_name=image_name,
      failure_link=failure_link,
      golden_link=golden_link,
      diff_link=diff_link)
+
+
def _FailTestIfNecessary(results, full_test_name):
  """Marks the given results as failed if they weren't already.

  Result types that already indicate a failure (e.g. ResultType.CRASH) are
  left untouched; anything else is switched to ResultType.FAIL.

  Args:
    results: A list of base_test_result.BaseTestResult objects.
    full_test_name: A string containing the full name of the test, e.g.
        org.chromium.chrome.SomeTestClass#someTestMethod.
  """
  found_matching_test = _MatchingTestInResults(results, full_test_name)
  if not found_matching_test and _ShouldReportNoMatchingResult(full_test_name):
    logging.error(
        'Could not find result specific to %s, failing all tests in the batch.',
        full_test_name)

  failure_types = (
      base_test_result.ResultType.FAIL,
      base_test_result.ResultType.CRASH,
      base_test_result.ResultType.TIMEOUT,
      base_test_result.ResultType.UNKNOWN,
  )
  for result in results:
    if found_matching_test and result.GetName() != full_test_name:
      continue
    if result.GetType() not in failure_types:
      result.SetType(base_test_result.ResultType.FAIL)
+
+
def _AppendToLog(results, full_test_name, line):
  """Appends the given line to the end of the logs of the given results.

  Args:
    results: A list of base_test_result.BaseTestResult objects.
    full_test_name: A string containing the full name of the test, e.g.
        org.chromium.chrome.SomeTestClass#someTestMethod.
    line: A string appended as a new line to the log of each matching result.
  """
  found_matching_test = _MatchingTestInResults(results, full_test_name)
  if not found_matching_test and _ShouldReportNoMatchingResult(full_test_name):
    logging.error(
        'Could not find result specific to %s, appending to log of all tests '
        'in the batch.', full_test_name)
  for result in results:
    # With a matching result present, only that result's log is touched;
    # otherwise every result in the batch is updated.
    if not found_matching_test or result.GetName() == full_test_name:
      _AppendToLogForResult(result, line)
+
+
+def _AppendToLogForResult(result, line):
+ result.SetLog(result.GetLog() + '\n' + line)
+
+
def _SetLinkOnResults(results, full_test_name, link_name, link):
  """Sets the given link on the given results.

  Args:
    results: A list of base_test_result.BaseTestResult objects.
    full_test_name: A string containing the full name of the test, e.g.
        org.chromium.chrome.SomeTestClass#someTestMethod.
    link_name: A string containing the name of the link being set.
    link: A string containing the link being set.
  """
  found_matching_test = _MatchingTestInResults(results, full_test_name)
  if not found_matching_test and _ShouldReportNoMatchingResult(full_test_name):
    logging.error(
        'Could not find result specific to %s, adding link to results of all '
        'tests in the batch.', full_test_name)
  for result in results:
    # With a matching result present, only that result gets the link;
    # otherwise the link is attached to every result in the batch.
    if not found_matching_test or result.GetName() == full_test_name:
      result.SetLink(link_name, link)
+
+
+def _MatchingTestInResults(results, full_test_name):
+ """Checks if any tests named |full_test_name| are in |results|.
+
+ Args:
+ results: A list of base_test_result.BaseTestResult objects.
+ full_test_name: A string containing the full name of the test, e.g.
+ org.chromium.chrome.Some
+
+ Returns:
+ True if one of the results in |results| has the same name as
+ |full_test_name|, otherwise False.
+ """
+ return any([r for r in results if r.GetName() == full_test_name])
+
+
def _ShouldReportNoMatchingResult(full_test_name):
  """Determines whether a failure to find a matching result is actually bad.

  Args:
    full_test_name: A string containing the full name of the test, e.g.
        org.chromium.chrome.SomeTestClass#someTestMethod.

  Returns:
    False if the failure to find a matching result is expected and should not
    be reported, otherwise True.
  """
  # Batched tests report under the first test's name plus the batch suffix,
  # so an exact-name miss is expected for them.
  is_batched_name = (full_test_name is not None
                     and full_test_name.endswith(_BATCH_SUFFIX))
  return not is_batched_name
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py
new file mode 100755
index 0000000000..948e34c17a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env vpython3
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for local_device_instrumentation_test_run."""
+
+# pylint: disable=protected-access
+
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.base import mock_environment
+from pylib.base import mock_test_instance
+from pylib.local.device import local_device_instrumentation_test_run
+
+
class LocalDeviceInstrumentationTestRunTest(unittest.TestCase):
  """Unit tests for LocalDeviceInstrumentationTestRun and module helpers."""

  def setUp(self):
    super(LocalDeviceInstrumentationTestRunTest, self).setUp()
    # Mock environment/test-instance pairs let the run object be constructed
    # without touching real devices.
    self._env = mock_environment.MockEnvironment()
    self._ti = mock_test_instance.MockTestInstance()
    self._obj = (
        local_device_instrumentation_test_run.LocalDeviceInstrumentationTestRun(
            self._env, self._ti))

  # TODO(crbug.com/797002): Decide whether the _ShouldRetry hook is worth
  # retaining and remove these tests if not.

  def testShouldRetry_failure(self):
    # An unannotated failing test should be retried.
    test = {
        'annotations': {},
        'class': 'SadTest',
        'method': 'testFailure',
        'is_junit4': True,
    }
    result = base_test_result.BaseTestResult(
        'SadTest.testFailure', base_test_result.ResultType.FAIL)
    self.assertTrue(self._obj._ShouldRetry(test, result))

  def testShouldRetry_retryOnFailure(self):
    # An explicit RetryOnFailure annotation should also be retried.
    test = {
        'annotations': {'RetryOnFailure': None},
        'class': 'SadTest',
        'method': 'testRetryOnFailure',
        'is_junit4': True,
    }
    result = base_test_result.BaseTestResult(
        'SadTest.testRetryOnFailure', base_test_result.ResultType.FAIL)
    self.assertTrue(self._obj._ShouldRetry(test, result))

  def testShouldRetry_notRun(self):
    # Tests that never ran (NOTRUN) are retried as well.
    test = {
        'annotations': {},
        'class': 'SadTest',
        'method': 'testNotRun',
        'is_junit4': True,
    }
    result = base_test_result.BaseTestResult(
        'SadTest.testNotRun', base_test_result.ResultType.NOTRUN)
    self.assertTrue(self._obj._ShouldRetry(test, result))

  def testIsWPRRecordReplayTest_matchedWithKey(self):
    # Feature annotation containing the WPR feature string matches.
    test = {
        'annotations': {
            'Feature': {
                'value': ['WPRRecordReplayTest', 'dummy']
            }
        },
        'class': 'WPRDummyTest',
        'method': 'testRun',
        'is_junit4': True,
    }
    self.assertTrue(
        local_device_instrumentation_test_run._IsWPRRecordReplayTest(test))

  def testIsWPRRecordReplayTest_noMatchedKey(self):
    # A Feature annotation without the WPR feature string does not match.
    test = {
        'annotations': {
            'Feature': {
                'value': ['abc', 'dummy']
            }
        },
        'class': 'WPRDummyTest',
        'method': 'testRun',
        'is_junit4': True,
    }
    self.assertFalse(
        local_device_instrumentation_test_run._IsWPRRecordReplayTest(test))

  def testGetWPRArchivePath_matchedWithKey(self):
    # WPRArchiveDirectory's value is returned verbatim.
    test = {
        'annotations': {
            'WPRArchiveDirectory': {
                'value': 'abc'
            }
        },
        'class': 'WPRDummyTest',
        'method': 'testRun',
        'is_junit4': True,
    }
    self.assertEqual(
        local_device_instrumentation_test_run._GetWPRArchivePath(test), 'abc')

  def testGetWPRArchivePath_noMatchedWithKey(self):
    # Without WPRArchiveDirectory, the falsy default is returned.
    test = {
        'annotations': {
            'Feature': {
                'value': 'abc'
            }
        },
        'class': 'WPRDummyTest',
        'method': 'testRun',
        'is_junit4': True,
    }
    self.assertFalse(
        local_device_instrumentation_test_run._GetWPRArchivePath(test))

  def testIsRenderTest_matchedWithKey(self):
    # Feature annotation containing 'RenderTest' matches.
    test = {
        'annotations': {
            'Feature': {
                'value': ['RenderTest', 'dummy']
            }
        },
        'class': 'DummyTest',
        'method': 'testRun',
        'is_junit4': True,
    }
    self.assertTrue(local_device_instrumentation_test_run._IsRenderTest(test))

  def testIsRenderTest_noMatchedKey(self):
    test = {
        'annotations': {
            'Feature': {
                'value': ['abc', 'dummy']
            }
        },
        'class': 'DummyTest',
        'method': 'testRun',
        'is_junit4': True,
    }
    self.assertFalse(local_device_instrumentation_test_run._IsRenderTest(test))

  def testReplaceUncommonChars(self):
    # '#' is replaced by '__'; empty/None inputs raise ValueError.
    original = 'abc#edf'
    self.assertEqual(
        local_device_instrumentation_test_run._ReplaceUncommonChars(original),
        'abc__edf')
    original = 'abc#edf#hhf'
    self.assertEqual(
        local_device_instrumentation_test_run._ReplaceUncommonChars(original),
        'abc__edf__hhf')
    original = 'abcedfhhf'
    self.assertEqual(
        local_device_instrumentation_test_run._ReplaceUncommonChars(original),
        'abcedfhhf')
    original = None
    with self.assertRaises(ValueError):
      local_device_instrumentation_test_run._ReplaceUncommonChars(original)
    original = ''
    with self.assertRaises(ValueError):
      local_device_instrumentation_test_run._ReplaceUncommonChars(original)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py
new file mode 100644
index 0000000000..71dd9bd793
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_monkey_test_run.py
@@ -0,0 +1,128 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+
+from six.moves import range # pylint: disable=redefined-builtin
+from devil.android import device_errors
+from devil.android.sdk import intent
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.local.device import local_device_test_run
+
+
+_CHROME_PACKAGE = constants.PACKAGE_INFO['chrome'].package
+
class LocalDeviceMonkeyTestRun(local_device_test_run.LocalDeviceTestRun):
  """Runs the Android 'monkey' UI-exerciser against a package on a device."""

  def __init__(self, env, test_instance):
    super(LocalDeviceMonkeyTestRun, self).__init__(env, test_instance)

  def TestPackage(self):
    # Suite label used for reporting.
    return 'monkey'

  #override
  def SetUp(self):
    pass

  #override
  def _RunTest(self, device, test):
    """Runs one monkey pass and detects crashes via PID comparison."""
    device.ClearApplicationState(self._test_instance.package)

    # Chrome crashes are not always caught by Monkey test runner.
    # Launch Chrome and verify Chrome has the same PID before and after
    # the test.
    device.StartActivity(
        intent.Intent(package=self._test_instance.package,
                      activity=self._test_instance.activity,
                      action='android.intent.action.MAIN'),
        blocking=True, force_stop=True)
    before_pids = device.GetPids(self._test_instance.package)

    output = ''
    if before_pids:
      if len(before_pids.get(self._test_instance.package, [])) > 1:
        raise Exception(
            'At most one instance of process %s expected but found pids: '
            '%s' % (self._test_instance.package, before_pids))
      output = '\n'.join(self._LaunchMonkeyTest(device))
      after_pids = device.GetPids(self._test_instance.package)

    # NOTE(review): after_pids is only bound when before_pids is truthy; the
    # elif branches below are only reachable in that case because the first
    # branch handles a missing before_pids entry.
    crashed = True
    if not self._test_instance.package in before_pids:
      logging.error('Failed to start the process.')
    elif not self._test_instance.package in after_pids:
      logging.error('Process %s has died.',
                    before_pids[self._test_instance.package])
    elif (before_pids[self._test_instance.package] !=
          after_pids[self._test_instance.package]):
      logging.error('Detected process restart %s -> %s',
                    before_pids[self._test_instance.package],
                    after_pids[self._test_instance.package])
    else:
      crashed = False

    # The monkey tool reports how many events it injected; a full count with
    # no process restart is treated as a pass.
    success_pattern = 'Events injected: %d' % self._test_instance.event_count
    if success_pattern in output and not crashed:
      result = base_test_result.BaseTestResult(
          test, base_test_result.ResultType.PASS, log=output)
    else:
      result = base_test_result.BaseTestResult(
          test, base_test_result.ResultType.FAIL, log=output)
      if 'chrome' in self._test_instance.package:
        logging.warning('Starting MinidumpUploadService...')
        # TODO(jbudorick): Update this after upstreaming.
        minidump_intent = intent.Intent(
            action='%s.crash.ACTION_FIND_ALL' % _CHROME_PACKAGE,
            package=self._test_instance.package,
            activity='%s.crash.MinidumpUploadService' % _CHROME_PACKAGE)
        try:
          device.RunShellCommand(
              ['am', 'startservice'] + minidump_intent.am_args,
              as_root=True, check_return=True)
        except device_errors.CommandFailedError:
          # Best-effort: failure to start the upload service is logged only.
          logging.exception('Failed to start MinidumpUploadService')

    return result, None

  #override
  def TearDown(self):
    pass

  #override
  def _CreateShards(self, tests):
    return tests

  #override
  def _ShouldShard(self):
    # TODO(mikecase): Run Monkey test concurrently on each attached device.
    return False

  #override
  def _GetTests(self):
    # A single synthetic test name; the monkey run is one opaque unit.
    return ['MonkeyTest']

  def _LaunchMonkeyTest(self, device):
    """Invokes the on-device monkey command and returns its output lines."""
    try:
      cmd = ['monkey',
             '-p', self._test_instance.package,
             '--throttle', str(self._test_instance.throttle),
             '-s', str(self._test_instance.seed),
             '--monitor-native-crashes',
             '--kill-process-after-error']
      for category in self._test_instance.categories:
        cmd.extend(['-c', category])
      for _ in range(self._test_instance.verbose_count):
        cmd.append('-v')
      cmd.append(str(self._test_instance.event_count))
      return device.RunShellCommand(
          cmd, timeout=self._test_instance.timeout, check_return=True)
    finally:
      try:
        # Kill the monkey test process on the device. If you manually
        # interrupt the test run, this will prevent the monkey test from
        # continuing to run.
        device.KillAll('com.android.commands.monkey')
      except device_errors.CommandFailedError:
        pass
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py
new file mode 100644
index 0000000000..645d9c7471
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run.py
@@ -0,0 +1,395 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import logging
+import posixpath
+import signal
+try:
+ import _thread as thread
+except ImportError:
+ import thread
+import threading
+
+from devil import base_error
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android.sdk import version_codes
+from devil.android.tools import device_recovery
+from devil.utils import signal_handler
+from pylib import valgrind_tools
+from pylib.base import base_test_result
+from pylib.base import test_run
+from pylib.base import test_collection
+from pylib.local.device import local_device_environment
+
+
+_SIGTERM_TEST_LOG = (
+ ' Suite execution terminated, probably due to swarming timeout.\n'
+ ' Your test may not have run.')
+
+
def SubstituteDeviceRoot(device_path, device_root):
  """Expands |device_path|, substituting |device_root| for empty components.

  Args:
    device_path: None, a path string, or a list of path components in which
        falsy components stand in for the device root.
    device_root: The device root path used for substitution.

  Returns:
    The resulting device path string.
  """
  if not device_path:
    return device_root
  if isinstance(device_path, list):
    components = [p or device_root for p in device_path]
    return posixpath.join(*components)
  return device_path
+
+
class TestsTerminated(Exception):
  """Raised to abort test execution, e.g. when SIGTERM is received."""
+
+
class InvalidShardingSettings(Exception):
  """Raised when external sharding settings are out of range."""

  def __init__(self, shard_index, total_shards):
    message = ('Invalid sharding settings. shard_index: %d total_shards: %d'
               % (shard_index, total_shards))
    super(InvalidShardingSettings, self).__init__(message)
+
+
class LocalDeviceTestRun(test_run.TestRun):
  """Base class for test runs executed on locally attached devices.

  Subclasses provide the test enumeration, sharding, and per-test execution
  hooks (_GetTests, _CreateShards, _RunTest, _ShouldShard, ...); this class
  drives retries, device-error handling, and external sharding.
  """

  def __init__(self, env, test_instance):
    super(LocalDeviceTestRun, self).__init__(env, test_instance)
    # Maps str(device) -> valgrind tool instance, created lazily by GetTool.
    self._tools = {}
    # This is intended to be filled by a child class.
    self._installed_packages = []
    env.SetPreferredAbis(test_instance.GetPreferredAbis())

  #override
  def RunTests(self, results):
    """Runs all tests, retrying failures up to the environment's max tries.

    Args:
      results: A list that receives one TestRunResults object per try.
    """
    tests = self._GetTests()

    exit_now = threading.Event()

    @local_device_environment.handle_shard_failures
    def run_tests_on_device(dev, tests, results):
      # This is performed here instead of during setup because restarting the
      # device clears app compatibility flags, which will happen if a device
      # needs to be recovered.
      SetAppCompatibilityFlagsIfNecessary(self._installed_packages, dev)
      consecutive_device_errors = 0
      for test in tests:
        if not test:
          # NOTE(review): message reads 'shared' but presumably means 'shard'.
          logging.warning('No tests in shared. Continuing.')
          tests.test_completed()
          continue
        # NOTE(review): Event.isSet is the deprecated alias of is_set.
        if exit_now.isSet():
          thread.exit()

        result = None
        rerun = None
        try:
          # Default t=test binds the current test at definition time,
          # avoiding the late-binding closure pitfall.
          result, rerun = crash_handler.RetryOnSystemCrash(
              lambda d, t=test: self._RunTest(d, t),
              device=dev)
          consecutive_device_errors = 0
          if isinstance(result, base_test_result.BaseTestResult):
            results.AddResult(result)
          elif isinstance(result, list):
            results.AddResults(result)
          else:
            raise Exception(
                'Unexpected result type: %s' % type(result).__name__)
        except device_errors.CommandTimeoutError:
          # Test timeouts don't count as device errors for the purpose
          # of bad device detection.
          consecutive_device_errors = 0

          if isinstance(test, list):
            results.AddResults(
                base_test_result.BaseTestResult(
                    self._GetUniqueTestName(t),
                    base_test_result.ResultType.TIMEOUT) for t in test)
          else:
            results.AddResult(
                base_test_result.BaseTestResult(
                    self._GetUniqueTestName(test),
                    base_test_result.ResultType.TIMEOUT))
        except Exception as e:  # pylint: disable=broad-except
          if isinstance(tests, test_collection.TestCollection):
            rerun = test
          if (isinstance(e, device_errors.DeviceUnreachableError)
              or not isinstance(e, base_error.BaseError)):
            # If we get a device error but believe the device is still
            # reachable, attempt to continue using it. Otherwise, raise
            # the exception and terminate this run_tests_on_device call.
            raise

          consecutive_device_errors += 1
          if consecutive_device_errors >= 3:
            # We believe the device is still reachable and may still be usable,
            # but if it fails repeatedly, we shouldn't attempt to keep using
            # it.
            logging.error('Repeated failures on device %s. Abandoning.',
                          str(dev))
            raise

          logging.exception(
              'Attempting to continue using device %s despite failure (%d/3).',
              str(dev), consecutive_device_errors)

        finally:
          if isinstance(tests, test_collection.TestCollection):
            if rerun:
              tests.add(rerun)
            tests.test_completed()

      logging.info('Finished running tests on this device.')

    def stop_tests(_signum, _frame):
      logging.critical('Received SIGTERM. Stopping test execution.')
      exit_now.set()
      raise TestsTerminated()

    try:
      with signal_handler.AddSignalHandler(signal.SIGTERM, stop_tests):
        self._env.ResetCurrentTry()
        while self._env.current_try < self._env.max_tries and tests:
          tries = self._env.current_try
          grouped_tests = self._GroupTests(tests)
          logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
          if tries > 0 and self._env.recover_devices:
            if any(d.build_version_sdk == version_codes.LOLLIPOP_MR1
                   for d in self._env.devices):
              logging.info(
                  'Attempting to recover devices due to known issue on L MR1. '
                  'See crbug.com/787056 for details.')
              self._env.parallel_devices.pMap(
                  device_recovery.RecoverDevice, None)
            elif tries + 1 == self._env.max_tries:
              logging.info(
                  'Attempting to recover devices prior to last test attempt.')
              self._env.parallel_devices.pMap(
                  device_recovery.RecoverDevice, None)
          logging.info('Will run %d tests on %d devices: %s',
                       len(tests), len(self._env.devices),
                       ', '.join(str(d) for d in self._env.devices))
          for t in tests:
            logging.debug('  %s', t)

          try_results = base_test_result.TestRunResults()
          test_names = (self._GetUniqueTestName(t) for t in tests)
          # Wildcard names cannot be pre-registered as NOTRUN because they
          # expand to an unknown set of concrete tests.
          try_results.AddResults(
              base_test_result.BaseTestResult(
                  t, base_test_result.ResultType.NOTRUN)
              for t in test_names if not t.endswith('*'))

          # As soon as we know the names of the tests, we populate |results|.
          # The tests in try_results will have their results updated by
          # try_results.AddResult() as they are run.
          results.append(try_results)

          try:
            if self._ShouldShard():
              tc = test_collection.TestCollection(
                  self._CreateShards(grouped_tests))
              self._env.parallel_devices.pMap(
                  run_tests_on_device, tc, try_results).pGet(None)
            else:
              self._env.parallel_devices.pMap(run_tests_on_device,
                                              grouped_tests,
                                              try_results).pGet(None)
          except TestsTerminated:
            # Mark everything that never produced a result as a timeout so
            # the swarming-kill is visible in the output.
            for unknown_result in try_results.GetUnknown():
              try_results.AddResult(
                  base_test_result.BaseTestResult(
                      unknown_result.GetName(),
                      base_test_result.ResultType.TIMEOUT,
                      log=_SIGTERM_TEST_LOG))
            raise

          self._env.IncrementCurrentTry()
          tests = self._GetTestsToRetry(tests, try_results)

          logging.info('FINISHED TRY #%d/%d', tries + 1, self._env.max_tries)
          if tests:
            logging.info('%d failed tests remain.', len(tests))
          else:
            logging.info('All tests completed.')
    except TestsTerminated:
      pass

  def _GetTestsToRetry(self, tests, try_results):
    """Returns the subset of |tests| that failed in |try_results| and should
    be retried (wildcard test names match any failing expansion)."""

    def is_failure_result(test_result):
      if isinstance(test_result, list):
        return any(is_failure_result(r) for r in test_result)
      # A missing result (None) counts as a failure.
      return (
          test_result is None
          or test_result.GetType() not in (
              base_test_result.ResultType.PASS,
              base_test_result.ResultType.SKIP))

    all_test_results = {r.GetName(): r for r in try_results.GetAll()}

    tests_and_names = ((t, self._GetUniqueTestName(t)) for t in tests)

    tests_and_results = {}
    for test, name in tests_and_names:
      if name.endswith('*'):
        tests_and_results[name] = (test, [
            r for n, r in all_test_results.items() if fnmatch.fnmatch(n, name)
        ])
      else:
        tests_and_results[name] = (test, all_test_results.get(name))

    failed_tests_and_results = ((test, result)
                                for test, result in tests_and_results.values()
                                if is_failure_result(result))

    return [t for t, r in failed_tests_and_results if self._ShouldRetry(t, r)]

  def _ApplyExternalSharding(self, tests, shard_index, total_shards):
    """Returns the subset of |tests| belonging to this external shard.

    Raises:
      InvalidShardingSettings: If the index/total pair is out of range.
    """
    logging.info('Using external sharding settings. This is shard %d/%d',
                 shard_index, total_shards)

    if total_shards < 0 or shard_index < 0 or total_shards <= shard_index:
      raise InvalidShardingSettings(shard_index, total_shards)

    sharded_tests = []

    # Group tests by tests that should run in the same test invocation - either
    # unit tests or batched tests.
    grouped_tests = self._GroupTests(tests)

    # Partition grouped tests approximately evenly across shards.
    partitioned_tests = self._PartitionTests(grouped_tests, total_shards,
                                             float('inf'))
    if len(partitioned_tests) <= shard_index:
      return []
    for t in partitioned_tests[shard_index]:
      if isinstance(t, list):
        sharded_tests.extend(t)
      else:
        sharded_tests.append(t)
    return sharded_tests

  # Partition tests evenly into |num_desired_partitions| partitions where
  # possible. However, many constraints make partitioning perfectly impossible.
  # If the max_partition_size isn't large enough, extra partitions may be
  # created (infinite max size should always return precisely the desired
  # number of partitions). Even if the |max_partition_size| is technically large
  # enough to hold all of the tests in |num_desired_partitions|, we attempt to
  # keep test order relatively stable to minimize flakes, so when tests are
  # grouped (eg. batched tests), we cannot perfectly fill all paritions as that
  # would require breaking up groups.
  def _PartitionTests(self, tests, num_desired_partitions, max_partition_size):
    # pylint: disable=no-self-use
    partitions = []

    # Sort by hash so we don't put all tests in a slow suite in the same
    # partition.
    tests = sorted(
        tests,
        key=lambda t: hash(
            self._GetUniqueTestName(t[0] if isinstance(t, list) else t)))

    def CountTestsIndividually(test):
      if not isinstance(test, list):
        return False
      annotations = test[0]['annotations']
      # UnitTests tests are really fast, so to balance shards better, count
      # UnitTests Batches as single tests.
      return ('Batch' not in annotations
              or annotations['Batch']['value'] != 'UnitTests')

    num_not_yet_allocated = sum(
        [len(test) - 1 for test in tests if CountTestsIndividually(test)])
    num_not_yet_allocated += len(tests)

    # Fast linear partition approximation capped by max_partition_size. We
    # cannot round-robin or otherwise re-order tests dynamically because we want
    # test order to remain stable.
    partition_size = min(num_not_yet_allocated // num_desired_partitions,
                         max_partition_size)
    partitions.append([])
    last_partition_size = 0
    for test in tests:
      test_count = len(test) if CountTestsIndividually(test) else 1
      # Make a new shard whenever we would overfill the previous one. However,
      # if the size of the test group is larger than the max partition size on
      # its own, just put the group in its own shard instead of splitting up the
      # group.
      if (last_partition_size + test_count > partition_size
          and last_partition_size > 0):
        num_desired_partitions -= 1
        if num_desired_partitions <= 0:
          # Too many tests for number of partitions, just fill all partitions
          # beyond num_desired_partitions.
          partition_size = max_partition_size
        else:
          # Re-balance remaining partitions.
          partition_size = min(num_not_yet_allocated // num_desired_partitions,
                               max_partition_size)
        partitions.append([])
        partitions[-1].append(test)
        last_partition_size = test_count
      else:
        partitions[-1].append(test)
        last_partition_size += test_count

      num_not_yet_allocated -= test_count

    if not partitions[-1]:
      partitions.pop()
    return partitions

  def GetTool(self, device):
    """Returns the (cached) valgrind tool instance for |device|."""
    if str(device) not in self._tools:
      self._tools[str(device)] = valgrind_tools.CreateTool(
          self._env.tool, device)
    return self._tools[str(device)]

  # The methods below are hooks for subclasses; the defaults either raise or
  # implement identity behavior.

  def _CreateShards(self, tests):
    raise NotImplementedError

  def _GetUniqueTestName(self, test):
    # pylint: disable=no-self-use
    return test

  def _ShouldRetry(self, test, result):
    # pylint: disable=no-self-use,unused-argument
    return True

  def _GetTests(self):
    raise NotImplementedError

  def _GroupTests(self, tests):
    # pylint: disable=no-self-use
    return tests

  def _RunTest(self, device, test):
    raise NotImplementedError

  def _ShouldShard(self):
    raise NotImplementedError
+
+
def SetAppCompatibilityFlagsIfNecessary(packages, device):
  """Sets app compatibility flags on the given packages and device.

  Args:
    packages: A list of strings containing package names to apply flags to.
    device: A DeviceUtils instance to apply the flags on.
  """
  if device.build_version_sdk < version_codes.R:
    return
  # These flags are necessary to use the legacy storage permissions on R+.
  # See crbug.com/1173699 for more information.
  for flag in ('DEFAULT_SCOPED_STORAGE', 'FORCE_ENABLE_SCOPED_STORAGE'):
    for package in packages:
      device.RunShellCommand(['am', 'compat', 'disable', flag, package])
+
+
class NoTestsError(Exception):
  """Raised when no tests are found to run."""
diff --git a/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py
new file mode 100755
index 0000000000..0f6c9b5421
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/device/local_device_test_run_test.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env vpython3
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.local.device import local_device_test_run
+
+import mock # pylint: disable=import-error
+
+
class SubstituteDeviceRootTest(unittest.TestCase):
  """Unit tests for local_device_test_run.SubstituteDeviceRoot."""

  def testNoneDevicePath(self):
    # A falsy path resolves to the device root itself.
    self.assertEqual(
        '/fake/device/root',
        local_device_test_run.SubstituteDeviceRoot(None, '/fake/device/root'))

  def testStringDevicePath(self):
    # A plain string path is returned unchanged.
    self.assertEqual(
        '/another/fake/device/path',
        local_device_test_run.SubstituteDeviceRoot('/another/fake/device/path',
                                                   '/fake/device/root'))

  def testListWithNoneDevicePath(self):
    # A None component in a list is replaced by the device root.
    self.assertEqual(
        '/fake/device/root/subpath',
        local_device_test_run.SubstituteDeviceRoot([None, 'subpath'],
                                                   '/fake/device/root'))

  def testListWithoutNoneDevicePath(self):
    # A list without falsy components is simply joined.
    self.assertEqual(
        '/another/fake/device/path',
        local_device_test_run.SubstituteDeviceRoot(
            ['/', 'another', 'fake', 'device', 'path'], '/fake/device/root'))
+
+
class TestLocalDeviceTestRun(local_device_test_run.LocalDeviceTestRun):
  """Concrete LocalDeviceTestRun built on mocks for testing base behavior."""

  # pylint: disable=abstract-method

  def __init__(self):
    # Mocked environment and test instance; no real devices are involved.
    super(TestLocalDeviceTestRun, self).__init__(
        mock.MagicMock(), mock.MagicMock())
+
+
class TestLocalDeviceNonStringTestRun(
    local_device_test_run.LocalDeviceTestRun):
  """LocalDeviceTestRun variant whose tests are dicts rather than strings."""

  # pylint: disable=abstract-method

  def __init__(self):
    super(TestLocalDeviceNonStringTestRun, self).__init__(
        mock.MagicMock(), mock.MagicMock())

  def _GetUniqueTestName(self, test):
    # Tests are dicts; their reported name lives under the 'name' key.
    return test['name']
+
+
class LocalDeviceTestRunTest(unittest.TestCase):
  """Unit tests for LocalDeviceTestRun._GetTestsToRetry."""

  def testGetTestsToRetry_allTestsPassed(self):
    # Nothing to retry when every test passed.
    results = [
        base_test_result.BaseTestResult(
            'Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]

    tests = [r.GetName() for r in results]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEqual(0, len(tests_to_retry))

  def testGetTestsToRetry_testFailed(self):
    # A FAIL result marks that test for retry.
    results = [
        base_test_result.BaseTestResult(
            'Test1', base_test_result.ResultType.FAIL),
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]

    tests = [r.GetName() for r in results]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEqual(1, len(tests_to_retry))
    self.assertIn('Test1', tests_to_retry)

  def testGetTestsToRetry_testUnknown(self):
    # A test with no recorded result at all is also retried.
    results = [
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]

    tests = ['Test1'] + [r.GetName() for r in results]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEqual(1, len(tests_to_retry))
    self.assertIn('Test1', tests_to_retry)

  def testGetTestsToRetry_wildcardFilter_allPass(self):
    # A wildcard test name is not retried when all its expansions pass.
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.PASS),
    ]

    tests = ['TestCase.*']
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEqual(0, len(tests_to_retry))

  def testGetTestsToRetry_wildcardFilter_oneFails(self):
    # One failing expansion is enough to retry the whole wildcard.
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.FAIL),
    ]

    tests = ['TestCase.*']
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEqual(1, len(tests_to_retry))
    self.assertIn('TestCase.*', tests_to_retry)

  def testGetTestsToRetry_nonStringTests(self):
    # Dict-shaped tests are matched via _GetUniqueTestName and returned
    # unmodified (still dicts) in the retry list.
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.FAIL),
    ]

    tests = [
        {'name': 'TestCase.Test1'},
        {'name': 'TestCase.Test2'},
    ]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceNonStringTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEqual(1, len(tests_to_retry))
    self.assertIsInstance(tests_to_retry[0], dict)
    self.assertEqual(tests[1], tests_to_retry[0])
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/OWNERS b/third_party/libwebrtc/build/android/pylib/local/emulator/OWNERS
new file mode 100644
index 0000000000..0853590d4b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/OWNERS
@@ -0,0 +1,4 @@
+bpastene@chromium.org
+hypan@google.com
+jbudorick@chromium.org
+liaoyuke@chromium.org
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/__init__.py b/third_party/libwebrtc/build/android/pylib/local/emulator/__init__.py
new file mode 100644
index 0000000000..4a12e35c92
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/avd.py b/third_party/libwebrtc/build/android/pylib/local/emulator/avd.py
new file mode 100644
index 0000000000..d32fbd93e7
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/avd.py
@@ -0,0 +1,629 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import contextlib
+import json
+import logging
+import os
+import socket
+import stat
+import subprocess
+import threading
+
+from google.protobuf import text_format # pylint: disable=import-error
+
+from devil.android import device_utils
+from devil.android.sdk import adb_wrapper
+from devil.utils import cmd_helper
+from devil.utils import timeout_retry
+from py_utils import tempfile_ext
+from pylib import constants
+from pylib.local.emulator import ini
+from pylib.local.emulator.proto import avd_pb2
+
+_ALL_PACKAGES = object()
+_DEFAULT_AVDMANAGER_PATH = os.path.join(
+ constants.ANDROID_SDK_ROOT, 'cmdline-tools', 'latest', 'bin', 'avdmanager')
+# Default to a 480dp mdpi screen (a relatively large phone).
+# See https://developer.android.com/training/multiscreen/screensizes
+# and https://developer.android.com/training/multiscreen/screendensities
+# for more information.
+_DEFAULT_SCREEN_DENSITY = 160
+_DEFAULT_SCREEN_HEIGHT = 960
+_DEFAULT_SCREEN_WIDTH = 480
+
+# Default to swiftshader_indirect since it works for most cases.
+_DEFAULT_GPU_MODE = 'swiftshader_indirect'
+
+
+class AvdException(Exception):
+ """Raised when this module has a problem interacting with an AVD."""
+
+ def __init__(self, summary, command=None, stdout=None, stderr=None):
+ message_parts = [summary]
+ if command:
+ message_parts.append(' command: %s' % ' '.join(command))
+ if stdout:
+ message_parts.append(' stdout:')
+ message_parts.extend(' %s' % line for line in stdout.splitlines())
+ if stderr:
+ message_parts.append(' stderr:')
+ message_parts.extend(' %s' % line for line in stderr.splitlines())
+
+ super(AvdException, self).__init__('\n'.join(message_parts))
+
+
+def _Load(avd_proto_path):
+ """Loads an Avd proto from a textpb file at the given path.
+
+ Should not be called outside of this module.
+
+ Args:
+ avd_proto_path: path to a textpb file containing an Avd message.
+ """
+ with open(avd_proto_path) as avd_proto_file:
+ return text_format.Merge(avd_proto_file.read(), avd_pb2.Avd())
+
+
+class _AvdManagerAgent(object):
+ """Private utility for interacting with avdmanager."""
+
+ def __init__(self, avd_home, sdk_root):
+ """Create an _AvdManagerAgent.
+
+ Args:
+ avd_home: path to ANDROID_AVD_HOME directory.
+ Typically something like /path/to/dir/.android/avd
+ sdk_root: path to SDK root directory.
+ """
+ self._avd_home = avd_home
+ self._sdk_root = sdk_root
+
+ self._env = dict(os.environ)
+
+ # The avdmanager from cmdline-tools would look two levels
+ # up from toolsdir to find the SDK root.
+    # Pass avdmanager a fake directory under the directory in which
+    # we install the system images so that avdmanager can find the
+    # system images.
+ fake_tools_dir = os.path.join(self._sdk_root, 'non-existent-tools',
+ 'non-existent-version')
+ self._env.update({
+ 'ANDROID_AVD_HOME':
+ self._avd_home,
+ 'AVDMANAGER_OPTS':
+ '-Dcom.android.sdkmanager.toolsdir=%s' % fake_tools_dir,
+ })
+
+ def Create(self, avd_name, system_image, force=False):
+ """Call `avdmanager create`.
+
+ Args:
+ avd_name: name of the AVD to create.
+ system_image: system image to use for the AVD.
+ force: whether to force creation, overwriting any existing
+ AVD with the same name.
+ """
+ create_cmd = [
+ _DEFAULT_AVDMANAGER_PATH,
+ '-v',
+ 'create',
+ 'avd',
+ '-n',
+ avd_name,
+ '-k',
+ system_image,
+ ]
+ if force:
+ create_cmd += ['--force']
+
+ create_proc = cmd_helper.Popen(
+ create_cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=self._env)
+ output, error = create_proc.communicate(input='\n')
+ if create_proc.returncode != 0:
+ raise AvdException(
+ 'AVD creation failed',
+ command=create_cmd,
+ stdout=output,
+ stderr=error)
+
+ for line in output.splitlines():
+ logging.info(' %s', line)
+
+ def Delete(self, avd_name):
+ """Call `avdmanager delete`.
+
+ Args:
+ avd_name: name of the AVD to delete.
+ """
+ delete_cmd = [
+ _DEFAULT_AVDMANAGER_PATH,
+ '-v',
+ 'delete',
+ 'avd',
+ '-n',
+ avd_name,
+ ]
+ try:
+ for line in cmd_helper.IterCmdOutputLines(delete_cmd, env=self._env):
+ logging.info(' %s', line)
+ except subprocess.CalledProcessError as e:
+ raise AvdException('AVD deletion failed: %s' % str(e), command=delete_cmd)
+
+
+class AvdConfig(object):
+ """Represents a particular AVD configuration.
+
+ This class supports creation, installation, and execution of an AVD
+ from a given Avd proto message, as defined in
+ //build/android/pylib/local/emulator/proto/avd.proto.
+ """
+
+ def __init__(self, avd_proto_path):
+ """Create an AvdConfig object.
+
+ Args:
+ avd_proto_path: path to a textpb file containing an Avd message.
+ """
+ self._config = _Load(avd_proto_path)
+
+ self._emulator_home = os.path.join(constants.DIR_SOURCE_ROOT,
+ self._config.avd_package.dest_path)
+ self._emulator_sdk_root = os.path.join(
+ constants.DIR_SOURCE_ROOT, self._config.emulator_package.dest_path)
+ self._emulator_path = os.path.join(self._emulator_sdk_root, 'emulator',
+ 'emulator')
+
+ self._initialized = False
+ self._initializer_lock = threading.Lock()
+
+ @property
+ def avd_settings(self):
+ return self._config.avd_settings
+
+ def Create(self,
+ force=False,
+ snapshot=False,
+ keep=False,
+ cipd_json_output=None,
+ dry_run=False):
+ """Create an instance of the AVD CIPD package.
+
+ This method:
+ - installs the requisite system image
+ - creates the AVD
+ - modifies the AVD's ini files to support running chromium tests
+ in chromium infrastructure
+ - optionally starts & stops the AVD for snapshotting (default no)
+ - By default creates and uploads an instance of the AVD CIPD package
+ (can be turned off by dry_run flag).
+ - optionally deletes the AVD (default yes)
+
+ Args:
+ force: bool indicating whether to force create the AVD.
+ snapshot: bool indicating whether to snapshot the AVD before creating
+ the CIPD package.
+ keep: bool indicating whether to keep the AVD after creating
+ the CIPD package.
+ cipd_json_output: string path to pass to `cipd create` via -json-output.
+ dry_run: When set to True, it will skip the CIPD package creation
+ after creating the AVD.
+ """
+ logging.info('Installing required packages.')
+ self._InstallCipdPackages(packages=[
+ self._config.emulator_package,
+ self._config.system_image_package,
+ ])
+
+ android_avd_home = os.path.join(self._emulator_home, 'avd')
+
+ if not os.path.exists(android_avd_home):
+ os.makedirs(android_avd_home)
+
+ avd_manager = _AvdManagerAgent(
+ avd_home=android_avd_home, sdk_root=self._emulator_sdk_root)
+
+ logging.info('Creating AVD.')
+ avd_manager.Create(
+ avd_name=self._config.avd_name,
+ system_image=self._config.system_image_name,
+ force=force)
+
+ try:
+ logging.info('Modifying AVD configuration.')
+
+ # Clear out any previous configuration or state from this AVD.
+ root_ini = os.path.join(android_avd_home,
+ '%s.ini' % self._config.avd_name)
+ features_ini = os.path.join(self._emulator_home, 'advancedFeatures.ini')
+ avd_dir = os.path.join(android_avd_home, '%s.avd' % self._config.avd_name)
+ config_ini = os.path.join(avd_dir, 'config.ini')
+
+ with ini.update_ini_file(root_ini) as root_ini_contents:
+ root_ini_contents['path.rel'] = 'avd/%s.avd' % self._config.avd_name
+
+ with ini.update_ini_file(features_ini) as features_ini_contents:
+        # The features_ini file is not refreshed by avdmanager during
+        # creation, so explicitly clear its contents to drop any
+        # leftovers from a previous creation.
+ features_ini_contents.clear()
+ features_ini_contents.update(self.avd_settings.advanced_features)
+
+ with ini.update_ini_file(config_ini) as config_ini_contents:
+ height = self.avd_settings.screen.height or _DEFAULT_SCREEN_HEIGHT
+ width = self.avd_settings.screen.width or _DEFAULT_SCREEN_WIDTH
+ density = self.avd_settings.screen.density or _DEFAULT_SCREEN_DENSITY
+
+ config_ini_contents.update({
+ 'disk.dataPartition.size': '4G',
+ 'hw.keyboard': 'yes',
+ 'hw.lcd.density': density,
+ 'hw.lcd.height': height,
+ 'hw.lcd.width': width,
+ 'hw.mainKeys': 'no', # Show nav buttons on screen
+ })
+
+ if self.avd_settings.ram_size:
+ config_ini_contents['hw.ramSize'] = self.avd_settings.ram_size
+
+ # Start & stop the AVD.
+ self._Initialize()
+ instance = _AvdInstance(self._emulator_path, self._emulator_home,
+ self._config)
+ # Enable debug for snapshot when it is set to True
+ debug_tags = 'init,snapshot' if snapshot else None
+ instance.Start(read_only=False,
+ snapshot_save=snapshot,
+ debug_tags=debug_tags,
+ gpu_mode=_DEFAULT_GPU_MODE)
+ # Android devices with full-disk encryption are encrypted on first boot,
+ # and then get decrypted to continue the boot process (See details in
+ # https://bit.ly/3agmjcM).
+ # Wait for this step to complete since it can take a while for old OSs
+ # like M, otherwise the avd may have "Encryption Unsuccessful" error.
+ device = device_utils.DeviceUtils(instance.serial)
+ device.WaitUntilFullyBooted(decrypt=True, timeout=180, retries=0)
+
+ # Skip network disabling on pre-N for now since the svc commands fail
+ # on Marshmallow.
+ if device.build_version_sdk > 23:
+ # Always disable the network to prevent built-in system apps from
+ # updating themselves, which could take over package manager and
+ # cause shell command timeout.
+ # Use svc as this also works on the images with build type "user".
+ logging.info('Disabling the network in emulator.')
+ device.RunShellCommand(['svc', 'wifi', 'disable'], check_return=True)
+ device.RunShellCommand(['svc', 'data', 'disable'], check_return=True)
+
+ instance.Stop()
+
+ # The multiinstance lock file seems to interfere with the emulator's
+ # operation in some circumstances (beyond the obvious -read-only ones),
+ # and there seems to be no mechanism by which it gets closed or deleted.
+ # See https://bit.ly/2pWQTH7 for context.
+ multiInstanceLockFile = os.path.join(avd_dir, 'multiinstance.lock')
+ if os.path.exists(multiInstanceLockFile):
+ os.unlink(multiInstanceLockFile)
+
+ package_def_content = {
+ 'package':
+ self._config.avd_package.package_name,
+ 'root':
+ self._emulator_home,
+ 'install_mode':
+ 'copy',
+ 'data': [{
+ 'dir': os.path.relpath(avd_dir, self._emulator_home)
+ }, {
+ 'file': os.path.relpath(root_ini, self._emulator_home)
+ }, {
+ 'file': os.path.relpath(features_ini, self._emulator_home)
+ }],
+ }
+
+ logging.info('Creating AVD CIPD package.')
+ logging.debug('ensure file content: %s',
+ json.dumps(package_def_content, indent=2))
+
+ with tempfile_ext.TemporaryFileName(suffix='.json') as package_def_path:
+ with open(package_def_path, 'w') as package_def_file:
+ json.dump(package_def_content, package_def_file)
+
+ logging.info(' %s', self._config.avd_package.package_name)
+ cipd_create_cmd = [
+ 'cipd',
+ 'create',
+ '-pkg-def',
+ package_def_path,
+ '-tag',
+ 'emulator_version:%s' % self._config.emulator_package.version,
+ '-tag',
+ 'system_image_version:%s' %
+ self._config.system_image_package.version,
+ ]
+ if cipd_json_output:
+ cipd_create_cmd.extend([
+ '-json-output',
+ cipd_json_output,
+ ])
+ logging.info('running %r%s', cipd_create_cmd,
+ ' (dry_run)' if dry_run else '')
+ if not dry_run:
+ try:
+ for line in cmd_helper.IterCmdOutputLines(cipd_create_cmd):
+ logging.info(' %s', line)
+ except subprocess.CalledProcessError as e:
+ raise AvdException(
+ 'CIPD package creation failed: %s' % str(e),
+ command=cipd_create_cmd)
+
+ finally:
+ if not keep:
+ logging.info('Deleting AVD.')
+ avd_manager.Delete(avd_name=self._config.avd_name)
+
+ def Install(self, packages=_ALL_PACKAGES):
+ """Installs the requested CIPD packages and prepares them for use.
+
+ This includes making files writeable and revising some of the
+ emulator's internal config files.
+
+ Returns: None
+ Raises: AvdException on failure to install.
+ """
+ self._InstallCipdPackages(packages=packages)
+ self._MakeWriteable()
+ self._EditConfigs()
+
+ def _InstallCipdPackages(self, packages):
+ pkgs_by_dir = {}
+ if packages is _ALL_PACKAGES:
+ packages = [
+ self._config.avd_package,
+ self._config.emulator_package,
+ self._config.system_image_package,
+ ]
+ for pkg in packages:
+ if not pkg.dest_path in pkgs_by_dir:
+ pkgs_by_dir[pkg.dest_path] = []
+ pkgs_by_dir[pkg.dest_path].append(pkg)
+
+ for pkg_dir, pkgs in list(pkgs_by_dir.items()):
+ logging.info('Installing packages in %s', pkg_dir)
+ cipd_root = os.path.join(constants.DIR_SOURCE_ROOT, pkg_dir)
+ if not os.path.exists(cipd_root):
+ os.makedirs(cipd_root)
+ ensure_path = os.path.join(cipd_root, '.ensure')
+ with open(ensure_path, 'w') as ensure_file:
+ # Make CIPD ensure that all files are present and correct,
+ # even if it thinks the package is installed.
+ ensure_file.write('$ParanoidMode CheckIntegrity\n\n')
+ for pkg in pkgs:
+ ensure_file.write('%s %s\n' % (pkg.package_name, pkg.version))
+ logging.info(' %s %s', pkg.package_name, pkg.version)
+ ensure_cmd = [
+ 'cipd',
+ 'ensure',
+ '-ensure-file',
+ ensure_path,
+ '-root',
+ cipd_root,
+ ]
+ try:
+ for line in cmd_helper.IterCmdOutputLines(ensure_cmd):
+ logging.info(' %s', line)
+ except subprocess.CalledProcessError as e:
+ raise AvdException(
+ 'Failed to install CIPD package %s: %s' % (pkg.package_name,
+ str(e)),
+ command=ensure_cmd)
+
+ def _MakeWriteable(self):
+ # The emulator requires that some files are writable.
+ for dirname, _, filenames in os.walk(self._emulator_home):
+ for f in filenames:
+ path = os.path.join(dirname, f)
+ mode = os.lstat(path).st_mode
+ if mode & stat.S_IRUSR:
+ mode = mode | stat.S_IWUSR
+ os.chmod(path, mode)
+
+ def _EditConfigs(self):
+ android_avd_home = os.path.join(self._emulator_home, 'avd')
+ avd_dir = os.path.join(android_avd_home, '%s.avd' % self._config.avd_name)
+
+ config_path = os.path.join(avd_dir, 'config.ini')
+ if os.path.exists(config_path):
+ with open(config_path) as config_file:
+ config_contents = ini.load(config_file)
+ else:
+ config_contents = {}
+
+ config_contents['hw.sdCard'] = 'true'
+ if self.avd_settings.sdcard.size:
+ sdcard_path = os.path.join(avd_dir, 'cr-sdcard.img')
+ if not os.path.exists(sdcard_path):
+ mksdcard_path = os.path.join(
+ os.path.dirname(self._emulator_path), 'mksdcard')
+ mksdcard_cmd = [
+ mksdcard_path,
+ self.avd_settings.sdcard.size,
+ sdcard_path,
+ ]
+ cmd_helper.RunCmd(mksdcard_cmd)
+
+ config_contents['hw.sdCard.path'] = sdcard_path
+
+ with open(config_path, 'w') as config_file:
+ ini.dump(config_contents, config_file)
+
+ def _Initialize(self):
+ if self._initialized:
+ return
+
+ with self._initializer_lock:
+ if self._initialized:
+ return
+
+ # Emulator start-up looks for the adb daemon. Make sure it's running.
+ adb_wrapper.AdbWrapper.StartServer()
+
+ # Emulator start-up tries to check for the SDK root by looking for
+ # platforms/ and platform-tools/. Ensure they exist.
+ # See http://bit.ly/2YAkyFE for context.
+ required_dirs = [
+ os.path.join(self._emulator_sdk_root, 'platforms'),
+ os.path.join(self._emulator_sdk_root, 'platform-tools'),
+ ]
+ for d in required_dirs:
+ if not os.path.exists(d):
+ os.makedirs(d)
+
+ def CreateInstance(self):
+ """Creates an AVD instance without starting it.
+
+ Returns:
+ An _AvdInstance.
+ """
+ self._Initialize()
+ return _AvdInstance(self._emulator_path, self._emulator_home, self._config)
+
+ def StartInstance(self):
+ """Starts an AVD instance.
+
+ Returns:
+ An _AvdInstance.
+ """
+ instance = self.CreateInstance()
+ instance.Start()
+ return instance
+
+
+class _AvdInstance(object):
+ """Represents a single running instance of an AVD.
+
+  This class should only be created via AvdConfig (e.g. CreateInstance
+  or StartInstance), but its other methods can be freely called.
+ """
+
+ def __init__(self, emulator_path, emulator_home, avd_config):
+ """Create an _AvdInstance object.
+
+ Args:
+ emulator_path: path to the emulator binary.
+ emulator_home: path to the emulator home directory.
+ avd_config: AVD config proto.
+ """
+ self._avd_config = avd_config
+ self._avd_name = avd_config.avd_name
+ self._emulator_home = emulator_home
+ self._emulator_path = emulator_path
+ self._emulator_proc = None
+ self._emulator_serial = None
+ self._sink = None
+
+ def __str__(self):
+ return '%s|%s' % (self._avd_name, (self._emulator_serial or id(self)))
+
+ def Start(self,
+ read_only=True,
+ snapshot_save=False,
+ window=False,
+ writable_system=False,
+ gpu_mode=_DEFAULT_GPU_MODE,
+ debug_tags=None):
+ """Starts the emulator running an instance of the given AVD."""
+
+ with tempfile_ext.TemporaryFileName() as socket_path, (contextlib.closing(
+ socket.socket(socket.AF_UNIX))) as sock:
+ sock.bind(socket_path)
+ emulator_cmd = [
+ self._emulator_path,
+ '-avd',
+ self._avd_name,
+ '-report-console',
+ 'unix:%s' % socket_path,
+ '-no-boot-anim',
+ ]
+
+ if read_only:
+ emulator_cmd.append('-read-only')
+ if not snapshot_save:
+ emulator_cmd.append('-no-snapshot-save')
+ if writable_system:
+ emulator_cmd.append('-writable-system')
+ # Note when "--gpu-mode" is set to "host":
+ # * It needs a valid DISPLAY env, even if "--emulator-window" is false.
+ # Otherwise it may throw errors like "Failed to initialize backend
+ # EGL display". See the code in https://bit.ly/3ruiMlB as an example
+ # to setup the DISPLAY env with xvfb.
+ # * It will not work under remote sessions like chrome remote desktop.
+ if gpu_mode:
+ emulator_cmd.extend(['-gpu', gpu_mode])
+ if debug_tags:
+ emulator_cmd.extend(['-debug', debug_tags])
+
+ emulator_env = {}
+ if self._emulator_home:
+ emulator_env['ANDROID_EMULATOR_HOME'] = self._emulator_home
+ if 'DISPLAY' in os.environ:
+ emulator_env['DISPLAY'] = os.environ.get('DISPLAY')
+ if window:
+ if 'DISPLAY' not in emulator_env:
+ raise AvdException('Emulator failed to start: DISPLAY not defined')
+ else:
+ emulator_cmd.append('-no-window')
+
+ sock.listen(1)
+
+ logging.info('Starting emulator with commands: %s',
+ ' '.join(emulator_cmd))
+
+ # TODO(jbudorick): Add support for logging emulator stdout & stderr at
+ # higher logging levels.
+ # Enable the emulator log when debug_tags is set.
+ if not debug_tags:
+ self._sink = open('/dev/null', 'w')
+ self._emulator_proc = cmd_helper.Popen(
+ emulator_cmd, stdout=self._sink, stderr=self._sink, env=emulator_env)
+
+ # Waits for the emulator to report its serial as requested via
+ # -report-console. See http://bit.ly/2lK3L18 for more.
+ def listen_for_serial(s):
+ logging.info('Waiting for connection from emulator.')
+ with contextlib.closing(s.accept()[0]) as conn:
+ val = conn.recv(1024)
+ return 'emulator-%d' % int(val)
+
+ try:
+ self._emulator_serial = timeout_retry.Run(
+ listen_for_serial, timeout=30, retries=0, args=[sock])
+ logging.info('%s started', self._emulator_serial)
+ except Exception as e:
+ self.Stop()
+ raise AvdException('Emulator failed to start: %s' % str(e))
+
+ def Stop(self):
+ """Stops the emulator process."""
+ if self._emulator_proc:
+ if self._emulator_proc.poll() is None:
+ if self._emulator_serial:
+ device_utils.DeviceUtils(self._emulator_serial).adb.Emu('kill')
+ else:
+ self._emulator_proc.terminate()
+ self._emulator_proc.wait()
+ self._emulator_proc = None
+
+ if self._sink:
+ self._sink.close()
+ self._sink = None
+
+ @property
+ def serial(self):
+ return self._emulator_serial
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/ini.py b/third_party/libwebrtc/build/android/pylib/local/emulator/ini.py
new file mode 100644
index 0000000000..2c5409934b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/ini.py
@@ -0,0 +1,58 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Basic .ini encoding and decoding."""
+
+
+import contextlib
+import os
+
+
+def loads(ini_str, strict=True):
+ ret = {}
+ for line in ini_str.splitlines():
+ key, val = line.split('=', 1)
+ key = key.strip()
+ val = val.strip()
+ if strict and key in ret:
+ raise ValueError('Multiple entries present for key "%s"' % key)
+ ret[key] = val
+
+ return ret
+
+
+def load(fp):
+ return loads(fp.read())
+
+
+def dumps(obj):
+ ret = ''
+ for k, v in sorted(obj.items()):
+ ret += '%s = %s\n' % (k, str(v))
+ return ret
+
+
+def dump(obj, fp):
+ fp.write(dumps(obj))
+
+
+@contextlib.contextmanager
+def update_ini_file(ini_file_path):
+ """Load and update the contents of an ini file.
+
+ Args:
+ ini_file_path: A string containing the absolute path of the ini file.
+ Yields:
+ The contents of the file, as a dict
+ """
+ if os.path.exists(ini_file_path):
+ with open(ini_file_path) as ini_file:
+ ini_contents = load(ini_file)
+ else:
+ ini_contents = {}
+
+ yield ini_contents
+
+ with open(ini_file_path, 'w') as ini_file:
+ dump(ini_contents, ini_file)
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/ini_test.py b/third_party/libwebrtc/build/android/pylib/local/emulator/ini_test.py
new file mode 100755
index 0000000000..279e964304
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/ini_test.py
@@ -0,0 +1,69 @@
+#! /usr/bin/env vpython3
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Tests for ini.py."""
+
+
+import textwrap
+import unittest
+
+from pylib.local.emulator import ini
+
+
+class IniTest(unittest.TestCase):
+ def testLoadsBasic(self):
+ ini_str = textwrap.dedent("""\
+ foo.bar = 1
+ foo.baz= example
+ bar.bad =/path/to/thing
+ """)
+ expected = {
+ 'foo.bar': '1',
+ 'foo.baz': 'example',
+ 'bar.bad': '/path/to/thing',
+ }
+ self.assertEqual(expected, ini.loads(ini_str))
+
+ def testLoadsStrictFailure(self):
+ ini_str = textwrap.dedent("""\
+ foo.bar = 1
+ foo.baz = example
+ bar.bad = /path/to/thing
+ foo.bar = duplicate
+ """)
+ with self.assertRaises(ValueError):
+ ini.loads(ini_str, strict=True)
+
+ def testLoadsPermissive(self):
+ ini_str = textwrap.dedent("""\
+ foo.bar = 1
+ foo.baz = example
+ bar.bad = /path/to/thing
+ foo.bar = duplicate
+ """)
+ expected = {
+ 'foo.bar': 'duplicate',
+ 'foo.baz': 'example',
+ 'bar.bad': '/path/to/thing',
+ }
+ self.assertEqual(expected, ini.loads(ini_str, strict=False))
+
+ def testDumpsBasic(self):
+ ini_contents = {
+ 'foo.bar': '1',
+ 'foo.baz': 'example',
+ 'bar.bad': '/path/to/thing',
+ }
+ # ini.dumps is expected to dump to string alphabetically
+ # by key.
+ expected = textwrap.dedent("""\
+ bar.bad = /path/to/thing
+ foo.bar = 1
+ foo.baz = example
+ """)
+ self.assertEqual(expected, ini.dumps(ini_contents))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/local_emulator_environment.py b/third_party/libwebrtc/build/android/pylib/local/emulator/local_emulator_environment.py
new file mode 100644
index 0000000000..3bd3c50a19
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/local_emulator_environment.py
@@ -0,0 +1,102 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+
+from six.moves import range # pylint: disable=redefined-builtin
+from devil import base_error
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.utils import parallelizer
+from devil.utils import reraiser_thread
+from devil.utils import timeout_retry
+from pylib.local.device import local_device_environment
+from pylib.local.emulator import avd
+
+# Mirroring https://bit.ly/2OjuxcS#23
+_MAX_ANDROID_EMULATORS = 16
+
+
+class LocalEmulatorEnvironment(local_device_environment.LocalDeviceEnvironment):
+
+ def __init__(self, args, output_manager, error_func):
+ super(LocalEmulatorEnvironment, self).__init__(args, output_manager,
+ error_func)
+ self._avd_config = avd.AvdConfig(args.avd_config)
+ if args.emulator_count < 1:
+ error_func('--emulator-count must be >= 1')
+ elif args.emulator_count >= _MAX_ANDROID_EMULATORS:
+ logging.warning('--emulator-count capped at 16.')
+ self._emulator_count = min(_MAX_ANDROID_EMULATORS, args.emulator_count)
+ self._emulator_window = args.emulator_window
+ self._writable_system = ((hasattr(args, 'use_webview_provider')
+ and args.use_webview_provider)
+ or (hasattr(args, 'replace_system_package')
+ and args.replace_system_package)
+ or (hasattr(args, 'system_packages_to_remove')
+ and args.system_packages_to_remove))
+
+ self._emulator_instances = []
+ self._device_serials = []
+
+ #override
+ def SetUp(self):
+ self._avd_config.Install()
+
+ emulator_instances = [
+ self._avd_config.CreateInstance() for _ in range(self._emulator_count)
+ ]
+
+ def start_emulator_instance(e):
+
+ def impl(e):
+ try:
+ e.Start(
+ window=self._emulator_window,
+ writable_system=self._writable_system)
+ except avd.AvdException:
+ logging.exception('Failed to start emulator instance.')
+ return None
+ try:
+ device_utils.DeviceUtils(e.serial).WaitUntilFullyBooted()
+ except base_error.BaseError:
+ e.Stop()
+ raise
+ return e
+
+ def retry_on_timeout(exc):
+ return (isinstance(exc, device_errors.CommandTimeoutError)
+ or isinstance(exc, reraiser_thread.TimeoutError))
+
+ return timeout_retry.Run(
+ impl,
+ timeout=120 if self._writable_system else 30,
+ retries=2,
+ args=[e],
+ retry_if_func=retry_on_timeout)
+
+ parallel_emulators = parallelizer.SyncParallelizer(emulator_instances)
+ self._emulator_instances = [
+ emu
+ for emu in parallel_emulators.pMap(start_emulator_instance).pGet(None)
+ if emu is not None
+ ]
+ self._device_serials = [e.serial for e in self._emulator_instances]
+
+ if not self._emulator_instances:
+ raise Exception('Failed to start any instances of the emulator.')
+ elif len(self._emulator_instances) < self._emulator_count:
+ logging.warning(
+ 'Running with fewer emulator instances than requested (%d vs %d)',
+ len(self._emulator_instances), self._emulator_count)
+
+ super(LocalEmulatorEnvironment, self).SetUp()
+
+ #override
+ def TearDown(self):
+ try:
+ super(LocalEmulatorEnvironment, self).TearDown()
+ finally:
+ parallelizer.SyncParallelizer(self._emulator_instances).Stop()
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/proto/__init__.py b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/__init__.py
new file mode 100644
index 0000000000..4a12e35c92
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd.proto b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd.proto
new file mode 100644
index 0000000000..b06da4900b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd.proto
@@ -0,0 +1,75 @@
+
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+syntax = "proto3";
+
+package tools.android.avd.proto;
+
+message CIPDPackage {
+ // CIPD package name.
+ string package_name = 1;
+ // CIPD package version to use.
+ // Ignored when creating AVD packages.
+ string version = 2;
+ // Path into which the package should be installed.
+ // src-relative.
+ string dest_path = 3;
+}
+
+message ScreenSettings {
+ // Screen height in pixels.
+ uint32 height = 1;
+
+ // Screen width in pixels.
+ uint32 width = 2;
+
+  // Screen density in dpi.
+ uint32 density = 3;
+}
+
+message SdcardSettings {
+ // Size of the sdcard that should be created for this AVD.
+ // Can be anything that `mksdcard` or `avdmanager -c` would accept:
+ // - a number of bytes
+ // - a number followed by K, M, or G, indicating that many
+ // KiB, MiB, or GiB, respectively.
+ string size = 1;
+}
+
+message AvdSettings {
+ // Settings pertaining to the AVD's screen.
+ ScreenSettings screen = 1;
+
+ // Settings pertaining to the AVD's sdcard.
+ SdcardSettings sdcard = 2;
+
+ // Advanced Features for AVD. The <key,value> pairs here will override the
+ // default ones in the given system image.
+ // See https://bit.ly/2P1qK2X for all the available keys.
+ // The values should be on, off, default, or null
+ map<string, string> advanced_features = 3;
+
+ // The physical RAM size on the device, in megabytes.
+ uint32 ram_size = 4;
+}
+
+message Avd {
+ // The emulator to use in running the AVD.
+ CIPDPackage emulator_package = 1;
+
+ // The system image to use.
+ CIPDPackage system_image_package = 2;
+ // The name of the system image to use, as reported by sdkmanager.
+ string system_image_name = 3;
+
+ // The AVD to create or use.
+ // (Only the package_name is used during AVD creation.)
+ CIPDPackage avd_package = 4;
+ // The name of the AVD to create or use.
+ string avd_name = 5;
+
+ // How to configure the AVD at creation.
+ AvdSettings avd_settings = 6;
+}
diff --git a/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd_pb2.py b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd_pb2.py
new file mode 100644
index 0000000000..49cc1aa830
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/emulator/proto/avd_pb2.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: avd.proto
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='avd.proto',
+ package='tools.android.avd.proto',
+ syntax='proto3',
+ serialized_options=None,
+ serialized_pb=b'\n\tavd.proto\x12\x17tools.android.avd.proto\"G\n\x0b\x43IPDPackage\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x11\n\tdest_path\x18\x03 \x01(\t\"@\n\x0eScreenSettings\x12\x0e\n\x06height\x18\x01 \x01(\r\x12\r\n\x05width\x18\x02 \x01(\r\x12\x0f\n\x07\x64\x65nsity\x18\x03 \x01(\r\"\x1e\n\x0eSdcardSettings\x12\x0c\n\x04size\x18\x01 \x01(\t\"\xa1\x02\n\x0b\x41vdSettings\x12\x37\n\x06screen\x18\x01 \x01(\x0b\x32\'.tools.android.avd.proto.ScreenSettings\x12\x37\n\x06sdcard\x18\x02 \x01(\x0b\x32\'.tools.android.avd.proto.SdcardSettings\x12U\n\x11\x61\x64vanced_features\x18\x03 \x03(\x0b\x32:.tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry\x12\x10\n\x08ram_size\x18\x04 \x01(\r\x1a\x37\n\x15\x41\x64vancedFeaturesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xad\x02\n\x03\x41vd\x12>\n\x10\x65mulator_package\x18\x01 \x01(\x0b\x32$.tools.android.avd.proto.CIPDPackage\x12\x42\n\x14system_image_package\x18\x02 \x01(\x0b\x32$.tools.android.avd.proto.CIPDPackage\x12\x19\n\x11system_image_name\x18\x03 \x01(\t\x12\x39\n\x0b\x61vd_package\x18\x04 \x01(\x0b\x32$.tools.android.avd.proto.CIPDPackage\x12\x10\n\x08\x61vd_name\x18\x05 \x01(\t\x12:\n\x0c\x61vd_settings\x18\x06 \x01(\x0b\x32$.tools.android.avd.proto.AvdSettingsb\x06proto3'
+)
+
+
+
+
+_CIPDPACKAGE = _descriptor.Descriptor(
+ name='CIPDPackage',
+ full_name='tools.android.avd.proto.CIPDPackage',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='package_name', full_name='tools.android.avd.proto.CIPDPackage.package_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='version', full_name='tools.android.avd.proto.CIPDPackage.version', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='dest_path', full_name='tools.android.avd.proto.CIPDPackage.dest_path', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=38,
+ serialized_end=109,
+)
+
+
+_SCREENSETTINGS = _descriptor.Descriptor(
+ name='ScreenSettings',
+ full_name='tools.android.avd.proto.ScreenSettings',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='height', full_name='tools.android.avd.proto.ScreenSettings.height', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='width', full_name='tools.android.avd.proto.ScreenSettings.width', index=1,
+ number=2, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='density', full_name='tools.android.avd.proto.ScreenSettings.density', index=2,
+ number=3, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=111,
+ serialized_end=175,
+)
+
+
+_SDCARDSETTINGS = _descriptor.Descriptor(
+ name='SdcardSettings',
+ full_name='tools.android.avd.proto.SdcardSettings',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='size', full_name='tools.android.avd.proto.SdcardSettings.size', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=177,
+ serialized_end=207,
+)
+
+
+_AVDSETTINGS_ADVANCEDFEATURESENTRY = _descriptor.Descriptor(
+ name='AdvancedFeaturesEntry',
+ full_name='tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry.key', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry.value', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=b'8\001',
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=444,
+ serialized_end=499,
+)
+
+_AVDSETTINGS = _descriptor.Descriptor(
+ name='AvdSettings',
+ full_name='tools.android.avd.proto.AvdSettings',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='screen', full_name='tools.android.avd.proto.AvdSettings.screen', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='sdcard', full_name='tools.android.avd.proto.AvdSettings.sdcard', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='advanced_features', full_name='tools.android.avd.proto.AvdSettings.advanced_features', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='ram_size', full_name='tools.android.avd.proto.AvdSettings.ram_size', index=3,
+ number=4, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_AVDSETTINGS_ADVANCEDFEATURESENTRY, ],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=210,
+ serialized_end=499,
+)
+
+
+_AVD = _descriptor.Descriptor(
+ name='Avd',
+ full_name='tools.android.avd.proto.Avd',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='emulator_package', full_name='tools.android.avd.proto.Avd.emulator_package', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='system_image_package', full_name='tools.android.avd.proto.Avd.system_image_package', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='system_image_name', full_name='tools.android.avd.proto.Avd.system_image_name', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='avd_package', full_name='tools.android.avd.proto.Avd.avd_package', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='avd_name', full_name='tools.android.avd.proto.Avd.avd_name', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='avd_settings', full_name='tools.android.avd.proto.Avd.avd_settings', index=5,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=502,
+ serialized_end=803,
+)
+
+_AVDSETTINGS_ADVANCEDFEATURESENTRY.containing_type = _AVDSETTINGS
+_AVDSETTINGS.fields_by_name['screen'].message_type = _SCREENSETTINGS
+_AVDSETTINGS.fields_by_name['sdcard'].message_type = _SDCARDSETTINGS
+_AVDSETTINGS.fields_by_name['advanced_features'].message_type = _AVDSETTINGS_ADVANCEDFEATURESENTRY
+_AVD.fields_by_name['emulator_package'].message_type = _CIPDPACKAGE
+_AVD.fields_by_name['system_image_package'].message_type = _CIPDPACKAGE
+_AVD.fields_by_name['avd_package'].message_type = _CIPDPACKAGE
+_AVD.fields_by_name['avd_settings'].message_type = _AVDSETTINGS
+DESCRIPTOR.message_types_by_name['CIPDPackage'] = _CIPDPACKAGE
+DESCRIPTOR.message_types_by_name['ScreenSettings'] = _SCREENSETTINGS
+DESCRIPTOR.message_types_by_name['SdcardSettings'] = _SDCARDSETTINGS
+DESCRIPTOR.message_types_by_name['AvdSettings'] = _AVDSETTINGS
+DESCRIPTOR.message_types_by_name['Avd'] = _AVD
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+CIPDPackage = _reflection.GeneratedProtocolMessageType('CIPDPackage', (_message.Message,), {
+ 'DESCRIPTOR' : _CIPDPACKAGE,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.CIPDPackage)
+ })
+_sym_db.RegisterMessage(CIPDPackage)
+
+ScreenSettings = _reflection.GeneratedProtocolMessageType('ScreenSettings', (_message.Message,), {
+ 'DESCRIPTOR' : _SCREENSETTINGS,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.ScreenSettings)
+ })
+_sym_db.RegisterMessage(ScreenSettings)
+
+SdcardSettings = _reflection.GeneratedProtocolMessageType('SdcardSettings', (_message.Message,), {
+ 'DESCRIPTOR' : _SDCARDSETTINGS,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.SdcardSettings)
+ })
+_sym_db.RegisterMessage(SdcardSettings)
+
+AvdSettings = _reflection.GeneratedProtocolMessageType('AvdSettings', (_message.Message,), {
+
+ 'AdvancedFeaturesEntry' : _reflection.GeneratedProtocolMessageType('AdvancedFeaturesEntry', (_message.Message,), {
+ 'DESCRIPTOR' : _AVDSETTINGS_ADVANCEDFEATURESENTRY,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.AvdSettings.AdvancedFeaturesEntry)
+ })
+ ,
+ 'DESCRIPTOR' : _AVDSETTINGS,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.AvdSettings)
+ })
+_sym_db.RegisterMessage(AvdSettings)
+_sym_db.RegisterMessage(AvdSettings.AdvancedFeaturesEntry)
+
+Avd = _reflection.GeneratedProtocolMessageType('Avd', (_message.Message,), {
+ 'DESCRIPTOR' : _AVD,
+ '__module__' : 'avd_pb2'
+ # @@protoc_insertion_point(class_scope:tools.android.avd.proto.Avd)
+ })
+_sym_db.RegisterMessage(Avd)
+
+
+_AVDSETTINGS_ADVANCEDFEATURESENTRY._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/third_party/libwebrtc/build/android/pylib/local/local_test_server_spawner.py b/third_party/libwebrtc/build/android/pylib/local/local_test_server_spawner.py
new file mode 100644
index 0000000000..f5f9875c24
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/local_test_server_spawner.py
@@ -0,0 +1,101 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import json
+import time
+
+from six.moves import range # pylint: disable=redefined-builtin
+from devil.android import forwarder
+from devil.android import ports
+from pylib.base import test_server
+from pylib.constants import host_paths
+
+with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
+ import chrome_test_server_spawner
+
+
+# The tests should not need more than one test server instance.
+MAX_TEST_SERVER_INSTANCES = 1
+
+
+def _WaitUntil(predicate, max_attempts=5):
+ """Blocks until the provided predicate (function) is true.
+
+ Returns:
+ Whether the provided predicate was satisfied once (before the timeout).
+ """
+ sleep_time_sec = 0.025
+ for _ in range(1, max_attempts):
+ if predicate():
+ return True
+ time.sleep(sleep_time_sec)
+ sleep_time_sec = min(1, sleep_time_sec * 2) # Don't wait more than 1 sec.
+ return False
+
+
+class PortForwarderAndroid(chrome_test_server_spawner.PortForwarder):
+ def __init__(self, device, tool):
+ self.device = device
+ self.tool = tool
+
+ def Map(self, port_pairs):
+ forwarder.Forwarder.Map(port_pairs, self.device, self.tool)
+
+ def GetDevicePortForHostPort(self, host_port):
+ return forwarder.Forwarder.DevicePortForHostPort(host_port)
+
+ def WaitHostPortAvailable(self, port):
+ return _WaitUntil(lambda: ports.IsHostPortAvailable(port))
+
+ def WaitPortNotAvailable(self, port):
+ return _WaitUntil(lambda: not ports.IsHostPortAvailable(port))
+
+ def WaitDevicePortReady(self, port):
+ return _WaitUntil(lambda: ports.IsDevicePortUsed(self.device, port))
+
+ def Unmap(self, device_port):
+ forwarder.Forwarder.UnmapDevicePort(device_port, self.device)
+
+
+class LocalTestServerSpawner(test_server.TestServer):
+
+ def __init__(self, port, device, tool):
+ super(LocalTestServerSpawner, self).__init__()
+ self._device = device
+ self._spawning_server = chrome_test_server_spawner.SpawningServer(
+ port, PortForwarderAndroid(device, tool), MAX_TEST_SERVER_INSTANCES)
+ self._tool = tool
+
+ @property
+ def server_address(self):
+ return self._spawning_server.server.server_address
+
+ @property
+ def port(self):
+ return self.server_address[1]
+
+ #override
+ def SetUp(self):
+ # See net/test/spawned_test_server/remote_test_server.h for description of
+ # the fields in the config file.
+ test_server_config = json.dumps({
+ 'spawner_url_base': 'http://localhost:%d' % self.port
+ })
+ self._device.WriteFile(
+ '%s/net-test-server-config' % self._device.GetExternalStoragePath(),
+ test_server_config)
+ forwarder.Forwarder.Map(
+ [(self.port, self.port)], self._device, self._tool)
+ self._spawning_server.Start()
+
+ #override
+ def Reset(self):
+ self._spawning_server.CleanupState()
+
+ #override
+ def TearDown(self):
+ self.Reset()
+ self._spawning_server.Stop()
+ forwarder.Forwarder.UnmapDevicePort(self.port, self._device)
diff --git a/third_party/libwebrtc/build/android/pylib/local/machine/__init__.py b/third_party/libwebrtc/build/android/pylib/local/machine/__init__.py
new file mode 100644
index 0000000000..ca3e206fdd
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/machine/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_environment.py b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_environment.py
new file mode 100644
index 0000000000..447204cfd6
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_environment.py
@@ -0,0 +1,25 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import devil_chromium
+from pylib import constants
+from pylib.base import environment
+
+
+class LocalMachineEnvironment(environment.Environment):
+
+ def __init__(self, _args, output_manager, _error_func):
+ super(LocalMachineEnvironment, self).__init__(output_manager)
+
+ devil_chromium.Initialize(
+ output_directory=constants.GetOutDirectory())
+
+ #override
+ def SetUp(self):
+ pass
+
+ #override
+ def TearDown(self):
+ pass
diff --git a/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run.py b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run.py
new file mode 100644
index 0000000000..6cdbf47570
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run.py
@@ -0,0 +1,309 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import collections
+import json
+import logging
+import multiprocessing
+import os
+import select
+import subprocess
+import sys
+import zipfile
+
+from six.moves import range # pylint: disable=redefined-builtin
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.base import test_run
+from pylib.constants import host_paths
+from pylib.results import json_results
+from py_utils import tempfile_ext
+
+
+# These Test classes are used for running tests and are excluded in the test
+# runner. See:
+# https://android.googlesource.com/platform/frameworks/testing/+/android-support-test/runner/src/main/java/android/support/test/internal/runner/TestRequestBuilder.java
+# base/test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java # pylint: disable=line-too-long
+_EXCLUDED_CLASSES_PREFIXES = ('android', 'junit', 'org/bouncycastle/util',
+ 'org/hamcrest', 'org/junit', 'org/mockito')
+
+# Suites we shouldn't shard, usually because they don't contain enough test
+# cases.
+_EXCLUDED_SUITES = {
+ 'password_check_junit_tests',
+ 'touch_to_fill_junit_tests',
+}
+
+
+# It can actually take longer to run if you shard too much, especially on
+# smaller suites. Locally media_base_junit_tests takes 4.3 sec with 1 shard,
+# and 6 sec with 2 or more shards.
+_MIN_CLASSES_PER_SHARD = 8
+
+
+class LocalMachineJunitTestRun(test_run.TestRun):
+ def __init__(self, env, test_instance):
+ super(LocalMachineJunitTestRun, self).__init__(env, test_instance)
+
+ #override
+ def TestPackage(self):
+ return self._test_instance.suite
+
+ #override
+ def SetUp(self):
+ pass
+
+ def _CreateJarArgsList(self, json_result_file_paths, group_test_list, shards):
+ # Creates a list of jar_args. The important thing is each jar_args list
+ # has a different json_results file for writing test results to and that
+ # each list of jar_args has its own test to run as specified in the
+ # -gtest-filter.
+ jar_args_list = [['-json-results-file', result_file]
+ for result_file in json_result_file_paths]
+ for index, jar_arg in enumerate(jar_args_list):
+ if shards > 1:
+ jar_arg.extend(['-gtest-filter', ':'.join(group_test_list[index])])
+ elif self._test_instance.test_filter:
+ jar_arg.extend(['-gtest-filter', self._test_instance.test_filter])
+
+ if self._test_instance.package_filter:
+ jar_arg.extend(['-package-filter', self._test_instance.package_filter])
+ if self._test_instance.runner_filter:
+ jar_arg.extend(['-runner-filter', self._test_instance.runner_filter])
+
+ return jar_args_list
+
+ def _CreateJvmArgsList(self):
+ # Creates a list of jvm_args (robolectric, code coverage, etc...)
+ jvm_args = [
+ '-Drobolectric.dependency.dir=%s' %
+ self._test_instance.robolectric_runtime_deps_dir,
+ '-Ddir.source.root=%s' % constants.DIR_SOURCE_ROOT,
+ '-Drobolectric.resourcesMode=binary',
+ ]
+ if logging.getLogger().isEnabledFor(logging.INFO):
+ jvm_args += ['-Drobolectric.logging=stdout']
+ if self._test_instance.debug_socket:
+ jvm_args += [
+ '-agentlib:jdwp=transport=dt_socket'
+ ',server=y,suspend=y,address=%s' % self._test_instance.debug_socket
+ ]
+
+ if self._test_instance.coverage_dir:
+ if not os.path.exists(self._test_instance.coverage_dir):
+ os.makedirs(self._test_instance.coverage_dir)
+ elif not os.path.isdir(self._test_instance.coverage_dir):
+ raise Exception('--coverage-dir takes a directory, not file path.')
+ if self._test_instance.coverage_on_the_fly:
+ jacoco_coverage_file = os.path.join(
+ self._test_instance.coverage_dir,
+ '%s.exec' % self._test_instance.suite)
+ jacoco_agent_path = os.path.join(host_paths.DIR_SOURCE_ROOT,
+ 'third_party', 'jacoco', 'lib',
+ 'jacocoagent.jar')
+
+ # inclnolocationclasses is false to prevent no class def found error.
+ jacoco_args = '-javaagent:{}=destfile={},inclnolocationclasses=false'
+ jvm_args.append(
+ jacoco_args.format(jacoco_agent_path, jacoco_coverage_file))
+ else:
+ jvm_args.append('-Djacoco-agent.destfile=%s' %
+ os.path.join(self._test_instance.coverage_dir,
+ '%s.exec' % self._test_instance.suite))
+
+ return jvm_args
+
+ #override
+ def RunTests(self, results):
+ wrapper_path = os.path.join(constants.GetOutDirectory(), 'bin', 'helper',
+ self._test_instance.suite)
+
+    # This avoids searching through the classpath jars for test classes,
+ # which takes about 1-2 seconds.
+ # Do not shard when a test filter is present since we do not know at this
+ # point which tests will be filtered out.
+ if (self._test_instance.shards == 1 or self._test_instance.test_filter
+ or self._test_instance.suite in _EXCLUDED_SUITES):
+ test_classes = []
+ shards = 1
+ else:
+ test_classes = _GetTestClasses(wrapper_path)
+ shards = ChooseNumOfShards(test_classes, self._test_instance.shards)
+
+ logging.info('Running tests on %d shard(s).', shards)
+ group_test_list = GroupTestsForShard(shards, test_classes)
+
+ with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
+ cmd_list = [[wrapper_path] for _ in range(shards)]
+ json_result_file_paths = [
+ os.path.join(temp_dir, 'results%d.json' % i) for i in range(shards)
+ ]
+ jar_args_list = self._CreateJarArgsList(json_result_file_paths,
+ group_test_list, shards)
+ for i in range(shards):
+ cmd_list[i].extend(['--jar-args', '"%s"' % ' '.join(jar_args_list[i])])
+
+ jvm_args = self._CreateJvmArgsList()
+ if jvm_args:
+ for cmd in cmd_list:
+ cmd.extend(['--jvm-args', '"%s"' % ' '.join(jvm_args)])
+
+ AddPropertiesJar(cmd_list, temp_dir, self._test_instance.resource_apk)
+
+ procs = [
+ subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT) for cmd in cmd_list
+ ]
+ PrintProcessesStdout(procs)
+
+ results_list = []
+ try:
+ for json_file_path in json_result_file_paths:
+ with open(json_file_path, 'r') as f:
+ results_list += json_results.ParseResultsFromJson(
+ json.loads(f.read()))
+ except IOError:
+ # In the case of a failure in the JUnit or Robolectric test runner
+ # the output json file may never be written.
+ results_list = [
+ base_test_result.BaseTestResult(
+ 'Test Runner Failure', base_test_result.ResultType.UNKNOWN)
+ ]
+
+ test_run_results = base_test_result.TestRunResults()
+ test_run_results.AddResults(results_list)
+ results.append(test_run_results)
+
+ #override
+ def TearDown(self):
+ pass
+
+
+def AddPropertiesJar(cmd_list, temp_dir, resource_apk):
+ # Create properties file for Robolectric test runners so they can find the
+ # binary resources.
+ properties_jar_path = os.path.join(temp_dir, 'properties.jar')
+ with zipfile.ZipFile(properties_jar_path, 'w') as z:
+ z.writestr('com/android/tools/test_config.properties',
+ 'android_resource_apk=%s' % resource_apk)
+
+ for cmd in cmd_list:
+ cmd.extend(['--classpath', properties_jar_path])
+
+
+def ChooseNumOfShards(test_classes, shards):
+ # Don't override requests to not shard.
+ if shards == 1:
+ return 1
+
+ # Sharding doesn't reduce runtime on just a few tests.
+ if shards > (len(test_classes) // _MIN_CLASSES_PER_SHARD) or shards < 1:
+ shards = max(1, (len(test_classes) // _MIN_CLASSES_PER_SHARD))
+
+ # Local tests of explicit --shard values show that max speed is achieved
+ # at cpu_count() / 2.
+ # Using -XX:TieredStopAtLevel=1 is required for this result. The flag reduces
+ # CPU time by two-thirds, making sharding more effective.
+ shards = max(1, min(shards, multiprocessing.cpu_count() // 2))
+ # Can have at minimum one test_class per shard.
+ shards = min(len(test_classes), shards)
+
+ return shards
+
+
+def GroupTestsForShard(num_of_shards, test_classes):
+  """Groups tests that will be run on each shard.
+
+ Args:
+ num_of_shards: number of shards to split tests between.
+ test_classes: A list of test_class files in the jar.
+
+ Return:
+    A dict mapping shard index to that shard's list of test class filters.
+ """
+ test_dict = {i: [] for i in range(num_of_shards)}
+
+  # Round robin test distribution to reduce chance that a sequential group of
+ # classes all have an unusually high number of tests.
+ for count, test_cls in enumerate(test_classes):
+ test_cls = test_cls.replace('.class', '*')
+ test_cls = test_cls.replace('/', '.')
+ test_dict[count % num_of_shards].append(test_cls)
+
+ return test_dict
+
+
+def PrintProcessesStdout(procs):
+ """Prints the stdout of all the processes.
+
+ Buffers the stdout of the processes and prints it when finished.
+
+ Args:
+ procs: A list of subprocesses.
+
+ Returns: N/A
+ """
+ streams = [p.stdout for p in procs]
+ outputs = collections.defaultdict(list)
+ first_fd = streams[0].fileno()
+
+ while streams:
+ rstreams, _, _ = select.select(streams, [], [])
+ for stream in rstreams:
+ line = stream.readline()
+ if line:
+ # Print out just one output so user can see work being done rather
+ # than waiting for it all at the end.
+ if stream.fileno() == first_fd:
+ sys.stdout.write(line)
+ else:
+ outputs[stream.fileno()].append(line)
+ else:
+ streams.remove(stream) # End of stream.
+
+ for p in procs:
+ sys.stdout.write(''.join(outputs[p.stdout.fileno()]))
+
+
+def _GetTestClasses(file_path):
+ test_jar_paths = subprocess.check_output([file_path, '--print-classpath'])
+ test_jar_paths = test_jar_paths.split(':')
+
+ test_classes = []
+ for test_jar_path in test_jar_paths:
+ # Avoid searching through jars that are for the test runner.
+ # TODO(crbug.com/1144077): Use robolectric buildconfig file arg.
+ if 'third_party/robolectric/' in test_jar_path:
+ continue
+
+ test_classes += _GetTestClassesFromJar(test_jar_path)
+
+ logging.info('Found %d test classes in class_path jars.', len(test_classes))
+ return test_classes
+
+
+def _GetTestClassesFromJar(test_jar_path):
+ """Returns a list of test classes from a jar.
+
+  Test files end in Test; this is enforced by:
+ //tools/android/errorprone_plugin/src/org/chromium/tools/errorprone
+ /plugin/TestClassNameCheck.java
+
+ Args:
+ test_jar_path: Path to the jar.
+
+ Return:
+ Returns a list of test classes that were in the jar.
+ """
+ class_list = []
+ with zipfile.ZipFile(test_jar_path, 'r') as zip_f:
+ for test_class in zip_f.namelist():
+ if test_class.startswith(_EXCLUDED_CLASSES_PREFIXES):
+ continue
+ if test_class.endswith('Test.class') and '$' not in test_class:
+ class_list.append(test_class)
+
+ return class_list
diff --git a/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run_test.py b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run_test.py
new file mode 100755
index 0000000000..553451d650
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/local/machine/local_machine_junit_test_run_test.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env vpython3
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+
+import os
+import unittest
+
+from pylib.local.machine import local_machine_junit_test_run
+from py_utils import tempfile_ext
+from mock import patch # pylint: disable=import-error
+
+
+class LocalMachineJunitTestRunTests(unittest.TestCase):
+ def testAddPropertiesJar(self):
+ with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
+ apk = 'resource_apk'
+ cmd_list = []
+ local_machine_junit_test_run.AddPropertiesJar(cmd_list, temp_dir, apk)
+ self.assertEqual(cmd_list, [])
+ cmd_list = [['test1']]
+ local_machine_junit_test_run.AddPropertiesJar(cmd_list, temp_dir, apk)
+ self.assertEqual(
+ cmd_list[0],
+ ['test1', '--classpath',
+ os.path.join(temp_dir, 'properties.jar')])
+ cmd_list = [['test1'], ['test2']]
+ local_machine_junit_test_run.AddPropertiesJar(cmd_list, temp_dir, apk)
+ self.assertEqual(len(cmd_list[0]), 3)
+ self.assertEqual(
+ cmd_list[1],
+ ['test2', '--classpath',
+ os.path.join(temp_dir, 'properties.jar')])
+
+ @patch('multiprocessing.cpu_count')
+ def testChooseNumOfShards(self, mock_cpu_count):
+ mock_cpu_count.return_value = 36
+ # Test shards is 1 when filter is set.
+ test_shards = 1
+ test_classes = [1] * 50
+ shards = local_machine_junit_test_run.ChooseNumOfShards(
+ test_classes, test_shards)
+ self.assertEqual(1, shards)
+
+ # Tests setting shards.
+ test_shards = 4
+ shards = local_machine_junit_test_run.ChooseNumOfShards(
+ test_classes, test_shards)
+ self.assertEqual(4, shards)
+
+ # Tests using min_class per shards.
+ test_classes = [1] * 20
+ test_shards = 8
+ shards = local_machine_junit_test_run.ChooseNumOfShards(
+ test_classes, test_shards)
+ self.assertEqual(2, shards)
+
+ def testGroupTestsForShard(self):
+ test_classes = []
+ results = local_machine_junit_test_run.GroupTestsForShard(1, test_classes)
+ self.assertDictEqual(results, {0: []})
+
+ test_classes = ['dir/test.class'] * 5
+ results = local_machine_junit_test_run.GroupTestsForShard(1, test_classes)
+ self.assertDictEqual(results, {0: ['dir.test*'] * 5})
+
+ test_classes = ['dir/test.class'] * 5
+ results = local_machine_junit_test_run.GroupTestsForShard(2, test_classes)
+ ans_dict = {
+ 0: ['dir.test*'] * 3,
+ 1: ['dir.test*'] * 2,
+ }
+ self.assertDictEqual(results, ans_dict)
+
+ test_classes = ['a10 warthog', 'b17', 'SR71']
+ results = local_machine_junit_test_run.GroupTestsForShard(3, test_classes)
+ ans_dict = {
+ 0: ['a10 warthog'],
+ 1: ['b17'],
+ 2: ['SR71'],
+ }
+ self.assertDictEqual(results, ans_dict)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/monkey/__init__.py b/third_party/libwebrtc/build/android/pylib/monkey/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/monkey/__init__.py
diff --git a/third_party/libwebrtc/build/android/pylib/monkey/monkey_test_instance.py b/third_party/libwebrtc/build/android/pylib/monkey/monkey_test_instance.py
new file mode 100644
index 0000000000..0d5aed6095
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/monkey/monkey_test_instance.py
@@ -0,0 +1,73 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import random
+
+from pylib import constants
+from pylib.base import test_instance
+
+
+_SINGLE_EVENT_TIMEOUT = 100 # Milliseconds
+
+class MonkeyTestInstance(test_instance.TestInstance):
+
+ def __init__(self, args, _):
+ super(MonkeyTestInstance, self).__init__()
+
+ self._categories = args.categories
+ self._event_count = args.event_count
+ self._seed = args.seed or random.randint(1, 100)
+ self._throttle = args.throttle
+ self._verbose_count = args.verbose_count
+
+ self._package = constants.PACKAGE_INFO[args.browser].package
+ self._activity = constants.PACKAGE_INFO[args.browser].activity
+
+ self._timeout_s = (
+ self.event_count * (self.throttle + _SINGLE_EVENT_TIMEOUT)) / 1000
+
+ #override
+ def TestType(self):
+ return 'monkey'
+
+ #override
+ def SetUp(self):
+ pass
+
+ #override
+ def TearDown(self):
+ pass
+
+ @property
+ def activity(self):
+ return self._activity
+
+ @property
+ def categories(self):
+ return self._categories
+
+ @property
+ def event_count(self):
+ return self._event_count
+
+ @property
+ def package(self):
+ return self._package
+
+ @property
+ def seed(self):
+ return self._seed
+
+ @property
+ def throttle(self):
+ return self._throttle
+
+ @property
+ def timeout(self):
+ return self._timeout_s
+
+ @property
+ def verbose_count(self):
+ return self._verbose_count
diff --git a/third_party/libwebrtc/build/android/pylib/output/__init__.py b/third_party/libwebrtc/build/android/pylib/output/__init__.py
new file mode 100644
index 0000000000..a22a6ee39a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/output/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/output/local_output_manager.py b/third_party/libwebrtc/build/android/pylib/output/local_output_manager.py
new file mode 100644
index 0000000000..2b5c0f4393
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/output/local_output_manager.py
@@ -0,0 +1,49 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+import os
+import shutil
+
+try:
+ from urllib.parse import quote
+except ImportError:
+ from urllib import quote
+
+from pylib.base import output_manager
+
+
+class LocalOutputManager(output_manager.OutputManager):
+ """Saves and manages test output files locally in output directory.
+
+  Files will be saved in {output_dir}/TEST_RESULTS_{timestamp}.
+ """
+
+ def __init__(self, output_dir):
+ super(LocalOutputManager, self).__init__()
+ timestamp = time.strftime(
+ '%Y_%m_%dT%H_%M_%S', time.localtime())
+ self._output_root = os.path.abspath(os.path.join(
+ output_dir, 'TEST_RESULTS_%s' % timestamp))
+
+ #override
+ def _CreateArchivedFile(self, out_filename, out_subdir, datatype):
+ return LocalArchivedFile(
+ out_filename, out_subdir, datatype, self._output_root)
+
+
+class LocalArchivedFile(output_manager.ArchivedFile):
+
+ def __init__(self, out_filename, out_subdir, datatype, out_root):
+ super(LocalArchivedFile, self).__init__(
+ out_filename, out_subdir, datatype)
+ self._output_path = os.path.join(out_root, out_subdir, out_filename)
+
+ def _Link(self):
+ return 'file://%s' % quote(self._output_path)
+
+ def _Archive(self):
+ if not os.path.exists(os.path.dirname(self._output_path)):
+ os.makedirs(os.path.dirname(self._output_path))
+ shutil.copy(self.name, self._output_path)
diff --git a/third_party/libwebrtc/build/android/pylib/output/local_output_manager_test.py b/third_party/libwebrtc/build/android/pylib/output/local_output_manager_test.py
new file mode 100755
index 0000000000..14f556c42f
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/output/local_output_manager_test.py
@@ -0,0 +1,34 @@
+#! /usr/bin/env vpython3
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+import tempfile
+import shutil
+import unittest
+
+from pylib.base import output_manager
+from pylib.base import output_manager_test_case
+from pylib.output import local_output_manager
+
+
+class LocalOutputManagerTest(output_manager_test_case.OutputManagerTestCase):
+
+ def setUp(self):
+ self._output_dir = tempfile.mkdtemp()
+ self._output_manager = local_output_manager.LocalOutputManager(
+ self._output_dir)
+
+ def testUsableTempFile(self):
+ self.assertUsableTempFile(
+ self._output_manager._CreateArchivedFile(
+ 'test_file', 'test_subdir', output_manager.Datatype.TEXT))
+
+ def tearDown(self):
+ shutil.rmtree(self._output_dir)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/output/noop_output_manager.py b/third_party/libwebrtc/build/android/pylib/output/noop_output_manager.py
new file mode 100644
index 0000000000..d29a7432f9
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/output/noop_output_manager.py
@@ -0,0 +1,42 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from pylib.base import output_manager
+
+# TODO(jbudorick): This class is currently mostly unused.
+# Add a --bot-mode argument that all bots pass. If --bot-mode and
+# --local-output args are both not passed to test runner then use this
+# as the output manager impl.
+
+# pylint: disable=no-self-use
+
+class NoopOutputManager(output_manager.OutputManager):
+
+ def __init__(self):
+ super(NoopOutputManager, self).__init__()
+
+ #override
+ def _CreateArchivedFile(self, out_filename, out_subdir, datatype):
+ del out_filename, out_subdir, datatype
+ return NoopArchivedFile()
+
+
+class NoopArchivedFile(output_manager.ArchivedFile):
+
+ def __init__(self):
+ super(NoopArchivedFile, self).__init__(None, None, None)
+
+ def Link(self):
+ """NoopArchivedFiles are not retained."""
+ return ''
+
+ def _Link(self):
+ pass
+
+ def Archive(self):
+ """NoopArchivedFiles are not retained."""
+ pass
+
+ def _Archive(self):
+ pass
diff --git a/third_party/libwebrtc/build/android/pylib/output/noop_output_manager_test.py b/third_party/libwebrtc/build/android/pylib/output/noop_output_manager_test.py
new file mode 100755
index 0000000000..4335563383
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/output/noop_output_manager_test.py
@@ -0,0 +1,27 @@
+#! /usr/bin/env vpython3
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+import unittest
+
+from pylib.base import output_manager
+from pylib.base import output_manager_test_case
+from pylib.output import noop_output_manager
+
+
+class NoopOutputManagerTest(output_manager_test_case.OutputManagerTestCase):
+
+ def setUp(self):
+ self._output_manager = noop_output_manager.NoopOutputManager()
+
+ def testUsableTempFile(self):
+ self.assertUsableTempFile(
+ self._output_manager._CreateArchivedFile(
+ 'test_file', 'test_subdir', output_manager.Datatype.TEXT))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/output/remote_output_manager.py b/third_party/libwebrtc/build/android/pylib/output/remote_output_manager.py
new file mode 100644
index 0000000000..9fdb4bf65f
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/output/remote_output_manager.py
@@ -0,0 +1,89 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import hashlib
+import os
+
+from pylib.base import output_manager
+from pylib.output import noop_output_manager
+from pylib.utils import logdog_helper
+from pylib.utils import google_storage_helper
+
+
+class RemoteOutputManager(output_manager.OutputManager):
+
+ def __init__(self, bucket):
+ """Uploads output files to Google Storage or LogDog.
+
+ Files will either be uploaded directly to Google Storage or LogDog
+ depending on the datatype.
+
+    Args:
+ bucket: Bucket to use when saving to Google Storage.
+ """
+ super(RemoteOutputManager, self).__init__()
+ self._bucket = bucket
+
+ #override
+ def _CreateArchivedFile(self, out_filename, out_subdir, datatype):
+ if datatype == output_manager.Datatype.TEXT:
+ try:
+ logdog_helper.get_logdog_client()
+ return LogdogArchivedFile(out_filename, out_subdir, datatype)
+ except RuntimeError:
+ return noop_output_manager.NoopArchivedFile()
+ else:
+ if self._bucket is None:
+ return noop_output_manager.NoopArchivedFile()
+ return GoogleStorageArchivedFile(
+ out_filename, out_subdir, datatype, self._bucket)
+
+
+class LogdogArchivedFile(output_manager.ArchivedFile):
+
+ def __init__(self, out_filename, out_subdir, datatype):
+ super(LogdogArchivedFile, self).__init__(out_filename, out_subdir, datatype)
+ self._stream_name = '%s_%s' % (out_subdir, out_filename)
+
+ def _Link(self):
+ return logdog_helper.get_viewer_url(self._stream_name)
+
+ def _Archive(self):
+ with open(self.name, 'r') as f:
+ logdog_helper.text(self._stream_name, f.read())
+
+
+class GoogleStorageArchivedFile(output_manager.ArchivedFile):
+
+ def __init__(self, out_filename, out_subdir, datatype, bucket):
+ super(GoogleStorageArchivedFile, self).__init__(
+ out_filename, out_subdir, datatype)
+ self._bucket = bucket
+ self._upload_path = None
+ self._content_addressed = None
+
+ def _PrepareArchive(self):
+ self._content_addressed = (self._datatype in (
+ output_manager.Datatype.HTML,
+ output_manager.Datatype.PNG,
+ output_manager.Datatype.JSON))
+ if self._content_addressed:
+ sha1 = hashlib.sha1()
+ with open(self.name, 'rb') as f:
+ sha1.update(f.read())
+ self._upload_path = sha1.hexdigest()
+ else:
+ self._upload_path = os.path.join(self._out_subdir, self._out_filename)
+
+ def _Link(self):
+ return google_storage_helper.get_url_link(
+ self._upload_path, self._bucket)
+
+ def _Archive(self):
+ if (self._content_addressed and
+ google_storage_helper.exists(self._upload_path, self._bucket)):
+ return
+
+ google_storage_helper.upload(
+ self._upload_path, self.name, self._bucket, content_type=self._datatype)
diff --git a/third_party/libwebrtc/build/android/pylib/output/remote_output_manager_test.py b/third_party/libwebrtc/build/android/pylib/output/remote_output_manager_test.py
new file mode 100755
index 0000000000..c9582f5959
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/output/remote_output_manager_test.py
@@ -0,0 +1,32 @@
+#! /usr/bin/env vpython3
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+import unittest
+
+from pylib.base import output_manager
+from pylib.base import output_manager_test_case
+from pylib.output import remote_output_manager
+
+import mock # pylint: disable=import-error
+
+
+@mock.patch('pylib.utils.google_storage_helper')
+class RemoteOutputManagerTest(output_manager_test_case.OutputManagerTestCase):
+
+ def setUp(self):
+ self._output_manager = remote_output_manager.RemoteOutputManager(
+ 'this-is-a-fake-bucket')
+
+ def testUsableTempFile(self, google_storage_helper_mock):
+ del google_storage_helper_mock
+ self.assertUsableTempFile(
+ self._output_manager._CreateArchivedFile(
+ 'test_file', 'test_subdir', output_manager.Datatype.TEXT))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/pexpect.py b/third_party/libwebrtc/build/android/pylib/pexpect.py
new file mode 100644
index 0000000000..cf59fb0f6d
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/pexpect.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import absolute_import
+
+import os
+import sys
+
+_CHROME_SRC = os.path.join(
+ os.path.abspath(os.path.dirname(__file__)), '..', '..', '..')
+
+_PEXPECT_PATH = os.path.join(_CHROME_SRC, 'third_party', 'pexpect')
+if _PEXPECT_PATH not in sys.path:
+ sys.path.append(_PEXPECT_PATH)
+
+# pexpect is not available on all platforms. We allow this file to be imported
+# on platforms without pexpect and only fail when pexpect is actually used.
+try:
+ from pexpect import * # pylint: disable=W0401,W0614
+except ImportError:
+ pass
diff --git a/third_party/libwebrtc/build/android/pylib/restart_adbd.sh b/third_party/libwebrtc/build/android/pylib/restart_adbd.sh
new file mode 100755
index 0000000000..393b2ebac0
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/restart_adbd.sh
@@ -0,0 +1,20 @@
+#!/system/bin/sh
+
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Android shell script to restart adbd on the device. This has to be run
+# atomically as a shell script because stopping adbd prevents further commands
+# from running (even if called in the same adb shell).
+
+trap '' HUP
+trap '' TERM
+trap '' PIPE
+
+function restart() {
+ stop adbd
+ start adbd
+}
+
+restart &
diff --git a/third_party/libwebrtc/build/android/pylib/results/__init__.py b/third_party/libwebrtc/build/android/pylib/results/__init__.py
new file mode 100644
index 0000000000..4d6aabb953
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/__init__.py b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/__init__.py
new file mode 100644
index 0000000000..4d6aabb953
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator.py b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator.py
new file mode 100644
index 0000000000..ff035ec1c7
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator.py
@@ -0,0 +1,702 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+#
+# Most of this file was ported over from Blink's
+# tools/blinkpy/web_tests/layout_package/json_results_generator.py
+# tools/blinkpy/common/net/file_uploader.py
+#
+
+import json
+import logging
+import mimetypes
+import os
+import time
+try:
+ from urllib.request import urlopen, Request
+ from urllib.error import HTTPError, URLError
+ from urllib.parse import quote
+except ImportError:
+ from urllib import quote
+ from urllib2 import urlopen, HTTPError, URLError, Request
+
+_log = logging.getLogger(__name__)
+
+_JSON_PREFIX = 'ADD_RESULTS('
+_JSON_SUFFIX = ');'
+
+
+def HasJSONWrapper(string):
+ return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)
+
+
+def StripJSONWrapper(json_content):
+ # FIXME: Kill this code once the server returns json instead of jsonp.
+ if HasJSONWrapper(json_content):
+ return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)]
+ return json_content
+
+
+def WriteJSON(json_object, file_path, callback=None):
+ # Specify separators in order to get compact encoding.
+ json_string = json.dumps(json_object, separators=(',', ':'))
+ if callback:
+ json_string = callback + '(' + json_string + ');'
+ with open(file_path, 'w') as fp:
+ fp.write(json_string)
+
+
+def ConvertTrieToFlatPaths(trie, prefix=None):
+ """Flattens the trie of paths, prepending a prefix to each."""
+ result = {}
+ for name, data in trie.items():
+ if prefix:
+ name = prefix + '/' + name
+
+ if len(data) and not 'results' in data:
+ result.update(ConvertTrieToFlatPaths(data, name))
+ else:
+ result[name] = data
+
+ return result
+
+
+def AddPathToTrie(path, value, trie):
+ """Inserts a single path and value into a directory trie structure."""
+ if not '/' in path:
+ trie[path] = value
+ return
+
+ directory, _, rest = path.partition('/')
+ if not directory in trie:
+ trie[directory] = {}
+ AddPathToTrie(rest, value, trie[directory])
+
+
+def TestTimingsTrie(individual_test_timings):
+ """Breaks a test name into dicts by directory
+
+ foo/bar/baz.html: 1ms
+ foo/bar/baz1.html: 3ms
+
+ becomes
+ foo: {
+ bar: {
+ baz.html: 1,
+ baz1.html: 3
+ }
+ }
+ """
+ trie = {}
+ for test_result in individual_test_timings:
+ test = test_result.test_name
+
+ AddPathToTrie(test, int(1000 * test_result.test_run_time), trie)
+
+ return trie
+
+
+class TestResult(object):
+ """A simple class that represents a single test result."""
+
+ # Test modifier constants.
+ (NONE, FAILS, FLAKY, DISABLED) = list(range(4))
+
+ def __init__(self, test, failed=False, elapsed_time=0):
+ self.test_name = test
+ self.failed = failed
+ self.test_run_time = elapsed_time
+
+ test_name = test
+ try:
+ test_name = test.split('.')[1]
+ except IndexError:
+ _log.warn('Invalid test name: %s.', test)
+
+ if test_name.startswith('FAILS_'):
+ self.modifier = self.FAILS
+ elif test_name.startswith('FLAKY_'):
+ self.modifier = self.FLAKY
+ elif test_name.startswith('DISABLED_'):
+ self.modifier = self.DISABLED
+ else:
+ self.modifier = self.NONE
+
+ def Fixable(self):
+ return self.failed or self.modifier == self.DISABLED
+
+
+class JSONResultsGeneratorBase(object):
+ """A JSON results generator for generic tests."""
+
+ MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
+ # Min time (seconds) that will be added to the JSON.
+ MIN_TIME = 1
+
+ # Note that in non-chromium tests those chars are used to indicate
+ # test modifiers (FAILS, FLAKY, etc) but not actual test results.
+ PASS_RESULT = 'P'
+ SKIP_RESULT = 'X'
+ FAIL_RESULT = 'F'
+ FLAKY_RESULT = 'L'
+ NO_DATA_RESULT = 'N'
+
+ MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
+ TestResult.DISABLED: SKIP_RESULT,
+ TestResult.FAILS: FAIL_RESULT,
+ TestResult.FLAKY: FLAKY_RESULT}
+
+ VERSION = 4
+ VERSION_KEY = 'version'
+ RESULTS = 'results'
+ TIMES = 'times'
+ BUILD_NUMBERS = 'buildNumbers'
+ TIME = 'secondsSinceEpoch'
+ TESTS = 'tests'
+
+ FIXABLE_COUNT = 'fixableCount'
+ FIXABLE = 'fixableCounts'
+ ALL_FIXABLE_COUNT = 'allFixableCount'
+
+ RESULTS_FILENAME = 'results.json'
+ TIMES_MS_FILENAME = 'times_ms.json'
+ INCREMENTAL_RESULTS_FILENAME = 'incremental_results.json'
+
+ # line too long pylint: disable=line-too-long
+ URL_FOR_TEST_LIST_JSON = (
+ 'https://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&'
+ 'master=%s')
+ # pylint: enable=line-too-long
+
+ def __init__(self, builder_name, build_name, build_number,
+ results_file_base_path, builder_base_url,
+ test_results_map, svn_repositories=None,
+ test_results_server=None,
+ test_type='',
+ master_name=''):
+ """Modifies the results.json file. Grabs it off the archive directory
+ if it is not found locally.
+
+    Args:
+ builder_name: the builder name (e.g. Webkit).
+ build_name: the build name (e.g. webkit-rel).
+ build_number: the build number.
+ results_file_base_path: Absolute path to the directory containing the
+ results json file.
+ builder_base_url: the URL where we have the archived test results.
+ If this is None no archived results will be retrieved.
+ test_results_map: A dictionary that maps test_name to TestResult.
+ svn_repositories: A (json_field_name, svn_path) pair for SVN
+ repositories that tests rely on. The SVN revision will be
+ included in the JSON with the given json_field_name.
+ test_results_server: server that hosts test results json.
+ test_type: test type string (e.g. 'layout-tests').
+ master_name: the name of the buildbot master.
+ """
+ self._builder_name = builder_name
+ self._build_name = build_name
+ self._build_number = build_number
+ self._builder_base_url = builder_base_url
+ self._results_directory = results_file_base_path
+
+ self._test_results_map = test_results_map
+ self._test_results = list(test_results_map.values())
+
+ self._svn_repositories = svn_repositories
+ if not self._svn_repositories:
+ self._svn_repositories = {}
+
+ self._test_results_server = test_results_server
+ self._test_type = test_type
+ self._master_name = master_name
+
+ self._archived_results = None
+
+ def GenerateJSONOutput(self):
+ json_object = self.GetJSON()
+ if json_object:
+ file_path = (
+ os.path.join(
+ self._results_directory,
+ self.INCREMENTAL_RESULTS_FILENAME))
+ WriteJSON(json_object, file_path)
+
+ def GenerateTimesMSFile(self):
+ times = TestTimingsTrie(list(self._test_results_map.values()))
+ file_path = os.path.join(self._results_directory, self.TIMES_MS_FILENAME)
+ WriteJSON(times, file_path)
+
+ def GetJSON(self):
+ """Gets the results for the results.json file."""
+ results_json = {}
+
+ if not results_json:
+ results_json, error = self._GetArchivedJSONResults()
+ if error:
+ # If there was an error don't write a results.json
+ # file at all as it would lose all the information on the
+ # bot.
+ _log.error('Archive directory is inaccessible. Not '
+ 'modifying or clobbering the results.json '
+ 'file: ' + str(error))
+ return None
+
+ builder_name = self._builder_name
+ if results_json and builder_name not in results_json:
+ _log.debug('Builder name (%s) is not in the results.json file.',
+ builder_name)
+
+ self._ConvertJSONToCurrentVersion(results_json)
+
+ if builder_name not in results_json:
+ results_json[builder_name] = (
+ self._CreateResultsForBuilderJSON())
+
+ results_for_builder = results_json[builder_name]
+
+ if builder_name:
+ self._InsertGenericMetaData(results_for_builder)
+
+ self._InsertFailureSummaries(results_for_builder)
+
+ # Update the all failing tests with result type and time.
+ tests = results_for_builder[self.TESTS]
+ all_failing_tests = self._GetFailedTestNames()
+ all_failing_tests.update(ConvertTrieToFlatPaths(tests))
+
+ for test in all_failing_tests:
+ self._InsertTestTimeAndResult(test, tests)
+
+ return results_json
+
+ def SetArchivedResults(self, archived_results):
+ self._archived_results = archived_results
+
+ def UploadJSONFiles(self, json_files):
+ """Uploads the given json_files to the test_results_server (if the
+ test_results_server is given)."""
+ if not self._test_results_server:
+ return
+
+ if not self._master_name:
+ _log.error(
+ '--test-results-server was set, but --master-name was not. Not '
+ 'uploading JSON files.')
+ return
+
+ _log.info('Uploading JSON files for builder: %s', self._builder_name)
+ attrs = [('builder', self._builder_name),
+ ('testtype', self._test_type),
+ ('master', self._master_name)]
+
+ files = [(json_file, os.path.join(self._results_directory, json_file))
+ for json_file in json_files]
+
+ url = 'https://%s/testfile/upload' % self._test_results_server
+ # Set uploading timeout in case appengine server is having problems.
+ # 120 seconds are more than enough to upload test results.
+ uploader = _FileUploader(url, 120)
+ try:
+ response = uploader.UploadAsMultipartFormData(files, attrs)
+ if response:
+ if response.code == 200:
+ _log.info('JSON uploaded.')
+ else:
+ _log.debug(
+ "JSON upload failed, %d: '%s'", response.code, response.read())
+ else:
+ _log.error('JSON upload failed; no response returned')
+ except Exception as err: # pylint: disable=broad-except
+ _log.error('Upload failed: %s', err)
+ return
+
+ def _GetTestTiming(self, test_name):
+    """Returns test timing data (elapsed time) in seconds
+    for the given test_name."""
+ if test_name in self._test_results_map:
+ # Floor for now to get time in seconds.
+ return int(self._test_results_map[test_name].test_run_time)
+ return 0
+
+ def _GetFailedTestNames(self):
+ """Returns a set of failed test names."""
+ return set([r.test_name for r in self._test_results if r.failed])
+
+ def _GetModifierChar(self, test_name):
+ """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
+ PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
+ for the given test_name.
+ """
+ if test_name not in self._test_results_map:
+ return self.__class__.NO_DATA_RESULT
+
+ test_result = self._test_results_map[test_name]
+ if test_result.modifier in list(self.MODIFIER_TO_CHAR.keys()):
+ return self.MODIFIER_TO_CHAR[test_result.modifier]
+
+ return self.__class__.PASS_RESULT
+
+ def _get_result_char(self, test_name):
+ """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
+ PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
+ for the given test_name.
+ """
+ if test_name not in self._test_results_map:
+ return self.__class__.NO_DATA_RESULT
+
+ test_result = self._test_results_map[test_name]
+ if test_result.modifier == TestResult.DISABLED:
+ return self.__class__.SKIP_RESULT
+
+ if test_result.failed:
+ return self.__class__.FAIL_RESULT
+
+ return self.__class__.PASS_RESULT
+
+ def _GetSVNRevision(self, in_directory):
+ """Returns the svn revision for the given directory.
+
+ Args:
+ in_directory: The directory where svn is to be run.
+ """
+ # This is overridden in flakiness_dashboard_results_uploader.py.
+ raise NotImplementedError()
+
+ def _GetArchivedJSONResults(self):
+    """Downloads a JSON file containing only the test name list from the
+    test-results server. This is used to generate incremental JSON, so the
+    generated file retains info for tests that failed before but now pass
+    or are skipped in the current run.
+
+ Returns (archived_results, error) tuple where error is None if results
+ were successfully read.
+ """
+ results_json = {}
+ old_results = None
+ error = None
+
+ if not self._test_results_server:
+ return {}, None
+
+ results_file_url = (self.URL_FOR_TEST_LIST_JSON %
+ (quote(self._test_results_server),
+ quote(self._builder_name), self.RESULTS_FILENAME,
+ quote(self._test_type), quote(self._master_name)))
+
+ # pylint: disable=redefined-variable-type
+ try:
+ # FIXME: We should talk to the network via a Host object.
+ results_file = urlopen(results_file_url)
+ old_results = results_file.read()
+ except HTTPError as http_error:
+      # A non-4xx status code means the bot is hosed for some reason and we
+      # can't grab results.json. NOTE(review): '< 400 and >= 500' is always false; 'or' was likely intended.
+ if http_error.code < 400 and http_error.code >= 500:
+ error = http_error
+ except URLError as url_error:
+ error = url_error
+ # pylint: enable=redefined-variable-type
+
+ if old_results:
+ # Strip the prefix and suffix so we can get the actual JSON object.
+ old_results = StripJSONWrapper(old_results)
+
+ try:
+ results_json = json.loads(old_results)
+ except Exception: # pylint: disable=broad-except
+ _log.debug('results.json was not valid JSON. Clobbering.')
+ # The JSON file is not valid JSON. Just clobber the results.
+ results_json = {}
+ else:
+ _log.debug('Old JSON results do not exist. Starting fresh.')
+ results_json = {}
+
+ return results_json, error
+
+ def _InsertFailureSummaries(self, results_for_builder):
+ """Inserts aggregate pass/failure statistics into the JSON.
+ This method reads self._test_results and generates
+ FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
+
+ Args:
+ results_for_builder: Dictionary containing the test results for a
+ single builder.
+ """
+ # Insert the number of tests that failed or skipped.
+ fixable_count = len([r for r in self._test_results if r.Fixable()])
+ self._InsertItemIntoRawList(results_for_builder,
+ fixable_count, self.FIXABLE_COUNT)
+
+ # Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
+ entry = {}
+ for test_name in self._test_results_map.keys():
+ result_char = self._GetModifierChar(test_name)
+ entry[result_char] = entry.get(result_char, 0) + 1
+
+ # Insert the pass/skip/failure summary dictionary.
+ self._InsertItemIntoRawList(results_for_builder, entry,
+ self.FIXABLE)
+
+ # Insert the number of all the tests that are supposed to pass.
+ all_test_count = len(self._test_results)
+ self._InsertItemIntoRawList(results_for_builder,
+ all_test_count, self.ALL_FIXABLE_COUNT)
+
+ def _InsertItemIntoRawList(self, results_for_builder, item, key):
+ """Inserts the item into the list with the given key in the results for
+ this builder. Creates the list if no such list exists.
+
+ Args:
+ results_for_builder: Dictionary containing the test results for a
+ single builder.
+ item: Number or string to insert into the list.
+ key: Key in results_for_builder for the list to insert into.
+ """
+ if key in results_for_builder:
+ raw_list = results_for_builder[key]
+ else:
+ raw_list = []
+
+ raw_list.insert(0, item)
+ raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
+ results_for_builder[key] = raw_list
+
+ def _InsertItemRunLengthEncoded(self, item, encoded_results):
+ """Inserts the item into the run-length encoded results.
+
+ Args:
+ item: String or number to insert.
+ encoded_results: run-length encoded results. An array of arrays, e.g.
+ [[3,'A'],[1,'Q']] encodes AAAQ.
+ """
+ if len(encoded_results) and item == encoded_results[0][1]:
+ num_results = encoded_results[0][0]
+ if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+ encoded_results[0][0] = num_results + 1
+ else:
+ # Use a list instead of a class for the run-length encoding since
+ # we want the serialized form to be concise.
+ encoded_results.insert(0, [1, item])
+
+ def _InsertGenericMetaData(self, results_for_builder):
+ """ Inserts generic metadata (such as version number, current time etc)
+ into the JSON.
+
+ Args:
+ results_for_builder: Dictionary containing the test results for
+ a single builder.
+ """
+ self._InsertItemIntoRawList(results_for_builder,
+ self._build_number, self.BUILD_NUMBERS)
+
+ # Include SVN revisions for the given repositories.
+ for (name, path) in self._svn_repositories:
+ # Note: for JSON file's backward-compatibility we use 'chrome' rather
+ # than 'chromium' here.
+ lowercase_name = name.lower()
+ if lowercase_name == 'chromium':
+ lowercase_name = 'chrome'
+ self._InsertItemIntoRawList(results_for_builder,
+ self._GetSVNRevision(path),
+ lowercase_name + 'Revision')
+
+ self._InsertItemIntoRawList(results_for_builder,
+ int(time.time()),
+ self.TIME)
+
+ def _InsertTestTimeAndResult(self, test_name, tests):
+ """ Insert a test item with its results to the given tests dictionary.
+
+ Args:
+ tests: Dictionary containing test result entries.
+ """
+
+ result = self._get_result_char(test_name)
+ test_time = self._GetTestTiming(test_name)
+
+ this_test = tests
+ for segment in test_name.split('/'):
+ if segment not in this_test:
+ this_test[segment] = {}
+ this_test = this_test[segment]
+
+ if not len(this_test):
+ self._PopulateResultsAndTimesJSON(this_test)
+
+ if self.RESULTS in this_test:
+ self._InsertItemRunLengthEncoded(result, this_test[self.RESULTS])
+ else:
+ this_test[self.RESULTS] = [[1, result]]
+
+ if self.TIMES in this_test:
+ self._InsertItemRunLengthEncoded(test_time, this_test[self.TIMES])
+ else:
+ this_test[self.TIMES] = [[1, test_time]]
+
+ def _ConvertJSONToCurrentVersion(self, results_json):
+ """If the JSON does not match the current version, converts it to the
+ current version and adds in the new version number.
+ """
+ if self.VERSION_KEY in results_json:
+ archive_version = results_json[self.VERSION_KEY]
+ if archive_version == self.VERSION:
+ return
+ else:
+ archive_version = 3
+
+ # version 3->4
+ if archive_version == 3:
+ for results in list(results_json.values()):
+ self._ConvertTestsToTrie(results)
+
+ results_json[self.VERSION_KEY] = self.VERSION
+
+ def _ConvertTestsToTrie(self, results):
+ if not self.TESTS in results:
+ return
+
+ test_results = results[self.TESTS]
+ test_results_trie = {}
+ for test in test_results.keys():
+ single_test_result = test_results[test]
+ AddPathToTrie(test, single_test_result, test_results_trie)
+
+ results[self.TESTS] = test_results_trie
+
+ def _PopulateResultsAndTimesJSON(self, results_and_times):
+ results_and_times[self.RESULTS] = []
+ results_and_times[self.TIMES] = []
+ return results_and_times
+
+ def _CreateResultsForBuilderJSON(self):
+ results_for_builder = {}
+ results_for_builder[self.TESTS] = {}
+ return results_for_builder
+
+ def _RemoveItemsOverMaxNumberOfBuilds(self, encoded_list):
+ """Removes items from the run-length encoded list after the final
+ item that exceeds the max number of builds to track.
+
+ Args:
+ encoded_results: run-length encoded results. An array of arrays, e.g.
+ [[3,'A'],[1,'Q']] encodes AAAQ.
+ """
+ num_builds = 0
+ index = 0
+ for result in encoded_list:
+ num_builds = num_builds + result[0]
+ index = index + 1
+ if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+ return encoded_list[:index]
+ return encoded_list
+
  def _NormalizeResultsJSON(self, test, test_name, tests):
    """ Prune tests where all runs pass or tests that no longer exist and
    truncate all results to maxNumberOfBuilds.

    Args:
      test: ResultsAndTimes object for this test.
      test_name: Name of the test.
      tests: The JSON object with all the test results for this builder.
    """
    # Cap both run-length encoded histories at the configured maximum.
    test[self.RESULTS] = self._RemoveItemsOverMaxNumberOfBuilds(
        test[self.RESULTS])
    test[self.TIMES] = self._RemoveItemsOverMaxNumberOfBuilds(
        test[self.TIMES])

    is_all_pass = self._IsResultsAllOfType(test[self.RESULTS],
                                           self.PASS_RESULT)
    is_all_no_data = self._IsResultsAllOfType(test[self.RESULTS],
                                              self.NO_DATA_RESULT)
    # TIMES entries are [count, time] pairs; take the largest observed time.
    max_time = max([test_time[1] for test_time in test[self.TIMES]])

    # Remove all passes/no-data from the results to reduce noise and
    # filesize. If a test passes every run, but takes > MIN_TIME to run,
    # don't throw away the data.
    if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
      del tests[test_name]
+
+ # method could be a function pylint: disable=R0201
+ def _IsResultsAllOfType(self, results, result_type):
+ """Returns whether all the results are of the given type
+ (e.g. all passes)."""
+ return len(results) == 1 and results[0][1] == result_type
+
+
class _FileUploader(object):
  """Posts local files to |url| as multipart/form-data, retrying on error."""

  def __init__(self, url, timeout_seconds):
    self._url = url
    self._timeout_seconds = timeout_seconds

  def UploadAsMultipartFormData(self, files, attrs):
    """Uploads the given files together with regular form fields.

    Args:
      files: sequence of (filename, path) pairs; each path is read and sent.
      attrs: sequence of (name, value) regular form fields.

    Returns:
      The response object from urlopen, or None if all retries timed out.
    """
    file_objs = []
    for filename, path in files:
      # py3: the py2 file() builtin no longer exists; use open().
      with open(path, 'rb') as fp:
        file_objs.append(('file', filename, fp.read()))

    # FIXME: We should use the same variable names for the formal and actual
    # parameters.
    content_type, data = _EncodeMultipartFormData(attrs, file_objs)
    return self._UploadData(content_type, data)

  def _UploadData(self, content_type, data):
    """POSTs |data|, retrying every 10s on HTTP errors until timeout."""
    start = time.time()
    end = start + self._timeout_seconds
    while time.time() < end:
      try:
        request = Request(self._url, data, {'Content-Type': content_type})
        return urlopen(request)
      except HTTPError as e:
        # warning() is the non-deprecated spelling of Logger.warn().
        _log.warning("Received HTTP status %s loading \"%s\". "
                     'Retrying in 10 seconds...', e.code, e.filename)
        time.sleep(10)
+
+
+def _GetMIMEType(filename):
+ return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+
+
+# FIXME: Rather than taking tuples, this function should take more
+# structured data.
+def _EncodeMultipartFormData(fields, files):
+ """Encode form fields for multipart/form-data.
+
+ Args:
+ fields: A sequence of (name, value) elements for regular form fields.
+ files: A sequence of (name, filename, value) elements for data to be
+ uploaded as files.
+ Returns:
+ (content_type, body) ready for httplib.HTTP instance.
+
+ Source:
+ http://code.google.com/p/rietveld/source/browse/trunk/upload.py
+ """
+ BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+ CRLF = '\r\n'
+ lines = []
+
+ for key, value in fields:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"' % key)
+ lines.append('')
+ if isinstance(value, str):
+ value = value.encode('utf-8')
+ lines.append(value)
+
+ for key, filename, value in files:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"; '
+ 'filename="%s"' % (key, filename))
+ lines.append('Content-Type: %s' % _GetMIMEType(filename))
+ lines.append('')
+ if isinstance(value, str):
+ value = value.encode('utf-8')
+ lines.append(value)
+
+ lines.append('--' + BOUNDARY + '--')
+ lines.append('')
+ body = CRLF.join(lines)
+ content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
+ return content_type, body
diff --git a/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py
new file mode 100644
index 0000000000..70c808c71f
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py
@@ -0,0 +1,213 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+#
+# Most of this file was ported over from Blink's
+# webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
+#
+
+import unittest
+import json
+
+from pylib.results.flakiness_dashboard import json_results_generator
+
+
class JSONGeneratorTest(unittest.TestCase):
  """Tests for json_results_generator.JSONResultsGeneratorBase output."""

  def setUp(self):
    self.builder_name = 'DUMMY_BUILDER_NAME'
    self.build_name = 'DUMMY_BUILD_NAME'
    self.build_number = 'DUMMY_BUILDER_NUMBER'

    # For archived results.
    self._json = None
    self._num_runs = 0
    self._tests_set = set([])
    self._test_timings = {}
    self._failed_count_map = {}

    self._PASS_count = 0
    self._DISABLED_count = 0
    self._FLAKY_count = 0
    self._FAILS_count = 0
    self._fixable_count = 0

    # Stub out WriteJSON so tests never touch the filesystem.
    self._orig_write_json = json_results_generator.WriteJSON

    # unused arguments ... pylint: disable=W0613
    def _WriteJSONStub(json_object, file_path, callback=None):
      pass

    json_results_generator.WriteJSON = _WriteJSONStub

  def tearDown(self):
    json_results_generator.WriteJSON = self._orig_write_json

  def _TestJSONGeneration(self, passed_tests_list, failed_tests_list):
    """Generates JSON for the given test lists and verifies its contents."""
    tests_set = set(passed_tests_list) | set(failed_tests_list)

    DISABLED_tests = set([t for t in tests_set
                          if t.startswith('DISABLED_')])
    FLAKY_tests = set([t for t in tests_set
                       if t.startswith('FLAKY_')])
    FAILS_tests = set([t for t in tests_set
                       if t.startswith('FAILS_')])
    PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

    # DISABLED tests are never run, so they cannot count as failures.
    failed_tests = set(failed_tests_list) - DISABLED_tests
    failed_count_map = dict([(t, 1) for t in failed_tests])

    test_timings = {}
    i = 0
    for test in tests_set:
      test_timings[test] = float(self._num_runs * 100 + i)
      i += 1

    test_results_map = dict()
    for test in tests_set:
      test_results_map[test] = json_results_generator.TestResult(
          test, failed=(test in failed_tests),
          elapsed_time=test_timings[test])

    generator = json_results_generator.JSONResultsGeneratorBase(
        self.builder_name, self.build_name, self.build_number,
        '',
        None, # don't fetch past json results archive
        test_results_map)

    # (A second, identical computation of failed_count_map used to live
    # here; it was redundant and has been removed.)

    # Test incremental json results
    incremental_json = generator.GetJSON()
    self._VerifyJSONResults(
        tests_set,
        test_timings,
        failed_count_map,
        len(PASS_tests),
        len(DISABLED_tests),
        len(FLAKY_tests),
        len(DISABLED_tests | failed_tests),
        incremental_json,
        1)

    # We don't verify the results here, but at least we make sure the code
    # runs without errors.
    generator.GenerateJSONOutput()
    generator.GenerateTimesMSFile()

  def _VerifyJSONResults(self, tests_set, test_timings, failed_count_map,
                         PASS_count, DISABLED_count, FLAKY_count,
                         fixable_count, json_obj, num_runs):
    """Checks the generated JSON against the expected aggregate counts."""
    # Aliasing to a short name for better access to its constants.
    JRG = json_results_generator.JSONResultsGeneratorBase

    self.assertIn(JRG.VERSION_KEY, json_obj)
    self.assertIn(self.builder_name, json_obj)

    buildinfo = json_obj[self.builder_name]
    self.assertIn(JRG.FIXABLE, buildinfo)
    self.assertIn(JRG.TESTS, buildinfo)
    self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
    self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)

    if tests_set or DISABLED_count:
      # Sum the per-build fixable dicts into one result_type -> count map.
      fixable = {}
      for fixable_items in buildinfo[JRG.FIXABLE]:
        for (result_type, count) in fixable_items.items():
          if result_type in fixable:
            fixable[result_type] = fixable[result_type] + count
          else:
            fixable[result_type] = count

      if PASS_count:
        self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
      else:
        self.assertTrue(JRG.PASS_RESULT not in fixable or
                        fixable[JRG.PASS_RESULT] == 0)
      if DISABLED_count:
        self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
      else:
        self.assertTrue(JRG.SKIP_RESULT not in fixable or
                        fixable[JRG.SKIP_RESULT] == 0)
      if FLAKY_count:
        self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)
      else:
        self.assertTrue(JRG.FLAKY_RESULT not in fixable or
                        fixable[JRG.FLAKY_RESULT] == 0)

    if failed_count_map:
      tests = buildinfo[JRG.TESTS]
      for test_name in failed_count_map.keys():
        test = self._FindTestInTrie(test_name, tests)

        failed = 0
        for result in test[JRG.RESULTS]:
          if result[1] == JRG.FAIL_RESULT:
            failed += result[0]
        self.assertEqual(failed_count_map[test_name], failed)

        timing_count = 0
        for timings in test[JRG.TIMES]:
          if timings[1] == test_timings[test_name]:
            timing_count = timings[0]
        self.assertEqual(1, timing_count)

    if fixable_count:
      self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)

  def _FindTestInTrie(self, path, trie):
    """Descends the '/'-separated |path| into |trie|, asserting each hop."""
    nodes = path.split('/')
    sub_trie = trie
    for node in nodes:
      self.assertIn(node, sub_trie)
      sub_trie = sub_trie[node]
    return sub_trie

  def testJSONGeneration(self):
    self._TestJSONGeneration([], [])
    self._TestJSONGeneration(['A1', 'B1'], [])
    self._TestJSONGeneration([], ['FAILS_A2', 'FAILS_B2'])
    self._TestJSONGeneration(['DISABLED_A3', 'DISABLED_B3'], [])
    self._TestJSONGeneration(['A4'], ['B4', 'FAILS_C4'])
    self._TestJSONGeneration(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
    self._TestJSONGeneration(
        ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
        ['FAILS_D6'])

    # Generate JSON with the same test sets. (Both incremental results and
    # archived results must be updated appropriately.)
    self._TestJSONGeneration(
        ['A', 'FLAKY_B', 'DISABLED_C'],
        ['FAILS_D', 'FLAKY_E'])
    self._TestJSONGeneration(
        ['A', 'DISABLED_C', 'FLAKY_E'],
        ['FLAKY_B', 'FAILS_D'])
    self._TestJSONGeneration(
        ['FLAKY_B', 'DISABLED_C', 'FAILS_D'],
        ['A', 'FLAKY_E'])

  def testHierarchicalJSNGeneration(self):
    # FIXME: Re-work tests to be more comprehensible and comprehensive.
    self._TestJSONGeneration(['foo/A'], ['foo/B', 'bar/C'])

  def testTestTimingsTrie(self):
    individual_test_timings = []
    individual_test_timings.append(
        json_results_generator.TestResult(
            'foo/bar/baz.html',
            elapsed_time=1.2))
    individual_test_timings.append(
        json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
    trie = json_results_generator.TestTimingsTrie(individual_test_timings)

    # Times are reported in integer milliseconds, nested by path segment.
    expected_trie = {
        'bar.html': 0,
        'foo': {
            'bar': {
                'baz.html': 1200,
            }
        }
    }

    self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
diff --git a/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/results_uploader.py b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/results_uploader.py
new file mode 100644
index 0000000000..b68a898b7d
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/results_uploader.py
@@ -0,0 +1,176 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Uploads the results to the flakiness dashboard server."""
+# pylint: disable=E1002,R0201
+
+import logging
+import os
+import shutil
+import tempfile
+import xml
+
+
+from devil.utils import cmd_helper
+from pylib.constants import host_paths
+from pylib.results.flakiness_dashboard import json_results_generator
+from pylib.utils import repo_utils
+
+
+
class JSONResultsGenerator(json_results_generator.JSONResultsGeneratorBase):
  """Writes test results to a JSON file and handles uploading that file to
  the test results server.
  """
  def __init__(self, builder_name, build_name, build_number, tmp_folder,
               test_results_map, test_results_server, test_type, master_name):
    super(JSONResultsGenerator, self).__init__(
        builder_name=builder_name,
        build_name=build_name,
        build_number=build_number,
        results_file_base_path=tmp_folder,
        builder_base_url=None,
        test_results_map=test_results_map,
        svn_repositories=(('webkit', 'third_party/WebKit'),
                          ('chrome', '.')),
        test_results_server=test_results_server,
        test_type=test_type,
        master_name=master_name)

  #override
  def _GetModifierChar(self, test_name):
    """Returns the modifier recorded for |test_name|, or NO_DATA_RESULT."""
    if test_name not in self._test_results_map:
      return self.__class__.NO_DATA_RESULT

    return self._test_results_map[test_name].modifier

  #override
  def _GetSVNRevision(self, in_directory):
    """Returns the git/svn revision for the given directory.

    Args:
      in_directory: The directory relative to src.
    """
    # The module-level 'import xml' does not import these submodules, so
    # the svn fallback below would raise AttributeError without them.
    import xml.dom.minidom
    import xml.parsers.expat

    def _is_git_directory(in_directory):
      """Returns true if the given directory is in a git repository.

      Args:
        in_directory: The directory path to be tested.
      """
      if os.path.exists(os.path.join(in_directory, '.git')):
        return True
      parent = os.path.dirname(in_directory)
      if parent == host_paths.DIR_SOURCE_ROOT or parent == in_directory:
        return False
      return _is_git_directory(parent)

    in_directory = os.path.join(host_paths.DIR_SOURCE_ROOT, in_directory)

    if not os.path.exists(os.path.join(in_directory, '.svn')):
      if _is_git_directory(in_directory):
        return repo_utils.GetGitHeadSHA1(in_directory)
      return ''

    output = cmd_helper.GetCmdOutput(['svn', 'info', '--xml'], cwd=in_directory)
    try:
      dom = xml.dom.minidom.parseString(output)
      return dom.getElementsByTagName('entry')[0].getAttribute('revision')
    except xml.parsers.expat.ExpatError:
      # Unparseable svn output: no revision available.
      return ''
    # Note: an unreachable trailing 'return \'\'' after the try/except was
    # removed.
+
+
class ResultsUploader(object):
  """Handles uploading buildbot tests results to the flakiness dashboard."""
  def __init__(self, tests_type):
    self._build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
    self._master_name = os.environ.get('BUILDBOT_MASTERNAME')
    self._builder_name = os.environ.get('BUILDBOT_BUILDERNAME')
    self._tests_type = tests_type
    self._build_name = None

    if not self._build_number or not self._builder_name:
      # Fixed: the two literals used to concatenate without a space
      # ("...serverfrom your local machine.").
      raise Exception('You should not be uploading tests results to the '
                      'server from your local machine.')

    upstream = (tests_type != 'Chromium_Android_Instrumentation')
    if not upstream:
      self._build_name = 'chromium-android'
      buildbot_branch = os.environ.get('BUILDBOT_BRANCH')
      if not buildbot_branch:
        buildbot_branch = 'master'
      else:
        # Ensure there's no leading "origin/"
        buildbot_branch = buildbot_branch[buildbot_branch.find('/') + 1:]
      self._master_name = '%s-%s' % (self._build_name, buildbot_branch)

    self._test_results_map = {}

  def AddResults(self, test_results):
    """Converts |test_results| into TestResult objects keyed by test name."""
    # TODO(frankf): Differentiate between fail/crash/timeouts.
    conversion_map = [
        (test_results.GetPass(), False,
         json_results_generator.JSONResultsGeneratorBase.PASS_RESULT),
        (test_results.GetFail(), True,
         json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
        (test_results.GetCrash(), True,
         json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
        (test_results.GetTimeout(), True,
         json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
        (test_results.GetUnknown(), True,
         json_results_generator.JSONResultsGeneratorBase.NO_DATA_RESULT),
    ]

    for results_list, failed, modifier in conversion_map:
      for single_test_result in results_list:
        test_result = json_results_generator.TestResult(
            test=single_test_result.GetName(),
            failed=failed,
            # GetDuration() is in ms; TestResult takes seconds.
            elapsed_time=single_test_result.GetDuration() / 1000)
        # The WebKit TestResult object sets the modifier based on test name.
        # Since we don't use the same test naming convention as WebKit the
        # modifier will be wrong, so we need to overwrite it.
        test_result.modifier = modifier

        self._test_results_map[single_test_result.GetName()] = test_result

  def Upload(self, test_results_server):
    """Generates the JSON files in a temp dir and uploads them, best-effort.

    Args:
      test_results_server: server to which the JSON files are uploaded.
    """
    if not self._test_results_map:
      return

    tmp_folder = tempfile.mkdtemp()

    try:
      results_generator = JSONResultsGenerator(
          builder_name=self._builder_name,
          build_name=self._build_name,
          build_number=self._build_number,
          tmp_folder=tmp_folder,
          test_results_map=self._test_results_map,
          test_results_server=test_results_server,
          test_type=self._tests_type,
          master_name=self._master_name)

      json_files = ["incremental_results.json", "times_ms.json"]
      results_generator.GenerateJSONOutput()
      results_generator.GenerateTimesMSFile()
      results_generator.UploadJSONFiles(json_files)
    except Exception as e: # pylint: disable=broad-except
      # Uploading is best-effort; never fail the test run because of it.
      logging.error("Uploading results to test server failed: %s.", e)
    finally:
      shutil.rmtree(tmp_folder)
+
+
def Upload(results, flakiness_dashboard_server, test_type):
  """Reports test results to the flakiness dashboard for Chrome for Android.

  Args:
    results: test results.
    flakiness_dashboard_server: the server to upload the results to.
    test_type: the type of the tests (as displayed by the flakiness
        dashboard).
  """
  results_uploader = ResultsUploader(test_type)
  results_uploader.AddResults(results)
  results_uploader.Upload(flakiness_dashboard_server)
diff --git a/third_party/libwebrtc/build/android/pylib/results/json_results.py b/third_party/libwebrtc/build/android/pylib/results/json_results.py
new file mode 100644
index 0000000000..ed63c1540c
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/json_results.py
@@ -0,0 +1,239 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import collections
+import itertools
+import json
+import logging
+import time
+
+import six
+
+from pylib.base import base_test_result
+
def GenerateResultsDict(test_run_results, global_tags=None):
  """Create a results dict from |test_run_results| suitable for writing to JSON.
  Args:
    test_run_results: a list of base_test_result.TestRunResults objects.
    global_tags: optional list of strings to emit as "global_tags".
  Returns:
    A results dict that mirrors the one generated by
      base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON.
  """
  # Example json output.
  # {
  #   "global_tags": [],
  #   "all_tests": [
  #     "test1",
  #     "test2",
  #   ],
  #   "disabled_tests": [],
  #   "per_iteration_data": [
  #     {
  #       "test1": [
  #         {
  #           "status": "SUCCESS",
  #           "elapsed_time_ms": 1,
  #           "output_snippet": "",
  #           "output_snippet_base64": "",
  #           "losless_snippet": "",
  #         },
  #         ...
  #       ],
  #       "test2": [
  #         {
  #           "status": "FAILURE",
  #           "elapsed_time_ms": 12,
  #           "output_snippet": "",
  #           "output_snippet_base64": "",
  #           "losless_snippet": "",
  #         },
  #         ...
  #       ],
  #     },
  #     ...
  #   ],
  # }

  all_tests = set()
  per_iteration_data = []
  test_run_links = {}

  for test_run_result in test_run_results:
    iteration_data = collections.defaultdict(list)
    # An entry may itself be a list of TestRunResults (e.g. retries within
    # one iteration); flatten it into a single stream of results.
    if isinstance(test_run_result, list):
      results_iterable = itertools.chain(*(t.GetAll() for t in test_run_result))
      for tr in test_run_result:
        test_run_links.update(tr.GetLinks())

    else:
      results_iterable = test_run_result.GetAll()
      test_run_links.update(test_run_result.GetLinks())

    for r in results_iterable:
      snippet = r.GetLog()
      # Ensure text output: decode byte logs; str passes through unchanged.
      # (Replaces six.ensure_text; this file is Python 3 only.)
      if isinstance(snippet, bytes):
        snippet = snippet.decode('utf-8', errors='replace')
      result_dict = {
          'status': r.GetType(),
          'elapsed_time_ms': r.GetDuration(),
          'output_snippet': snippet,
          'losless_snippet': True,
          'output_snippet_base64': '',
          'links': r.GetLinks(),
      }
      iteration_data[r.GetName()].append(result_dict)

    all_tests = all_tests.union(set(iteration_data.keys()))
    per_iteration_data.append(iteration_data)

  return {
      'global_tags': global_tags or [],
      'all_tests': sorted(list(all_tests)),
      # TODO(jbudorick): Add support for disabled tests within base_test_result.
      'disabled_tests': [],
      'per_iteration_data': per_iteration_data,
      'links': test_run_links,
  }
+
+
def GenerateJsonTestResultFormatDict(test_run_results, interrupted):
  """Create a results dict from |test_run_results| suitable for writing to JSON.

  Args:
    test_run_results: a list of base_test_result.TestRunResults objects.
    interrupted: True if tests were interrupted, e.g. timeout listing tests
  Returns:
    A results dict that mirrors the standard JSON Test Results Format.
  """
  tests = {}
  counts = {'PASS': 0, 'FAIL': 0, 'SKIP': 0, 'CRASH': 0, 'TIMEOUT': 0}

  # Any result type not listed here is reported as a plain failure.
  status_names = {
      base_test_result.ResultType.PASS: 'PASS',
      base_test_result.ResultType.SKIP: 'SKIP',
      base_test_result.ResultType.CRASH: 'CRASH',
      base_test_result.ResultType.TIMEOUT: 'TIMEOUT',
  }

  for test_run_result in test_run_results:
    if isinstance(test_run_result, list):
      results_iterable = itertools.chain(*(t.GetAll() for t in test_run_result))
    else:
      results_iterable = test_run_result.GetAll()

    for r in results_iterable:
      # Descend (and build) the trie keyed on '.'-separated name segments.
      node = tests
      for key in r.GetName().split('.'):
        node = node.setdefault(key, {})

      node['expected'] = 'PASS'

      result = status_names.get(r.GetType(), 'FAIL')

      if 'actual' in node:
        node['actual'] += ' ' + result
      else:
        # Only the first run of a test contributes to the counts.
        counts[result] += 1
        node['actual'] = result
        if result == 'FAIL':
          node['is_unexpected'] = True

      if r.GetDuration() != 0:
        node['time'] = r.GetDuration()

  # Fill in required fields.
  return {
      'interrupted': interrupted,
      'num_failures_by_type': counts,
      'path_delimiter': '.',
      'seconds_since_epoch': time.time(),
      'tests': tests,
      'version': 3,
  }
+
+
def GenerateJsonResultsFile(test_run_result, file_path, global_tags=None,
                            **kwargs):
  """Write |test_run_result| to JSON.

  This emulates the format of the JSON emitted by
  base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON.

  Args:
    test_run_result: a base_test_result.TestRunResults object.
    file_path: The path to the JSON file to write.
    global_tags: optional list forwarded to GenerateResultsDict.
    **kwargs: extra keyword arguments forwarded to json.dumps.
  """
  with open(file_path, 'w') as json_result_file:
    results_dict = GenerateResultsDict(test_run_result,
                                       global_tags=global_tags)
    json_result_file.write(json.dumps(results_dict, **kwargs))
  logging.info('Generated json results file at %s', file_path)
+
+
def GenerateJsonTestResultFormatFile(test_run_result, interrupted, file_path,
                                     **kwargs):
  """Write |test_run_result| to JSON.

  This uses the official Chromium Test Results Format.

  Args:
    test_run_result: a base_test_result.TestRunResults object.
    interrupted: True if tests were interrupted, e.g. timeout listing tests
    file_path: The path to the JSON file to write.
    **kwargs: extra keyword arguments forwarded to json.dumps.
  """
  with open(file_path, 'w') as json_result_file:
    results_dict = GenerateJsonTestResultFormatDict(test_run_result,
                                                    interrupted)
    json_result_file.write(json.dumps(results_dict, **kwargs))
  logging.info('Generated json results file at %s', file_path)
+
+
def ParseResultsFromJson(json_results):
  """Creates a list of BaseTestResult objects from JSON.

  Args:
    json_results: A JSON dict in the format created by
                  GenerateJsonResultsFile.
  """

  def string_as_status(s):
    # Unknown status strings map to ResultType.UNKNOWN.
    if s in base_test_result.ResultType.GetTypes():
      return s
    return base_test_result.ResultType.UNKNOWN

  results_list = []
  for testsuite_run in json_results['per_iteration_data']:
    for test, test_runs in testsuite_run.items():
      for tr in test_runs:
        results_list.append(
            base_test_result.BaseTestResult(test,
                                            string_as_status(tr['status']),
                                            duration=tr['elapsed_time_ms'],
                                            log=tr.get('output_snippet')))
  return results_list
diff --git a/third_party/libwebrtc/build/android/pylib/results/json_results_test.py b/third_party/libwebrtc/build/android/pylib/results/json_results_test.py
new file mode 100755
index 0000000000..cb942e2898
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/json_results_test.py
@@ -0,0 +1,311 @@
+#!/usr/bin/env vpython3
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import unittest
+
+import six
+from pylib.base import base_test_result
+from pylib.results import json_results
+
+
+class JsonResultsTest(unittest.TestCase):
+
+ def testGenerateResultsDict_passedResult(self):
+ result = base_test_result.BaseTestResult(
+ 'test.package.TestName', base_test_result.ResultType.PASS)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict([all_results])
+ self.assertEqual(['test.package.TestName'], results_dict['all_tests'])
+ self.assertEqual(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+ self.assertEqual(1, len(iteration_result['test.package.TestName']))
+
+ test_iteration_result = iteration_result['test.package.TestName'][0]
+ self.assertTrue('status' in test_iteration_result)
+ self.assertEqual('SUCCESS', test_iteration_result['status'])
+
+ def testGenerateResultsDict_skippedResult(self):
+ result = base_test_result.BaseTestResult(
+ 'test.package.TestName', base_test_result.ResultType.SKIP)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict([all_results])
+ self.assertEqual(['test.package.TestName'], results_dict['all_tests'])
+ self.assertEqual(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+ self.assertEqual(1, len(iteration_result['test.package.TestName']))
+
+ test_iteration_result = iteration_result['test.package.TestName'][0]
+ self.assertTrue('status' in test_iteration_result)
+ self.assertEqual('SKIPPED', test_iteration_result['status'])
+
+ def testGenerateResultsDict_failedResult(self):
+ result = base_test_result.BaseTestResult(
+ 'test.package.TestName', base_test_result.ResultType.FAIL)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict([all_results])
+ self.assertEqual(['test.package.TestName'], results_dict['all_tests'])
+ self.assertEqual(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+ self.assertEqual(1, len(iteration_result['test.package.TestName']))
+
+ test_iteration_result = iteration_result['test.package.TestName'][0]
+ self.assertTrue('status' in test_iteration_result)
+ self.assertEqual('FAILURE', test_iteration_result['status'])
+
+ def testGenerateResultsDict_duration(self):
+ result = base_test_result.BaseTestResult(
+ 'test.package.TestName', base_test_result.ResultType.PASS, duration=123)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict([all_results])
+ self.assertEqual(['test.package.TestName'], results_dict['all_tests'])
+ self.assertEqual(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+ self.assertEqual(1, len(iteration_result['test.package.TestName']))
+
+ test_iteration_result = iteration_result['test.package.TestName'][0]
+ self.assertTrue('elapsed_time_ms' in test_iteration_result)
+ self.assertEqual(123, test_iteration_result['elapsed_time_ms'])
+
+ def testGenerateResultsDict_multipleResults(self):
+ result1 = base_test_result.BaseTestResult(
+ 'test.package.TestName1', base_test_result.ResultType.PASS)
+ result2 = base_test_result.BaseTestResult(
+ 'test.package.TestName2', base_test_result.ResultType.PASS)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result1)
+ all_results.AddResult(result2)
+
+ results_dict = json_results.GenerateResultsDict([all_results])
+ self.assertEqual(['test.package.TestName1', 'test.package.TestName2'],
+ results_dict['all_tests'])
+
+ self.assertTrue('per_iteration_data' in results_dict)
+ iterations = results_dict['per_iteration_data']
+ self.assertEqual(1, len(iterations))
+
+ expected_tests = set([
+ 'test.package.TestName1',
+ 'test.package.TestName2',
+ ])
+
+ for test_name, iteration_result in six.iteritems(iterations[0]):
+ self.assertTrue(test_name in expected_tests)
+ expected_tests.remove(test_name)
+ self.assertEqual(1, len(iteration_result))
+
+ test_iteration_result = iteration_result[0]
+ self.assertTrue('status' in test_iteration_result)
+ self.assertEqual('SUCCESS', test_iteration_result['status'])
+
+ def testGenerateResultsDict_passOnRetry(self):
+ raw_results = []
+
+ result1 = base_test_result.BaseTestResult(
+ 'test.package.TestName1', base_test_result.ResultType.FAIL)
+ run_results1 = base_test_result.TestRunResults()
+ run_results1.AddResult(result1)
+ raw_results.append(run_results1)
+
+ result2 = base_test_result.BaseTestResult(
+ 'test.package.TestName1', base_test_result.ResultType.PASS)
+ run_results2 = base_test_result.TestRunResults()
+ run_results2.AddResult(result2)
+ raw_results.append(run_results2)
+
+ results_dict = json_results.GenerateResultsDict([raw_results])
+ self.assertEqual(['test.package.TestName1'], results_dict['all_tests'])
+
+ # Check that there's only one iteration.
+ self.assertIn('per_iteration_data', results_dict)
+ iterations = results_dict['per_iteration_data']
+ self.assertEqual(1, len(iterations))
+
+ # Check that test.package.TestName1 is the only test in the iteration.
+ self.assertEqual(1, len(iterations[0]))
+ self.assertIn('test.package.TestName1', iterations[0])
+
+ # Check that there are two results for test.package.TestName1.
+ actual_test_results = iterations[0]['test.package.TestName1']
+ self.assertEqual(2, len(actual_test_results))
+
+ # Check that the first result is a failure.
+ self.assertIn('status', actual_test_results[0])
+ self.assertEqual('FAILURE', actual_test_results[0]['status'])
+
+ # Check that the second result is a success.
+ self.assertIn('status', actual_test_results[1])
+ self.assertEqual('SUCCESS', actual_test_results[1]['status'])
+
+ def testGenerateResultsDict_globalTags(self):
+ raw_results = []
+ global_tags = ['UNRELIABLE_RESULTS']
+
+ results_dict = json_results.GenerateResultsDict(
+ [raw_results], global_tags=global_tags)
+ self.assertEqual(['UNRELIABLE_RESULTS'], results_dict['global_tags'])
+
  def testGenerateResultsDict_loslessSnippet(self):
    # NOTE: 'losless_snippet' (sic) is the actual key emitted by
    # json_results; the misspelling is part of the JSON format and must not
    # be "fixed" here.
    result = base_test_result.BaseTestResult(
        'test.package.TestName', base_test_result.ResultType.FAIL)
    log = 'blah-blah'
    result.SetLog(log)

    all_results = base_test_result.TestRunResults()
    all_results.AddResult(result)

    results_dict = json_results.GenerateResultsDict([all_results])
    self.assertEqual(['test.package.TestName'], results_dict['all_tests'])
    self.assertEqual(1, len(results_dict['per_iteration_data']))

    iteration_result = results_dict['per_iteration_data'][0]
    self.assertTrue('test.package.TestName' in iteration_result)
    self.assertEqual(1, len(iteration_result['test.package.TestName']))

    test_iteration_result = iteration_result['test.package.TestName'][0]
    self.assertTrue('losless_snippet' in test_iteration_result)
    self.assertTrue(test_iteration_result['losless_snippet'])
    self.assertTrue('output_snippet' in test_iteration_result)
    self.assertEqual(log, test_iteration_result['output_snippet'])
    self.assertTrue('output_snippet_base64' in test_iteration_result)
    # The base64 field is empty here; presumably json_results only fills it
    # for lossy snippets — confirm against json_results.
    self.assertEqual('', test_iteration_result['output_snippet_base64'])
+
+ def testGenerateJsonTestResultFormatDict_passedResult(self):
+ result = base_test_result.BaseTestResult('test.package.TestName',
+ base_test_result.ResultType.PASS)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateJsonTestResultFormatDict([all_results],
+ False)
+ self.assertEqual(1, len(results_dict['tests']))
+ self.assertEqual(1, len(results_dict['tests']['test']))
+ self.assertEqual(1, len(results_dict['tests']['test']['package']))
+ self.assertEqual(
+ 'PASS',
+ results_dict['tests']['test']['package']['TestName']['expected'])
+ self.assertEqual(
+ 'PASS', results_dict['tests']['test']['package']['TestName']['actual'])
+
+ self.assertTrue('FAIL' not in results_dict['num_failures_by_type']
+ or results_dict['num_failures_by_type']['FAIL'] == 0)
+ self.assertIn('PASS', results_dict['num_failures_by_type'])
+ self.assertEqual(1, results_dict['num_failures_by_type']['PASS'])
+
  def testGenerateJsonTestResultFormatDict_failedResult(self):
    # A failing test keeps expected 'PASS' but reports actual 'FAIL' and is
    # flagged as unexpected.
    result = base_test_result.BaseTestResult('test.package.TestName',
                                             base_test_result.ResultType.FAIL)

    all_results = base_test_result.TestRunResults()
    all_results.AddResult(result)

    results_dict = json_results.GenerateJsonTestResultFormatDict([all_results],
                                                                 False)
    # The test name is split on '.' into a trie of single-key dicts.
    self.assertEqual(1, len(results_dict['tests']))
    self.assertEqual(1, len(results_dict['tests']['test']))
    self.assertEqual(1, len(results_dict['tests']['test']['package']))
    self.assertEqual(
        'PASS',
        results_dict['tests']['test']['package']['TestName']['expected'])
    self.assertEqual(
        'FAIL', results_dict['tests']['test']['package']['TestName']['actual'])
    self.assertEqual(
        True,
        results_dict['tests']['test']['package']['TestName']['is_unexpected'])

    self.assertTrue('PASS' not in results_dict['num_failures_by_type']
                    or results_dict['num_failures_by_type']['PASS'] == 0)
    self.assertIn('FAIL', results_dict['num_failures_by_type'])
    self.assertEqual(1, results_dict['num_failures_by_type']['FAIL'])
+
  def testGenerateJsonTestResultFormatDict_skippedResult(self):
    # A skipped test reports actual 'SKIP' and, unlike a failure, is not
    # marked as unexpected.
    result = base_test_result.BaseTestResult('test.package.TestName',
                                             base_test_result.ResultType.SKIP)

    all_results = base_test_result.TestRunResults()
    all_results.AddResult(result)

    results_dict = json_results.GenerateJsonTestResultFormatDict([all_results],
                                                                 False)
    self.assertEqual(1, len(results_dict['tests']))
    self.assertEqual(1, len(results_dict['tests']['test']))
    self.assertEqual(1, len(results_dict['tests']['test']['package']))
    self.assertEqual(
        'PASS',
        results_dict['tests']['test']['package']['TestName']['expected'])
    self.assertEqual(
        'SKIP', results_dict['tests']['test']['package']['TestName']['actual'])
    # Should only be set if the test fails.
    self.assertNotIn('is_unexpected',
                     results_dict['tests']['test']['package']['TestName'])

    self.assertTrue('FAIL' not in results_dict['num_failures_by_type']
                    or results_dict['num_failures_by_type']['FAIL'] == 0)
    self.assertTrue('PASS' not in results_dict['num_failures_by_type']
                    or results_dict['num_failures_by_type']['PASS'] == 0)
    self.assertIn('SKIP', results_dict['num_failures_by_type'])
    self.assertEqual(1, results_dict['num_failures_by_type']['SKIP'])
+
  def testGenerateJsonTestResultFormatDict_failedResultWithRetry(self):
    # Two FAIL runs of the same test, one per iteration.
    result_1 = base_test_result.BaseTestResult('test.package.TestName',
                                               base_test_result.ResultType.FAIL)
    run_results_1 = base_test_result.TestRunResults()
    run_results_1.AddResult(result_1)

    # Simulate a second retry with failure.
    result_2 = base_test_result.BaseTestResult('test.package.TestName',
                                               base_test_result.ResultType.FAIL)
    run_results_2 = base_test_result.TestRunResults()
    run_results_2.AddResult(result_2)

    all_results = [run_results_1, run_results_2]

    results_dict = json_results.GenerateJsonTestResultFormatDict(
        all_results, False)
    self.assertEqual(1, len(results_dict['tests']))
    self.assertEqual(1, len(results_dict['tests']['test']))
    self.assertEqual(1, len(results_dict['tests']['test']['package']))
    self.assertEqual(
        'PASS',
        results_dict['tests']['test']['package']['TestName']['expected'])
    # 'actual' lists one status per run, space-separated.
    self.assertEqual(
        'FAIL FAIL',
        results_dict['tests']['test']['package']['TestName']['actual'])
    self.assertEqual(
        True,
        results_dict['tests']['test']['package']['TestName']['is_unexpected'])

    self.assertTrue('PASS' not in results_dict['num_failures_by_type']
                    or results_dict['num_failures_by_type']['PASS'] == 0)
    # According to the spec: If a test was run more than once, only the first
    # invocation's result is included in the totals.
    self.assertIn('FAIL', results_dict['num_failures_by_type'])
    self.assertEqual(1, results_dict['num_failures_by_type']['FAIL'])
+
+
if __name__ == '__main__':
  # Verbosity 2 prints each test name as it runs.
  unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/__init__.py b/third_party/libwebrtc/build/android/pylib/results/presentation/__init__.py
new file mode 100644
index 0000000000..a22a6ee39a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/presentation/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/javascript/main_html.js b/third_party/libwebrtc/build/android/pylib/results/presentation/javascript/main_html.js
new file mode 100644
index 0000000000..3d94663e33
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/presentation/javascript/main_html.js
@@ -0,0 +1,193 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
function getArguments() {
  // Returns the URL query arguments as a dictionary of key -> value.
  // NOTE: values are not URI-decoded; callers receive the raw encoded text.
  // Bug fix: `args` was assigned without `var`, creating an implicit
  // global (and a ReferenceError in strict mode).
  var args = {};
  var s = location.search;
  if (s) {
    var vals = s.substring(1).split('&');
    for (var i = 0; i < vals.length; i++) {
      var pair = vals[i].split('=');
      args[pair[0]] = pair[1];
    }
  }
  return args;
}
+
function showSuiteTable(show_the_table) {
  // Toggles the suite-summary table between visible and hidden.
  var suiteTable = document.getElementById('suite-table');
  suiteTable.style.display = show_the_table ? 'table' : 'none';
}
+
function showTestTable(show_the_table) {
  // Toggles the per-test table between visible and hidden.
  var testTable = document.getElementById('test-table');
  testTable.style.display = show_the_table ? 'table' : 'none';
}
+
function showTestsOfOneSuiteOnly(suite_name) {
  // Shows only the test-table row blocks that belong to |suite_name|;
  // the special name 'TOTAL' shows every row block.
  setTitle('Test Results of Suite: ' + suite_name)
  // Bug fix: `show_all` was assigned without `var`, leaking an implicit
  // global (and a ReferenceError in strict mode).
  var show_all = (suite_name == 'TOTAL')
  var testTableBlocks = document.getElementById('test-table')
      .getElementsByClassName('row_block');
  Array.prototype.slice.call(testTableBlocks)
      .forEach(function(testTableBlock) {
        if (!show_all) {
          // The first cell of a block holds the test name; the block belongs
          // to the suite when that name starts with |suite_name|.
          var table_block_in_suite = (testTableBlock.firstElementChild
              .firstElementChild.firstElementChild.innerHTML)
              .startsWith(suite_name);
          if (!table_block_in_suite) {
            testTableBlock.style.display = 'none';
            return;
          }
        }
        testTableBlock.style.display = 'table-row-group';
      });
  showTestTable(true);
  showSuiteTable(false);
  window.scrollTo(0, 0);
}
+
function showTestsOfOneSuiteOnlyWithNewState(suiteName) {
  // Like showTestsOfOneSuiteOnly, but also records the navigation so the
  // browser back button can return to the suite summary.
  showTestsOfOneSuiteOnly(suiteName);
  history.pushState({suite: suiteName}, suiteName, '');
}
+
function showSuiteTableOnly() {
  // Switches to the suite-summary view and scrolls back to the top.
  setTitle('Suites Summary');
  showTestTable(false);
  showSuiteTable(true);
  window.scrollTo(0, 0);
}
+
function showSuiteTableOnlyWithReplaceState() {
  // Initial view: replace (not push) the history entry so 'back' leaves
  // the page rather than cycling views.
  showSuiteTableOnly();
  history.replaceState({}, 'suite_table', '');
}
+
function setBrowserBackButtonLogic() {
  // Restores whichever view matches the history entry being navigated to.
  window.onpopstate = function(event) {
    var state = event.state;
    if (state && state.suite) {
      showTestsOfOneSuiteOnly(state.suite);
    } else {
      showSuiteTableOnly();
    }
  };
}
+
function setTitle(title) {
  // The page's single <h2> header doubles as the view title.
  var header = document.getElementById('summary-header');
  header.textContent = title;
}
+
function sortByColumn(head) {
  // Sorts the table containing |head| by that column, toggling between
  // ascending and descending on repeated clicks, and updates the arrow
  // indicators. |head| is a <th> element inside the table's header row.
  var table = head.parentNode.parentNode.parentNode;
  var rowBlocks = Array.prototype.slice.call(
      table.getElementsByTagName('tbody'));

  // Determine whether to asc or desc and set arrows.
  // dataset.ascSorted is 1 (ascending), -1 (descending) or 0 (unsorted);
  // at most one header is non-zero at a time.
  var headers = head.parentNode.getElementsByTagName('th');
  var headIndex = Array.prototype.slice.call(headers).indexOf(head);
  var asc = -1;
  for (var i = 0; i < headers.length; i++) {
    if (headers[i].dataset.ascSorted != 0) {
      // Hide the arrow on the previously sorted column.
      if (headers[i].dataset.ascSorted == 1) {
        headers[i].getElementsByClassName('up')[0]
            .style.display = 'none';
      } else {
        headers[i].getElementsByClassName('down')[0]
            .style.display = 'none';
      }
      if (headers[i] == head) {
        // Re-clicking the sorted column reverses the direction.
        asc = headers[i].dataset.ascSorted * -1;
      } else {
        headers[i].dataset.ascSorted = 0;
      }
      break;
    }
  }
  headers[headIndex].dataset.ascSorted = asc;
  if (asc == 1) {
    headers[headIndex].getElementsByClassName('up')[0]
        .style.display = 'inline';
  } else {
    headers[headIndex].getElementsByClassName('down')[0]
        .style.display = 'inline';
  }

  // Sort the array by the specified column number (col) and order (asc).
  rowBlocks.sort(function (a, b) {
    // Hidden blocks (filtered out by the suite selection) sort first.
    if (a.style.display == 'none') {
      return -1;
    } else if (b.style.display == 'none') {
      return 1;
    }
    var a_rows = Array.prototype.slice.call(a.children);
    var b_rows = Array.prototype.slice.call(b.children);
    if (head.className == "text") {
      // If sorting by text, we only compare the entry on the first row.
      var aInnerHTML = a_rows[0].children[headIndex].innerHTML;
      var bInnerHTML = b_rows[0].children[headIndex].innerHTML;
      return (aInnerHTML == bInnerHTML) ? 0 : (
          (aInnerHTML > bInnerHTML) ? asc : -1 * asc);
    } else if (head.className == "number") {
      // If sorting by number, for example, duration,
      // we will sum up the durations of different test runs
      // for one specific test case and sort by the sum.
      // Rows after the first omit the rowspanned name cell, so their cells
      // are shifted one position left (hence headIndex - 1).
      var avalue = 0;
      var bvalue = 0;
      a_rows.forEach(function (row, i) {
        var index = (i > 0) ? headIndex - 1 : headIndex;
        avalue += Number(row.children[index].innerHTML);
      });
      b_rows.forEach(function (row, i) {
        var index = (i > 0) ? headIndex - 1 : headIndex;
        bvalue += Number(row.children[index].innerHTML);
      });
    } else if (head.className == "flaky") {
      // Flakiness = (#total - #success - #skipped) / (#total - #skipped)
      var a_success_or_skipped = 0;
      var a_skipped = 0;
      var b_success_or_skipped = 0;
      var b_skipped = 0;
      a_rows.forEach(function (row, i) {
        var index = (i > 0) ? headIndex - 1 : headIndex;
        var status = row.children[index].innerHTML.trim();
        if (status == 'SUCCESS') {
          a_success_or_skipped += 1;
        }
        if (status == 'SKIPPED') {
          a_success_or_skipped += 1;
          a_skipped += 1;
        }
      });
      b_rows.forEach(function (row, i) {
        var index = (i > 0) ? headIndex - 1 : headIndex;
        var status = row.children[index].innerHTML.trim();
        if (status == 'SUCCESS') {
          b_success_or_skipped += 1;
        }
        if (status == 'SKIPPED') {
          b_success_or_skipped += 1;
          b_skipped += 1;
        }
      });
      var atotal_minus_skipped = a_rows.length - a_skipped;
      var btotal_minus_skipped = b_rows.length - b_skipped;

      var avalue = ((atotal_minus_skipped == 0) ? -1 :
          (a_rows.length - a_success_or_skipped) / atotal_minus_skipped);
      var bvalue = ((btotal_minus_skipped == 0) ? -1 :
          (b_rows.length - b_success_or_skipped) / btotal_minus_skipped);
    }
    // NOTE(review): if head.className is none of 'text'/'number'/'flaky',
    // avalue/bvalue are undefined here and the comparator returns NaN —
    // presumably unreachable with the templates' classes; confirm.
    return asc * (avalue - bvalue);
  });

  for (var i = 0; i < rowBlocks.length; i++) {
    table.appendChild(rowBlocks[i]);
  }
}
+
function sortSuiteTableByFailedTestCases() {
  // Default ordering: suites with failures surface first.
  var failColumnHeader = document.getElementById('number_fail_tests');
  sortByColumn(failColumnHeader);
}
diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/standard_gtest_merge.py b/third_party/libwebrtc/build/android/pylib/results/presentation/standard_gtest_merge.py
new file mode 100755
index 0000000000..d458223abb
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/presentation/standard_gtest_merge.py
@@ -0,0 +1,173 @@
+#! /usr/bin/env python3
+#
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import argparse
+import json
+import os
+import sys
+
+
def merge_shard_results(summary_json, jsons_to_merge):
  """Reads JSON test output from all shards and combines them into one.

  Emits annotations to stderr for shards that died, expired or timed out.

  Args:
    summary_json: Path to the Swarming summary JSON describing all shards.
    jsons_to_merge: Paths to candidate per-shard output.json files.

  Returns:
    Dict with merged test output.

  Raises:
    Exception: If the summary file is missing or not valid JSON.
  """
  try:
    with open(summary_json) as f:
      summary = json.load(f)
  except (IOError, ValueError):
    raise Exception('Summary json cannot be loaded.')

  # Merge all JSON files together. Keep track of missing shards.
  merged = {
      'all_tests': set(),
      'disabled_tests': set(),
      'global_tags': set(),
      'missing_shards': [],
      'per_iteration_data': [],
      'swarming_summary': summary,
      'links': set()
  }
  for index, result in enumerate(summary['shards']):
    if result is None:
      merged['missing_shards'].append(index)
      continue

    # Author note: this code path doesn't trigger convert_to_old_format() in
    # client/swarming.py, which means the state enum is saved in its string
    # name form, not in the number form.
    state = result.get('state')
    if state == 'BOT_DIED':
      print(
          'Shard #%d had a Swarming internal failure' % index, file=sys.stderr)
    elif state == 'EXPIRED':
      print('There wasn\'t enough capacity to run your test', file=sys.stderr)
    elif state == 'TIMED_OUT':
      # Bug fix: the two fragments previously concatenated with no separator
      # ("...allocated timeEither it ran...").
      print('Test runtime exceeded allocated time. '
            'Either it ran for too long (hard timeout) or it didn\'t produce '
            'I/O for an extended period of time (I/O timeout)',
            file=sys.stderr)
    elif state != 'COMPLETED':
      print('Invalid Swarming task state: %s' % state, file=sys.stderr)

    json_data, err_msg = load_shard_json(index, result.get('task_id'),
                                         jsons_to_merge)
    if json_data:
      # Set-like fields. Bug fix: the [] default now lives *inside* get();
      # the original wrote `json_data.get(key), []`, passing [] as a second
      # iterable to set.update() and raising TypeError (update(None, []))
      # whenever a shard JSON lacked one of these keys.
      for key in ('all_tests', 'disabled_tests', 'global_tags', 'links'):
        merged[key].update(json_data.get(key, []))

      # 'per_iteration_data' is a list of dicts. Dicts should be merged
      # together, not the 'per_iteration_data' list itself.
      merged['per_iteration_data'] = merge_list_of_dicts(
          merged['per_iteration_data'], json_data.get('per_iteration_data', []))
    else:
      merged['missing_shards'].append(index)
      print('No result was found: %s' % err_msg, file=sys.stderr)

  # If some shards are missing, make it known. Continue parsing anyway. Step
  # should be red anyway, since swarming.py return non-zero exit code in that
  # case.
  if merged['missing_shards']:
    as_str = ', '.join([str(shard) for shard in merged['missing_shards']])
    print('some shards did not complete: %s' % as_str, file=sys.stderr)
    # Not all tests run, combined JSON summary can not be trusted.
    merged['global_tags'].add('UNRELIABLE_RESULTS')

  # Convert to jsonish dict: JSON has no set type, so sort sets into lists.
  for key in ('all_tests', 'disabled_tests', 'global_tags', 'links'):
    merged[key] = sorted(merged[key])
  return merged


# Refuse to load pathologically large shard outputs.
OUTPUT_JSON_SIZE_LIMIT = 100 * 1024 * 1024  # 100 MB


def load_shard_json(index, task_id, jsons_to_merge):
  """Reads JSON output of the specified shard.

  Args:
    index: The index of the shard to load data for, this is for old api.
    task_id: The directory of the shard to load data for, this is for new api.
    jsons_to_merge: Candidate output.json paths; the one whose parent
      directory is named str(index) or task_id is selected.

  Returns: A tuple containing:
    * The contents of path, deserialized into a python object.
    * An error string.
    (exactly one of the tuple elements will be non-None).
  """
  matching_json_files = [
      j for j in jsons_to_merge
      if (os.path.basename(j) == 'output.json' and
          (os.path.basename(os.path.dirname(j)) == str(index) or
           os.path.basename(os.path.dirname(j)) == task_id))]

  if not matching_json_files:
    print('shard %s test output missing' % index, file=sys.stderr)
    return (None, 'shard %s test output was missing' % index)
  elif len(matching_json_files) > 1:
    print('duplicate test output for shard %s' % index, file=sys.stderr)
    return (None, 'shard %s test output was duplicated' % index)

  path = matching_json_files[0]

  try:
    filesize = os.stat(path).st_size
    if filesize > OUTPUT_JSON_SIZE_LIMIT:
      print(
          'output.json is %d bytes. Max size is %d' % (filesize,
                                                       OUTPUT_JSON_SIZE_LIMIT),
          file=sys.stderr)
      return (None, 'shard %s test output exceeded the size limit' % index)

    with open(path) as f:
      return (json.load(f), None)
  except (IOError, ValueError, OSError) as e:
    print('Missing or invalid gtest JSON file: %s' % path, file=sys.stderr)
    print('%s: %s' % (type(e).__name__, e), file=sys.stderr)

  return (None, 'shard %s test output was missing or invalid' % index)


def merge_list_of_dicts(left, right):
  """Merges dicts left[0] with right[0], left[1] with right[1], etc.

  The shorter list is padded with empty dicts; on key collisions the value
  from |right| wins.
  """
  output = []
  for i in range(max(len(left), len(right))):
    left_dict = left[i] if i < len(left) else {}
    right_dict = right[i] if i < len(right) else {}
    merged_dict = left_dict.copy()
    merged_dict.update(right_dict)
    output.append(merged_dict)
  return output
+
+
def standard_gtest_merge(
    output_json, summary_json, jsons_to_merge):
  """Merges per-shard outputs and writes the combined JSON to |output_json|.

  Returns 0 (shell-style success) so main() can use it as an exit code.
  """
  output = merge_shard_results(summary_json, jsons_to_merge)
  # Bug fix: json.dump() writes str, so the file must be opened in text
  # mode; the original 'wb' raised TypeError under Python 3 (the shebang).
  with open(output_json, 'w') as f:
    json.dump(output, f)

  return 0


def main(raw_args):
  """Command-line entry point; returns a process exit code."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--summary-json')
  parser.add_argument('-o', '--output-json', required=True)
  parser.add_argument('jsons_to_merge', nargs='*')

  args = parser.parse_args(raw_args)

  return standard_gtest_merge(
      args.output_json, args.summary_json, args.jsons_to_merge)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/template/main.html b/third_party/libwebrtc/build/android/pylib/results/presentation/template/main.html
new file mode 100644
index 0000000000..e30d7d3f23
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/presentation/template/main.html
@@ -0,0 +1,93 @@
<!DOCTYPE html>
<html>
  <head>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
    <style>
      body {
        background-color: #fff;
        color: #333;
        font-family: Verdana, sans-serif;
        font-size: 10px;
        margin-left: 30px;
        margin-right: 30px;
        margin-top: 20px;
        margin-bottom: 50px;
        padding: 0;
      }
      table, th, td {
        border: 1px solid black;
        border-collapse: collapse;
        text-align: center;
      }
      table, td {
        padding: 0.1em 1em 0.1em 1em;
      }
      th {
        cursor: pointer;
        padding: 0.2em 1.5em 0.2em 1.5em;
      }
      table {
        width: 100%;
      }
      .center {
        text-align: center;
      }
      .left {
        text-align: left;
      }
      a {
        cursor: pointer;
        text-decoration: underline;
      }
      a:link,a:visited,a:active {
        color: #444;
      }
      .row_block:hover {
        background-color: #F6F6F6;
      }
      .skipped, .success, .failure {
        border-color: #000000;
      }
      .success {
        color: #000;
        background-color: #8d4;
      }
      .failure {
        color: #000;
        background-color: #e88;
      }
      .skipped {
        color: #000;
        background: #AADDEE;
      }
    </style>
    <script type="text/javascript">
      {% include "javascript/main_html.js" %}
    </script>
  </head>
  <body>
    <div>
      <h2 id="summary-header"></h2>
      {% for tb_value in tb_values %}
        {% include 'template/table.html' %}
      {% endfor %}
    </div>
    {% if feedback_url %}
    {# Bug fix: was an invalid `</br>` closing tag. #}
    <br />
    <a href="{{feedback_url}}" target="_blank"><b>Feedback</b></a>
    {%- endif %}
    <script>
      sortSuiteTableByFailedTestCases();
      showSuiteTableOnlyWithReplaceState();
      // Enable sorting for each column of tables.
      Array.prototype.slice.call(document.getElementsByTagName('th'))
          .forEach(function(head) {
            head.addEventListener(
                "click",
                function() { sortByColumn(head); });
          }
      );
      setBrowserBackButtonLogic();
    </script>
    {# Bug fix: </body> used to live inside the feedback conditional, so
       pages without a feedback_url never closed <body> and the script
       block sat outside it. It now closes unconditionally, after the
       script. #}
  </body>
</html>
diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/template/table.html b/third_party/libwebrtc/build/android/pylib/results/presentation/template/table.html
new file mode 100644
index 0000000000..4240043490
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/presentation/template/table.html
@@ -0,0 +1,60 @@
{# Renders one table from |tb_value|: a sortable header row, one <tbody>
   per row block (so all runs of one test / one suite stay together when
   sorting), and a summary footer. Included from main.html. #}
<table id="{{tb_value.table_id}}" style="display:none;">
  <thead class="heads">
    <tr>
      {% for cell in tb_value.table_headers -%}
      {# data-asc-sorted tracks the sort state consumed by sortByColumn(). #}
      <th class="{{cell.class}}" id="{{cell.data}}" data-asc-sorted=0>
        {{cell.data}}
        <span class="up" style="display:none;"> &#8593</span>
        <span class="down" style="display:none;"> &#8595</span>
      </th>
      {%- endfor %}
    </tr>
  </thead>
  {% for block in tb_value.table_row_blocks -%}
  <tbody class="row_block">
    {% for row in block -%}
    <tr class="{{tb_value.table_id}}-body-row">
      {% for cell in row -%}
      {% if cell.rowspan -%}
      <td rowspan="{{cell.rowspan}}" class="{{tb_value.table_id}}-body-column-{{loop.index0}} {{cell.class}}">
      {%- else -%}
      <td rowspan="1" class="{{tb_value.table_id}}-body-column-{{loop.index0}} {{cell.class}}">
      {%- endif %}
        {% if cell.cell_type == 'pre' -%}
          <pre>{{cell.data}}</pre>
        {%- elif cell.cell_type == 'links' -%}
          {% for link in cell.links -%}
            <a href="{{link.href}}" target="{{link.target}}">{{link.data}}</a>
            {% if not loop.last -%}
              <br />
            {%- endif %}
          {%- endfor %}
        {%- elif cell.cell_type == 'action' -%}
          <a onclick="{{cell.action}}">{{cell.data}}</a>
        {%- else -%}
          {{cell.data}}
        {%- endif %}
      </td>
      {%- endfor %}
    </tr>
    {%- endfor %}
  </tbody>
  {%- endfor %}
  {# Footer (e.g. the TOTAL row of the suite table), rendered in bold. #}
  <tfoot>
    <tr>
      {% for cell in tb_value.table_footer -%}
      <td class="{{tb_value.table_id}}-summary-column-{{loop.index0}} {{cell.class}}">
        {% if cell.cell_type == 'links' -%}
          {% for link in cell.links -%}
            <a href="{{link.href}}" target="{{link.target}}"><b>{{link.data}}</b></a>
          {%- endfor %}
        {%- elif cell.cell_type == 'action' -%}
          <a onclick="{{cell.action}}">{{cell.data}}</a>
        {%- else -%}
          <b>{{cell.data}}</b>
        {%- endif %}
      </td>
      {%- endfor %}
    </tr>
  </tfoot>
</table>
diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/test_results_presentation.py b/third_party/libwebrtc/build/android/pylib/results/presentation/test_results_presentation.py
new file mode 100755
index 0000000000..fc14b8bf03
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/presentation/test_results_presentation.py
@@ -0,0 +1,549 @@
+#!/usr/bin/env python3
+#
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+
+import argparse
+import collections
+import contextlib
+import json
+import logging
+import tempfile
+import os
+import sys
+try:
+ from urllib.parse import urlencode
+ from urllib.request import urlopen
+except ImportError:
+ from urllib import urlencode
+ from urllib2 import urlopen
+
+
+CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
+BASE_DIR = os.path.abspath(os.path.join(
+ CURRENT_DIR, '..', '..', '..', '..', '..'))
+
+sys.path.append(os.path.join(BASE_DIR, 'build', 'android'))
+from pylib.results.presentation import standard_gtest_merge
+from pylib.utils import google_storage_helper # pylint: disable=import-error
+
+sys.path.append(os.path.join(BASE_DIR, 'third_party'))
+import jinja2 # pylint: disable=import-error
+JINJA_ENVIRONMENT = jinja2.Environment(
+ loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
+ autoescape=True)
+
+
def cell(data, html_class='center'):
  """Builds a plain table-cell dict for the jinja template.

  Args:
    data: Cell contents.
    html_class: CSS class for the table cell.
  """
  return {'data': data, 'class': html_class}
+
+
def pre_cell(data, html_class='center'):
  """Builds a table-cell dict whose data renders inside a <pre> tag.

  Args:
    data: Cell contents (typically an output snippet).
    html_class: CSS class for the table cell.
  """
  return {'cell_type': 'pre', 'data': data, 'class': html_class}
+
+
class LinkTarget(object):
  """Values for the HTML <a> element's 'target' attribute."""
  # Opens the linked document in a new window or tab.
  NEW_TAB = '_blank'
  # Opens the linked document in the same frame as it was clicked.
  CURRENT_TAB = '_self'
+
+
def link(data, href, target=LinkTarget.CURRENT_TAB):
  """Formats <a> tag data for processing in jinja template.

  Args:
    data: String link appears as on HTML page.
    href: URL where link goes.
    target: Where link should be opened (e.g. current tab or new tab).
  """
  return {'data': data, 'href': href, 'target': target}
+
+
def links_cell(links, html_class='center', rowspan=None):
  """Formats a table cell holding one or more links for the jinja template.

  Args:
    links: List of link dictionaries. Use |link| function to generate them.
    html_class: Class for table cell.
    rowspan: Rowspan HTML attribute (None leaves the template default).
  """
  cell_dict = {'cell_type': 'links'}
  cell_dict['class'] = html_class
  cell_dict['links'] = links
  cell_dict['rowspan'] = rowspan
  return cell_dict
+
+
def action_cell(action, data, html_class):
  """Formats a table cell that runs a javascript action when clicked.

  Args:
    action: Javascript expression to run on click.
    data: Text shown in the cell.
    html_class: Class for table cell.
  """
  return {
      'cell_type': 'action',
      'action': action,
      'data': data,
      'class': html_class,
  }
+
+
def flakiness_dashbord_link(test_name, suite_name):
  """Returns the flakiness-dashboard URL for one test within a suite.

  (The 'dashbord' misspelling is kept — the name is this module's API.)
  """
  fragment = urlencode([('testType', suite_name), ('tests', test_name)])
  return ('https://test-results.appspot.com/'
          'dashboards/flakiness_dashboard.html#%s' % fragment)
+
+
def logs_cell(result, test_name, suite_name):
  """Builds the 'logs' cell: one link per result artifact plus a
  flakiness-dashboard link, all opening in a new tab."""
  # NOTE: when result already has a 'links' dict this mutates it in place
  # (get() returns the dict itself), matching the original behavior.
  result_links = result.get('links', {})
  result_links['flakiness'] = flakiness_dashbord_link(test_name, suite_name)
  link_list = [
      link(data=name, href=href, target=LinkTarget.NEW_TAB)
      for name, href in sorted(result_links.items())
  ]
  if link_list:
    return links_cell(link_list)
  return cell('(no logs)')
+
+
def code_search(test, cs_base_url):
  """Returns URL for test on codesearch."""
  # '#' separates class and method in instrumentation test names; the
  # codesearch query uses '.' instead.
  query = test.replace('#', '.')
  return '%s/search/?q=%s&type=cs' % (cs_base_url, query)
+
+
def status_class(status):
  """Maps a result status string to the CSS class(es) styling its cell.

  'success' and 'skipped' map to themselves (lowercased); anything else,
  including a missing status, is styled as a failure.
  """
  if not status:
    return 'failure unknown'
  lowered = status.lower()
  if lowered in ('success', 'skipped'):
    return lowered
  return 'failure %s' % lowered
+
+
def create_test_table(results_dict, cs_base_url, suite_name):
  """Format test data for injecting into HTML table.

  Args:
    results_dict: Mapping of test name -> list of result dicts (one per run).
    cs_base_url: Base URL of the codesearch instance for test-name links.
    suite_name: Suite name, used in the flakiness-dashboard links.

  Returns:
    A (header_row, test_row_blocks) tuple for the jinja table template.
  """

  header_row = [
    cell(data='test_name', html_class='text'),
    cell(data='status', html_class='flaky'),
    cell(data='elapsed_time_ms', html_class='number'),
    cell(data='logs', html_class='text'),
    cell(data='output_snippet', html_class='text'),
  ]

  test_row_blocks = []
  for test_name, test_results in results_dict.items():
    test_runs = []
    for index, result in enumerate(test_results):
      if index == 0:
        # Only the first run's row carries the test-name cell; it spans all
        # of the test's runs via rowspan.
        test_run = [links_cell(
            links=[
                link(href=code_search(test_name, cs_base_url),
                     target=LinkTarget.NEW_TAB,
                     data=test_name)],
            rowspan=len(test_results),
            html_class='left %s' % test_name
        )]  # test_name
      else:
        test_run = []

      test_run.extend([
          cell(data=result['status'] or 'UNKNOWN',
              # status
              html_class=('center %s' %
                 status_class(result['status']))),
          cell(data=result['elapsed_time_ms']),  # elapsed_time_ms
          logs_cell(result, test_name, suite_name),  # logs
          pre_cell(data=result['output_snippet'],  # output_snippet
                   html_class='left'),
      ])
      test_runs.append(test_run)
    test_row_blocks.append(test_runs)
  return header_row, test_row_blocks
+
+
def create_suite_table(results_dict):
  """Format test suite data for injecting into HTML table.

  Aggregates per-test results into one row per suite, plus a TOTAL footer.

  Args:
    results_dict: Mapping of test name -> list of result dicts.

  Returns:
    A (header_row, suite_row_blocks, footer_row) tuple for the template.
  """

  # Column indices within a suite row (index 0 is the suite-name cell).
  SUCCESS_COUNT_INDEX = 1
  FAIL_COUNT_INDEX = 2
  ALL_COUNT_INDEX = 3
  TIME_INDEX = 4

  header_row = [
    cell(data='suite_name', html_class='text'),
    cell(data='number_success_tests', html_class='number'),
    cell(data='number_fail_tests', html_class='number'),
    cell(data='all_tests', html_class='number'),
    cell(data='elapsed_time_ms', html_class='number'),
  ]

  footer_row = [
    action_cell(
          'showTestsOfOneSuiteOnlyWithNewState("TOTAL")',
          'TOTAL',
          'center'
        ),  # TOTAL
    cell(data=0),  # number_success_tests
    cell(data=0),  # number_fail_tests
    cell(data=0),  # all_tests
    cell(data=0),  # elapsed_time_ms
  ]

  suite_row_dict = {}
  for test_name, test_results in results_dict.items():
    # TODO(mikecase): This logic doesn't work if there are multiple test runs.
    # That is, if 'per_iteration_data' has multiple entries.
    # Since we only care about the result of the last test run.
    result = test_results[-1]

    # Suite is the part before '#' (instrumentation-style names) or, failing
    # that, before the first '.'.
    suite_name = (test_name.split('#')[0] if '#' in test_name
                  else test_name.split('.')[0])
    if suite_name in suite_row_dict:
      suite_row = suite_row_dict[suite_name]
    else:
      suite_row = [
        action_cell(
          'showTestsOfOneSuiteOnlyWithNewState("%s")' % suite_name,
          suite_name,
          'left'
        ),  # suite_name
        cell(data=0),  # number_success_tests
        cell(data=0),  # number_fail_tests
        cell(data=0),  # all_tests
        cell(data=0),  # elapsed_time_ms
      ]

    suite_row_dict[suite_name] = suite_row

    suite_row[ALL_COUNT_INDEX]['data'] += 1
    footer_row[ALL_COUNT_INDEX]['data'] += 1

    # SKIPPED counts toward neither success nor failure.
    if result['status'] == 'SUCCESS':
      suite_row[SUCCESS_COUNT_INDEX]['data'] += 1
      footer_row[SUCCESS_COUNT_INDEX]['data'] += 1
    elif result['status'] != 'SKIPPED':
      suite_row[FAIL_COUNT_INDEX]['data'] += 1
      footer_row[FAIL_COUNT_INDEX]['data'] += 1

    # Some types of crashes can have 'null' values for elapsed_time_ms.
    if result['elapsed_time_ms'] is not None:
      suite_row[TIME_INDEX]['data'] += result['elapsed_time_ms']
      footer_row[TIME_INDEX]['data'] += result['elapsed_time_ms']

  # Color the failure-count cells red/green once the totals are known.
  for suite in list(suite_row_dict.values()):
    if suite[FAIL_COUNT_INDEX]['data'] > 0:
      suite[FAIL_COUNT_INDEX]['class'] += ' failure'
    else:
      suite[FAIL_COUNT_INDEX]['class'] += ' success'

  if footer_row[FAIL_COUNT_INDEX]['data'] > 0:
    footer_row[FAIL_COUNT_INDEX]['class'] += ' failure'
  else:
    footer_row[FAIL_COUNT_INDEX]['class'] += ' success'

  return (header_row, [[suite_row]
                       for suite_row in list(suite_row_dict.values())],
          footer_row)
+
+
def feedback_url(result_details_link):
  """Builds a crbug.com 'new issue' URL pre-filled for results feedback.

  Args:
    result_details_link: URL of the uploaded results page, added to the
      issue comment when present; may be None.
  """
  params = [
      ('labels', 'Pri-2,Type-Bug,Restrict-View-Google'),
      ('summary', 'Result Details Feedback:'),
      ('components', 'Test>Android'),
  ]
  if result_details_link:
    params.append(('comment', 'Please check out: %s' % result_details_link))
  return 'https://bugs.chromium.org/p/chromium/issues/entry?%s' % urlencode(
      params)
+
+
def results_to_html(results_dict, cs_base_url, bucket, test_name,
                    builder_name, build_number, local_output):
  """Convert list of test results into html format.

  Args:
    results_dict: Mapping of test name -> list of result dicts.
    cs_base_url: Base URL of the codesearch instance for test links.
    bucket: Google Storage bucket for the page (unused when local_output).
    test_name: Name of the test suite.
    builder_name: Bot builder name, used in the Google Storage object name.
    build_number: Build number, used in the Google Storage object name.
    local_output: Whether this results file is uploaded to Google Storage or
      just a local file.

  Returns:
    (html, dest, result_details_link); dest and the link are None when
    local_output is True.
  """
  test_rows_header, test_rows = create_test_table(
      results_dict, cs_base_url, test_name)
  suite_rows_header, suite_rows, suite_row_footer = create_suite_table(
      results_dict)

  suite_table_values = {
      'table_id': 'suite-table',
      'table_headers': suite_rows_header,
      'table_row_blocks': suite_rows,
      'table_footer': suite_row_footer,
  }

  test_table_values = {
      'table_id': 'test-table',
      'table_headers': test_rows_header,
      'table_row_blocks': test_rows,
  }

  main_template = JINJA_ENVIRONMENT.get_template(
      os.path.join('template', 'main.html'))

  if local_output:
    html_render = main_template.render(  # pylint: disable=no-member
        {
            'tb_values': [suite_table_values, test_table_values],
            'feedback_url': feedback_url(None),
        })
    return (html_render, None, None)
  else:
    # The GS destination is chosen before rendering so the feedback link
    # can point at the page's eventual upload location.
    dest = google_storage_helper.unique_name(
        '%s_%s_%s' % (test_name, builder_name, build_number))
    result_details_link = google_storage_helper.get_url_link(
        dest, '%s/html' % bucket)
    html_render = main_template.render(  # pylint: disable=no-member
        {
            'tb_values': [suite_table_values, test_table_values],
            'feedback_url': feedback_url(result_details_link),
        })
    return (html_render, dest, result_details_link)
+
+
def result_details(json_path, test_name, cs_base_url, bucket=None,
                   builder_name=None, build_number=None, local_output=False):
  """Read a results json file and render its contents as html.

  Args:
    local_output: Whether this results file is uploaded to Google Storage or
      just a local file.
  """
  with open(json_path) as json_file:
    parsed = json.load(json_file)

  if 'per_iteration_data' not in parsed:
    return 'Error: json file missing per_iteration_data.'

  # Merge each test's runs across all iterations.
  merged_results = collections.defaultdict(list)
  for iteration in parsed['per_iteration_data']:
    for test, runs in iteration.items():
      merged_results[test].extend(runs)
  return results_to_html(merged_results, cs_base_url, bucket, test_name,
                         builder_name, build_number, local_output)
+
+
def upload_to_google_bucket(html, bucket, dest):
  """Upload rendered html to <bucket>/html/<dest> on Google Storage.

  Args:
    html: Page contents; must be bytes, since the temporary file is opened
      in (default) binary mode.
    bucket: Google Storage bucket name.
    dest: Object name within the bucket's html/ folder.
  Returns:
    The authenticated link returned by google_storage_helper.upload().
  """
  with tempfile.NamedTemporaryFile(suffix='.html') as temp_file:
    temp_file.write(html)
    # Flush so the uploader reads the full contents from temp_file.name.
    temp_file.flush()
    return google_storage_helper.upload(
        name=dest,
        filepath=temp_file.name,
        bucket='%s/html' % bucket,
        content_type='text/html',
        authenticated_link=True)
+
+
def ui_screenshot_set(json_path):
  """Collect all UI screenshots referenced by a results json file.

  Args:
    json_path: Path to a results json file with 'per_iteration_data'.
  Returns:
    A json string holding the merged list of screenshot entries, or None
    when the file has no per_iteration_data or no run links a screenshot.
  """
  with open(json_path) as json_file:
    json_object = json.loads(json_file.read())
  if not 'per_iteration_data' in json_object:
    # This will be reported as an error by result_details, no need to duplicate.
    return None
  ui_screenshots = []
  # pylint: disable=too-many-nested-blocks
  for testsuite_run in json_object['per_iteration_data']:
    for _, test_runs in testsuite_run.items():
      for test_run in test_runs:
        if 'ui screenshot' in test_run['links']:
          screenshot_link = test_run['links']['ui screenshot']
          if screenshot_link.startswith('file:'):
            with contextlib.closing(urlopen(screenshot_link)) as f:
              test_screenshots = json.load(f)
          else:
            # Assume anything that isn't a file link is a google storage link
            screenshot_string = google_storage_helper.read_from_link(
                screenshot_link)
            if not screenshot_string:
              # Best effort: skip unreadable links instead of failing.
              logging.error('Bad screenshot link %s', screenshot_link)
              continue
            test_screenshots = json.loads(
                screenshot_string)
          # NOTE(review): assumes each linked json holds a *list* of
          # screenshot entries — confirm against the producer side.
          ui_screenshots.extend(test_screenshots)
  # pylint: enable=too-many-nested-blocks

  if ui_screenshots:
    return json.dumps(ui_screenshots)
  return None
+
+
def upload_screenshot_set(json_path, test_name, bucket, builder_name,
                          build_number):
  """Upload the extracted UI screenshot set, if any, to Google Storage.

  Args:
    json_path: Path of the results json file.
    test_name: Name of the test the screenshots belong to.
    bucket: Google Storage bucket name.
    builder_name: Builder name, used to build a unique object name.
    build_number: Build number, used to build a unique object name.
  Returns:
    The authenticated link to the uploaded json, or None when the results
    contain no UI screenshots.
  """
  screenshot_set = ui_screenshot_set(json_path)
  if not screenshot_set:
    return None
  dest = google_storage_helper.unique_name(
      'screenshots_%s_%s_%s' % (test_name, builder_name, build_number),
      suffix='.json')
  with tempfile.NamedTemporaryFile(suffix='.json') as temp_file:
    # NamedTemporaryFile defaults to binary mode, but ui_screenshot_set()
    # returns a str (json.dumps output), so it must be encoded first --
    # writing the str directly raises TypeError on Python 3.
    temp_file.write(screenshot_set.encode('utf-8'))
    temp_file.flush()
    return google_storage_helper.upload(
        name=dest,
        filepath=temp_file.name,
        bucket='%s/json' % bucket,
        content_type='application/json',
        authenticated_link=True)
+
+
def main():
  """Parse shard results, generate the result details page, publish links.

  Supports both direct invocation on a single results json file (--json-file)
  and the Swarming merge script API (positional shard output.json files plus
  --output-json/--summary-json).
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--json-file', help='Path of json file.')
  parser.add_argument('--cs-base-url', help='Base url for code search.',
                      default='http://cs.chromium.org')
  parser.add_argument('--bucket', help='Google storage bucket.', required=True)
  parser.add_argument('--builder-name', help='Builder name.')
  parser.add_argument('--build-number', help='Build number.')
  parser.add_argument('--test-name', help='The name of the test.',
                      required=True)
  parser.add_argument(
      '-o', '--output-json',
      help='(Swarming Merge Script API) '
           'Output JSON file to create.')
  parser.add_argument(
      '--build-properties',
      help='(Swarming Merge Script API) '
           'Build property JSON file provided by recipes.')
  parser.add_argument(
      '--summary-json',
      help='(Swarming Merge Script API) '
           'Summary of shard state running on swarming. '
           '(Output of the swarming.py collect '
           '--task-summary-json=XXX command.)')
  parser.add_argument(
      '--task-output-dir',
      help='(Swarming Merge Script API) '
           'Directory containing all swarming task results.')
  parser.add_argument(
      'positional', nargs='*',
      help='output.json from shards.')

  args = parser.parse_args()

  # NOTE: parser.error() raises SystemExit itself, so it must not be wrapped
  # in 'raise' (its never-reached None return value is not an exception).
  if ((args.build_properties is None) ==
      (args.build_number is None or args.builder_name is None)):
    parser.error('Exactly one of build_properties or '
                 '(build_number or builder_name) should be given.')

  if (args.build_number is None) != (args.builder_name is None):
    parser.error('args.build_number and args.builder_name '
                 'have to be given together or not given at all.')

  if not args.positional and args.json_file is None:
    # Nothing to merge: emit an empty json object if one was requested.
    if args.output_json:
      with open(args.output_json, 'w') as f:
        json.dump({}, f)
    return
  elif args.positional and args.json_file:
    parser.error('Exactly one of args.positional and '
                 'args.json_file should be given.')

  if args.build_properties:
    build_properties = json.loads(args.build_properties)
    if ('buildnumber' not in build_properties or
        'buildername' not in build_properties):
      parser.error('Build number/builder name not specified.')
    build_number = build_properties['buildnumber']
    builder_name = build_properties['buildername']
  elif args.build_number and args.builder_name:
    build_number = args.build_number
    builder_name = args.builder_name

  if args.positional:
    if len(args.positional) == 1:
      json_file = args.positional[0]
    else:
      # Multiple shards: merge them into --output-json first.
      if args.output_json and args.summary_json:
        standard_gtest_merge.standard_gtest_merge(
            args.output_json, args.summary_json, args.positional)
        json_file = args.output_json
      elif not args.output_json:
        raise Exception('output_json required by merge API is missing.')
      else:
        raise Exception('summary_json required by merge API is missing.')
  elif args.json_file:
    json_file = args.json_file

  if not os.path.exists(json_file):
    raise IOError('--json-file %s not found.' % json_file)

  # Link to result details presentation page is a part of the page.
  result_html_string, dest, result_details_link = result_details(
      json_file, args.test_name, args.cs_base_url, args.bucket,
      builder_name, build_number)

  result_details_link_2 = upload_to_google_bucket(
      result_html_string.encode('UTF-8'),
      args.bucket, dest)
  assert result_details_link == result_details_link_2, (
      'Result details link do not match. The link returned by get_url_link'
      ' should be the same as that returned by upload.')

  ui_screenshot_set_link = upload_screenshot_set(json_file, args.test_name,
      args.bucket, builder_name, build_number)

  if ui_screenshot_set_link:
    ui_catalog_url = 'https://chrome-ui-catalog.appspot.com/'
    ui_catalog_query = urlencode({'screenshot_source': ui_screenshot_set_link})
    ui_screenshot_link = '%s?%s' % (ui_catalog_url, ui_catalog_query)

  if args.output_json:
    with open(json_file) as original_json_file:
      json_object = json.load(original_json_file)
      json_object['links'] = {
          'result_details (logcats, flakiness links)': result_details_link
      }

      if ui_screenshot_set_link:
        json_object['links']['ui screenshots'] = ui_screenshot_link

      with open(args.output_json, 'w') as f:
        json.dump(json_object, f)
  else:
    print('Result Details: %s' % result_details_link)

    if ui_screenshot_set_link:
      print('UI Screenshots %s' % ui_screenshot_link)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/third_party/libwebrtc/build/android/pylib/results/report_results.py b/third_party/libwebrtc/build/android/pylib/results/report_results.py
new file mode 100644
index 0000000000..56eefac46c
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/results/report_results.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module containing utility functions for reporting results."""
+
+from __future__ import print_function
+
+import logging
+import os
+import re
+
+from pylib import constants
+from pylib.results.flakiness_dashboard import results_uploader
+from pylib.utils import logging_utils
+
+
def _LogToFile(results, test_type, suite_name):
  """Log results to local files which can be used for aggregation later.

  Args:
    results: A TestRunResults-like object providing GetShortForm().
    test_type: Test type string; sanitized into the log file name.
    suite_name: Suite label for the log line (elided to 28 chars).
  """
  log_file_path = os.path.join(constants.GetOutDirectory(), 'test_logs')
  if not os.path.exists(log_file_path):
    os.mkdir(log_file_path)
  full_file_name = os.path.join(
      log_file_path, re.sub(r'\W', '_', test_type).lower() + '.log')
  if not os.path.exists(full_file_name):
    # New log file: start it with a header identifying the build.
    # (The duplicated 'Writing results' logging.info that used to live here
    # was removed; the one below covers both paths.)
    with open(full_file_name, 'w') as log_file:
      print(
          '\n%s results for %s build %s:' %
          (test_type, os.environ.get('BUILDBOT_BUILDERNAME'),
           os.environ.get('BUILDBOT_BUILDNUMBER')),
          file=log_file)

  logging.info('Writing results to %s.', full_file_name)
  with open(full_file_name, 'a') as log_file:
    # Pad/elide the suite name so the result column stays aligned.
    shortened_suite_name = suite_name[:25] + (suite_name[25:] and '...')
    print(
        '%s%s' % (shortened_suite_name.ljust(30), results.GetShortForm()),
        file=log_file)
+
+
def _LogToFlakinessDashboard(results, test_type, test_package,
                             flakiness_server):
  """Upload results to the flakiness dashboard.

  Args:
    results: Results object accepted by results_uploader.Upload.
    test_type: Test type string, e.g. 'instrumentation' or 'gtest'.
    test_package: Test package name; used to derive the dashboard test type.
    flakiness_server: URL of the flakiness dashboard server.
  """
  logging.info('Upload results for test type "%s", test package "%s" to %s',
               test_type, test_package, flakiness_server)

  try:
    # TODO(jbudorick): remove Instrumentation once instrumentation tests
    # switch to platform mode.
    if test_type in ('instrumentation', 'Instrumentation'):
      if flakiness_server == constants.UPSTREAM_FLAKINESS_SERVER:
        # Upstream uploads are restricted to this known suite set.
        assert test_package in ['ContentShellTest',
                                'ChromePublicTest',
                                'ChromeSyncShellTest',
                                'SystemWebViewShellLayoutTest',
                                'WebViewInstrumentationTest']
        # NOTE(review): rstrip('test') strips a trailing run of the
        # characters t/e/s, not the literal suffix; this happens to be
        # correct for the package names asserted above.
        dashboard_test_type = ('%s_instrumentation_tests' %
                               test_package.lower().rstrip('test'))
      # Downstream server.
      else:
        dashboard_test_type = 'Chromium_Android_Instrumentation'

    elif test_type == 'gtest':
      dashboard_test_type = test_package

    else:
      logging.warning('Invalid test type')
      return

    results_uploader.Upload(
        results, flakiness_server, dashboard_test_type)

  # Uploading is best-effort: a dashboard failure must not break the caller,
  # so log the exception and continue.
  except Exception: # pylint: disable=broad-except
    logging.exception('Failure while logging to %s', flakiness_server)
+
+
def LogFull(results, test_type, test_package, annotation=None,
            flakiness_server=None):
  """Log the tests results for the test suite.

  The results will be logged three different ways:
    1. Log to stdout.
    2. Log to local files for aggregating multiple test steps
       (on buildbots only).
    3. Log to flakiness dashboard (on buildbots only).

  Args:
    results: An instance of TestRunResults object.
    test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
    test_package: Test package name (e.g. 'ipc_tests' for gtests,
      'ContentShellTest' for instrumentation tests)
    annotation: If instrumentation test type, this is a list of annotations
      (e.g. ['Feature', 'SmallTest']).
    flakiness_server: If provided, upload the results to flakiness dashboard
      with this URL.
  """
  # pylint doesn't like how colorama set up its color enums.
  # pylint: disable=no-member
  # Default rendering: black text on a white background.
  black_on_white = (logging_utils.BACK.WHITE, logging_utils.FORE.BLACK)
  with logging_utils.OverrideColor(logging.CRITICAL, black_on_white):
    if not results.DidRunPass():
      # Dump the detailed logs only when something failed.
      logging.critical('*' * 80)
      logging.critical('Detailed Logs')
      logging.critical('*' * 80)
      for line in results.GetLogs().splitlines():
        logging.critical(line)
    logging.critical('*' * 80)
    logging.critical('Summary')
    logging.critical('*' * 80)
    for line in results.GetGtestForm().splitlines():
      # Recolor individual summary lines by their pass/fail marker.
      color = black_on_white
      if 'FAILED' in line:
        # Red on white, dim.
        color = (logging_utils.BACK.WHITE, logging_utils.FORE.RED,
                 logging_utils.STYLE.DIM)
      elif 'PASSED' in line:
        # Green on white, dim.
        color = (logging_utils.BACK.WHITE, logging_utils.FORE.GREEN,
                 logging_utils.STYLE.DIM)
      with logging_utils.OverrideColor(logging.CRITICAL, color):
        logging.critical(line)
    logging.critical('*' * 80)

  if os.environ.get('BUILDBOT_BUILDERNAME'):
    # It is possible to have multiple buildbot steps for the same
    # instrumentation test package using different annotations.
    if annotation and len(annotation) == 1:
      suite_name = annotation[0]
    else:
      suite_name = test_package
    _LogToFile(results, test_type, suite_name)

    if flakiness_server:
      _LogToFlakinessDashboard(results, test_type, test_package,
                               flakiness_server)
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/__init__.py b/third_party/libwebrtc/build/android/pylib/symbols/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/__init__.py
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/apk_lib_dump.py b/third_party/libwebrtc/build/android/pylib/symbols/apk_lib_dump.py
new file mode 100755
index 0000000000..f40c7581fc
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/apk_lib_dump.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Dump shared library information from an APK file.
+
+This script is used to dump which *uncompressed* native shared libraries an
+APK contains, as well as their position within the file. This is mostly useful
+to diagnose logcat and tombstone symbolization issues when the libraries are
+loaded directly from the APK at runtime.
+
+The default format will print one line per uncompressed shared library with the
+following format:
+
+ 0x<start-offset> 0x<end-offset> 0x<file-size> <file-path>
+
+The --format=python option can be used to dump the same information that is
+easy to use in a Python script, e.g. with a line like:
+
+ (0x<start-offset>, 0x<end-offset>, 0x<file-size>, <file-path>),
+"""
+
+
+
+import argparse
+import os
+import sys
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+
+from pylib.symbols import apk_native_libs
+
def main():
  """Parse arguments and dump the APK's uncompressed native libraries."""
  parser = argparse.ArgumentParser(
      description=__doc__,
      formatter_class=argparse.RawDescriptionHelpFormatter)
  parser.add_argument('apk', help='Input APK file path.')
  parser.add_argument('--format', help='Select output format',
                      default='default', choices=['default', 'python'])
  args = parser.parse_args()

  reader = apk_native_libs.ApkReader(args.apk)
  libraries = apk_native_libs.ApkNativeLibraries(reader)
  python_format = args.format == 'python'
  # One line per uncompressed library: start, end, size, path.
  for path, start, size in libraries.GetDumpList():
    end = start + size
    if python_format:
      print('(0x%08x, 0x%08x, 0x%08x, \'%s\'),' % (start, end, size, path))
    else:
      print('0x%08x 0x%08x 0x%08x %s' % (start, end, size, path))

  return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/apk_native_libs.py b/third_party/libwebrtc/build/android/pylib/symbols/apk_native_libs.py
new file mode 100644
index 0000000000..59b303990b
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/apk_native_libs.py
@@ -0,0 +1,419 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import struct
+import zipfile
+
+# The default zipfile python module cannot open APKs properly, but this
+# fixes it. Note that simply importing this file is sufficient to
+# ensure that zip works correctly for all other modules. See:
+# http://bugs.python.org/issue14315
+# https://hg.python.org/cpython/rev/6dd5e9556a60#l2.8
def _PatchZipFile():
  """Monkey-patch ZipInfo._decodeExtra to tolerate malformed extra fields.

  APKs often carry extra-field padding that makes the stock parser raise
  struct.error; the patched version swallows exactly that error so such
  archives can still be opened (see the issue links above).
  NOTE(review): this touches a private CPython API (_decodeExtra) — confirm
  it still exists with this signature on all supported Python versions.
  """
  # pylint: disable=protected-access
  oldDecodeExtra = zipfile.ZipInfo._decodeExtra
  def decodeExtra(self):
    try:
      oldDecodeExtra(self)
    except struct.error:
      # Ignore undecodable extra fields instead of failing the whole open.
      pass
  zipfile.ZipInfo._decodeExtra = decodeExtra
# Apply the patch at import time so every user of this module benefits.
_PatchZipFile()
+
+
class ApkZipInfo(object):
  """Models a single file entry from an ApkReader.

  This is very similar to the zipfile.ZipInfo class. It provides a few
  properties describing the entry:
    - filename (same as ZipInfo.filename)
    - file_size (same as ZipInfo.file_size)
    - compress_size (same as ZipInfo.compress_size)
    - file_offset (note: not provided by ZipInfo)

  And a few useful methods: IsCompressed() and IsElfFile().

  Entries can be created by using ApkReader() methods.
  """
  def __init__(self, zip_file, zip_info):
    """Construct instance. Do not call this directly. Use ApkReader methods."""
    self._file = zip_file
    self._info = zip_info
    # Lazily computed by the file_offset property.
    self._file_offset = None

  @property
  def filename(self):
    """Entry's file path within APK."""
    return self._info.filename

  @property
  def file_size(self):
    """Entry's extracted file size in bytes."""
    return self._info.file_size

  @property
  def compress_size(self):
    """Entry's compressed file size in bytes."""
    return self._info.compress_size

  @property
  def file_offset(self):
    """Entry's starting file offset in the APK."""
    if self._file_offset is None:
      self._file_offset = self._ZipFileOffsetFromLocalHeader(
          self._file.fp, self._info.header_offset)
    return self._file_offset

  def __repr__(self):
    """Convert to string for debugging."""
    return 'ApkZipInfo["%s",size=0x%x,compressed=0x%x,offset=0x%x]' % (
        self.filename, self.file_size, self.compress_size, self.file_offset)

  def IsCompressed(self):
    """Returns True iff the entry is compressed."""
    return self._info.compress_type != zipfile.ZIP_STORED

  def IsElfFile(self):
    """Returns True iff the entry is an ELF file."""
    with self._file.open(self._info, 'r') as f:
      # ZipFile.open() yields binary data, so the ELF magic must be compared
      # against bytes: comparing against a str is always False on Python 3,
      # which made this method always return False.
      return f.read(4) == b'\x7fELF'

  @staticmethod
  def _ZipFileOffsetFromLocalHeader(fd, local_header_offset):
    """Return a file's start offset from its zip archive local header.

    Args:
      fd: Input file object.
      local_header_offset: Local header offset (from its ZipInfo entry).
    Returns:
      file start offset.
    """
    FILE_NAME_LEN_OFFSET = 26
    FILE_NAME_OFFSET = 30
    fd.seek(local_header_offset + FILE_NAME_LEN_OFFSET)
    # Zip headers are always little-endian; '<H' makes that explicit instead
    # of relying on the host's native byte order.
    file_name_len = struct.unpack('<H', fd.read(2))[0]
    extra_field_len = struct.unpack('<H', fd.read(2))[0]
    file_offset = (local_header_offset + FILE_NAME_OFFSET +
                   file_name_len + extra_field_len)
    return file_offset
+
+
class ApkReader(object):
  """A convenience class used to read the content of APK files.

  It behaves much like zipfile.ZipFile, but its entry accessors return
  ApkZipInfo objects, which expose a |file_offset| property telling where
  each file starts inside the archive.

  It is also easy to mock for unit-testing (see MockApkReader in
  apk_utils_unittest.py) without creating any files on disk.

  Typical usage:
    with ApkReader(input_apk_path) as reader:
      info = reader.FindEntry('lib/armeabi-v7a/libfoo.so')
      ...
  or call ListEntries() to enumerate every entry in the archive. See
  ApkZipInfo for the per-entry metadata and methods.
  """
  def __init__(self, apk_path):
    """Open |apk_path| for reading."""
    self._zip_file = zipfile.ZipFile(apk_path, 'r')
    self._path = apk_path

  def __enter__(self):
    """Python context manager entry."""
    return self

  def __exit__(self, *_args):
    """Python context manager exit."""
    self.Close()

  @property
  def path(self):
    """The corresponding input APK path."""
    return self._path

  def Close(self):
    """Close the reader (and underlying ZipFile instance)."""
    self._zip_file.close()

  def ListEntries(self):
    """Return a list of ApkZipInfo entries for this APK."""
    return [ApkZipInfo(self._zip_file, info)
            for info in self._zip_file.infolist()]

  def FindEntry(self, file_path):
    """Return an ApkZipInfo instance for a given archive file path.

    Args:
      file_path: zip file path.
    Return:
      A new ApkZipInfo entry on success.
    Raises:
      KeyError on failure (entry not found).
    """
    return ApkZipInfo(self._zip_file, self._zip_file.getinfo(file_path))
+
+
+
class ApkNativeLibraries(object):
  """Holds the list of uncompressed shared libraries inside an APK.

  Build an instance from an ApkReader, then use FindLibraryByOffset() to map
  a file offset back to the native shared library that contains it.
  GetLibraries() and GetDumpList() expose the collected list directly.
  """
  def __init__(self, apk_reader):
    """Initialize instance.

    Args:
      apk_reader: An ApkReader instance corresponding to the input APK.
    """
    self._native_libs = []
    for lib in apk_reader.ListEntries():
      # Skip zero-length 'placeholder' libraries: Chromium only ships them
      # to work around bugs in older Android releases; they are never loaded
      # and cannot appear in stack traces.
      if lib.file_size == 0:
        continue
      # Compressed entries cannot appear in stack traces either.
      if lib.IsCompressed():
        continue
      # Restrict to lib/*.so entries. A 'lib' basename prefix is deliberately
      # NOT required, so Chromium's 'crazy.libXXX.so' libraries are found.
      if not (lib.filename.startswith('lib/') and
              lib.filename.endswith('.so')):
        continue
      start = lib.file_offset
      self._native_libs.append((lib.filename, start, start + lib.file_size))

  def IsEmpty(self):
    """Return true iff the list is empty."""
    return len(self._native_libs) == 0

  def GetLibraries(self):
    """Return the sorted list of all library paths in this instance."""
    return sorted(path for path, _, _ in self._native_libs)

  def GetDumpList(self):
    """Retrieve full library map.

    Returns:
      A list of (lib_path, file_offset, file_size) tuples, sorted
      in increasing |file_offset| values.
    """
    entries = [(path, start, end - start)
               for path, start, end in self._native_libs]
    return sorted(entries, key=lambda item: item[1])

  def FindLibraryByOffset(self, file_offset):
    """Find the native library at a given file offset.

    Args:
      file_offset: File offset within the original APK.
    Returns:
      A (lib_path, lib_offset) tuple on success, or (None, 0) on failure.
      lib_offset is the adjustment of file_offset within the library.
    """
    for path, start, end in self._native_libs:
      if start <= file_offset < end:
        return (path, file_offset - start)
    return (None, 0)
+
+
class ApkLibraryPathTranslator(object):
  """Translates APK file paths + byte offsets into library path + offset.

  Given a native library path that actually points at an installed APK,
  e.g. ('/data/data/com.example.app-1/base.apk', 0x123be00), produce a
  virtual device path naming the uncompressed library found at that offset,
  e.g. ('/data/data/com.example.app-1/base.apk!lib/libfoo.so', 0x3be00).

  The virtual path after the '!' intentionally drops the per-ABI directory:
  later symbolization steps only use the library's base name to locate the
  matching host file.

  Usage:
    1/ Create an instance.
    2/ Call AddHostApk() for each APK of interest (package name, its
       ApkNativeLibraries, and optionally the installed APK name).
    3/ Call TranslatePath() on (path, offset) pairs from the device.
  """

  # Installed (non-system) APKs appear on the device either as
  #   /data/..../<package_name>-<number>.apk        (older releases), or
  #   /data/..../<package_name>-<suffix>/<name>.apk (newer releases, where
  #   <suffix> is a random string and <name> is 'base.apk' or an app-bundle
  #   split name).
  # System APKs (/system/app/Foo.apk) are intentionally ignored.

  # Compiled regular expression for the first format above.
  _RE_APK_PATH_1 = re.compile(
      r'/data/.*/(?P<package_name>[A-Za-z0-9_.]+)-(?P<version>[0-9]+)\.apk')

  # Compiled regular expression for the second format above.
  _RE_APK_PATH_2 = re.compile(
      r'/data/.*/(?P<package_name>[A-Za-z0-9_.]+)-(?P<suffix>[^/]+)/' +
      r'(?P<apk_name>.+\.apk)')

  def __init__(self):
    """Initialize instance. Call AddHostApk() to add host apk file paths."""
    # Maps (package_name, apk_name) to host-side APK path.
    # NOTE(review): currently unused by any method in this class.
    self._path_map = {}
    # Maps 'package/apk_name' keys to ApkNativeLibraries instances.
    self._libs_map = {}

  def AddHostApk(self, package_name, native_libs, device_apk_name=None):
    """Add a file path to the host APK search list.

    Args:
      package_name: Corresponding apk package name.
      native_libs: ApkNativeLibraries instance for the corresponding APK.
      device_apk_name: Optional expected name of the installed APK on the
        device. Only meaningful for app bundles on Android L+; ignored
        otherwise.
    Raises:
      KeyError: when an APK was already registered under the same key.
    """
    if native_libs.IsEmpty():
      logging.debug('Ignoring host APK without any uncompressed native '
                    'libraries: %s', device_apk_name)
      return

    # Default to 'base.apk', the name the system gives installed APKs on
    # Android L+; ignored whenever <package>-<number>.apk paths are matched.
    key = "%s/%s" % (package_name, device_apk_name or 'base.apk')
    if key in self._libs_map:
      raise KeyError('There is already an APK associated with (%s)' % key)

    self._libs_map[key] = native_libs

  @staticmethod
  def _MatchApkDeviceInstallPath(apk_path):
    """Check whether a given path matches an installed APK device file path.

    Args:
      apk_path: Device-specific file path.
    Returns:
      On success, a (package_name, apk_name) tuple. On failure, (None. None).
    """
    match = ApkLibraryPathTranslator._RE_APK_PATH_1.match(apk_path)
    if match:
      return (match.group('package_name'), 'base.apk')

    match = ApkLibraryPathTranslator._RE_APK_PATH_2.match(apk_path)
    if match:
      return (match.group('package_name'), match.group('apk_name'))

    return (None, None)

  def TranslatePath(self, apk_path, apk_offset):
    """Translate a potential apk file path + offset into library path + offset.

    Args:
      apk_path: Library or apk file path on the device.
      apk_offset: Byte offset within the library or apk.
    Returns:
      A new (lib_path, lib_offset) tuple when |apk_path| names a registered
      APK containing an uncompressed library at |apk_offset|; otherwise the
      original (apk_path, apk_offset) pair, unchanged.
    """
    if not apk_path.endswith('.apk'):
      return (apk_path, apk_offset)

    package, apk_name = self._MatchApkDeviceInstallPath(apk_path)
    if not package:
      return (apk_path, apk_offset)

    key = '%s/%s' % (package, apk_name)
    native_libs = self._libs_map.get(key)
    if not native_libs:
      logging.debug('Unknown %s package', key)
      return (apk_path, apk_offset)

    lib_name, lib_offset = native_libs.FindLibraryByOffset(apk_offset)
    if not lib_name:
      logging.debug('Invalid offset in %s.apk package: %d', key, apk_offset)
      return (apk_path, apk_offset)

    lib_name = os.path.basename(lib_name)

    # A 'crazy.' prefix kept pre-M PackageManagers from extracting the
    # library at install time (the system linker could not load from APKs
    # back then); strip it for the virtual path.
    crazy_prefix = 'crazy.'
    if lib_name.startswith(crazy_prefix):
      lib_name = lib_name[len(crazy_prefix):]

    # Put this in a fictional lib sub-directory for good measure.
    return ('%s!lib/%s' % (apk_path, lib_name), lib_offset)
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/apk_native_libs_unittest.py b/third_party/libwebrtc/build/android/pylib/symbols/apk_native_libs_unittest.py
new file mode 100755
index 0000000000..59f7e2b02a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/apk_native_libs_unittest.py
@@ -0,0 +1,397 @@
+#!/usr/bin/env vpython3
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import unittest
+
+from pylib.symbols import apk_native_libs
+
+# Mock ELF-like data
+MOCK_ELF_DATA = '\x7fELFFFFFFFFFFFFFFFF'
+
class MockApkZipInfo(object):
  """A mock ApkZipInfo class, returned by MockApkReaderFactory instances."""

  def __init__(self, filename, file_size, compress_size, file_offset,
               file_data):
    # Mirror the attribute names exposed by the real ApkZipInfo.
    self.filename = filename
    self.file_size = file_size
    self.compress_size = compress_size
    self.file_offset = file_offset
    self._data = file_data

  def __repr__(self):
    """Convert to string for debugging."""
    return 'MockApkZipInfo["%s",size=%d,compressed=%d,offset=%d]' % (
        self.filename, self.file_size, self.compress_size, self.file_offset)

  def IsCompressed(self):
    """Returns True iff the entry is compressed."""
    return self.compress_size != self.file_size

  def IsElfFile(self):
    """Returns True iff the entry is an ELF file."""
    data = self._data
    if not data or len(data) < 4:
      return False
    return data[:4] == '\x7fELF'
+
+
class MockApkReader(object):
  """A mock ApkReader instance used during unit-testing.

  Do not use directly, but use a MockApkReaderFactory context, as in:

     with MockApkReaderFactory() as mock:
       mock.AddTestEntry(file_path, file_size, compress_size, file_data)
       ...

       # Actually returns the mock instance.
       apk_reader = apk_native_libs.ApkReader('/some/path.apk')
  """

  def __init__(self, apk_path='test.apk'):
    """Initialize instance."""
    self._path = apk_path
    self._entries = []
    # Fake archive offset assigned to the next entry added.
    self._next_offset = 0

  def __enter__(self):
    return self

  def __exit__(self, *_exc_info):
    self.Close()

  @property
  def path(self):
    return self._path

  def AddTestEntry(self, filepath, file_size, compress_size, file_data):
    """Add a new entry to the instance for unit-tests.

    Do not call this directly, use the AddTestEntry() method on the parent
    MockApkReaderFactory instance.

    Args:
      filepath: archive file path.
      file_size: uncompressed file size in bytes.
      compress_size: compressed size in bytes.
      file_data: file data to be checked by IsElfFile()

    Note that file_data can be None, or that its size can be actually
    smaller than |compress_size| when used during unit-testing.
    """
    entry = MockApkZipInfo(filepath, file_size, compress_size,
                           self._next_offset, file_data)
    self._entries.append(entry)
    self._next_offset += compress_size

  def Close(self):  # pylint: disable=no-self-use
    """Close this reader instance."""

  def ListEntries(self):
    """Return a list of MockApkZipInfo instances for this input APK."""
    return self._entries

  def FindEntry(self, file_path):
    """Return the MockApkZipInfo whose filename matches |file_path|."""
    matches = [e for e in self._entries if e.filename == file_path]
    if matches:
      return matches[0]
    raise KeyError('Could not find mock zip archive member for: ' + file_path)
+
+
class MockApkReaderTest(unittest.TestCase):
  """Sanity-checks for the MockApkReader test helper itself."""

  def testEmpty(self):
    with MockApkReader() as reader:
      entries = reader.ListEntries()
      self.assertEqual(len(entries), 0)
      with self.assertRaises(KeyError):
        reader.FindEntry('non-existent-entry.txt')

  def testSingleEntry(self):
    with MockApkReader() as reader:
      reader.AddTestEntry('some-path/some-file', 20000, 12345, file_data=None)
      entries = reader.ListEntries()
      self.assertEqual(len(entries), 1)
      entry = entries[0]
      self.assertEqual(entry.filename, 'some-path/some-file')
      self.assertEqual(entry.file_size, 20000)
      self.assertEqual(entry.compress_size, 12345)
      self.assertTrue(entry.IsCompressed())

      entry2 = reader.FindEntry('some-path/some-file')
      self.assertEqual(entry, entry2)

  def testMultipleEntries(self):
    with MockApkReader() as reader:
      # Maps path -> (file_size, compress_size, file_data).
      # BUG FIX: the .so tuple previously contained a stray extra value
      # (16000, 3240, 1024, '\x7fELF...'), which made props[2] pass the
      # integer 1024 as |file_data| and silently drop the ELF payload.
      _ENTRIES = {
          'foo.txt': (1024, 1024, 'FooFooFoo'),
          'lib/bar/libcode.so': (16000, 3240, '\x7fELFFFFFFFFFFFF'),
      }
      for path, (file_size, compress_size, file_data) in _ENTRIES.items():
        reader.AddTestEntry(path, file_size, compress_size, file_data)

      entries = reader.ListEntries()
      self.assertEqual(len(entries), len(_ENTRIES))
      for path, (file_size, compress_size, _) in _ENTRIES.items():
        entry = reader.FindEntry(path)
        self.assertEqual(entry.filename, path)
        self.assertEqual(entry.file_size, file_size)
        self.assertEqual(entry.compress_size, compress_size)
+ self.assertEqual(entry.compress_size, props[1])
+
+
class ApkNativeLibrariesTest(unittest.TestCase):
  """Tests for apk_native_libs.ApkNativeLibraries, driven by MockApkReader."""

  def setUp(self):
    # Silence the debug output produced while parsing intentionally-bad
    # entries below.
    logging.getLogger().setLevel(logging.ERROR)

  def testEmptyApk(self):
    with MockApkReader() as reader:
      libs_map = apk_native_libs.ApkNativeLibraries(reader)
      self.assertTrue(libs_map.IsEmpty())
      self.assertEqual(len(libs_map.GetLibraries()), 0)
      lib_path, lib_offset = libs_map.FindLibraryByOffset(0)
      self.assertIsNone(lib_path)
      self.assertEqual(lib_offset, 0)

  def testSimpleApk(self):
    with MockApkReader() as reader:
      # Each tuple is (path, file_size, compress_size, data, is_valid_lib).
      mock_entries = [
          # Top-level library should be ignored.
          ('libfoo.so', 1000, 1000, MOCK_ELF_DATA, False),
          # Library not under lib/ should be ignored.
          ('badlib/test-abi/libfoo2.so', 1001, 1001, MOCK_ELF_DATA, False),
          # Library under lib/<abi>/ but without .so extension -> ignored.
          ('lib/test-abi/libfoo4.so.1', 1003, 1003, MOCK_ELF_DATA, False),
          # Library under lib/<abi>/ with .so suffix, but compressed ->
          # ignored.
          ('lib/test-abi/libfoo5.so', 1004, 1003, MOCK_ELF_DATA, False),
          # First correct library.
          ('lib/test-abi/libgood1.so', 1005, 1005, MOCK_ELF_DATA, True),
          # Second correct library: support sub-directories.
          ('lib/test-abi/subdir/libgood2.so', 1006, 1006, MOCK_ELF_DATA, True),
          # Third correct library, no lib prefix required.
          ('lib/test-abi/crazy.libgood3.so', 1007, 1007, MOCK_ELF_DATA, True),
      ]
      offsets = []
      next_offset = 0
      for path, size, compressed_size, data, _ in mock_entries:
        reader.AddTestEntry(path, size, compressed_size, data)
        offsets.append(next_offset)
        next_offset += compressed_size

      libs_map = apk_native_libs.ApkNativeLibraries(reader)
      self.assertFalse(libs_map.IsEmpty())
      self.assertEqual(libs_map.GetLibraries(), [
          'lib/test-abi/crazy.libgood3.so',
          'lib/test-abi/libgood1.so',
          'lib/test-abi/subdir/libgood2.so',
      ])

      # Probe slightly past each valid library's start offset; the returned
      # offset must be relative to the library itself.
      bias = 10
      for (path, _, _, _, is_valid), start in zip(mock_entries, offsets):
        if not is_valid:
          continue
        lib_path, lib_offset = libs_map.FindLibraryByOffset(start + bias)
        self.assertEqual(lib_path, path)
        self.assertEqual(lib_offset, bias)

  def testMultiAbiApk(self):
    with MockApkReader() as reader:
      # Two uncompressed copies of the same library under different ABIs.
      for path in ('lib/abi1/libfoo.so', 'lib/abi2/libfoo.so'):
        reader.AddTestEntry(path, 1000, 1000, MOCK_ELF_DATA)

      libs_map = apk_native_libs.ApkNativeLibraries(reader)
      self.assertFalse(libs_map.IsEmpty())
      self.assertEqual(libs_map.GetLibraries(),
                       ['lib/abi1/libfoo.so', 'lib/abi2/libfoo.so'])

      lib_name, lib_offset = libs_map.FindLibraryByOffset(10)
      self.assertEqual(lib_name, 'lib/abi1/libfoo.so')
      self.assertEqual(lib_offset, 10)

      lib_name, lib_offset = libs_map.FindLibraryByOffset(1000)
      self.assertEqual(lib_name, 'lib/abi2/libfoo.so')
      self.assertEqual(lib_offset, 0)
+
+
class MockApkNativeLibraries(apk_native_libs.ApkNativeLibraries):
  """A mock ApkNativeLibraries instance that can be used as input to
  ApkLibraryPathTranslator without creating an ApkReader instance.

  Create a new instance, then call AddTestEntry or AddTestEntries
  as many times as necessary, before using it as a regular
  ApkNativeLibraries instance.
  """
  # pylint: disable=super-init-not-called
  def __init__(self):
    # Deliberately skips the parent constructor (which needs an ApkReader);
    # tests populate |_native_libs| through AddTestEntry() instead.
    self._native_libs = []

  # pylint: enable=super-init-not-called

  def AddTestEntry(self, lib_path, file_offset, file_size):
    """Add a new test entry.

    Args:
      lib_path: Library path inside the APK
        (e.g. 'lib/armeabi-v8a/libfoo.so').
      file_offset: Byte offset of the library within the APK file
        (e.g. 0x10000).
      file_size: Size of the library in bytes (e.g. 0x2000).
    """
    # Stored as (path, start_offset, end_offset) -- presumably the parent
    # class' internal format; verify against apk_native_libs if it changes.
    self._native_libs.append((lib_path, file_offset, file_offset + file_size))

  def AddTestEntries(self, entries):
    """Add a list of new test entries.

    Args:
      entries: A list of (library-path, file-offset, file-size) values.
    """
    for entry in entries:
      self.AddTestEntry(entry[0], entry[1], entry[2])
+
+
class MockApkNativeLibrariesTest(unittest.TestCase):
  """Checks MockApkNativeLibraries against the ApkNativeLibraries API."""

  # Shared test data, as (library-path, file-offset, file-size) tuples.
  _ENTRIES = [
      ('lib/armeabi-v7a/libfoo.so', 0x20000, 0x4000),
      ('lib/x86/libzoo.so', 0x10000, 0x10000),
      ('lib/armeabi-v7a/libbar.so', 0x24000, 0x8000),
  ]

  def _CheckPopulated(self, mock):
    """Assert that |mock| holds exactly the entries from |_ENTRIES|."""
    self.assertFalse(mock.IsEmpty())
    # Library paths come back in sorted order.
    self.assertEqual(mock.GetLibraries(), ['lib/armeabi-v7a/libbar.so',
                                           'lib/armeabi-v7a/libfoo.so',
                                           'lib/x86/libzoo.so'])
    # The dump list is ordered by file offset.
    self.assertEqual(mock.GetDumpList(), [
        ('lib/x86/libzoo.so', 0x10000, 0x10000),
        ('lib/armeabi-v7a/libfoo.so', 0x20000, 0x4000),
        ('lib/armeabi-v7a/libbar.so', 0x24000, 0x8000),
    ])

  def testEmptyInstance(self):
    mock = MockApkNativeLibraries()
    self.assertTrue(mock.IsEmpty())
    self.assertEqual(mock.GetLibraries(), [])
    self.assertEqual(mock.GetDumpList(), [])

  def testAddTestEntry(self):
    mock = MockApkNativeLibraries()
    for lib_path, file_offset, file_size in self._ENTRIES:
      mock.AddTestEntry(lib_path, file_offset, file_size)
    self._CheckPopulated(mock)

  def testAddTestEntries(self):
    mock = MockApkNativeLibraries()
    mock.AddTestEntries(self._ENTRIES)
    self._CheckPopulated(mock)
+
+
class ApkLibraryPathTranslatorTest(unittest.TestCase):
  """Tests for apk_native_libs.ApkLibraryPathTranslator."""

  def _CheckUntranslated(self, translator, path, offset):
    """Check that a given (path, offset) is not modified by translation."""
    self.assertEqual(translator.TranslatePath(path, offset), (path, offset))

  def _CheckTranslated(self, translator, path, offset, new_path, new_offset):
    """Check that (path, offset) is translated into (new_path, new_offset)."""
    result = translator.TranslatePath(path, offset)
    self.assertEqual(result, (new_path, new_offset))

  def testEmptyInstance(self):
    translator = apk_native_libs.ApkLibraryPathTranslator()
    self._CheckUntranslated(
        translator, '/data/data/com.example.app-1/base.apk', 0x123456)

  def testSimpleApk(self):
    apk_libs = MockApkNativeLibraries()
    apk_libs.AddTestEntries([
        ('lib/test-abi/libfoo.so', 200, 2000),
        ('lib/test-abi/libbar.so', 3200, 3000),
        ('lib/test-abi/crazy.libzoo.so', 6200, 2000),
    ])
    translator = apk_native_libs.ApkLibraryPathTranslator()
    translator.AddHostApk('com.example.app', apk_libs)

    # Offset within the first uncompressed library.
    self._CheckTranslated(
        translator,
        '/data/data/com.example.app-9.apk', 757,
        '/data/data/com.example.app-9.apk!lib/libfoo.so', 557)

    # Offset in the gap between libraries (a compressed one on the device).
    self._CheckUntranslated(
        translator,
        '/data/data/com.example.app-9/base.apk', 2800)

    # Offset within the second uncompressed library.
    self._CheckTranslated(
        translator,
        '/data/data/com.example.app-1/base.apk', 3628,
        '/data/data/com.example.app-1/base.apk!lib/libbar.so', 428)

    # Offset within the crazy.-prefixed library: the prefix is stripped.
    self._CheckTranslated(
        translator,
        '/data/data/com.example.app-XX/base.apk', 6500,
        '/data/data/com.example.app-XX/base.apk!lib/libzoo.so', 300)

    # Out-of-bounds apk offset.
    self._CheckUntranslated(
        translator,
        '/data/data/com.example.app-1/base.apk', 10000)

    # Invalid package name.
    self._CheckUntranslated(
        translator, '/data/data/com.example2.app-1/base.apk', 757)

    # Invalid apk name.
    self._CheckUntranslated(
        translator, '/data/data/com.example.app-2/not-base.apk', 100)

    # Invalid file extensions.
    self._CheckUntranslated(
        translator, '/data/data/com.example.app-2/base', 100)
    self._CheckUntranslated(
        translator, '/data/data/com.example.app-2/base.apk.dex', 100)

  def testBundleApks(self):
    base_libs = MockApkNativeLibraries()
    base_libs.AddTestEntries([
        ('lib/test-abi/libfoo.so', 200, 2000),
        ('lib/test-abi/libbbar.so', 3200, 3000),
    ])
    feature_libs = MockApkNativeLibraries()
    feature_libs.AddTestEntries([
        ('lib/test-abi/libzoo.so', 200, 2000),
        ('lib/test-abi/libtool.so', 3000, 4000),
    ])
    translator = apk_native_libs.ApkLibraryPathTranslator()
    translator.AddHostApk('com.example.app', base_libs, 'base-master.apk')
    translator.AddHostApk('com.example.app', feature_libs,
                          'feature-master.apk')

    self._CheckTranslated(
        translator,
        '/data/app/com.example.app-XUIYIUW/base-master.apk', 757,
        '/data/app/com.example.app-XUIYIUW/base-master.apk!lib/libfoo.so', 557)

    self._CheckTranslated(
        translator,
        '/data/app/com.example.app-XUIYIUW/feature-master.apk', 3200,
        '/data/app/com.example.app-XUIYIUW/feature-master.apk!lib/libtool.so',
        200)
+
+
# Allow running the tests directly as a stand-alone script.
if __name__ == '__main__':
  unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/deobfuscator.py b/third_party/libwebrtc/build/android/pylib/symbols/deobfuscator.py
new file mode 100644
index 0000000000..1fd188a425
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/deobfuscator.py
@@ -0,0 +1,178 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import subprocess
+import threading
+import time
+import uuid
+
+from devil.utils import reraiser_thread
+from pylib import constants
+
+
+_MINIUMUM_TIMEOUT = 3.0
+_PER_LINE_TIMEOUT = .002 # Should be able to process 500 lines per second.
+_PROCESS_START_TIMEOUT = 10.0
+_MAX_RESTARTS = 10 # Should be plenty unless tool is crashing on start-up.
+
+
class Deobfuscator(object):
  """Wraps a long-lived java_deobfuscate.py subprocess.

  Lines are piped through the subprocess' stdin/stdout. Only one thread
  may be inside TransformLines() at a time; other callers block on an
  internal lock.
  """

  def __init__(self, mapping_path):
    """Eagerly spawns the deobfuscation subprocess.

    Args:
      mapping_path: Path to the proguard mapping file, passed straight to
        java_deobfuscate.py.
    """
    script_path = os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'android',
                               'stacktrace', 'java_deobfuscate.py')
    cmd = [script_path, mapping_path]
    # Allow only one thread to call TransformLines() at a time.
    self._lock = threading.Lock()
    # Ensure that only one thread attempts to kill self._proc in Close().
    self._close_lock = threading.Lock()
    self._closed_called = False
    # Assign to None so that attribute exists if Popen() throws.
    self._proc = None
    # Start process eagerly to hide start-up latency.
    self._proc_start_time = time.time()
    self._proc = subprocess.Popen(cmd,
                                  bufsize=1,
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  universal_newlines=True,
                                  close_fds=True)

  def IsClosed(self):
    """Returns True once Close() was called or the subprocess has exited."""
    return self._closed_called or self._proc.returncode is not None

  def IsBusy(self):
    """Returns True while another thread is inside TransformLines()."""
    return self._lock.locked()

  def IsReady(self):
    """Returns True if a TransformLines() call would not block right now."""
    return not self.IsClosed() and not self.IsBusy()

  def TransformLines(self, lines):
    """Deobfuscates obfuscated names found in the given lines.

    If anything goes wrong (process crashes, timeout, etc), returns |lines|.

    Args:
      lines: A list of strings without trailing newlines.

    Returns:
      A list of strings without trailing newlines.
    """
    if not lines:
      return []

    # Deobfuscated stacks contain more frames than obfuscated ones when method
    # inlining occurs. To account for the extra output lines, keep reading until
    # this eof_line token is reached.
    eof_line = uuid.uuid4().hex
    out_lines = []

    def deobfuscate_reader():
      # Runs on a helper thread: drains subprocess stdout into |out_lines|
      # until the sentinel is echoed back (or EOF, meaning the process died).
      while True:
        line = self._proc.stdout.readline()
        # Return an empty string at EOF (when stdin is closed).
        if not line:
          break
        line = line[:-1]  # Strip the trailing newline.
        if line == eof_line:
          break
        out_lines.append(line)

    if self.IsBusy():
      logging.warning('deobfuscator: Having to wait for Java deobfuscation.')

    # Allow only one thread to operate at a time.
    with self._lock:
      if self.IsClosed():
        if not self._closed_called:
          logging.warning('deobfuscator: Process exited with code=%d.',
                          self._proc.returncode)
          self.Close()
        return lines

      # TODO(agrieve): Can probably speed this up by only sending lines through
      # that might contain an obfuscated name.
      reader_thread = reraiser_thread.ReraiserThread(deobfuscate_reader)
      reader_thread.start()

      try:
        self._proc.stdin.write('\n'.join(lines))
        self._proc.stdin.write('\n{}\n'.format(eof_line))
        self._proc.stdin.flush()
        # Budget extra time while the subprocess may still be starting up,
        # plus a per-line allowance for the actual work.
        time_since_proc_start = time.time() - self._proc_start_time
        timeout = (max(0, _PROCESS_START_TIMEOUT - time_since_proc_start) +
                   max(_MINIUMUM_TIMEOUT, len(lines) * _PER_LINE_TIMEOUT))
        reader_thread.join(timeout)
        if self.IsClosed():
          logging.warning(
              'deobfuscator: Close() called by another thread during join().')
          return lines
        if reader_thread.is_alive():
          logging.error('deobfuscator: Timed out.')
          self.Close()
          return lines
        return out_lines
      except IOError:
        logging.exception('deobfuscator: Exception during java_deobfuscate')
        self.Close()
        return lines

  def Close(self):
    """Terminates the subprocess (idempotent and thread-safe)."""
    with self._close_lock:
      needs_closing = not self.IsClosed()
      self._closed_called = True

    if needs_closing:
      # Closing stdin makes the reader thread (if any) see EOF and exit.
      self._proc.stdin.close()
      self._proc.kill()
      self._proc.wait()

  def __del__(self):
    # self._proc is None when Popen() fails.
    if not self._closed_called and self._proc:
      logging.error('deobfuscator: Forgot to Close()')
      self.Close()
+
+
class DeobfuscatorPool(object):
  """Round-robin pool of Deobfuscator instances.

  Lets multiple threads deobfuscate concurrently by handing each caller a
  (preferably idle) instance, and transparently restarts instances whose
  subprocess has died.
  """
  # As of Sep 2017, each instance requires about 500MB of RAM, as measured by:
  # /usr/bin/time -v build/android/stacktrace/java_deobfuscate.py \
  # out/Release/apks/ChromePublic.apk.mapping
  def __init__(self, mapping_path, pool_size=4):
    # mapping_path: proguard mapping file; kept to respawn dead instances.
    # pool_size: number of java_deobfuscate.py subprocesses to keep alive.
    self._mapping_path = mapping_path
    self._pool = [Deobfuscator(mapping_path) for _ in range(pool_size)]
    # Allow only one thread to select from the pool at a time.
    self._lock = threading.Lock()
    self._num_restarts = 0

  def TransformLines(self, lines):
    """Deobfuscates |lines| using one of the pooled instances.

    Raises:
      Exception: after _MAX_RESTARTS subprocess restarts, deobfuscation is
        considered permanently broken.
    """
    with self._lock:
      assert self._pool, 'TransformLines() called on a closed DeobfuscatorPool.'

      # De-obfuscation is broken.
      if self._num_restarts == _MAX_RESTARTS:
        raise Exception('Deobfuscation seems broken.')

      # Restart any closed Deobfuscators.
      for i, d in enumerate(self._pool):
        if d.IsClosed():
          logging.warning('deobfuscator: Restarting closed instance.')
          self._pool[i] = Deobfuscator(self._mapping_path)
          self._num_restarts += 1
          if self._num_restarts == _MAX_RESTARTS:
            logging.warning('deobfuscator: MAX_RESTARTS reached.')

      selected = next((x for x in self._pool if x.IsReady()), self._pool[0])
      # Rotate the order so that next caller will not choose the same one.
      self._pool.remove(selected)
      self._pool.append(selected)

    # Deliberately outside the lock: the actual deobfuscation serializes on
    # the selected instance's own lock, not on the pool lock.
    return selected.TransformLines(lines)

  def Close(self):
    """Closes every pooled instance and marks the pool unusable."""
    with self._lock:
      for d in self._pool:
        d.Close()
      self._pool = None
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/elf_symbolizer.py b/third_party/libwebrtc/build/android/pylib/symbols/elf_symbolizer.py
new file mode 100644
index 0000000000..4198511bf3
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/elf_symbolizer.py
@@ -0,0 +1,497 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import datetime
+import logging
+import multiprocessing
+import os
+import posixpath
+try:
+ from queue import Empty, Queue
+except ImportError:
+ from Queue import Empty, Queue
+import re
+import subprocess
+import sys
+import threading
+import time
+
+
+# addr2line builds a possibly infinite memory cache that can exhaust
+# the computer's memory if allowed to grow for too long. This constant
+# controls how many lookups we do before restarting the process. 4000
+# gives near peak performance without extreme memory usage.
+ADDR2LINE_RECYCLE_LIMIT = 4000
+
+
# First four bytes of every ELF file ('\x7fELF'). Kept as a str for
# backward compatibility with existing importers of this constant.
ELF_MAGIC = '\x7f\x45\x4c\x46'


def ContainsElfMagic(file_path):
  """Returns True iff |file_path| starts with the 4-byte ELF magic number.

  BUG FIX: the file is now read in binary mode. The previous text-mode read
  could raise UnicodeDecodeError on Python 3 for real ELF binaries (whose
  bytes are generally not valid UTF-8), and that exception was not caught.

  Args:
    file_path: Path of the file to probe.
  Returns:
    True if the first four bytes are the ELF magic; False for short files,
    non-ELF content, or on I/O errors.
  """
  if os.path.getsize(file_path) < 4:
    return False
  try:
    with open(file_path, 'rb') as f:
      return f.read(4) == ELF_MAGIC.encode('latin-1')
  except IOError:
    return False
+
+
+class ELFSymbolizer(object):
+ """An uber-fast (multiprocessing, pipelined and asynchronous) ELF symbolizer.
+
+ This class is a frontend for addr2line (part of GNU binutils), designed to
+ symbolize batches of large numbers of symbols for a given ELF file. It
+ supports sharding symbolization against many addr2line instances and
+ pipelining of multiple requests per each instance (in order to hide addr2line
+ internals and OS pipe latencies).
+
+ The interface exhibited by this class is a very simple asynchronous interface,
+ which is based on the following three methods:
+ - SymbolizeAsync(): used to request (enqueue) resolution of a given address.
+ - The |callback| method: used to communicated back the symbol information.
+ - Join(): called to conclude the batch to gather the last outstanding results.
+ In essence, before the Join method returns, this class will have issued as
+ many callbacks as the number of SymbolizeAsync() calls. In this regard, note
+ that due to multiprocess sharding, callbacks can be delivered out of order.
+
+ Some background about addr2line:
+ - it is invoked passing the elf path in the cmdline, piping the addresses in
+ its stdin and getting results on its stdout.
+ - it has pretty large response times for the first requests, but it
+ works very well in streaming mode once it has been warmed up.
+ - it doesn't scale by itself (on more cores). However, spawning multiple
+ instances at the same time on the same file is pretty efficient as they
+ keep hitting the pagecache and become mostly CPU bound.
+ - it might hang or crash, mostly for OOM. This class deals with both of these
+ problems.
+
+ Despite the "scary" imports and the multi* words above, (almost) no multi-
+ threading/processing is involved from the python viewpoint. Concurrency
+ here is achieved by spawning several addr2line subprocesses and handling their
+ output pipes asynchronously. Therefore, all the code here (with the exception
+ of the Queue instance in Addr2Line) should be free from mind-blowing
+ thread-safety concerns.
+
+ The multiprocess sharding works as follows:
+ The symbolizer tries to use the lowest number of addr2line instances as
+ possible (with respect of |max_concurrent_jobs|) and enqueue all the requests
+ in a single addr2line instance. For few symbols (i.e. dozens) sharding isn't
+ worth the startup cost.
+ The multiprocess logic kicks in as soon as the queues for the existing
+ instances grow. Specifically, once all the existing instances reach the
+ |max_queue_size| bound, a new addr2line instance is kicked in.
+ In the case of a very eager producer (i.e. all |max_concurrent_jobs| instances
+ have a backlog of |max_queue_size|), back-pressure is applied on the caller by
+ blocking the SymbolizeAsync method.
+
+ This module has been deliberately designed to be dependency free (w.r.t. of
+ other modules in this project), to allow easy reuse in external projects.
+ """
+
+ def __init__(self, elf_file_path, addr2line_path, callback, inlines=False,
+ max_concurrent_jobs=None, addr2line_timeout=30, max_queue_size=50,
+ source_root_path=None, strip_base_path=None):
+ """Args:
+ elf_file_path: path of the elf file to be symbolized.
+ addr2line_path: path of the toolchain's addr2line binary.
+ callback: a callback which will be invoked for each resolved symbol with
+ the two args (sym_info, callback_arg). The former is an instance of
+ |ELFSymbolInfo| and contains the symbol information. The latter is an
+ embedder-provided argument which is passed to SymbolizeAsync().
+ inlines: when True, the ELFSymbolInfo will contain also the details about
+ the outer inlining functions. When False, only the innermost function
+ will be provided.
+ max_concurrent_jobs: Max number of addr2line instances spawned.
+ Parallelize responsibly, addr2line is a memory and I/O monster.
+ max_queue_size: Max number of outstanding requests per addr2line instance.
+ addr2line_timeout: Max time (in seconds) to wait for a addr2line response.
+ After the timeout, the instance will be considered hung and respawned.
+ source_root_path: In some toolchains only the name of the source file is
+ is output, without any path information; disambiguation searches
+ through the source directory specified by |source_root_path| argument
+ for files whose name matches, adding the full path information to the
+ output. For example, if the toolchain outputs "unicode.cc" and there
+ is a file called "unicode.cc" located under |source_root_path|/foo,
+ the tool will replace "unicode.cc" with
+ "|source_root_path|/foo/unicode.cc". If there are multiple files with
+ the same name, disambiguation will fail because the tool cannot
+ determine which of the files was the source of the symbol.
+ strip_base_path: Rebases the symbols source paths onto |source_root_path|
+ (i.e replace |strip_base_path| with |source_root_path).
+ """
+ assert(os.path.isfile(addr2line_path)), 'Cannot find ' + addr2line_path
+ self.elf_file_path = elf_file_path
+ self.addr2line_path = addr2line_path
+ self.callback = callback
+ self.inlines = inlines
+ self.max_concurrent_jobs = (max_concurrent_jobs or
+ min(multiprocessing.cpu_count(), 4))
+ self.max_queue_size = max_queue_size
+ self.addr2line_timeout = addr2line_timeout
+ self.requests_counter = 0 # For generating monotonic request IDs.
+ self._a2l_instances = [] # Up to |max_concurrent_jobs| _Addr2Line inst.
+
+ # If necessary, create disambiguation lookup table
+ self.disambiguate = source_root_path is not None
+ self.disambiguation_table = {}
+ self.strip_base_path = strip_base_path
+ if self.disambiguate:
+ self.source_root_path = os.path.abspath(source_root_path)
+ self._CreateDisambiguationTable()
+
+ # Create one addr2line instance. More instances will be created on demand
+ # (up to |max_concurrent_jobs|) depending on the rate of the requests.
+ self._CreateNewA2LInstance()
+
+ def SymbolizeAsync(self, addr, callback_arg=None):
+ """Requests symbolization of a given address.
+
+ This method is not guaranteed to return immediately. It generally does, but
+ in some scenarios (e.g. all addr2line instances have full queues) it can
+ block to create back-pressure.
+
+ Args:
+ addr: address to symbolize.
+ callback_arg: optional argument which will be passed to the |callback|."""
+ assert isinstance(addr, int)
+
+ # Process all the symbols that have been resolved in the meanwhile.
+ # Essentially, this drains all the addr2line(s) out queues.
+ for a2l_to_purge in self._a2l_instances:
+ a2l_to_purge.ProcessAllResolvedSymbolsInQueue()
+ a2l_to_purge.RecycleIfNecessary()
+
+ # Find the best instance according to this logic:
+ # 1. Find an existing instance with the shortest queue.
+ # 2. If all of instances' queues are full, but there is room in the pool,
+ # (i.e. < |max_concurrent_jobs|) create a new instance.
+ # 3. If there were already |max_concurrent_jobs| instances and all of them
+ # had full queues, make back-pressure.
+
+ # 1.
+ def _SortByQueueSizeAndReqID(a2l):
+ return (a2l.queue_size, a2l.first_request_id)
+ a2l = min(self._a2l_instances, key=_SortByQueueSizeAndReqID)
+
+ # 2.
+ if (a2l.queue_size >= self.max_queue_size and
+ len(self._a2l_instances) < self.max_concurrent_jobs):
+ a2l = self._CreateNewA2LInstance()
+
+ # 3.
+ if a2l.queue_size >= self.max_queue_size:
+ a2l.WaitForNextSymbolInQueue()
+
+ a2l.EnqueueRequest(addr, callback_arg)
+
+ def WaitForIdle(self):
+ """Waits for all the outstanding requests to complete."""
+ for a2l in self._a2l_instances:
+ a2l.WaitForIdle()
+
+ def Join(self):
+ """Waits for all the outstanding requests to complete and terminates."""
+ for a2l in self._a2l_instances:
+ a2l.WaitForIdle()
+ a2l.Terminate()
+
+ def _CreateNewA2LInstance(self):
+ assert len(self._a2l_instances) < self.max_concurrent_jobs
+ a2l = ELFSymbolizer.Addr2Line(self)
+ self._a2l_instances.append(a2l)
+ return a2l
+
+ def _CreateDisambiguationTable(self):
+ """ Non-unique file names will result in None entries"""
+ start_time = time.time()
+ logging.info('Collecting information about available source files...')
+ self.disambiguation_table = {}
+
+ for root, _, filenames in os.walk(self.source_root_path):
+ for f in filenames:
+ self.disambiguation_table[f] = os.path.join(root, f) if (f not in
+ self.disambiguation_table) else None
+ logging.info('Finished collecting information about '
+ 'possible files (took %.1f s).',
+ (time.time() - start_time))
+
+
+ class Addr2Line(object):
+ """A python wrapper around an addr2line instance.
+
+ The communication with the addr2line process looks as follows:
+ [STDIN] [STDOUT] (from addr2line's viewpoint)
+ > f001111
+ > f002222
+ < Symbol::Name(foo, bar) for f001111
+ < /path/to/source/file.c:line_number
+ > f003333
+ < Symbol::Name2() for f002222
+ < /path/to/source/file.c:line_number
+ < Symbol::Name3() for f003333
+ < /path/to/source/file.c:line_number
+ """
+
+ SYM_ADDR_RE = re.compile(r'([^:]+):(\?|\d+).*')
+
+ def __init__(self, symbolizer):
+ self._symbolizer = symbolizer
+ self._lib_file_name = posixpath.basename(symbolizer.elf_file_path)
+
+ # The request queue (i.e. addresses pushed to addr2line's stdin and not
+ # yet retrieved on stdout)
+ self._request_queue = collections.deque()
+
+ # This is essentially len(self._request_queue). It has been optimized to a
+ # separate field because turned out to be a perf hot-spot.
+ self.queue_size = 0
+
+ # Keep track of the number of symbols a process has processed to
+ # avoid a single process growing too big and using all the memory.
+ self._processed_symbols_count = 0
+
+ # Objects required to handle the addr2line subprocess.
+ self._proc = None # Subprocess.Popen(...) instance.
+ self._thread = None # Threading.thread instance.
+ self._out_queue = None # Queue instance (for buffering a2l stdout).
+ self._RestartAddr2LineProcess()
+
+ def EnqueueRequest(self, addr, callback_arg):
+ """Pushes an address to addr2line's stdin (and keeps track of it)."""
+ self._symbolizer.requests_counter += 1 # For global "age" of requests.
+ req_idx = self._symbolizer.requests_counter
+ self._request_queue.append((addr, callback_arg, req_idx))
+ self.queue_size += 1
+ self._WriteToA2lStdin(addr)
+
+ def WaitForIdle(self):
+ """Waits until all the pending requests have been symbolized."""
+ while self.queue_size > 0:
+ self.WaitForNextSymbolInQueue()
+
+ def WaitForNextSymbolInQueue(self):
+ """Waits for the next pending request to be symbolized."""
+ if not self.queue_size:
+ return
+
+ # This outer loop guards against a2l hanging (detecting stdout timeout).
+ while True:
+ start_time = datetime.datetime.now()
+ timeout = datetime.timedelta(seconds=self._symbolizer.addr2line_timeout)
+
+ # The inner loop guards against a2l crashing (checking if it exited).
+ while datetime.datetime.now() - start_time < timeout:
+ # poll() returns !None if the process exited. a2l should never exit.
+ if self._proc.poll():
+ logging.warning('addr2line crashed, respawning (lib: %s).',
+ self._lib_file_name)
+ self._RestartAddr2LineProcess()
+ # TODO(primiano): the best thing to do in this case would be
+ # shrinking the pool size as, very likely, addr2line is crashed
+ # due to low memory (and the respawned one will die again soon).
+
+ try:
+ lines = self._out_queue.get(block=True, timeout=0.25)
+ except Empty:
+ # On timeout (1/4 s.) repeat the inner loop and check if either the
+ # addr2line process did crash or we waited its output for too long.
+ continue
+
+ # In nominal conditions, we get straight to this point.
+ self._ProcessSymbolOutput(lines)
+ return
+
+ # If this point is reached, we waited more than |addr2line_timeout|.
+ logging.warning('Hung addr2line process, respawning (lib: %s).',
+ self._lib_file_name)
+ self._RestartAddr2LineProcess()
+
def ProcessAllResolvedSymbolsInQueue(self):
  """Drains the addr2line output produced so far, without ever blocking."""
  if not self.queue_size:
    return
  while True:
    try:
      batch = self._out_queue.get_nowait()
    except Empty:
      return
    self._ProcessSymbolOutput(batch)
+
def RecycleIfNecessary(self):
  """Respawns addr2line once it has symbolized enough addresses.

  A long-running addr2line process keeps growing in memory without any
  performance gain, so it is recycled periodically."""
  if self._processed_symbols_count < ADDR2LINE_RECYCLE_LIMIT:
    return
  self._RestartAddr2LineProcess()
+
+
def Terminate(self):
  """Kills the underlying addr2line process.

  The poller |_thread| will terminate as well due to the broken pipe."""
  try:
    self._proc.kill()
    # communicate() behaves like wait() here but cannot deadlock on a
    # full pipe buffer.
    self._proc.communicate()
  except Exception:  # pylint: disable=broad-except
    # Best-effort teardown: the process may already be gone.
    pass
  self._proc = None
+
def _WriteToA2lStdin(self, addr):
  # Build the full request first, then write and flush it in one go.
  request = '%s\n' % hex(addr)
  if self._symbolizer.inlines:
    # In the case of inlines we output an extra blank line, which causes
    # addr2line to emit a (??,??:0) tuple that we use as a boundary marker.
    request += '\n'
  self._proc.stdin.write(request)
  self._proc.stdin.flush()
+
def _ProcessSymbolOutput(self, lines):
  """Parses an addr2line symbol output and triggers the client callback.

  Args:
    lines: list of (function_name_line, source_location_line) pairs, one
      pair per (possibly inlined) frame, as shipped by the poller thread.
  """
  # Requests are answered in FIFO order, so these lines belong to the
  # oldest pending request.
  (_, callback_arg, _) = self._request_queue.popleft()
  self.queue_size -= 1

  innermost_sym_info = None
  sym_info = None
  for (line1, line2) in lines:
    prev_sym_info = sym_info
    # addr2line prints '??' when the function name is unknown.
    name = line1 if not line1.startswith('?') else None
    source_path = None
    source_line = None
    m = ELFSymbolizer.Addr2Line.SYM_ADDR_RE.match(line2)
    if m:
      if not m.group(1).startswith('?'):
        source_path = m.group(1)
      if not m.group(2).startswith('?'):
        source_line = int(m.group(2))
    else:
      logging.warning('Got invalid symbol path from addr2line: %s', line2)

    # In case disambiguation is on, and needed
    was_ambiguous = False
    disambiguated = False
    if self._symbolizer.disambiguate:
      if source_path and not posixpath.isabs(source_path):
        path = self._symbolizer.disambiguation_table.get(source_path)
        was_ambiguous = True
        disambiguated = path is not None
        source_path = path if disambiguated else source_path

    # Use absolute paths (so that paths are consistent, as disambiguation
    # uses absolute paths)
    if source_path and not was_ambiguous:
      source_path = os.path.abspath(source_path)

    if source_path and self._symbolizer.strip_base_path:
      # Strip the base path
      source_path = re.sub('^' + self._symbolizer.strip_base_path,
                           self._symbolizer.source_root_path or '', source_path)

    sym_info = ELFSymbolInfo(name, source_path, source_line, was_ambiguous,
                             disambiguated)
    # Chain inline frames: each frame records the (outer) frame that
    # inlined it, while the callback receives the innermost one.
    if prev_sym_info:
      prev_sym_info.inlined_by = sym_info
    if not innermost_sym_info:
      innermost_sym_info = sym_info

  self._processed_symbols_count += 1
  self._symbolizer.callback(innermost_sym_info, callback_arg)
+
def _RestartAddr2LineProcess(self):
  """(Re)spawns the addr2line subprocess and its stdout poller thread.

  Terminates the current process first (if any), resets the processed
  symbols counter, and replays all still-pending requests on the fresh
  process so nothing is lost across a crash/hang recovery.
  """
  if self._proc:
    self.Terminate()

  # The only reason of existence of this Queue (and the corresponding
  # Thread below) is the lack of a subprocess.stdout.poll_avail_lines().
  # Essentially this is a pipe able to extract a couple of lines atomically.
  self._out_queue = Queue()

  # Start the underlying addr2line process in line buffered mode.

  cmd = [self._symbolizer.addr2line_path, '--functions', '--demangle',
         '--exe=' + self._symbolizer.elf_file_path]
  if self._symbolizer.inlines:
    cmd += ['--inlines']
  self._proc = subprocess.Popen(cmd,
                                bufsize=1,
                                universal_newlines=True,
                                stdout=subprocess.PIPE,
                                stdin=subprocess.PIPE,
                                stderr=sys.stderr,
                                close_fds=True)

  # Start the poller thread, which simply moves atomically the lines read
  # from the addr2line's stdout to the |_out_queue|.
  self._thread = threading.Thread(
      target=ELFSymbolizer.Addr2Line.StdoutReaderThread,
      args=(self._proc.stdout, self._out_queue, self._symbolizer.inlines))
  self._thread.daemon = True  # Don't prevent early process exit.
  self._thread.start()

  self._processed_symbols_count = 0

  # Replay the pending requests on the new process (only for the case
  # of a hung addr2line timing out during the game).
  for (addr, _, _) in self._request_queue:
    self._WriteToA2lStdin(addr)
+
@staticmethod
def StdoutReaderThread(process_pipe, my_queue, inlines):
  """Poller thread body: ships addr2line stdout line pairs to |my_queue|.

  This is the only piece of code not running on the main thread. It only
  writes to a Queue, which is thread-safe. With |inlines| enabled it
  detects the (??, ??:0) boundary marker and enqueues all the line pairs
  of one symbol atomically, so the main thread always receives a complete
  symbol in one shot."""
  try:
    pending = []
    while True:
      func_line = process_pipe.readline().rstrip('\r\n')
      if not func_line:
        break
      loc_line = process_pipe.readline().rstrip('\r\n')
      if not loc_line:
        break
      if not inlines:
        # Without inline decoding every pair is a complete symbol.
        my_queue.put([(func_line, loc_line)])
        continue
      is_first = not pending
      is_marker = func_line == '??' or loc_line == '??:0'
      if is_first or not is_marker:
        pending.append((func_line, loc_line))
        continue
      # Boundary marker reached: flush the accumulated symbol atomically.
      my_queue.put(pending)
      pending = []
    process_pipe.close()

  # Every addr2line process will die at some point, please die silently.
  except (IOError, OSError):
    pass
+
@property
def first_request_id(self):
  """The request_id of the oldest pending request, or 0 when idle."""
  if not self._request_queue:
    return 0
  return self._request_queue[0][2]
+
+
class ELFSymbolInfo(object):
  """The result of the symbolization passed as first arg. of each callback."""

  def __init__(self, name, source_path, source_line, was_ambiguous=False,
               disambiguated=False):
    """Any of the main fields may be None (addr2line answered with '??')."""
    self.name = name
    self.source_path = source_path
    self.source_line = source_line
    # When |inlines|=True, |inlined_by| references the ELFSymbolInfo of the
    # outer function that inlined this one (and so on, forming a chain).
    self.inlined_by = None
    self.disambiguated = disambiguated
    self.was_ambiguous = was_ambiguous

  def __str__(self):
    display_name = self.name or '??'
    display_path = self.source_path or '??'
    display_line = self.source_line or 0
    return '%s [%s:%d]' % (display_name, display_path, display_line)
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/elf_symbolizer_unittest.py b/third_party/libwebrtc/build/android/pylib/symbols/elf_symbolizer_unittest.py
new file mode 100755
index 0000000000..f906da8314
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/elf_symbolizer_unittest.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python3
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import functools
+import logging
+import os
+import unittest
+
+from pylib.symbols import elf_symbolizer
+from pylib.symbols import mock_addr2line
+
+
+_MOCK_A2L_PATH = os.path.join(os.path.dirname(mock_addr2line.__file__),
+ 'mock_addr2line')
+_INCOMPLETE_MOCK_ADDR = 1024 * 1024
+_UNKNOWN_MOCK_ADDR = 2 * 1024 * 1024
+_INLINE_MOCK_ADDR = 3 * 1024 * 1024
+
+
class ELFSymbolizerTest(unittest.TestCase):
  """Tests for ELFSymbolizer, driven end-to-end by the mock_addr2line script."""

  def setUp(self):
    # Bind the instance method used as the symbolizer completion callback.
    self._callback = functools.partial(
        ELFSymbolizerTest._SymbolizeCallback, self)
    self._resolved_addresses = set()
    # Mute warnings, we expect them due to the crash/hang tests.
    logging.getLogger().setLevel(logging.ERROR)

  def testParallelism1(self):
    self._RunTest(max_concurrent_jobs=1, num_symbols=100)

  def testParallelism4(self):
    self._RunTest(max_concurrent_jobs=4, num_symbols=100)

  def testParallelism8(self):
    self._RunTest(max_concurrent_jobs=8, num_symbols=100)

  def testCrash(self):
    """Checks recovery when the mock addr2line crashes mid-run."""
    os.environ['MOCK_A2L_CRASH_EVERY'] = '99'
    self._RunTest(max_concurrent_jobs=1, num_symbols=100)
    os.environ['MOCK_A2L_CRASH_EVERY'] = '0'

  def testHang(self):
    """Checks recovery when the mock addr2line hangs mid-run."""
    os.environ['MOCK_A2L_HANG_EVERY'] = '99'
    self._RunTest(max_concurrent_jobs=1, num_symbols=100)
    os.environ['MOCK_A2L_HANG_EVERY'] = '0'

  def testInlines(self):
    """Stimulate the inline processing logic."""
    symbolizer = elf_symbolizer.ELFSymbolizer(
        elf_file_path='/path/doesnt/matter/mock_lib1.so',
        addr2line_path=_MOCK_A2L_PATH,
        callback=self._callback,
        inlines=True,
        max_concurrent_jobs=4)

    for addr in range(1000):
      exp_inline = False
      exp_unknown = False

      # First 100 addresses with inlines.
      if addr < 100:
        addr += _INLINE_MOCK_ADDR
        exp_inline = True

      # Followed by 100 without inlines.
      elif addr < 200:
        pass

      # Followed by 100 interleaved inlines and not inlines.
      elif addr < 300:
        if addr & 1:
          addr += _INLINE_MOCK_ADDR
          exp_inline = True

      # Followed by 100 interleaved inlines and unknown.
      elif addr < 400:
        if addr & 1:
          addr += _INLINE_MOCK_ADDR
          exp_inline = True
        else:
          addr += _UNKNOWN_MOCK_ADDR
          exp_unknown = True

      exp_name = 'mock_sym_for_addr_%d' % addr if not exp_unknown else None
      exp_source_path = 'mock_src/mock_lib1.so.c' if not exp_unknown else None
      exp_source_line = addr if not exp_unknown else None
      cb_arg = (addr, exp_name, exp_source_path, exp_source_line, exp_inline)
      symbolizer.SymbolizeAsync(addr, cb_arg)

    symbolizer.Join()

  def testIncompleteSyminfo(self):
    """Stimulate the symbol-not-resolved logic."""
    symbolizer = elf_symbolizer.ELFSymbolizer(
        elf_file_path='/path/doesnt/matter/mock_lib1.so',
        addr2line_path=_MOCK_A2L_PATH,
        callback=self._callback,
        max_concurrent_jobs=1)

    # Test symbols with valid name but incomplete path.
    addr = _INCOMPLETE_MOCK_ADDR
    exp_name = 'mock_sym_for_addr_%d' % addr
    exp_source_path = None
    exp_source_line = None
    cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
    symbolizer.SymbolizeAsync(addr, cb_arg)

    # Test symbols with no name or sym info.
    addr = _UNKNOWN_MOCK_ADDR
    exp_name = None
    exp_source_path = None
    exp_source_line = None
    cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
    symbolizer.SymbolizeAsync(addr, cb_arg)

    symbolizer.Join()

  def testWaitForIdle(self):
    """Checks that WaitForIdle() can be interleaved with SymbolizeAsync()."""
    symbolizer = elf_symbolizer.ELFSymbolizer(
        elf_file_path='/path/doesnt/matter/mock_lib1.so',
        addr2line_path=_MOCK_A2L_PATH,
        callback=self._callback,
        max_concurrent_jobs=1)

    # Test symbols with valid name but incomplete path.
    addr = _INCOMPLETE_MOCK_ADDR
    exp_name = 'mock_sym_for_addr_%d' % addr
    exp_source_path = None
    exp_source_line = None
    cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
    symbolizer.SymbolizeAsync(addr, cb_arg)
    symbolizer.WaitForIdle()

    # Test symbols with no name or sym info.
    addr = _UNKNOWN_MOCK_ADDR
    exp_name = None
    exp_source_path = None
    exp_source_line = None
    cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
    symbolizer.SymbolizeAsync(addr, cb_arg)
    symbolizer.Join()

  def _RunTest(self, max_concurrent_jobs, num_symbols):
    """Symbolizes |num_symbols| addresses and checks all callbacks fired."""
    symbolizer = elf_symbolizer.ELFSymbolizer(
        elf_file_path='/path/doesnt/matter/mock_lib1.so',
        addr2line_path=_MOCK_A2L_PATH,
        callback=self._callback,
        max_concurrent_jobs=max_concurrent_jobs,
        addr2line_timeout=0.5)

    for addr in range(num_symbols):
      exp_name = 'mock_sym_for_addr_%d' % addr
      exp_source_path = 'mock_src/mock_lib1.so.c'
      exp_source_line = addr
      cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
      symbolizer.SymbolizeAsync(addr, cb_arg)

    symbolizer.Join()

    # Check that all the expected callbacks have been received.
    for addr in range(num_symbols):
      self.assertIn(addr, self._resolved_addresses)
      self._resolved_addresses.remove(addr)

    # Check for unexpected callbacks.
    self.assertEqual(len(self._resolved_addresses), 0)

  def _SymbolizeCallback(self, sym_info, cb_arg):
    """Callback invoked by the symbolizer; verifies one symbolized address."""
    self.assertTrue(isinstance(sym_info, elf_symbolizer.ELFSymbolInfo))
    self.assertTrue(isinstance(cb_arg, tuple))
    self.assertEqual(len(cb_arg), 5)

    # Unpack expectations from the callback extra argument.
    (addr, exp_name, exp_source_path, exp_source_line, exp_inlines) = cb_arg
    if exp_name is None:
      self.assertIsNone(sym_info.name)
    else:
      self.assertTrue(sym_info.name.startswith(exp_name))
    self.assertEqual(sym_info.source_path, exp_source_path)
    self.assertEqual(sym_info.source_line, exp_source_line)

    if exp_inlines:
      self.assertEqual(sym_info.name, exp_name + '_inner')
      self.assertEqual(sym_info.inlined_by.name, exp_name + '_middle')
      self.assertEqual(sym_info.inlined_by.inlined_by.name,
                       exp_name + '_outer')

    # Check against duplicate callbacks.
    self.assertNotIn(addr, self._resolved_addresses)
    self._resolved_addresses.add(addr)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/mock_addr2line/__init__.py b/third_party/libwebrtc/build/android/pylib/symbols/mock_addr2line/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/mock_addr2line/__init__.py
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/mock_addr2line/mock_addr2line b/third_party/libwebrtc/build/android/pylib/symbols/mock_addr2line/mock_addr2line
new file mode 100755
index 0000000000..8b2a72375d
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/mock_addr2line/mock_addr2line
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Simple mock for addr2line.
+
+Outputs mock symbol information, with each symbol being a function of the
+original address (so it is easy to double-check consistency in unittests).
+"""
+
+from __future__ import print_function
+
+import optparse
+import os
+import posixpath
+import sys
+import time
+
+
def main(argv):
  """Reads hex addresses from stdin and prints mock symbol info for each.

  Args:
    argv: command line, addr2line style; only --exe is actually used.

  Behavior is controlled by two environment variables:
    MOCK_A2L_CRASH_EVERY: exit(1) every N processed symbols (0 = never).
    MOCK_A2L_HANG_EVERY: sleep 1s every N processed symbols (0 = never).
  """
  parser = optparse.OptionParser()
  parser.add_option('-e', '--exe', dest='exe')  # Path of the debug-library.so.
  # Silently swallow the other unnecessary arguments.
  parser.add_option('-C', '--demangle', action='store_true')
  parser.add_option('-f', '--functions', action='store_true')
  parser.add_option('-i', '--inlines', action='store_true')
  options, _ = parser.parse_args(argv[1:])
  lib_file_name = posixpath.basename(options.exe)
  processed_sym_count = 0
  crash_every = int(os.environ.get('MOCK_A2L_CRASH_EVERY', 0))
  hang_every = int(os.environ.get('MOCK_A2L_HANG_EVERY', 0))

  # Fixed: `while(True)` is unidiomatic; parentheses dropped.
  while True:
    # Only strip '\r', NOT '\n': an all-blank line ('\n') is a meaningful
    # inline-boundary marker below and must stay distinguishable from EOF
    # (where readline() returns '').
    line = sys.stdin.readline().rstrip('\r')
    if not line:
      break

    # An empty line should generate '??,??:0' (is used as marker for inlines).
    if line == '\n':
      print('??')
      print('??:0')
      sys.stdout.flush()
      continue

    addr = int(line, 16)
    processed_sym_count += 1
    if crash_every and processed_sym_count % crash_every == 0:
      sys.exit(1)
    if hang_every and processed_sym_count % hang_every == 0:
      time.sleep(1)

    # Addresses < 1M will return good mock symbol information.
    if addr < 1024 * 1024:
      print('mock_sym_for_addr_%d' % addr)
      print('mock_src/%s.c:%d' % (lib_file_name, addr))

    # Addresses 1M <= x < 2M will return symbols with a name but a missing path.
    elif addr < 2 * 1024 * 1024:
      print('mock_sym_for_addr_%d' % addr)
      print('??:0')

    # Addresses 2M <= x < 3M will return unknown symbol information.
    elif addr < 3 * 1024 * 1024:
      print('??')
      print('??')

    # Addresses 3M <= x < 4M will return inlines.
    elif addr < 4 * 1024 * 1024:
      print('mock_sym_for_addr_%d_inner' % addr)
      print('mock_src/%s.c:%d' % (lib_file_name, addr))
      print('mock_sym_for_addr_%d_middle' % addr)
      print('mock_src/%s.c:%d' % (lib_file_name, addr))
      print('mock_sym_for_addr_%d_outer' % addr)
      print('mock_src/%s.c:%d' % (lib_file_name, addr))

    sys.stdout.flush()
+
+
+if __name__ == '__main__':
+ main(sys.argv) \ No newline at end of file
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/stack_symbolizer.py b/third_party/libwebrtc/build/android/pylib/symbols/stack_symbolizer.py
new file mode 100644
index 0000000000..fdd47780f7
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/stack_symbolizer.py
@@ -0,0 +1,86 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import tempfile
+import time
+
+from devil.utils import cmd_helper
+from pylib import constants
+
+_STACK_TOOL = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..',
+ 'third_party', 'android_platform', 'development',
+ 'scripts', 'stack')
+ABI_REG = re.compile('ABI: \'(.+?)\'')
+
+
+def _DeviceAbiToArch(device_abi):
+ # The order of this list is significant to find the more specific match
+ # (e.g., arm64) before the less specific (e.g., arm).
+ arches = ['arm64', 'arm', 'x86_64', 'x86_64', 'x86', 'mips']
+ for arch in arches:
+ if arch in device_abi:
+ return arch
+ raise RuntimeError('Unknown device ABI: %s' % device_abi)
+
+
class Symbolizer(object):
  """A helper class to symbolize stack."""

  def __init__(self, apk_under_test=None):
    # NOTE(review): |_apk_under_test| is never read within this class —
    # presumably kept for callers/subclasses; confirm before removing.
    self._apk_under_test = apk_under_test
    # Cumulative wall-clock seconds spent running the stack tool.
    self._time_spent_symbolizing = 0


  def __del__(self):
    # Best-effort: relies on GC/interpreter shutdown ordering.
    self.CleanUp()


  def CleanUp(self):
    """Clean up the temporary directory of apk libs."""
    if self._time_spent_symbolizing > 0:
      logging.info(
          'Total time spent symbolizing: %.2fs', self._time_spent_symbolizing)


  def ExtractAndResolveNativeStackTraces(self, data_to_symbolize,
                                         device_abi, include_stack=True):
    """Run the stack tool for given input.

    Args:
      data_to_symbolize: a list of strings to symbolize.
      include_stack: boolean whether to include stack data in output.
      device_abi: the default ABI of the device which generated the tombstone.

    Yields:
      A string for each line of resolved stack output.
    """
    if not os.path.exists(_STACK_TOOL):
      logging.warning('%s missing. Unable to resolve native stack traces.',
                      _STACK_TOOL)
      return

    arch = _DeviceAbiToArch(device_abi)
    # NOTE(review): _DeviceAbiToArch raises RuntimeError rather than
    # returning a falsy value, so this branch appears unreachable.
    if not arch:
      logging.warning('No device_abi can be found.')
      return

    cmd = [_STACK_TOOL, '--arch', arch, '--output-directory',
           constants.GetOutDirectory(), '--more-info']
    env = dict(os.environ)
    env['PYTHONDONTWRITEBYTECODE'] = '1'
    with tempfile.NamedTemporaryFile(mode='w') as f:
      f.write('\n'.join(data_to_symbolize))
      f.flush()
      start = time.time()
      try:
        _, output = cmd_helper.GetCmdStatusAndOutput(cmd + [f.name], env=env)
      finally:
        self._time_spent_symbolizing += time.time() - start
    for line in output.splitlines():
      if not include_stack and 'Stack Data:' in line:
        break
      yield line
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/symbol_utils.py b/third_party/libwebrtc/build/android/pylib/symbols/symbol_utils.py
new file mode 100644
index 0000000000..0b6ec8bb29
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/symbol_utils.py
@@ -0,0 +1,813 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+
+import bisect
+import collections
+import logging
+import os
+import re
+
+from pylib.constants import host_paths
+from pylib.symbols import elf_symbolizer
+
+
+def _AndroidAbiToCpuArch(android_abi):
+ """Return the Chromium CPU architecture name for a given Android ABI."""
+ _ARCH_MAP = {
+ 'armeabi': 'arm',
+ 'armeabi-v7a': 'arm',
+ 'arm64-v8a': 'arm64',
+ 'x86_64': 'x64',
+ }
+ return _ARCH_MAP.get(android_abi, android_abi)
+
+
+def _HexAddressRegexpFor(android_abi):
+ """Return a regexp matching hexadecimal addresses for a given Android ABI."""
+ if android_abi in ['x86_64', 'arm64-v8a', 'mips64']:
+ width = 16
+ else:
+ width = 8
+ return '[0-9a-f]{%d}' % width
+
+
class HostLibraryFinder(object):
  """Translate device library path to matching host unstripped library path.

  Usage is the following:
    1) Create instance.
    2) Call AddSearchDir() once or more times to add host directory path to
       look for unstripped native libraries.
    3) Call Find(device_libpath) repeatedly to translate a device-specific
       library path into the corresponding host path to the unstripped
       version.
  """
  def __init__(self):
    """Initialize instance."""
    self._search_dirs = []
    # Cache mapping a library base name to the host path found for it
    # (None is cached too when the search failed).
    self._lib_map = {}

  def AddSearchDir(self, lib_dir):
    """Add a directory to the search path for host native shared libraries.

    Args:
      lib_dir: host path containing native libraries.
    """
    if not os.path.exists(lib_dir):
      logging.warning('Ignoring missing host library directory: %s', lib_dir)
      return
    if not os.path.isdir(lib_dir):
      logging.warning('Ignoring invalid host library directory: %s', lib_dir)
      return
    self._search_dirs.append(lib_dir)
    # Invalidate the cache: the new directory may change lookup results.
    self._lib_map = {}

  def Find(self, device_libpath):
    """Find the host file path matching a specific device library path.

    Args:
      device_libpath: device-specific file path to library or executable.
    Returns:
      host file path to the unstripped version of the library, or None.
    """
    lib_name = os.path.basename(device_libpath)
    host_lib_path = self._lib_map.get(lib_name)
    if host_lib_path is None:
      for search_dir in self._search_dirs:
        candidate = os.path.join(search_dir, lib_name)
        if os.path.exists(candidate):
          host_lib_path = candidate
          break
      if not host_lib_path:
        logging.debug('Could not find host library for: %s', lib_name)
      self._lib_map[lib_name] = host_lib_path
    return host_lib_path
+
+
+
class SymbolResolver(object):
  """Base class for objects that symbolize library (path, offset) pairs
  into symbol information strings. Usage is the following:

    1) Create new instance (by calling the constructor of a derived
       class, since this is only the base one).

    2) Call SetAndroidAbi() before any call to FindSymbolInfo() in order
       to set the Android CPU ABI used for symbolization.

    3) Before the first call to FindSymbolInfo(), one can call
       AddLibraryOffset() or AddLibraryOffsets() to record a set of offsets
       to symbolize later through FindSymbolInfo(). Doing so allows some
       derived classes to work faster (e.g. the addr2line-based one, since
       addr2line is faster when its input offsets arrive sorted).

    4) Call FindSymbolInfo(path, offset) to return the corresponding
       symbol information string, or None if this doesn't correspond
       to anything the instance can handle.

  Whether paths are device-specific or host-specific depends on the
  derived class implementation.
  """
  def __init__(self):
    self._android_abi = None
    self._lib_offsets_map = collections.defaultdict(set)

  def SetAndroidAbi(self, android_abi):
    """Set the Android ABI value for this instance.

    Calling this function before FindSymbolInfo() is required by some
    derived class implementations.

    Args:
      android_abi: Native Android CPU ABI name (e.g. 'armeabi-v7a').
    Raises:
      Exception if the ABI was already set with a different value.
    """
    if self._android_abi and self._android_abi != android_abi:
      raise Exception('Cannot reset Android ABI to new value %s, already set '
                      'to %s' % (android_abi, self._android_abi))
    self._android_abi = android_abi

  def AddLibraryOffset(self, lib_path, offset):
    """Associate a single offset to a given device library.

    This must be called before FindSymbolInfo(), otherwise its input
    arguments will be ignored.

    Args:
      lib_path: A library path.
      offset: An integer offset within the corresponding library that will
        be symbolized by future calls to FindSymbolInfo.
    """
    self._lib_offsets_map[lib_path].add(offset)

  def AddLibraryOffsets(self, lib_path, lib_offsets):
    """Associate a set of wanted offsets to a given device library.

    This must be called before FindSymbolInfo(), otherwise its input
    arguments will be ignored.

    Args:
      lib_path: A library path.
      lib_offsets: An iterable of integer offsets within the corresponding
        library that will be symbolized by future calls to FindSymbolInfo.
    """
    self._lib_offsets_map[lib_path].update(lib_offsets)

  # pylint: disable=unused-argument,no-self-use
  def FindSymbolInfo(self, lib_path, lib_offset):
    """Symbolize a library path and offset.

    Args:
      lib_path: Library path (device or host specific, depending on the
        derived class implementation).
      lib_offset: Integer offset within the library.
    Returns:
      Corresponding symbol information string, or None.
    """
    # The base implementation cannot symbolize anything.
    return None
  # pylint: enable=unused-argument,no-self-use
+
+
class ElfSymbolResolver(SymbolResolver):
  """A SymbolResolver that can symbolize host path + offset values using
  an elf_symbolizer.ELFSymbolizer instance.
  """
  def __init__(self, addr2line_path_for_tests=None):
    super(ElfSymbolResolver, self).__init__()
    # Explicit addr2line path, injected by unit-tests only; when None the
    # real tool path is derived from the Android ABI on first use.
    self._addr2line_path = addr2line_path_for_tests

    # Used to cache one ELFSymbolizer instance per library path.
    self._elf_symbolizer_cache = {}

    # Used to cache FindSymbolInfo() results. Maps host library paths
    # to (offset -> symbol info string) dictionaries.
    self._symbol_info_cache = collections.defaultdict(dict)
    self._allow_symbolizer = True

  def _CreateSymbolizerFor(self, host_path):
    """Create the ELFSymbolizer instance associated with a given lib path."""
    addr2line_path = self._addr2line_path
    if not addr2line_path:
      if not self._android_abi:
        raise Exception(
            'Android CPU ABI must be set before calling FindSymbolInfo!')

      cpu_arch = _AndroidAbiToCpuArch(self._android_abi)
      # Resolved lazily and memoized for all subsequent symbolizers.
      self._addr2line_path = host_paths.ToolPath('addr2line', cpu_arch)

    return elf_symbolizer.ELFSymbolizer(
        elf_file_path=host_path, addr2line_path=self._addr2line_path,
        callback=ElfSymbolResolver._Callback, inlines=True)

  def DisallowSymbolizerForTesting(self):
    """Disallow FindSymbolInfo() from using a symbolizer.

    This is used during unit-testing to ensure that the offsets that were
    recorded via AddLibraryOffset()/AddLibraryOffsets() are properly
    symbolized, but not anything else.
    """
    self._allow_symbolizer = False

  def FindSymbolInfo(self, host_path, offset):
    """Override SymbolResolver.FindSymbolInfo.

    Args:
      host_path: Host-specific path to the native shared library.
      offset: Integer offset within the native library.
    Returns:
      A symbol info string, or None.
    """
    offset_map = self._symbol_info_cache[host_path]
    symbol_info = offset_map.get(offset)
    if symbol_info:
      return symbol_info

    # Create symbolizer on demand.
    symbolizer = self._elf_symbolizer_cache.get(host_path)
    if not symbolizer:
      symbolizer = self._CreateSymbolizerFor(host_path)
      self._elf_symbolizer_cache[host_path] = symbolizer

    # If there are pre-recorded offsets for this path, symbolize them now.
    offsets = self._lib_offsets_map.get(host_path)
    if offsets:
      # NOTE(review): a fresh offset_map replaces the cached one here, so
      # any previously cached single-offset results for this path are
      # dropped — presumably acceptable since they get re-symbolized below;
      # confirm.
      offset_map = {}
      for pre_offset in offsets:
        symbolizer.SymbolizeAsync(
            pre_offset, callback_arg=(offset_map, pre_offset))
      symbolizer.WaitForIdle()
      self._symbol_info_cache[host_path] = offset_map

      symbol_info = offset_map.get(offset)
      if symbol_info:
        return symbol_info

    if not self._allow_symbolizer:
      return None

    # Symbolize single offset. Slower if addresses are not provided in
    # increasing order to addr2line.
    symbolizer.SymbolizeAsync(offset,
                              callback_arg=(offset_map, offset))
    symbolizer.WaitForIdle()
    return offset_map.get(offset)

  @staticmethod
  def _Callback(sym_info, callback_arg):
    # Stores the stringified symbol info into the map provided by the caller.
    offset_map, offset = callback_arg
    offset_map[offset] = str(sym_info)
+
+
class DeviceSymbolResolver(SymbolResolver):
  """A SymbolResolver instance that accepts device-specific paths.

  Usage is the following:
    1) Create new instance, passing a parent SymbolResolver instance that
       accepts host-specific paths, and a HostLibraryFinder instance.

    2) Optional: call AddApkOffsets() to add offsets from within an APK
       that contains uncompressed native shared libraries.

    3) Use it as any SymbolResolver instance.
  """
  def __init__(self, host_resolver, host_lib_finder):
    """Initialize instance.

    Args:
      host_resolver: A parent SymbolResolver instance that will be used
        to resolve symbols from host library paths.
      host_lib_finder: A HostLibraryFinder instance used to locate
        unstripped libraries on the host.
    """
    super(DeviceSymbolResolver, self).__init__()
    self._host_resolver = host_resolver
    self._host_lib_finder = host_lib_finder
    # Device paths already warned about, so each is only logged once.
    self._bad_device_lib_paths = set()

  def SetAndroidAbi(self, android_abi):
    """Propagates the ABI to this resolver and to the host resolver."""
    super(DeviceSymbolResolver, self).SetAndroidAbi(android_abi)
    self._host_resolver.SetAndroidAbi(android_abi)

  def AddLibraryOffsets(self, device_lib_path, lib_offsets):
    """Associate a set of wanted offsets to a given device library.

    This must be called before FindSymbolInfo(), otherwise its input
    arguments will be ignored.

    Args:
      device_lib_path: A device-specific library path.
      lib_offsets: An iterable of integer offsets within the corresponding
        library that will be symbolized by future calls to FindSymbolInfo.
    """
    if device_lib_path in self._bad_device_lib_paths:
      return

    host_lib_path = self._host_lib_finder.Find(device_lib_path)
    if host_lib_path:
      self._host_resolver.AddLibraryOffsets(host_lib_path, lib_offsets)
      return

    # Remember the failure so the warning is only printed once per library.
    logging.warning('Could not find host library matching device path: %s',
                    device_lib_path)
    self._bad_device_lib_paths.add(device_lib_path)

  def AddApkOffsets(self, device_apk_path, apk_offsets, apk_translator):
    """Associate a set of wanted offsets to a given device APK path.

    This converts the APK-relative offsets into offsets relative to the
    uncompressed libraries it contains, then calls AddLibraryOffsets()
    for each one of the libraries.

    Must be called before FindSymbolInfo() as well, otherwise input
    arguments will be ignored.

    Args:
      device_apk_path: Device-specific APK path.
      apk_offsets: Iterable of offsets within the APK file.
      apk_translator: An ApkLibraryPathTranslator instance used to extract
        library paths from the APK.
    """
    per_library = collections.defaultdict(set)
    for apk_offset in apk_offsets:
      lib_path, lib_offset = apk_translator.TranslatePath(device_apk_path,
                                                          apk_offset)
      per_library[lib_path].add(lib_offset)

    for lib_path, offsets in per_library.items():
      self.AddLibraryOffsets(lib_path, offsets)

  def FindSymbolInfo(self, device_path, offset):
    """Overrides SymbolResolver.FindSymbolInfo.

    Args:
      device_path: Device-specific library path (e.g.
        '/data/app/com.example.app-1/lib/x86/libfoo.so')
      offset: Offset in device library path.
    Returns:
      Corresponding symbol information string, or None.
    """
    host_path = self._host_lib_finder.Find(device_path)
    if host_path is None:
      return None
    return self._host_resolver.FindSymbolInfo(host_path, offset)
+
+
class MemoryMap(object):
  """Models the memory map of a given process. Usage is:

    1) Create new instance, passing the Android ABI.

    2) Call TranslateLine() whenever you want to detect and translate any
       memory map input line.

    3) Otherwise, it is possible to parse the whole memory map input with
       ParseLines(), then call FindSectionForAddress() repeatedly in order
       to translate a memory address into the corresponding mapping and
       file information tuple (e.g. to symbolize stack entries).
  """

  # A named tuple describing interesting memory map line items.
  # Fields:
  #   addr_start: Mapping start address in memory.
  #   file_offset: Corresponding file offset.
  #   file_size: Corresponding mapping size in bytes.
  #   file_path: Input file path.
  #   match: Corresponding regular expression match object.
  LineTuple = collections.namedtuple('MemoryMapLineTuple',
                                     'addr_start,file_offset,file_size,'
                                     'file_path, match')

  # A named tuple describing a memory map section.
  # Fields:
  #   address: Memory address.
  #   size: Size in bytes in memory.
  #   offset: Starting file offset.
  #   path: Input file path.
  SectionTuple = collections.namedtuple('MemoryMapSection',
                                        'address,size,offset,path')

  def __init__(self, android_abi):
    """Initializes instance.

    Args:
      android_abi: Android CPU ABI name (e.g. 'armeabi-v7a')
    """
    hex_addr = _HexAddressRegexpFor(android_abi)

    # pylint: disable=line-too-long
    # A regular expression used to match memory map entries which look like:
    #   b278c000-b2790fff r-- 4fda000 5000 /data/app/com.google.android.apps.chrome-2/base.apk
    # pylint: enable=line-too-long
    self._re_map_section = re.compile(
        r'\s*(?P<addr_start>' + hex_addr + r')-(?P<addr_end>' + hex_addr + ')' +
        r'\s+' +
        r'(?P<perm>...)\s+' +
        r'(?P<file_offset>[0-9a-f]+)\s+' +
        r'(?P<file_size>[0-9a-f]+)\s*' +
        r'(?P<file_path>[^ \t]+)?')

    self._addr_map = []  # List of LineTuple entries, sorted by addr_start.
    self._sorted_addresses = []  # Sorted addr_start fields (for bisection).
    self._in_section = False

  def TranslateLine(self, line, apk_path_translator):
    """Try to translate a memory map input line, if detected.

    This only takes care of converting mapped APK file path and offsets
    into a corresponding uncompressed native library file path + new offsets,
    e.g. '..... <offset> <size> /data/.../base.apk' gets
    translated into '.... <new-offset> <size> /data/.../base.apk!lib/libfoo.so'

    This function should always work, even if ParseLines() was not called
    previously.

    Args:
      line: Input memory map / tombstone line.
      apk_path_translator: An ApkLibraryPathTranslator instance, used to map
        APK offsets into uncompressed native libraries + new offsets.
    Returns:
      Translated memory map line, if relevant, or unchanged input line
      otherwise.
    """
    t = self._ParseLine(line.rstrip())
    if not t:
      return line

    new_path, new_offset = apk_path_translator.TranslatePath(
        t.file_path, t.file_offset)

    if new_path == t.file_path:
      # Not an APK-embedded library mapping; leave the line alone.
      return line

    pos = t.match.start('file_path')
    return '%s%s (offset 0x%x)%s' % (line[0:pos], new_path, new_offset,
                                     line[t.match.end('file_path'):])

  def ParseLines(self, input_lines, in_section=False):
    """Parse a list of input lines and extract the APK memory map out of it.

    Args:
      input_lines: list, or iterable, of input lines.
      in_section: Optional. If true, considers that the input lines are
        already part of the memory map. Otherwise, wait until the start of
        the section appears in the input before trying to record data.
    Returns:
      True iff APK-related memory map entries were found. False otherwise.
    """
    addr_list = []  # List of LineTuple entries (see _ParseLine).
    self._in_section = in_section
    for line in input_lines:
      t = self._ParseLine(line.rstrip())
      if not t:
        continue

      addr_list.append(t)

    self._addr_map = sorted(addr_list, key=lambda x: x.addr_start)
    self._sorted_addresses = [e.addr_start for e in self._addr_map]
    return bool(self._addr_map)

  def _ParseLine(self, line):
    """Used internally to recognize memory map input lines.

    Args:
      line: Input logcat or tombstone line.
    Returns:
      A LineTuple instance on success, or None on failure.
    """
    if not self._in_section:
      # Keep scanning until the 'memory map:' header is seen.
      self._in_section = line.startswith('memory map:')
      return None

    m = self._re_map_section.match(line)
    if not m:
      self._in_section = False  # End of memory map section
      return None

    # Only accept .apk and .so files that are not from the system partitions.
    file_path = m.group('file_path')
    if not file_path:
      return None

    if file_path.startswith('/system') or file_path.startswith('/vendor'):
      return None

    if not (file_path.endswith('.apk') or file_path.endswith('.so')):
      return None

    addr_start = int(m.group('addr_start'), 16)
    file_offset = int(m.group('file_offset'), 16)
    file_size = int(m.group('file_size'), 16)

    return self.LineTuple(addr_start, file_offset, file_size, file_path, m)

  def Dump(self):
    """Print memory map for debugging."""
    print('MEMORY MAP [')
    for t in self._addr_map:
      print('[%08x-%08x %08x %08x %s]' %
            (t.addr_start, t.addr_start + t.file_size, t.file_size,
             t.file_offset, t.file_path))
    print('] MEMORY MAP')

  def FindSectionForAddress(self, addr):
    """Find the map section corresponding to a specific memory address.

    Call this method only after ParseLines() was called to extract
    relevant information from the memory map.

    Args:
      addr: Memory address
    Returns:
      A SectionTuple instance on success, or None on failure.
    """
    pos = bisect.bisect_right(self._sorted_addresses, addr)
    if pos > 0:
      # All values in [0,pos) are <= addr, just ensure that the last
      # one contains the address as well.
      entry = self._addr_map[pos - 1]
      if entry.addr_start + entry.file_size > addr:
        return self.SectionTuple(entry.addr_start, entry.file_size,
                                 entry.file_offset, entry.file_path)
    return None
+
+
class BacktraceTranslator(object):
  """Translates backtrace-related lines in a tombstone or crash report.

  Usage is the following:
    1) Create new instance with appropriate arguments.
    2) If the tombstone / logcat input is available, one can call
       FindLibraryOffsets() in order to detect which library offsets
       will need to be symbolized during a future parse. Doing so helps
       speed up the ELF symbolizer.
    3) For each tombstone/logcat input line, call TranslateLine() to
       try to detect and symbolize backtrace lines.
  """

  # A named tuple for relevant input backtrace lines.
  # Fields:
  #   rel_pc: Instruction pointer, relative to offset in library start.
  #   location: Library or APK file path.
  #   offset: Load base of executable code in library or apk file path.
  #   match: The corresponding regular expression match object.
  # Note:
  #   The actual instruction pointer always matches the position at
  #   |offset + rel_pc| in |location|.
  LineTuple = collections.namedtuple('BacktraceLineTuple',
                                     'rel_pc,location,offset,match')

  def __init__(self, android_abi, apk_translator):
    """Initialize instance.

    Args:
      android_abi: Android CPU ABI name (e.g. 'armeabi-v7a').
      apk_translator: ApkLibraryPathTranslator instance used to convert
        mapped APK file offsets into uncompressed library file paths with
        new offsets.
    """
    hex_addr = _HexAddressRegexpFor(android_abi)

    # A regular expression used to match backtrace lines.
    self._re_backtrace = re.compile(
        r'.*#(?P<frame>[0-9]{2})\s+' +
        r'(..)\s+' +
        r'(?P<rel_pc>' + hex_addr + r')\s+' +
        r'(?P<location>[^ \t]+)' +
        r'(\s+\(offset 0x(?P<offset>[0-9a-f]+)\))?')

    # In certain cases, offset will be provided as <location>+0x<offset>
    # instead of <location> (offset 0x<offset>). This is a regexp to detect
    # this.
    self._re_location_offset = re.compile(
        r'.*\+0x(?P<offset>[0-9a-f]+)$')

    self._apk_translator = apk_translator
    self._in_section = False

  def _ParseLine(self, line):
    """Used internally to detect and decompose backtrace input lines.

    Args:
      line: input tombstone line.
    Returns:
      A LineTuple instance on success, None on failure.
    """
    if not self._in_section:
      # Keep scanning until the 'backtrace:' header is seen.
      self._in_section = line.startswith('backtrace:')
      return None

    line = line.rstrip()
    m = self._re_backtrace.match(line)
    if not m:
      self._in_section = False
      return None

    location = m.group('location')
    offset = m.group('offset')
    if not offset:
      # Handle the '<location>+0x<offset>' alternative form.
      m2 = self._re_location_offset.match(location)
      if m2:
        offset = m2.group('offset')
        location = location[0:m2.start('offset') - 3]

    if not offset:
      return None

    offset = int(offset, 16)
    rel_pc = int(m.group('rel_pc'), 16)

    # Two cases to consider here:
    #
    # * If this is a library file directly mapped in memory, then |rel_pc|
    #   is the direct offset within the library, and doesn't need any kind
    #   of adjustment.
    #
    # * If this is a library mapped directly from an .apk file, then
    #   |rel_pc| is the offset in the APK, and |offset| happens to be the
    #   load base of the corresponding library.
    #
    if location.endswith('.so'):
      # For a native library directly mapped from the file system,
      return self.LineTuple(rel_pc, location, offset, m)

    if location.endswith('.apk'):
      # For a native library inside a memory-mapped APK file,
      new_location, new_offset = self._apk_translator.TranslatePath(
          location, offset)

      return self.LineTuple(rel_pc, new_location, new_offset, m)

    # Ignore anything else (e.g. .oat or .odex files).
    return None

  def FindLibraryOffsets(self, input_lines, in_section=False):
    """Parse a tombstone's backtrace section and find all library offsets in it.

    Args:
      input_lines: List or iterable of input tombstone lines.
      in_section: Optional. If True, considers that the backtrace section
        has already started.
    Returns:
      A dictionary mapping device library paths to sets of offsets within
      them.
    """
    self._in_section = in_section
    result = collections.defaultdict(set)
    for line in input_lines:
      t = self._ParseLine(line)
      if not t:
        continue

      result[t.location].add(t.offset + t.rel_pc)
    return result

  def TranslateLine(self, line, symbol_resolver):
    """Symbolize backtrace line if recognized.

    Args:
      line: input backtrace line.
      symbol_resolver: symbol resolver instance to use. This method will
        call its FindSymbolInfo(device_lib_path, lib_offset) method to
        convert offsets into symbol information strings.
    Returns:
      Translated line (unchanged if not recognized as a back trace).
    """
    t = self._ParseLine(line)
    if not t:
      return line

    symbol_info = symbol_resolver.FindSymbolInfo(t.location,
                                                 t.offset + t.rel_pc)
    if not symbol_info:
      # Fall back to printing the raw load-base offset.
      symbol_info = 'offset 0x%x' % t.offset

    pos = t.match.start('location')
    pos2 = t.match.end('offset') + 1
    if pos2 <= 0:
      # The optional offset group did not participate in the match, so
      # match.end('offset') returned -1; replace up to the location instead.
      pos2 = t.match.end('location')
    return '%s%s (%s)%s' % (line[:pos], t.location, symbol_info, line[pos2:])
+
+
class StackTranslator(object):
  """Translates stack-related lines in a tombstone or crash report."""

  # A named tuple describing relevant stack input lines.
  # Fields:
  #   address: Address as it appears in the stack.
  #   lib_path: Library path where |address| is mapped.
  #   lib_offset: Library load base offset for |lib_path|.
  #   match: Corresponding regular expression match object.
  LineTuple = collections.namedtuple('StackLineTuple',
                                     'address, lib_path, lib_offset, match')

  def __init__(self, android_abi, memory_map, apk_translator):
    """Initialize instance.

    Args:
      android_abi: Android CPU ABI name (e.g. 'armeabi-v7a').
      memory_map: A MemoryMap instance; its FindSectionForAddress() method
        is used to map stack values to memory sections.
      apk_translator: An ApkLibraryPathTranslator instance, used to map
        APK file offsets to uncompressed native library paths + offsets.
    """
    hex_addr = _HexAddressRegexpFor(android_abi)

    # pylint: disable=line-too-long
    # A regular expression used to recognize stack entries like:
    #
    #    #05 bf89a180 bf89a1e4 [stack]
    #        bf89a1c8 a0c01c51 /data/app/com.google.android.apps.chrome-2/base.apk
    #        bf89a080 00000000
    #        ........ ........
    # pylint: enable=line-too-long
    self._re_stack_line = re.compile(
        r'\s+(?P<frame_number>#[0-9]+)?\s*' +
        r'(?P<stack_addr>' + hex_addr + r')\s+' +
        r'(?P<stack_value>' + hex_addr + r')' +
        r'(\s+(?P<location>[^ \t]+))?')

    # Matches '........ ........' abbreviation lines, which must not be
    # treated as the end of the stack section.
    self._re_stack_abbrev = re.compile(r'\s+[.]+\s+[.]+')

    self._memory_map = memory_map
    self._apk_translator = apk_translator
    self._in_section = False

  def _ParseLine(self, line):
    """Check a given input line for a relevant _re_stack_line match.

    Args:
      line: input tombstone line.
    Returns:
      A LineTuple instance on success, None on failure.
    """
    line = line.rstrip()
    if not self._in_section:
      # Keep scanning until the 'stack:' header is seen.
      self._in_section = line.startswith('stack:')
      return None

    m = self._re_stack_line.match(line)
    if not m:
      # Abbreviation lines do not end the section; anything else does.
      if not self._re_stack_abbrev.match(line):
        self._in_section = False
      return None

    location = m.group('location')
    if not location:
      return None

    # Only library and APK mappings are interesting here.
    if not location.endswith('.apk') and not location.endswith('.so'):
      return None

    addr = int(m.group('stack_value'), 16)
    t = self._memory_map.FindSectionForAddress(addr)
    if t is None:
      return None

    lib_path = t.path
    lib_offset = t.offset + (addr - t.address)

    if lib_path.endswith('.apk'):
      lib_path, lib_offset = self._apk_translator.TranslatePath(
          lib_path, lib_offset)

    return self.LineTuple(addr, lib_path, lib_offset, m)

  def FindLibraryOffsets(self, input_lines, in_section=False):
    """Parse a tombstone's stack section and find all library offsets in it.

    Args:
      input_lines: List or iterable of input tombstone lines.
      in_section: Optional. If True, considers that the stack section has
        already started.
    Returns:
      A dictionary mapping device library paths to sets of offsets within
      them.
    """
    result = collections.defaultdict(set)
    self._in_section = in_section
    for line in input_lines:
      t = self._ParseLine(line)
      if t:
        result[t.lib_path].add(t.lib_offset)
    return result

  def TranslateLine(self, line, symbol_resolver=None):
    """Try to translate a line of the stack dump.

    Args:
      line: Input stack dump line.
      symbol_resolver: Optional symbol resolver instance; its
        FindSymbolInfo(lib_path, lib_offset) method is used to symbolize
        recognized entries. When None, recognized lines are returned
        unchanged. (Previously, a None resolver raised AttributeError as
        soon as a stack entry matched, despite being the default value.)
    Returns:
      Translated line, or the input line unchanged when it is not
      recognized or no symbol information is available.
    """
    # Always run the parser first so the section state machine advances
    # even when no resolver is supplied.
    t = self._ParseLine(line)
    if not t or symbol_resolver is None:
      return line

    symbol_info = symbol_resolver.FindSymbolInfo(t.lib_path, t.lib_offset)
    if not symbol_info:
      return line

    pos = t.match.start('location')
    pos2 = t.match.end('location')
    return '%s%s (%s)%s' % (line[:pos], t.lib_path, symbol_info, line[pos2:])
diff --git a/third_party/libwebrtc/build/android/pylib/symbols/symbol_utils_unittest.py b/third_party/libwebrtc/build/android/pylib/symbols/symbol_utils_unittest.py
new file mode 100755
index 0000000000..2ec81f96fa
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/symbols/symbol_utils_unittest.py
@@ -0,0 +1,942 @@
+#!/usr/bin/env vpython3
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import contextlib
+import logging
+import os
+import re
+import shutil
+import tempfile
+import unittest
+
+from pylib.symbols import apk_native_libs_unittest
+from pylib.symbols import mock_addr2line
+from pylib.symbols import symbol_utils
+
# Minimal ELF file contents, shared with the apk_native_libs tests.
_MOCK_ELF_DATA = apk_native_libs_unittest.MOCK_ELF_DATA

# Path to the mock addr2line script that fakes symbolization in these tests.
_MOCK_A2L_PATH = os.path.join(os.path.dirname(mock_addr2line.__file__),
                              'mock_addr2line')
+
+
+# pylint: disable=line-too-long
+
+# list of (start_offset, end_offset, size, libpath) tuples corresponding
+# to the content of base.apk. This was taken from an x86 ChromeModern.apk
+# component build.
+_TEST_APK_LIBS = [
+ (0x01331000, 0x013696bc, 0x000386bc, 'libaccessibility.cr.so'),
+ (0x0136a000, 0x013779c4, 0x0000d9c4, 'libanimation.cr.so'),
+ (0x01378000, 0x0137f7e8, 0x000077e8, 'libapdu.cr.so'),
+ (0x01380000, 0x0155ccc8, 0x001dccc8, 'libbase.cr.so'),
+ (0x0155d000, 0x015ab98c, 0x0004e98c, 'libbase_i18n.cr.so'),
+ (0x015ac000, 0x015dff4c, 0x00033f4c, 'libbindings.cr.so'),
+ (0x015e0000, 0x015f5a54, 0x00015a54, 'libbindings_base.cr.so'),
+ (0x0160e000, 0x01731960, 0x00123960, 'libblink_common.cr.so'),
+ (0x01732000, 0x0174ce54, 0x0001ae54, 'libblink_controller.cr.so'),
+ (0x0174d000, 0x0318c528, 0x01a3f528, 'libblink_core.cr.so'),
+ (0x0318d000, 0x03191700, 0x00004700, 'libblink_mojom_broadcastchannel_bindings_shared.cr.so'),
+ (0x03192000, 0x03cd7918, 0x00b45918, 'libblink_modules.cr.so'),
+ (0x03cd8000, 0x03d137d0, 0x0003b7d0, 'libblink_mojo_bindings_shared.cr.so'),
+ (0x03d14000, 0x03d2670c, 0x0001270c, 'libblink_offscreen_canvas_mojo_bindings_shared.cr.so'),
+ (0x03d27000, 0x046c7054, 0x009a0054, 'libblink_platform.cr.so'),
+ (0x046c8000, 0x0473fbfc, 0x00077bfc, 'libbluetooth.cr.so'),
+ (0x04740000, 0x04878f40, 0x00138f40, 'libboringssl.cr.so'),
+ (0x04879000, 0x0498466c, 0x0010b66c, 'libc++_shared.so'),
+ (0x04985000, 0x0498d93c, 0x0000893c, 'libcaptive_portal.cr.so'),
+ (0x0498e000, 0x049947cc, 0x000067cc, 'libcapture_base.cr.so'),
+ (0x04995000, 0x04b39f18, 0x001a4f18, 'libcapture_lib.cr.so'),
+ (0x04b3a000, 0x04b488ec, 0x0000e8ec, 'libcbor.cr.so'),
+ (0x04b49000, 0x04e9ea5c, 0x00355a5c, 'libcc.cr.so'),
+ (0x04e9f000, 0x04ed6404, 0x00037404, 'libcc_animation.cr.so'),
+ (0x04ed7000, 0x04ef5ab4, 0x0001eab4, 'libcc_base.cr.so'),
+ (0x04ef6000, 0x04fd9364, 0x000e3364, 'libcc_blink.cr.so'),
+ (0x04fda000, 0x04fe2758, 0x00008758, 'libcc_debug.cr.so'),
+ (0x04fe3000, 0x0500ae0c, 0x00027e0c, 'libcc_ipc.cr.so'),
+ (0x0500b000, 0x05078f38, 0x0006df38, 'libcc_paint.cr.so'),
+ (0x05079000, 0x0507e734, 0x00005734, 'libcdm_manager.cr.so'),
+ (0x0507f000, 0x06f4d744, 0x01ece744, 'libchrome.cr.so'),
+ (0x06f54000, 0x06feb830, 0x00097830, 'libchromium_sqlite3.cr.so'),
+ (0x06fec000, 0x0706f554, 0x00083554, 'libclient.cr.so'),
+ (0x07070000, 0x0708da60, 0x0001da60, 'libcloud_policy_proto_generated_compile.cr.so'),
+ (0x0708e000, 0x07121f28, 0x00093f28, 'libcodec.cr.so'),
+ (0x07122000, 0x07134ab8, 0x00012ab8, 'libcolor_space.cr.so'),
+ (0x07135000, 0x07138614, 0x00003614, 'libcommon.cr.so'),
+ (0x07139000, 0x0717c938, 0x00043938, 'libcompositor.cr.so'),
+ (0x0717d000, 0x0923d78c, 0x020c078c, 'libcontent.cr.so'),
+ (0x0923e000, 0x092ae87c, 0x0007087c, 'libcontent_common_mojo_bindings_shared.cr.so'),
+ (0x092af000, 0x092be718, 0x0000f718, 'libcontent_public_common_mojo_bindings_shared.cr.so'),
+ (0x092bf000, 0x092d9a20, 0x0001aa20, 'libcrash_key.cr.so'),
+ (0x092da000, 0x092eda58, 0x00013a58, 'libcrcrypto.cr.so'),
+ (0x092ee000, 0x092f16e0, 0x000036e0, 'libdevice_base.cr.so'),
+ (0x092f2000, 0x092fe8d8, 0x0000c8d8, 'libdevice_event_log.cr.so'),
+ (0x092ff000, 0x093026a4, 0x000036a4, 'libdevice_features.cr.so'),
+ (0x09303000, 0x093f1220, 0x000ee220, 'libdevice_gamepad.cr.so'),
+ (0x093f2000, 0x09437f54, 0x00045f54, 'libdevice_vr_mojo_bindings.cr.so'),
+ (0x09438000, 0x0954c168, 0x00114168, 'libdevice_vr_mojo_bindings_blink.cr.so'),
+ (0x0954d000, 0x0955d720, 0x00010720, 'libdevice_vr_mojo_bindings_shared.cr.so'),
+ (0x0955e000, 0x0956b9c0, 0x0000d9c0, 'libdevices.cr.so'),
+ (0x0956c000, 0x0957cae8, 0x00010ae8, 'libdiscardable_memory_client.cr.so'),
+ (0x0957d000, 0x09588854, 0x0000b854, 'libdiscardable_memory_common.cr.so'),
+ (0x09589000, 0x0959cbb4, 0x00013bb4, 'libdiscardable_memory_service.cr.so'),
+ (0x0959d000, 0x095b6b90, 0x00019b90, 'libdisplay.cr.so'),
+ (0x095b7000, 0x095be930, 0x00007930, 'libdisplay_types.cr.so'),
+ (0x095bf000, 0x095c46c4, 0x000056c4, 'libdisplay_util.cr.so'),
+ (0x095c5000, 0x095f54a4, 0x000304a4, 'libdomain_reliability.cr.so'),
+ (0x095f6000, 0x0966fe08, 0x00079e08, 'libembedder.cr.so'),
+ (0x09670000, 0x096735f8, 0x000035f8, 'libembedder_switches.cr.so'),
+ (0x09674000, 0x096a3460, 0x0002f460, 'libevents.cr.so'),
+ (0x096a4000, 0x096b6d40, 0x00012d40, 'libevents_base.cr.so'),
+ (0x096b7000, 0x0981a778, 0x00163778, 'libffmpeg.cr.so'),
+ (0x0981b000, 0x09945c94, 0x0012ac94, 'libfido.cr.so'),
+ (0x09946000, 0x09a330dc, 0x000ed0dc, 'libfingerprint.cr.so'),
+ (0x09a34000, 0x09b53170, 0x0011f170, 'libfreetype_harfbuzz.cr.so'),
+ (0x09b54000, 0x09bc5c5c, 0x00071c5c, 'libgcm.cr.so'),
+ (0x09bc6000, 0x09cc8584, 0x00102584, 'libgeolocation.cr.so'),
+ (0x09cc9000, 0x09cdc8d4, 0x000138d4, 'libgeometry.cr.so'),
+ (0x09cdd000, 0x09cec8b4, 0x0000f8b4, 'libgeometry_skia.cr.so'),
+ (0x09ced000, 0x09d10e14, 0x00023e14, 'libgesture_detection.cr.so'),
+ (0x09d11000, 0x09d7595c, 0x0006495c, 'libgfx.cr.so'),
+ (0x09d76000, 0x09d7d7cc, 0x000077cc, 'libgfx_ipc.cr.so'),
+ (0x09d7e000, 0x09d82708, 0x00004708, 'libgfx_ipc_buffer_types.cr.so'),
+ (0x09d83000, 0x09d89748, 0x00006748, 'libgfx_ipc_color.cr.so'),
+ (0x09d8a000, 0x09d8f6f4, 0x000056f4, 'libgfx_ipc_geometry.cr.so'),
+ (0x09d90000, 0x09d94754, 0x00004754, 'libgfx_ipc_skia.cr.so'),
+ (0x09d95000, 0x09d9869c, 0x0000369c, 'libgfx_switches.cr.so'),
+ (0x09d99000, 0x09dba0ac, 0x000210ac, 'libgin.cr.so'),
+ (0x09dbb000, 0x09e0a8cc, 0x0004f8cc, 'libgl_in_process_context.cr.so'),
+ (0x09e0b000, 0x09e17a18, 0x0000ca18, 'libgl_init.cr.so'),
+ (0x09e18000, 0x09ee34e4, 0x000cb4e4, 'libgl_wrapper.cr.so'),
+ (0x09ee4000, 0x0a1a2e00, 0x002bee00, 'libgles2.cr.so'),
+ (0x0a1a3000, 0x0a24556c, 0x000a256c, 'libgles2_implementation.cr.so'),
+ (0x0a246000, 0x0a267038, 0x00021038, 'libgles2_utils.cr.so'),
+ (0x0a268000, 0x0a3288e4, 0x000c08e4, 'libgpu.cr.so'),
+ (0x0a329000, 0x0a3627ec, 0x000397ec, 'libgpu_ipc_service.cr.so'),
+ (0x0a363000, 0x0a388a18, 0x00025a18, 'libgpu_util.cr.so'),
+ (0x0a389000, 0x0a506d8c, 0x0017dd8c, 'libhost.cr.so'),
+ (0x0a507000, 0x0a6f0ec0, 0x001e9ec0, 'libicui18n.cr.so'),
+ (0x0a6f1000, 0x0a83b4c8, 0x0014a4c8, 'libicuuc.cr.so'),
+ (0x0a83c000, 0x0a8416e4, 0x000056e4, 'libinterfaces_shared.cr.so'),
+ (0x0a842000, 0x0a87e2a0, 0x0003c2a0, 'libipc.cr.so'),
+ (0x0a87f000, 0x0a88c98c, 0x0000d98c, 'libipc_mojom.cr.so'),
+ (0x0a88d000, 0x0a8926e4, 0x000056e4, 'libipc_mojom_shared.cr.so'),
+ (0x0a893000, 0x0a8a1e18, 0x0000ee18, 'libkeyed_service_content.cr.so'),
+ (0x0a8a2000, 0x0a8b4a30, 0x00012a30, 'libkeyed_service_core.cr.so'),
+ (0x0a8b5000, 0x0a930a80, 0x0007ba80, 'libleveldatabase.cr.so'),
+ (0x0a931000, 0x0a9b3908, 0x00082908, 'libmanager.cr.so'),
+ (0x0a9b4000, 0x0aea9bb4, 0x004f5bb4, 'libmedia.cr.so'),
+ (0x0aeaa000, 0x0b08cb88, 0x001e2b88, 'libmedia_blink.cr.so'),
+ (0x0b08d000, 0x0b0a4728, 0x00017728, 'libmedia_devices_mojo_bindings_shared.cr.so'),
+ (0x0b0a5000, 0x0b1943ec, 0x000ef3ec, 'libmedia_gpu.cr.so'),
+ (0x0b195000, 0x0b2d07d4, 0x0013b7d4, 'libmedia_mojo_services.cr.so'),
+ (0x0b2d1000, 0x0b2d4760, 0x00003760, 'libmessage_center.cr.so'),
+ (0x0b2d5000, 0x0b2e0938, 0x0000b938, 'libmessage_support.cr.so'),
+ (0x0b2e1000, 0x0b2f3ad0, 0x00012ad0, 'libmetrics_cpp.cr.so'),
+ (0x0b2f4000, 0x0b313bb8, 0x0001fbb8, 'libmidi.cr.so'),
+ (0x0b314000, 0x0b31b848, 0x00007848, 'libmojo_base_lib.cr.so'),
+ (0x0b31c000, 0x0b3329f8, 0x000169f8, 'libmojo_base_mojom.cr.so'),
+ (0x0b333000, 0x0b34b98c, 0x0001898c, 'libmojo_base_mojom_blink.cr.so'),
+ (0x0b34c000, 0x0b354700, 0x00008700, 'libmojo_base_mojom_shared.cr.so'),
+ (0x0b355000, 0x0b3608b0, 0x0000b8b0, 'libmojo_base_shared_typemap_traits.cr.so'),
+ (0x0b361000, 0x0b3ad454, 0x0004c454, 'libmojo_edk.cr.so'),
+ (0x0b3ae000, 0x0b3c4a20, 0x00016a20, 'libmojo_edk_ports.cr.so'),
+ (0x0b3c5000, 0x0b3d38a0, 0x0000e8a0, 'libmojo_mojom_bindings.cr.so'),
+ (0x0b3d4000, 0x0b3da6e8, 0x000066e8, 'libmojo_mojom_bindings_shared.cr.so'),
+ (0x0b3db000, 0x0b3e27f0, 0x000077f0, 'libmojo_public_system.cr.so'),
+ (0x0b3e3000, 0x0b3fa9fc, 0x000179fc, 'libmojo_public_system_cpp.cr.so'),
+ (0x0b3fb000, 0x0b407728, 0x0000c728, 'libmojom_core_shared.cr.so'),
+ (0x0b408000, 0x0b421744, 0x00019744, 'libmojom_platform_shared.cr.so'),
+ (0x0b422000, 0x0b43451c, 0x0001251c, 'libnative_theme.cr.so'),
+ (0x0b435000, 0x0baaa1bc, 0x006751bc, 'libnet.cr.so'),
+ (0x0bac4000, 0x0bb74670, 0x000b0670, 'libnetwork_cpp.cr.so'),
+ (0x0bb75000, 0x0bbaee8c, 0x00039e8c, 'libnetwork_cpp_base.cr.so'),
+ (0x0bbaf000, 0x0bd21844, 0x00172844, 'libnetwork_service.cr.so'),
+ (0x0bd22000, 0x0bd256e4, 0x000036e4, 'libnetwork_session_configurator.cr.so'),
+ (0x0bd26000, 0x0bd33734, 0x0000d734, 'libonc.cr.so'),
+ (0x0bd34000, 0x0bd9ce18, 0x00068e18, 'libperfetto.cr.so'),
+ (0x0bd9d000, 0x0bda4854, 0x00007854, 'libplatform.cr.so'),
+ (0x0bda5000, 0x0bec5ce4, 0x00120ce4, 'libpolicy_component.cr.so'),
+ (0x0bec6000, 0x0bf5ab58, 0x00094b58, 'libpolicy_proto.cr.so'),
+ (0x0bf5b000, 0x0bf86fbc, 0x0002bfbc, 'libprefs.cr.so'),
+ (0x0bf87000, 0x0bfa5d74, 0x0001ed74, 'libprinting.cr.so'),
+ (0x0bfa6000, 0x0bfe0e80, 0x0003ae80, 'libprotobuf_lite.cr.so'),
+ (0x0bfe1000, 0x0bff0a18, 0x0000fa18, 'libproxy_config.cr.so'),
+ (0x0bff1000, 0x0c0f6654, 0x00105654, 'libpublic.cr.so'),
+ (0x0c0f7000, 0x0c0fa6a4, 0x000036a4, 'librange.cr.so'),
+ (0x0c0fb000, 0x0c118058, 0x0001d058, 'libraster.cr.so'),
+ (0x0c119000, 0x0c133d00, 0x0001ad00, 'libresource_coordinator_cpp.cr.so'),
+ (0x0c134000, 0x0c1396a0, 0x000056a0, 'libresource_coordinator_cpp_base.cr.so'),
+ (0x0c13a000, 0x0c1973b8, 0x0005d3b8, 'libresource_coordinator_public_mojom.cr.so'),
+ (0x0c198000, 0x0c2033e8, 0x0006b3e8, 'libresource_coordinator_public_mojom_blink.cr.so'),
+ (0x0c204000, 0x0c219744, 0x00015744, 'libresource_coordinator_public_mojom_shared.cr.so'),
+ (0x0c21a000, 0x0c21e700, 0x00004700, 'libsandbox.cr.so'),
+ (0x0c21f000, 0x0c22f96c, 0x0001096c, 'libsandbox_services.cr.so'),
+ (0x0c230000, 0x0c249d58, 0x00019d58, 'libseccomp_bpf.cr.so'),
+ (0x0c24a000, 0x0c24e714, 0x00004714, 'libseccomp_starter_android.cr.so'),
+ (0x0c24f000, 0x0c4ae9f0, 0x0025f9f0, 'libservice.cr.so'),
+ (0x0c4af000, 0x0c4c3ae4, 0x00014ae4, 'libservice_manager_cpp.cr.so'),
+ (0x0c4c4000, 0x0c4cb708, 0x00007708, 'libservice_manager_cpp_types.cr.so'),
+ (0x0c4cc000, 0x0c4fbe30, 0x0002fe30, 'libservice_manager_mojom.cr.so'),
+ (0x0c4fc000, 0x0c532e78, 0x00036e78, 'libservice_manager_mojom_blink.cr.so'),
+ (0x0c533000, 0x0c53669c, 0x0000369c, 'libservice_manager_mojom_constants.cr.so'),
+ (0x0c537000, 0x0c53e85c, 0x0000785c, 'libservice_manager_mojom_constants_blink.cr.so'),
+ (0x0c53f000, 0x0c542668, 0x00003668, 'libservice_manager_mojom_constants_shared.cr.so'),
+ (0x0c543000, 0x0c54d700, 0x0000a700, 'libservice_manager_mojom_shared.cr.so'),
+ (0x0c54e000, 0x0c8fc6ec, 0x003ae6ec, 'libsessions.cr.so'),
+ (0x0c8fd000, 0x0c90a924, 0x0000d924, 'libshared_memory_support.cr.so'),
+ (0x0c90b000, 0x0c9148ec, 0x000098ec, 'libshell_dialogs.cr.so'),
+ (0x0c915000, 0x0cf8de70, 0x00678e70, 'libskia.cr.so'),
+ (0x0cf8e000, 0x0cf978bc, 0x000098bc, 'libsnapshot.cr.so'),
+ (0x0cf98000, 0x0cfb7d9c, 0x0001fd9c, 'libsql.cr.so'),
+ (0x0cfb8000, 0x0cfbe744, 0x00006744, 'libstartup_tracing.cr.so'),
+ (0x0cfbf000, 0x0d19b4e4, 0x001dc4e4, 'libstorage_browser.cr.so'),
+ (0x0d19c000, 0x0d2a773c, 0x0010b73c, 'libstorage_common.cr.so'),
+ (0x0d2a8000, 0x0d2ac6fc, 0x000046fc, 'libsurface.cr.so'),
+ (0x0d2ad000, 0x0d2baa98, 0x0000da98, 'libtracing.cr.so'),
+ (0x0d2bb000, 0x0d2f36b0, 0x000386b0, 'libtracing_cpp.cr.so'),
+ (0x0d2f4000, 0x0d326e70, 0x00032e70, 'libtracing_mojom.cr.so'),
+ (0x0d327000, 0x0d33270c, 0x0000b70c, 'libtracing_mojom_shared.cr.so'),
+ (0x0d333000, 0x0d46d804, 0x0013a804, 'libui_android.cr.so'),
+ (0x0d46e000, 0x0d4cb3f8, 0x0005d3f8, 'libui_base.cr.so'),
+ (0x0d4cc000, 0x0d4dbc40, 0x0000fc40, 'libui_base_ime.cr.so'),
+ (0x0d4dc000, 0x0d4e58d4, 0x000098d4, 'libui_data_pack.cr.so'),
+ (0x0d4e6000, 0x0d51d1e0, 0x000371e0, 'libui_devtools.cr.so'),
+ (0x0d51e000, 0x0d52b984, 0x0000d984, 'libui_message_center_cpp.cr.so'),
+ (0x0d52c000, 0x0d539a48, 0x0000da48, 'libui_touch_selection.cr.so'),
+ (0x0d53a000, 0x0d55bc60, 0x00021c60, 'liburl.cr.so'),
+ (0x0d55c000, 0x0d55f6b4, 0x000036b4, 'liburl_ipc.cr.so'),
+ (0x0d560000, 0x0d5af110, 0x0004f110, 'liburl_matcher.cr.so'),
+ (0x0d5b0000, 0x0d5e2fac, 0x00032fac, 'libuser_manager.cr.so'),
+ (0x0d5e3000, 0x0d5e66e4, 0x000036e4, 'libuser_prefs.cr.so'),
+ (0x0d5e7000, 0x0e3e1cc8, 0x00dfacc8, 'libv8.cr.so'),
+ (0x0e3e2000, 0x0e400ae0, 0x0001eae0, 'libv8_libbase.cr.so'),
+ (0x0e401000, 0x0e4d91d4, 0x000d81d4, 'libviz_common.cr.so'),
+ (0x0e4da000, 0x0e4df7e4, 0x000057e4, 'libviz_resource_format.cr.so'),
+ (0x0e4e0000, 0x0e5b7120, 0x000d7120, 'libweb_dialogs.cr.so'),
+ (0x0e5b8000, 0x0e5c7a18, 0x0000fa18, 'libwebdata_common.cr.so'),
+ (0x0e5c8000, 0x0e61bfe4, 0x00053fe4, 'libwtf.cr.so'),
+]
+
+
+# A small memory map fragment extracted from a tombstone for a process that
+# had loaded the APK corresponding to _TEST_APK_LIBS above.
+_TEST_MEMORY_MAP = r'''memory map:
+12c00000-12ccafff rw- 0 cb000 /dev/ashmem/dalvik-main space (deleted)
+12ccb000-130cafff rw- cb000 400000 /dev/ashmem/dalvik-main space (deleted)
+130cb000-32bfffff --- 4cb000 1fb35000 /dev/ashmem/dalvik-main space (deleted)
+32c00000-32c00fff rw- 0 1000 /dev/ashmem/dalvik-main space 1 (deleted)
+32c01000-52bfffff --- 1000 1ffff000 /dev/ashmem/dalvik-main space 1 (deleted)
+6f3b8000-6fd90fff rw- 0 9d9000 /data/dalvik-cache/x86/system@framework@boot.art
+6fd91000-71c42fff r-- 0 1eb2000 /data/dalvik-cache/x86/system@framework@boot.oat
+71c43000-7393efff r-x 1eb2000 1cfc000 /data/dalvik-cache/x86/system@framework@boot.oat (load base 0x71c43000)
+7393f000-7393ffff rw- 3bae000 1000 /data/dalvik-cache/x86/system@framework@boot.oat
+73940000-73a1bfff rw- 0 dc000 /dev/ashmem/dalvik-zygote space (deleted)
+73a1c000-73a1cfff rw- 0 1000 /dev/ashmem/dalvik-non moving space (deleted)
+73a1d000-73a2dfff rw- 1000 11000 /dev/ashmem/dalvik-non moving space (deleted)
+73a2e000-77540fff --- 12000 3b13000 /dev/ashmem/dalvik-non moving space (deleted)
+77541000-7793ffff rw- 3b25000 3ff000 /dev/ashmem/dalvik-non moving space (deleted)
+923aa000-92538fff r-- 8a9000 18f000 /data/app/com.example.app-2/base.apk
+92539000-9255bfff r-- 0 23000 /data/data/com.example.app/app_data/paks/es.pak@162db1c6689
+9255c000-92593fff r-- 213000 38000 /data/app/com.example.app-2/base.apk
+92594000-925c0fff r-- 87d000 2d000 /data/app/com.example.app-2/base.apk
+925c1000-927d3fff r-- a37000 213000 /data/app/com.example.app-2/base.apk
+927d4000-92e07fff r-- 24a000 634000 /data/app/com.example.app-2/base.apk
+92e08000-92e37fff r-- a931000 30000 /data/app/com.example.app-2/base.apk
+92e38000-92e86fff r-x a961000 4f000 /data/app/com.example.app-2/base.apk
+92e87000-92e8afff rw- a9b0000 4000 /data/app/com.example.app-2/base.apk
+92e8b000-92e8bfff rw- 0 1000
+92e8c000-92e9dfff r-- d5b0000 12000 /data/app/com.example.app-2/base.apk
+92e9e000-92ebcfff r-x d5c2000 1f000 /data/app/com.example.app-2/base.apk
+92ebd000-92ebefff rw- d5e1000 2000 /data/app/com.example.app-2/base.apk
+92ebf000-92ebffff rw- 0 1000
+'''
+
# List of (address, size, path, offset) tuples that must appear in
# _TEST_MEMORY_MAP after parsing. Not all sections need to be listed;
# these are spot-checks for MemoryMap.FindSectionForAddress().
_TEST_MEMORY_MAP_SECTIONS = [
    (0x923aa000, 0x18f000, '/data/app/com.example.app-2/base.apk', 0x8a9000),
    (0x9255c000, 0x038000, '/data/app/com.example.app-2/base.apk', 0x213000),
    (0x92594000, 0x02d000, '/data/app/com.example.app-2/base.apk', 0x87d000),
    (0x925c1000, 0x213000, '/data/app/com.example.app-2/base.apk', 0xa37000),
]
+
+_EXPECTED_TEST_MEMORY_MAP = r'''memory map:
+12c00000-12ccafff rw- 0 cb000 /dev/ashmem/dalvik-main space (deleted)
+12ccb000-130cafff rw- cb000 400000 /dev/ashmem/dalvik-main space (deleted)
+130cb000-32bfffff --- 4cb000 1fb35000 /dev/ashmem/dalvik-main space (deleted)
+32c00000-32c00fff rw- 0 1000 /dev/ashmem/dalvik-main space 1 (deleted)
+32c01000-52bfffff --- 1000 1ffff000 /dev/ashmem/dalvik-main space 1 (deleted)
+6f3b8000-6fd90fff rw- 0 9d9000 /data/dalvik-cache/x86/system@framework@boot.art
+6fd91000-71c42fff r-- 0 1eb2000 /data/dalvik-cache/x86/system@framework@boot.oat
+71c43000-7393efff r-x 1eb2000 1cfc000 /data/dalvik-cache/x86/system@framework@boot.oat (load base 0x71c43000)
+7393f000-7393ffff rw- 3bae000 1000 /data/dalvik-cache/x86/system@framework@boot.oat
+73940000-73a1bfff rw- 0 dc000 /dev/ashmem/dalvik-zygote space (deleted)
+73a1c000-73a1cfff rw- 0 1000 /dev/ashmem/dalvik-non moving space (deleted)
+73a1d000-73a2dfff rw- 1000 11000 /dev/ashmem/dalvik-non moving space (deleted)
+73a2e000-77540fff --- 12000 3b13000 /dev/ashmem/dalvik-non moving space (deleted)
+77541000-7793ffff rw- 3b25000 3ff000 /dev/ashmem/dalvik-non moving space (deleted)
+923aa000-92538fff r-- 8a9000 18f000 /data/app/com.example.app-2/base.apk
+92539000-9255bfff r-- 0 23000 /data/data/com.example.app/app_data/paks/es.pak@162db1c6689
+9255c000-92593fff r-- 213000 38000 /data/app/com.example.app-2/base.apk
+92594000-925c0fff r-- 87d000 2d000 /data/app/com.example.app-2/base.apk
+925c1000-927d3fff r-- a37000 213000 /data/app/com.example.app-2/base.apk
+927d4000-92e07fff r-- 24a000 634000 /data/app/com.example.app-2/base.apk
+92e08000-92e37fff r-- a931000 30000 /data/app/com.example.app-2/base.apk!lib/libmanager.cr.so (offset 0x0)
+92e38000-92e86fff r-x a961000 4f000 /data/app/com.example.app-2/base.apk!lib/libmanager.cr.so (offset 0x30000)
+92e87000-92e8afff rw- a9b0000 4000 /data/app/com.example.app-2/base.apk!lib/libmanager.cr.so (offset 0x7f000)
+92e8b000-92e8bfff rw- 0 1000
+92e8c000-92e9dfff r-- d5b0000 12000 /data/app/com.example.app-2/base.apk!lib/libuser_manager.cr.so (offset 0x0)
+92e9e000-92ebcfff r-x d5c2000 1f000 /data/app/com.example.app-2/base.apk!lib/libuser_manager.cr.so (offset 0x12000)
+92ebd000-92ebefff rw- d5e1000 2000 /data/app/com.example.app-2/base.apk!lib/libuser_manager.cr.so (offset 0x31000)
+92ebf000-92ebffff rw- 0 1000
+'''
+
+# Example stack section, taken from the same tombstone that _TEST_MEMORY_MAP
+# was extracted from.
+_TEST_STACK = r'''stack:
+ bf89a070 b7439468 /system/lib/libc.so
+ bf89a074 bf89a1e4 [stack]
+ bf89a078 932d4000 /data/app/com.example.app-2/base.apk
+ bf89a07c b73bfbc9 /system/lib/libc.so (pthread_mutex_lock+65)
+ bf89a080 00000000
+ bf89a084 4000671c /dev/ashmem/dalvik-main space 1 (deleted)
+ bf89a088 932d1d86 /data/app/com.example.app-2/base.apk
+ bf89a08c b743671c /system/lib/libc.so
+ bf89a090 b77f8c00 /system/bin/linker
+ bf89a094 b743cc90
+ bf89a098 932d1d4a /data/app/com.example.app-2/base.apk
+ bf89a09c b73bf271 /system/lib/libc.so (__pthread_internal_find(long)+65)
+ bf89a0a0 b743cc90
+ bf89a0a4 bf89a0b0 [stack]
+ bf89a0a8 bf89a0b8 [stack]
+ bf89a0ac 00000008
+ ........ ........
+ #00 bf89a0b0 00000006
+ bf89a0b4 00000002
+ bf89a0b8 b743671c /system/lib/libc.so
+ bf89a0bc b73bf5d9 /system/lib/libc.so (pthread_kill+71)
+ #01 bf89a0c0 00006937
+ bf89a0c4 00006937
+ bf89a0c8 00000006
+ bf89a0cc b77fd3a9 /system/bin/app_process32 (sigprocmask+141)
+ bf89a0d0 00000002
+ bf89a0d4 bf89a0ec [stack]
+ bf89a0d8 00000000
+ bf89a0dc b743671c /system/lib/libc.so
+ bf89a0e0 bf89a12c [stack]
+ bf89a0e4 bf89a1e4 [stack]
+ bf89a0e8 932d1d4a /data/app/com.example.app-2/base.apk
+ bf89a0ec b7365206 /system/lib/libc.so (raise+37)
+ #02 bf89a0f0 b77f8c00 /system/bin/linker
+ bf89a0f4 00000006
+ bf89a0f8 b7439468 /system/lib/libc.so
+ bf89a0fc b743671c /system/lib/libc.so
+ bf89a100 bf89a12c [stack]
+ bf89a104 b743671c /system/lib/libc.so
+ bf89a108 bf89a12c [stack]
+ bf89a10c b735e9e5 /system/lib/libc.so (abort+81)
+ #03 bf89a110 00000006
+ bf89a114 bf89a12c [stack]
+ bf89a118 00000000
+ bf89a11c b55a3d3b /system/lib/libprotobuf-cpp-lite.so (google::protobuf::internal::DefaultLogHandler(google::protobuf::LogLevel, char const*, int, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&)+99)
+ bf89a120 b7439468 /system/lib/libc.so
+ bf89a124 b55ba38d /system/lib/libprotobuf-cpp-lite.so
+ bf89a128 b55ba408 /system/lib/libprotobuf-cpp-lite.so
+ bf89a12c ffffffdf
+ bf89a130 0000003d
+ bf89a134 adfedf00 [anon:libc_malloc]
+ bf89a138 bf89a158 [stack]
+ #04 bf89a13c a0cee7f0 /data/app/com.example.app-2/base.apk
+ bf89a140 b55c1cb0 /system/lib/libprotobuf-cpp-lite.so
+ bf89a144 bf89a1e4 [stack]
+'''
+
+# Expected value of _TEST_STACK after translation of addresses in the APK
+# into offsets into libraries.
+_EXPECTED_STACK = r'''stack:
+ bf89a070 b7439468 /system/lib/libc.so
+ bf89a074 bf89a1e4 [stack]
+ bf89a078 932d4000 /data/app/com.example.app-2/base.apk
+ bf89a07c b73bfbc9 /system/lib/libc.so (pthread_mutex_lock+65)
+ bf89a080 00000000
+ bf89a084 4000671c /dev/ashmem/dalvik-main space 1 (deleted)
+ bf89a088 932d1d86 /data/app/com.example.app-2/base.apk
+ bf89a08c b743671c /system/lib/libc.so
+ bf89a090 b77f8c00 /system/bin/linker
+ bf89a094 b743cc90
+ bf89a098 932d1d4a /data/app/com.example.app-2/base.apk
+ bf89a09c b73bf271 /system/lib/libc.so (__pthread_internal_find(long)+65)
+ bf89a0a0 b743cc90
+ bf89a0a4 bf89a0b0 [stack]
+ bf89a0a8 bf89a0b8 [stack]
+ bf89a0ac 00000008
+ ........ ........
+ #00 bf89a0b0 00000006
+ bf89a0b4 00000002
+ bf89a0b8 b743671c /system/lib/libc.so
+ bf89a0bc b73bf5d9 /system/lib/libc.so (pthread_kill+71)
+ #01 bf89a0c0 00006937
+ bf89a0c4 00006937
+ bf89a0c8 00000006
+ bf89a0cc b77fd3a9 /system/bin/app_process32 (sigprocmask+141)
+ bf89a0d0 00000002
+ bf89a0d4 bf89a0ec [stack]
+ bf89a0d8 00000000
+ bf89a0dc b743671c /system/lib/libc.so
+ bf89a0e0 bf89a12c [stack]
+ bf89a0e4 bf89a1e4 [stack]
+ bf89a0e8 932d1d4a /data/app/com.example.app-2/base.apk
+ bf89a0ec b7365206 /system/lib/libc.so (raise+37)
+ #02 bf89a0f0 b77f8c00 /system/bin/linker
+ bf89a0f4 00000006
+ bf89a0f8 b7439468 /system/lib/libc.so
+ bf89a0fc b743671c /system/lib/libc.so
+ bf89a100 bf89a12c [stack]
+ bf89a104 b743671c /system/lib/libc.so
+ bf89a108 bf89a12c [stack]
+ bf89a10c b735e9e5 /system/lib/libc.so (abort+81)
+ #03 bf89a110 00000006
+ bf89a114 bf89a12c [stack]
+ bf89a118 00000000
+ bf89a11c b55a3d3b /system/lib/libprotobuf-cpp-lite.so (google::protobuf::internal::DefaultLogHandler(google::protobuf::LogLevel, char const*, int, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&)+99)
+ bf89a120 b7439468 /system/lib/libc.so
+ bf89a124 b55ba38d /system/lib/libprotobuf-cpp-lite.so
+ bf89a128 b55ba408 /system/lib/libprotobuf-cpp-lite.so
+ bf89a12c ffffffdf
+ bf89a130 0000003d
+ bf89a134 adfedf00 [anon:libc_malloc]
+ bf89a138 bf89a158 [stack]
+ #04 bf89a13c a0cee7f0 /data/app/com.example.app-2/base.apk
+ bf89a140 b55c1cb0 /system/lib/libprotobuf-cpp-lite.so
+ bf89a144 bf89a1e4 [stack]
+'''
+
+_TEST_BACKTRACE = r'''backtrace:
+ #00 pc 00084126 /system/lib/libc.so (tgkill+22)
+ #01 pc 000815d8 /system/lib/libc.so (pthread_kill+70)
+ #02 pc 00027205 /system/lib/libc.so (raise+36)
+ #03 pc 000209e4 /system/lib/libc.so (abort+80)
+ #04 pc 0000cf73 /system/lib/libprotobuf-cpp-lite.so (google::protobuf::internal::LogMessage::Finish()+117)
+ #05 pc 0000cf8e /system/lib/libprotobuf-cpp-lite.so (google::protobuf::internal::LogFinisher::operator=(google::protobuf::internal::LogMessage&)+26)
+ #06 pc 0000d27f /system/lib/libprotobuf-cpp-lite.so (google::protobuf::internal::VerifyVersion(int, int, char const*)+574)
+ #07 pc 007cd236 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #08 pc 000111a9 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0xbfc2000)
+ #09 pc 00013228 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0xbfc2000)
+ #10 pc 000131de /data/app/com.google.android.apps.chrome-2/base.apk (offset 0xbfc2000)
+ #11 pc 007cd2d8 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #12 pc 007cd956 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #13 pc 007c2d4a /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #14 pc 009fc9f1 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #15 pc 009fc8ea /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #16 pc 00561c63 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #17 pc 0106fbdb /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #18 pc 004d7371 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #19 pc 004d8159 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #20 pc 004d7b96 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #21 pc 004da4b6 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #22 pc 005ab66c /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x7daa000)
+ #23 pc 005afca2 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x7daa000)
+ #24 pc 0000cae8 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x598d000)
+ #25 pc 00ce864f /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x7daa000)
+ #26 pc 00ce8dfa /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x7daa000)
+ #27 pc 00ce74c6 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x7daa000)
+ #28 pc 00004616 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x961e000)
+ #29 pc 00ce8215 /data/app/com.google.android.apps.chrome-2/base.apk (offset 0x7daa000)
+ #30 pc 0013d8c7 /system/lib/libart.so (art_quick_generic_jni_trampoline+71)
+ #31 pc 00137c52 /system/lib/libart.so (art_quick_invoke_static_stub+418)
+ #32 pc 00143651 /system/lib/libart.so (art::ArtMethod::Invoke(art::Thread*, unsigned int*, unsigned int, art::JValue*, char const*)+353)
+ #33 pc 005e06ae /system/lib/libart.so (artInterpreterToCompiledCodeBridge+190)
+ #34 pc 00328b5d /system/lib/libart.so (bool art::interpreter::DoCall<false, false>(art::ArtMethod*, art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+445)
+ #35 pc 0032cfc0 /system/lib/libart.so (bool art::interpreter::DoInvoke<(art::InvokeType)0, false, false>(art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+160)
+ #36 pc 000fc703 /system/lib/libart.so (art::JValue art::interpreter::ExecuteGotoImpl<false, false>(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame&, art::JValue)+29891)
+ #37 pc 00300af7 /system/lib/libart.so (artInterpreterToInterpreterBridge+188)
+ #38 pc 00328b5d /system/lib/libart.so (bool art::interpreter::DoCall<false, false>(art::ArtMethod*, art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+445)
+ #39 pc 0032cfc0 /system/lib/libart.so (bool art::interpreter::DoInvoke<(art::InvokeType)0, false, false>(art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+160)
+ #40 pc 000fc703 /system/lib/libart.so (art::JValue art::interpreter::ExecuteGotoImpl<false, false>(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame&, art::JValue)+29891)
+ #41 pc 00300af7 /system/lib/libart.so (artInterpreterToInterpreterBridge+188)
+ #42 pc 00328b5d /system/lib/libart.so (bool art::interpreter::DoCall<false, false>(art::ArtMethod*, art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+445)
+ #43 pc 0032ebf9 /system/lib/libart.so (bool art::interpreter::DoInvoke<(art::InvokeType)2, false, false>(art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+297)
+ #44 pc 000fc955 /system/lib/libart.so (art::JValue art::interpreter::ExecuteGotoImpl<false, false>(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame&, art::JValue)+30485)
+ #45 pc 00300af7 /system/lib/libart.so (artInterpreterToInterpreterBridge+188)
+ #46 pc 00328b5d /system/lib/libart.so (bool art::interpreter::DoCall<false, false>(art::ArtMethod*, art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+445)
+ #47 pc 0033090c /system/lib/libart.so (bool art::interpreter::DoInvoke<(art::InvokeType)4, false, false>(art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+636)
+ #48 pc 000fc67f /system/lib/libart.so (art::JValue art::interpreter::ExecuteGotoImpl<false, false>(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame&, art::JValue)+29759)
+ #49 pc 00300700 /system/lib/libart.so (art::interpreter::EnterInterpreterFromEntryPoint(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame*)+128)
+ #50 pc 00667c73 /system/lib/libart.so (artQuickToInterpreterBridge+808)
+ #51 pc 0013d98d /system/lib/libart.so (art_quick_to_interpreter_bridge+77)
+ #52 pc 7264bc5b /data/dalvik-cache/x86/system@framework@boot.oat (offset 0x1eb2000)
+'''
+
+_EXPECTED_BACKTRACE = r'''backtrace:
+ #00 pc 00084126 /system/lib/libc.so (tgkill+22)
+ #01 pc 000815d8 /system/lib/libc.so (pthread_kill+70)
+ #02 pc 00027205 /system/lib/libc.so (raise+36)
+ #03 pc 000209e4 /system/lib/libc.so (abort+80)
+ #04 pc 0000cf73 /system/lib/libprotobuf-cpp-lite.so (google::protobuf::internal::LogMessage::Finish()+117)
+ #05 pc 0000cf8e /system/lib/libprotobuf-cpp-lite.so (google::protobuf::internal::LogFinisher::operator=(google::protobuf::internal::LogMessage&)+26)
+ #06 pc 0000d27f /system/lib/libprotobuf-cpp-lite.so (google::protobuf::internal::VerifyVersion(int, int, char const*)+574)
+ #07 pc 007cd236 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #08 pc 000111a9 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libprotobuf_lite.cr.so (offset 0x1c000)
+ #09 pc 00013228 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libprotobuf_lite.cr.so (offset 0x1c000)
+ #10 pc 000131de /data/app/com.google.android.apps.chrome-2/base.apk!lib/libprotobuf_lite.cr.so (offset 0x1c000)
+ #11 pc 007cd2d8 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #12 pc 007cd956 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #13 pc 007c2d4a /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #14 pc 009fc9f1 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #15 pc 009fc8ea /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #16 pc 00561c63 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #17 pc 0106fbdb /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #18 pc 004d7371 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #19 pc 004d8159 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #20 pc 004d7b96 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #21 pc 004da4b6 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #22 pc 005ab66c /data/app/com.google.android.apps.chrome-2/base.apk!lib/libcontent.cr.so (offset 0xc2d000)
+ #23 pc 005afca2 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libcontent.cr.so (offset 0xc2d000)
+ #24 pc 0000cae8 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so (offset 0x90e000)
+ #25 pc 00ce864f /data/app/com.google.android.apps.chrome-2/base.apk!lib/libcontent.cr.so (offset 0xc2d000)
+ #26 pc 00ce8dfa /data/app/com.google.android.apps.chrome-2/base.apk!lib/libcontent.cr.so (offset 0xc2d000)
+ #27 pc 00ce74c6 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libcontent.cr.so (offset 0xc2d000)
+ #28 pc 00004616 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libembedder.cr.so (offset 0x28000)
+ #29 pc 00ce8215 /data/app/com.google.android.apps.chrome-2/base.apk!lib/libcontent.cr.so (offset 0xc2d000)
+ #30 pc 0013d8c7 /system/lib/libart.so (art_quick_generic_jni_trampoline+71)
+ #31 pc 00137c52 /system/lib/libart.so (art_quick_invoke_static_stub+418)
+ #32 pc 00143651 /system/lib/libart.so (art::ArtMethod::Invoke(art::Thread*, unsigned int*, unsigned int, art::JValue*, char const*)+353)
+ #33 pc 005e06ae /system/lib/libart.so (artInterpreterToCompiledCodeBridge+190)
+ #34 pc 00328b5d /system/lib/libart.so (bool art::interpreter::DoCall<false, false>(art::ArtMethod*, art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+445)
+ #35 pc 0032cfc0 /system/lib/libart.so (bool art::interpreter::DoInvoke<(art::InvokeType)0, false, false>(art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+160)
+ #36 pc 000fc703 /system/lib/libart.so (art::JValue art::interpreter::ExecuteGotoImpl<false, false>(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame&, art::JValue)+29891)
+ #37 pc 00300af7 /system/lib/libart.so (artInterpreterToInterpreterBridge+188)
+ #38 pc 00328b5d /system/lib/libart.so (bool art::interpreter::DoCall<false, false>(art::ArtMethod*, art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+445)
+ #39 pc 0032cfc0 /system/lib/libart.so (bool art::interpreter::DoInvoke<(art::InvokeType)0, false, false>(art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+160)
+ #40 pc 000fc703 /system/lib/libart.so (art::JValue art::interpreter::ExecuteGotoImpl<false, false>(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame&, art::JValue)+29891)
+ #41 pc 00300af7 /system/lib/libart.so (artInterpreterToInterpreterBridge+188)
+ #42 pc 00328b5d /system/lib/libart.so (bool art::interpreter::DoCall<false, false>(art::ArtMethod*, art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+445)
+ #43 pc 0032ebf9 /system/lib/libart.so (bool art::interpreter::DoInvoke<(art::InvokeType)2, false, false>(art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+297)
+ #44 pc 000fc955 /system/lib/libart.so (art::JValue art::interpreter::ExecuteGotoImpl<false, false>(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame&, art::JValue)+30485)
+ #45 pc 00300af7 /system/lib/libart.so (artInterpreterToInterpreterBridge+188)
+ #46 pc 00328b5d /system/lib/libart.so (bool art::interpreter::DoCall<false, false>(art::ArtMethod*, art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+445)
+ #47 pc 0033090c /system/lib/libart.so (bool art::interpreter::DoInvoke<(art::InvokeType)4, false, false>(art::Thread*, art::ShadowFrame&, art::Instruction const*, unsigned short, art::JValue*)+636)
+ #48 pc 000fc67f /system/lib/libart.so (art::JValue art::interpreter::ExecuteGotoImpl<false, false>(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame&, art::JValue)+29759)
+ #49 pc 00300700 /system/lib/libart.so (art::interpreter::EnterInterpreterFromEntryPoint(art::Thread*, art::DexFile::CodeItem const*, art::ShadowFrame*)+128)
+ #50 pc 00667c73 /system/lib/libart.so (artQuickToInterpreterBridge+808)
+ #51 pc 0013d98d /system/lib/libart.so (art_quick_to_interpreter_bridge+77)
+ #52 pc 7264bc5b /data/dalvik-cache/x86/system@framework@boot.oat (offset 0x1eb2000)
+'''
+
# Expected result of BacktraceTranslator.FindLibraryOffsets() when run over
# _EXPECTED_BACKTRACE (see BacktraceTranslatorTest.testFindLibraryOffsets):
# maps each translated library path to the set of file offsets, where each
# offset is the library's load offset within the APK plus the frame's pc.
_EXPECTED_BACKTRACE_OFFSETS_MAP = {
    '/data/app/com.google.android.apps.chrome-2/base.apk!lib/libprotobuf_lite.cr.so':
        set([
            0x1c000 + 0x111a9,
            0x1c000 + 0x13228,
            0x1c000 + 0x131de,
        ]),

    '/data/app/com.google.android.apps.chrome-2/base.apk!lib/libchrome.cr.so':
        set([
            0x90e000 + 0x7cd236,
            0x90e000 + 0x7cd2d8,
            0x90e000 + 0x7cd956,
            0x90e000 + 0x7c2d4a,
            0x90e000 + 0x9fc9f1,
            0x90e000 + 0x9fc8ea,
            0x90e000 + 0x561c63,
            0x90e000 + 0x106fbdb,
            0x90e000 + 0x4d7371,
            0x90e000 + 0x4d8159,
            0x90e000 + 0x4d7b96,
            0x90e000 + 0x4da4b6,
            0x90e000 + 0xcae8,
        ]),
    '/data/app/com.google.android.apps.chrome-2/base.apk!lib/libcontent.cr.so':
        set([
            0xc2d000 + 0x5ab66c,
            0xc2d000 + 0x5afca2,
            0xc2d000 + 0xce864f,
            0xc2d000 + 0xce8dfa,
            0xc2d000 + 0xce74c6,
            0xc2d000 + 0xce8215,
        ]),
    '/data/app/com.google.android.apps.chrome-2/base.apk!lib/libembedder.cr.so':
        set([
            0x28000 + 0x4616,
        ])
}
+
+# pylint: enable=line-too-long
+
_ONE_MB = 1024 * 1024
# Mock addr2line outputs keyed by address within the library, used by
# ElfSymbolResolverTest to check symbol resolution results.
_TEST_SYMBOL_DATA = {
    # Regular symbols
    0: 'mock_sym_for_addr_0 [mock_src/libmock1.so.c:0]',
    0x1000: 'mock_sym_for_addr_4096 [mock_src/libmock1.so.c:4096]',

    # Symbols without source file path.
    _ONE_MB: 'mock_sym_for_addr_1048576 [??:0]',
    _ONE_MB + 0x8234: 'mock_sym_for_addr_1081908 [??:0]',

    # Unknown symbol.
    2 * _ONE_MB: '?? [??:0]',

    # Inlined symbol.
    3 * _ONE_MB:
        'mock_sym_for_addr_3145728_inner [mock_src/libmock1.so.c:3145728]',
}
+
+@contextlib.contextmanager
+def _TempDir():
+ dirname = tempfile.mkdtemp()
+ try:
+ yield dirname
+ finally:
+ shutil.rmtree(dirname)
+
+
+def _TouchFile(path):
+ # Create parent directories.
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError:
+ pass
+ with open(path, 'a'):
+ os.utime(path, None)
+
class MockApkTranslator(object):
  """A mock ApkLibraryPathTranslator object used for testing."""

  # Regex that matches the content of APK native library map files generated
  # with apk_lib_dump.py. Each line has the format:
  #   0x<file-start> 0x<file-end> 0x<file-size> <library-path>
  # NOTE: the fourth field is a library *path*, not a hex value, so it is
  # matched with \S+ rather than 0x[0-9a-f]+.
  _RE_MAP_FILE = re.compile(
      r'0x(?P<file_start>[0-9a-f]+)\s+' +
      r'0x(?P<file_end>[0-9a-f]+)\s+' +
      r'0x(?P<file_size>[0-9a-f]+)\s+' +
      r'(?P<lib_path>\S+)')

  def __init__(self, test_apk_libs=None):
    """Initialize instance.

    Args:
      test_apk_libs: Optional list of (file_start, file_end, size, lib_path)
        tuples, like _TEST_APK_LIBS for example. This will be used to
        implement TranslatePath().
    """
    self._apk_libs = []
    if test_apk_libs:
      self._AddLibEntries(test_apk_libs)

  def _AddLibEntries(self, entries):
    # Keep the list sorted by file_start so TranslatePath() can use a
    # binary search.
    self._apk_libs = sorted(self._apk_libs + entries, key=lambda x: x[0])

  def ReadMapFile(self, file_path):
    """Read an .apk.native-libs file that was produced with apk_lib_dump.py.

    Args:
      file_path: input path to .apk.native-libs file. Its format is
        essentially: 0x<start> 0x<end> 0x<size> <library-path>
    """
    new_libs = []
    with open(file_path) as f:
      for line in f:
        m = MockApkTranslator._RE_MAP_FILE.match(line)
        if m:
          file_start = int(m.group('file_start'), 16)
          file_end = int(m.group('file_end'), 16)
          file_size = int(m.group('file_size'), 16)
          lib_path = m.group('lib_path')
          # Sanity check: sizes that don't add up indicate a malformed line.
          if file_start + file_size != file_end:
            logging.warning('%s: Inconsistent (start, end, size) values '
                            '(0x%x, 0x%x, 0x%x)',
                            file_path, file_start, file_end, file_size)
          else:
            new_libs.append((file_start, file_end, file_size, lib_path))

    self._AddLibEntries(new_libs)

  def TranslatePath(self, lib_path, lib_offset):
    """Translate an APK file path + offset into a library path + offset.

    Args:
      lib_path: APK file path on the device.
      lib_offset: Byte offset into the APK file.
    Returns:
      A (new_path, new_offset) tuple. When |lib_offset| falls inside a known
      embedded library, new_path is '<lib_path>!lib/<library>' and new_offset
      is relative to that library's start. Otherwise the inputs are returned
      unchanged.
    """
    # Binary search for the entry whose [start, start + size) range
    # contains |lib_offset|.
    min_pos = 0
    max_pos = len(self._apk_libs)
    while min_pos < max_pos:
      mid_pos = (min_pos + max_pos) // 2
      mid_entry = self._apk_libs[mid_pos]
      mid_offset = mid_entry[0]
      mid_size = mid_entry[2]
      if lib_offset < mid_offset:
        max_pos = mid_pos
      elif lib_offset >= mid_offset + mid_size:
        min_pos = mid_pos + 1
      else:
        # Found it
        new_path = '%s!lib/%s' % (lib_path, mid_entry[3])
        new_offset = lib_offset - mid_offset
        return (new_path, new_offset)

    return lib_path, lib_offset
+
+
class HostLibraryFinderTest(unittest.TestCase):
  """Unit tests for symbol_utils.HostLibraryFinder."""

  def testEmpty(self):
    # A finder without any search directory can never find anything.
    finder = symbol_utils.HostLibraryFinder()
    for device_path in ('/data/data/com.example.app-1/lib/libfoo.so',
                        '/data/data/com.example.app-1/base.apk!lib/libfoo.so'):
      self.assertIsNone(finder.Find(device_path))


  def testSimpleDirectory(self):
    finder = symbol_utils.HostLibraryFinder()
    with _TempDir() as tmp_dir:
      host_libfoo_path = os.path.join(tmp_dir, 'libfoo.so')
      host_libbar_path = os.path.join(tmp_dir, 'libbar.so')
      for host_path in (host_libfoo_path, host_libbar_path):
        _TouchFile(host_path)

      finder.AddSearchDir(tmp_dir)

      # Only the basename of the device path should matter. The extraction
      # directory used by the PackageManager has changed between Android
      # releases (/data/app/, /data/data/ or /data/app-lib/), and libraries
      # may also live inside an APK ('!lib/' paths).
      self.assertEqual(
          host_libfoo_path,
          finder.Find('/data/app-lib/com.example.app-1/lib/libfoo.so'))
      self.assertEqual(
          host_libfoo_path,
          finder.Find('/whatever/what.apk!lib/libfoo.so'))
      self.assertEqual(
          host_libbar_path,
          finder.Find('/data/data/com.example.app-1/lib/libbar.so'))

      # Unknown basenames are not found.
      self.assertIsNone(
          finder.Find('/data/data/com.example.app-1/lib/libunknown.so'))


  def testMultipleDirectories(self):
    with _TempDir() as tmp_dir:
      # Layout created below:
      #   <tmp_dir>/aaa/libfoo.so
      #   <tmp_dir>/bbb/libbar.so
      #   <tmp_dir>/bbb/libfoo.so   (shadowed: 'aaa' is searched first)
      aaa_dir = os.path.join(tmp_dir, 'aaa')
      bbb_dir = os.path.join(tmp_dir, 'bbb')
      host_libfoo_path = os.path.join(aaa_dir, 'libfoo.so')
      host_libbar_path = os.path.join(bbb_dir, 'libbar.so')
      shadowed_libfoo_path = os.path.join(bbb_dir, 'libfoo.so')
      for host_path in (host_libfoo_path,
                        host_libbar_path,
                        shadowed_libfoo_path):
        _TouchFile(host_path)

      finder = symbol_utils.HostLibraryFinder()
      finder.AddSearchDir(aaa_dir)
      finder.AddSearchDir(bbb_dir)

      # 'aaa' wins for libfoo.so regardless of the device path's shape.
      self.assertEqual(
          host_libfoo_path,
          finder.Find('/data/data/com.example.app-1/lib/libfoo.so'))
      self.assertEqual(
          host_libfoo_path,
          finder.Find('/data/whatever/base.apk!lib/libfoo.so'))

      self.assertEqual(
          host_libbar_path,
          finder.Find('/data/data/com.example.app-1/lib/libbar.so'))

      self.assertIsNone(
          finder.Find('/data/data/com.example.app-1/lib/libunknown.so'))
+
+
class ElfSymbolResolverTest(unittest.TestCase):
  """Unit tests for symbol_utils.ElfSymbolResolver."""

  def testCreation(self):
    resolver = symbol_utils.ElfSymbolResolver(
        addr2line_path_for_tests=_MOCK_A2L_PATH)
    self.assertTrue(resolver)

  def testWithSimpleOffsets(self):
    # Each address should be resolved through the (mock) addr2line binary.
    resolver = symbol_utils.ElfSymbolResolver(
        addr2line_path_for_tests=_MOCK_A2L_PATH)
    resolver.SetAndroidAbi('ignored-abi')

    lib_path = '/some/path/libmock1.so'
    for address, expected_symbol in _TEST_SYMBOL_DATA.items():
      self.assertEqual(expected_symbol,
                       resolver.FindSymbolInfo(lib_path, address))

  def testWithPreResolvedSymbols(self):
    resolver = symbol_utils.ElfSymbolResolver(
        addr2line_path_for_tests=_MOCK_A2L_PATH)
    resolver.SetAndroidAbi('ignored-abi')
    lib_path = '/some/path/libmock1.so'
    resolver.AddLibraryOffsets(lib_path, list(_TEST_SYMBOL_DATA.keys()))

    # Once the offsets have been pre-resolved above, lookups must succeed
    # without ever invoking the symbolizer again.
    resolver.DisallowSymbolizerForTesting()

    for address, expected_symbol in _TEST_SYMBOL_DATA.items():
      sym_info = resolver.FindSymbolInfo(lib_path, address)
      self.assertIsNotNone(sym_info, 'None symbol info for addr %x' % address)
      self.assertEqual(
          sym_info, expected_symbol,
          'Invalid symbol info for addr %x [%s] expected [%s]' % (
              address, sym_info, expected_symbol))
+
+
class MemoryMapTest(unittest.TestCase):
  """Unit tests for symbol_utils.MemoryMap."""

  def testCreation(self):
    # A freshly-created map contains no sections.
    mem_map = symbol_utils.MemoryMap('test-abi32')
    self.assertIsNone(mem_map.FindSectionForAddress(0))

  def testParseLines(self):
    mem_map = symbol_utils.MemoryMap('test-abi32')
    mem_map.ParseLines(_TEST_MEMORY_MAP.splitlines())
    for exp_addr, exp_size, exp_path, exp_offset in _TEST_MEMORY_MAP_SECTIONS:
      description = '(addr:%x, size:%x, path:%s, offset=%x)' % (
          exp_addr, exp_size, exp_path, exp_offset)

      section = mem_map.FindSectionForAddress(exp_addr)
      self.assertTrue(section, 'Could not find %s' % description)
      self.assertEqual(exp_addr, section.address)
      self.assertEqual(exp_size, section.size)
      self.assertEqual(exp_offset, section.offset)
      self.assertEqual(exp_path, section.path)

  def testTranslateLine(self):
    apk_translator = MockApkTranslator(_TEST_APK_LIBS)
    mem_map = symbol_utils.MemoryMap('test-abi')
    line_pairs = zip(_TEST_MEMORY_MAP.splitlines(),
                     _EXPECTED_TEST_MEMORY_MAP.splitlines())
    for line, expected_line in line_pairs:
      self.assertEqual(expected_line,
                       mem_map.TranslateLine(line, apk_translator))
+
class StackTranslatorTest(unittest.TestCase):
  """Unit tests for symbol_utils.StackTranslator."""

  def testSimpleStack(self):
    android_abi = 'test-abi32'
    mem_map = symbol_utils.MemoryMap(android_abi)
    # ParseLines() takes an iterable of lines; pass splitlines() like
    # MemoryMapTest.testParseLines does — iterating the raw string would
    # yield individual characters instead of lines.
    mem_map.ParseLines(_TEST_MEMORY_MAP.splitlines())
    apk_translator = MockApkTranslator(_TEST_APK_LIBS)
    stack_translator = symbol_utils.StackTranslator(android_abi, mem_map,
                                                    apk_translator)
    input_stack = _TEST_STACK.splitlines()
    expected_stack = _EXPECTED_STACK.splitlines()
    self.assertEqual(len(input_stack), len(expected_stack))
    # Translation operates line-by-line.
    for stack_line, expected_line in zip(input_stack, expected_stack):
      new_line = stack_translator.TranslateLine(stack_line)
      self.assertEqual(new_line, expected_line)
+
+
class MockSymbolResolver(symbol_utils.SymbolResolver):
  """A SymbolResolver that serves symbols from an in-memory table.

  Used to exercise BacktraceTranslator without running a real symbolizer.
  """

  # A regex matching a symbol definition as it appears in a test symbol file.
  # Format is: <hex-offset> <whitespace> <symbol-string>
  _RE_SYMBOL_DEFINITION = re.compile(
      r'(?P<offset>[0-9a-f]+)\s+(?P<symbol>.*)')

  def __init__(self):
    super(MockSymbolResolver, self).__init__()
    # Maps library name -> {offset: symbol info string}.
    self._map = collections.defaultdict(dict)

  def AddTestLibrarySymbols(self, lib_name, offsets_map):
    """Add a new test entry for a given library name.

    Args:
      lib_name: Library name (e.g. 'libfoo.so')
      offsets_map: A mapping from offsets to symbol info strings.
    """
    self._map[lib_name] = offsets_map

  def ReadTestFile(self, file_path, lib_name):
    """Read a single test symbol file, matching a given library.

    Args:
      file_path: Input file path.
      lib_name: Library name these symbols correspond to (e.g. 'libfoo.so')
    """
    with open(file_path) as f:
      for line in f:
        line = line.rstrip()
        m = MockSymbolResolver._RE_SYMBOL_DEFINITION.match(line)
        if m:
          # Offsets are written in hexadecimal (see _RE_SYMBOL_DEFINITION),
          # so they must be parsed with base 16 — base-10 int() would raise
          # ValueError on any offset containing a-f digits.
          offset = int(m.group('offset'), 16)
          symbol = m.group('symbol')
          self._map[lib_name][offset] = symbol

  def ReadTestFilesInDir(self, dir_path, file_suffix):
    """Read all symbol test files in a given directory.

    Args:
      dir_path: Directory path.
      file_suffix: File suffix used to detect test symbol files.
    """
    for filename in os.listdir(dir_path):
      if filename.endswith(file_suffix):
        lib_name = filename[:-len(file_suffix)]
        self.ReadTestFile(os.path.join(dir_path, filename), lib_name)

  def FindSymbolInfo(self, device_path, device_offset):
    """Implement SymbolResolver.FindSymbolInfo.

    Returns:
      The symbol info string registered for (basename of |device_path|,
      |device_offset|), or None if unknown.
    """
    lib_name = os.path.basename(device_path)
    offsets = self._map.get(lib_name)
    if not offsets:
      return None

    return offsets.get(device_offset)
+
+
class BacktraceTranslatorTest(unittest.TestCase):
  """Unit tests for symbol_utils.BacktraceTranslator."""

  def testEmpty(self):
    android_abi = 'test-abi'
    apk_translator = MockApkTranslator()
    backtrace_translator = symbol_utils.BacktraceTranslator(android_abi,
                                                            apk_translator)
    self.assertTrue(backtrace_translator)

  def testFindLibraryOffsets(self):
    android_abi = 'test-abi'
    apk_translator = MockApkTranslator(_TEST_APK_LIBS)
    backtrace_translator = symbol_utils.BacktraceTranslator(android_abi,
                                                            apk_translator)
    input_backtrace = _EXPECTED_BACKTRACE.splitlines()
    expected_lib_offsets_map = _EXPECTED_BACKTRACE_OFFSETS_MAP
    offset_map = backtrace_translator.FindLibraryOffsets(input_backtrace)
    for lib_path, offsets in offset_map.items():
      self.assertTrue(lib_path in expected_lib_offsets_map,
                      '%s is not in expected library-offsets map!' % lib_path)
      # Compare sorted so set iteration order doesn't matter.
      sorted_offsets = sorted(offsets)
      sorted_expected_offsets = sorted(expected_lib_offsets_map[lib_path])
      self.assertEqual(sorted_offsets, sorted_expected_offsets,
                       '%s has invalid offsets %s expected %s' % (
                           lib_path, sorted_offsets, sorted_expected_offsets))

  def testTranslateLine(self):
    android_abi = 'test-abi'
    apk_translator = MockApkTranslator(_TEST_APK_LIBS)
    backtrace_translator = symbol_utils.BacktraceTranslator(android_abi,
                                                            apk_translator)
    input_backtrace = _TEST_BACKTRACE.splitlines()
    expected_backtrace = _EXPECTED_BACKTRACE.splitlines()
    self.assertEqual(len(input_backtrace), len(expected_backtrace))
    # The resolver is read-only here, so create it once instead of once
    # per backtrace line.
    symbol_resolver = MockSymbolResolver()
    for trace_line, expected_line in zip(input_backtrace, expected_backtrace):
      line = backtrace_translator.TranslateLine(trace_line, symbol_resolver)
      self.assertEqual(line, expected_line)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/__init__.py b/third_party/libwebrtc/build/android/pylib/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/__init__.py
diff --git a/third_party/libwebrtc/build/android/pylib/utils/app_bundle_utils.py b/third_party/libwebrtc/build/android/pylib/utils/app_bundle_utils.py
new file mode 100644
index 0000000000..986e12688e
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/app_bundle_utils.py
@@ -0,0 +1,169 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import os
+import re
+import sys
+import tempfile
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'gyp'))
+
+from util import build_utils
+from util import md5_check
+from util import resource_utils
+import bundletool
+
+# List of valid modes for GenerateBundleApks()
+BUILD_APKS_MODES = ('default', 'universal', 'system', 'system_compressed')
+OPTIMIZE_FOR_OPTIONS = ('ABI', 'SCREEN_DENSITY', 'LANGUAGE',
+ 'TEXTURE_COMPRESSION_FORMAT')
+_SYSTEM_MODES = ('system_compressed', 'system')
+
+_ALL_ABIS = ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64']
+
+
+def _CreateDeviceSpec(bundle_path, sdk_version, locales):
+ if not sdk_version:
+ manifest_data = bundletool.RunBundleTool(
+ ['dump', 'manifest', '--bundle', bundle_path])
+ sdk_version = int(
+ re.search(r'minSdkVersion.*?(\d+)', manifest_data).group(1))
+
+ # Setting sdkVersion=minSdkVersion prevents multiple per-minSdkVersion .apk
+ # files from being created within the .apks file.
+ return {
+ 'screenDensity': 1000, # Ignored since we don't split on density.
+ 'sdkVersion': sdk_version,
+ 'supportedAbis': _ALL_ABIS, # Our .aab files are already split on abi.
+ 'supportedLocales': locales,
+ }
+
+
+def GenerateBundleApks(bundle_path,
+ bundle_apks_path,
+ aapt2_path,
+ keystore_path,
+ keystore_password,
+ keystore_alias,
+ mode=None,
+ local_testing=False,
+ minimal=False,
+ minimal_sdk_version=None,
+ check_for_noop=True,
+ system_image_locales=None,
+ optimize_for=None):
+ """Generate an .apks archive from an app bundle if needed.
+
+ Args:
+ bundle_path: Input bundle file path.
+ bundle_apks_path: Output bundle .apks archive path. Name must end with
+ '.apks' or this operation will fail.
+ aapt2_path: Path to aapt2 build tool.
+ keystore_path: Path to keystore.
+ keystore_password: Keystore password, as a string.
+ keystore_alias: Keystore signing key alias.
+ mode: Build mode, which must be either None or one of BUILD_APKS_MODES.
+ minimal: Create the minimal set of apks possible (english-only).
+ minimal_sdk_version: Use this sdkVersion when |minimal| or
+ |system_image_locales| args are present.
+ check_for_noop: Use md5_check to short-circuit when inputs have not changed.
+ system_image_locales: Locales to package in the APK when mode is "system"
+ or "system_compressed".
+ optimize_for: Overrides split configuration, which must be None or
+ one of OPTIMIZE_FOR_OPTIONS.
+ """
+ device_spec = None
+ if minimal_sdk_version:
+ assert minimal or system_image_locales, (
+ 'minimal_sdk_version is only used when minimal or system_image_locales '
+ 'is specified')
+ if minimal:
+ # Measure with one language split installed. Use Hindi because it is
+ # popular. resource_size.py looks for splits/base-hi.apk.
+ # Note: English is always included since it's in base-master.apk.
+ device_spec = _CreateDeviceSpec(bundle_path, minimal_sdk_version, ['hi'])
+ elif mode in _SYSTEM_MODES:
+ if not system_image_locales:
+ raise Exception('system modes require system_image_locales')
+ # Bundletool doesn't seem to understand device specs with locales in the
+ # form of "<lang>-r<region>", so just provide the language code instead.
+ locales = [
+ resource_utils.ToAndroidLocaleName(l).split('-')[0]
+ for l in system_image_locales
+ ]
+ device_spec = _CreateDeviceSpec(bundle_path, minimal_sdk_version, locales)
+
+ def rebuild():
+ logging.info('Building %s', bundle_apks_path)
+ with tempfile.NamedTemporaryFile(suffix='.apks') as tmp_apks_file:
+ cmd_args = [
+ 'build-apks',
+ '--aapt2=%s' % aapt2_path,
+ '--output=%s' % tmp_apks_file.name,
+ '--bundle=%s' % bundle_path,
+ '--ks=%s' % keystore_path,
+ '--ks-pass=pass:%s' % keystore_password,
+ '--ks-key-alias=%s' % keystore_alias,
+ '--overwrite',
+ ]
+
+ if local_testing:
+ cmd_args += ['--local-testing']
+
+ if mode is not None:
+ if mode not in BUILD_APKS_MODES:
+ raise Exception('Invalid mode parameter %s (should be in %s)' %
+ (mode, BUILD_APKS_MODES))
+ cmd_args += ['--mode=' + mode]
+
+ if optimize_for:
+ if optimize_for not in OPTIMIZE_FOR_OPTIONS:
+ raise Exception('Invalid optimize_for parameter %s '
+ '(should be in %s)' %
+ (optimize_for, OPTIMIZE_FOR_OPTIONS))
+ cmd_args += ['--optimize-for=' + optimize_for]
+
+ with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as spec_file:
+ if device_spec:
+ json.dump(device_spec, spec_file)
+ spec_file.flush()
+ cmd_args += ['--device-spec=' + spec_file.name]
+ bundletool.RunBundleTool(cmd_args)
+
+ # Make the resulting .apks file hermetic.
+ with build_utils.TempDir() as temp_dir, \
+ build_utils.AtomicOutput(bundle_apks_path, only_if_changed=False) as f:
+ files = build_utils.ExtractAll(tmp_apks_file.name, temp_dir)
+ build_utils.DoZip(files, f, base_dir=temp_dir)
+
+ if check_for_noop:
+ # NOTE: BUNDLETOOL_JAR_PATH is added to input_strings, rather than
+ # input_paths, to speed up MD5 computations by about 400ms (the .jar file
+ # contains thousands of class files which are checked independently,
+ # resulting in an .md5.stamp of more than 60000 lines!).
+ input_paths = [bundle_path, aapt2_path, keystore_path]
+ input_strings = [
+ keystore_password,
+ keystore_alias,
+ bundletool.BUNDLETOOL_JAR_PATH,
+ # NOTE: BUNDLETOOL_VERSION is already part of BUNDLETOOL_JAR_PATH, but
+ # it's simpler to assume that this may not be the case in the future.
+ bundletool.BUNDLETOOL_VERSION,
+ device_spec,
+ ]
+ if mode is not None:
+ input_strings.append(mode)
+
+ # Avoid rebuilding (saves ~20s) when the input files have not changed. This
+ # is essential when calling the apk_operations.py script multiple times with
+ # the same bundle (e.g. out/Debug/bin/monochrome_public_bundle run).
+ md5_check.CallAndRecordIfStale(
+ rebuild,
+ input_paths=input_paths,
+ input_strings=input_strings,
+ output_paths=[bundle_apks_path])
+ else:
+ rebuild()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/argparse_utils.py b/third_party/libwebrtc/build/android/pylib/utils/argparse_utils.py
new file mode 100644
index 0000000000..bd603c9d5a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/argparse_utils.py
@@ -0,0 +1,52 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+
+import argparse
+
+
+class CustomHelpAction(argparse.Action):
+ '''Allows defining custom help actions.
+
+ Help actions can run even when the parser would otherwise fail on missing
+ arguments. The first help or custom help command mentioned on the command
+ line will have its help text displayed.
+
+ Usage:
+ parser = argparse.ArgumentParser(...)
+ CustomHelpAction.EnableFor(parser)
+ parser.add_argument('--foo-help',
+ action='custom_help',
+ custom_help_text='this is the help message',
+ help='What this helps with')
+ '''
+ # Derived from argparse._HelpAction from
+ # https://github.com/python/cpython/blob/master/Lib/argparse.py
+
+ # pylint: disable=redefined-builtin
+ # (complains about 'help' being redefined)
+ def __init__(self,
+ option_strings,
+ dest=argparse.SUPPRESS,
+ default=argparse.SUPPRESS,
+ custom_help_text=None,
+ help=None):
+ super(CustomHelpAction, self).__init__(option_strings=option_strings,
+ dest=dest,
+ default=default,
+ nargs=0,
+ help=help)
+
+ if not custom_help_text:
+ raise ValueError('custom_help_text is required')
+ self._help_text = custom_help_text
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ print(self._help_text)
+ parser.exit()
+
+ @staticmethod
+ def EnableFor(parser):
+ parser.register('action', 'custom_help', CustomHelpAction)
diff --git a/third_party/libwebrtc/build/android/pylib/utils/chrome_proxy_utils.py b/third_party/libwebrtc/build/android/pylib/utils/chrome_proxy_utils.py
new file mode 100644
index 0000000000..149d0b9c8c
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/chrome_proxy_utils.py
@@ -0,0 +1,171 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Utilities for setting up and tearing down WPR and TsProxy services."""
+
+from py_utils import ts_proxy_server
+from py_utils import webpagereplay_go_server
+
+from devil.android import forwarder
+
+PROXY_HOST_IP = '127.0.0.1'
+# From Catapult/WebPageReplay document.
+IGNORE_CERT_ERROR_SPKI_LIST = 'PhrPvGIaAMmd29hj8BCZOq096yj7uMpRNHpn5PDxI6I='
+PROXY_SERVER = 'socks5://localhost'
+DEFAULT_DEVICE_PORT = 1080
+DEFAULT_ROUND_TRIP_LATENCY_MS = 100
+DEFAULT_DOWNLOAD_BANDWIDTH_KBPS = 72000
+DEFAULT_UPLOAD_BANDWIDTH_KBPS = 72000
+
+
+class WPRServer(object):
+ """Utils to set up a webpagereplay_go_server instance."""
+
+ def __init__(self):
+ self._archive_path = None
+ self._host_http_port = 0
+ self._host_https_port = 0
+ self._record_mode = False
+ self._server = None
+
+ def StartServer(self, wpr_archive_path):
+ """Starts a webpagereplay_go_server instance."""
+ if wpr_archive_path == self._archive_path and self._server:
+ # Reuse existing webpagereplay_go_server instance.
+ return
+
+ if self._server:
+ self.StopServer()
+
+ replay_options = []
+ if self._record_mode:
+ replay_options.append('--record')
+
+ ports = {}
+ if not self._server:
+ self._server = webpagereplay_go_server.ReplayServer(
+ wpr_archive_path,
+ PROXY_HOST_IP,
+ http_port=self._host_http_port,
+ https_port=self._host_https_port,
+ replay_options=replay_options)
+ self._archive_path = wpr_archive_path
+ ports = self._server.StartServer()
+
+ self._host_http_port = ports['http']
+ self._host_https_port = ports['https']
+
+ def StopServer(self):
+ """Stops the webpagereplay_go_server instance and resets archive."""
+ self._server.StopServer()
+ self._server = None
+ self._host_http_port = 0
+ self._host_https_port = 0
+
+ @staticmethod
+ def SetServerBinaryPath(go_binary_path):
+ """Sets the go_binary_path for webpagereplay_go_server.ReplayServer."""
+ webpagereplay_go_server.ReplayServer.SetGoBinaryPath(go_binary_path)
+
+ @property
+ def record_mode(self):
+ return self._record_mode
+
+ @record_mode.setter
+ def record_mode(self, value):
+ self._record_mode = value
+
+ @property
+ def http_port(self):
+ return self._host_http_port
+
+ @property
+ def https_port(self):
+ return self._host_https_port
+
+ @property
+ def archive_path(self):
+ return self._archive_path
+
+
+class ChromeProxySession(object):
+ """Utils to help set up a Chrome Proxy."""
+
+ def __init__(self, device_proxy_port=DEFAULT_DEVICE_PORT):
+ self._device_proxy_port = device_proxy_port
+ self._ts_proxy_server = ts_proxy_server.TsProxyServer(PROXY_HOST_IP)
+ self._wpr_server = WPRServer()
+
+ @property
+ def wpr_record_mode(self):
+ """Returns whether this proxy session was running in record mode."""
+ return self._wpr_server.record_mode
+
+ @wpr_record_mode.setter
+ def wpr_record_mode(self, value):
+ self._wpr_server.record_mode = value
+
+ @property
+ def wpr_replay_mode(self):
+ """Returns whether this proxy session was running in replay mode."""
+ return not self._wpr_server.record_mode
+
+ @property
+ def wpr_archive_path(self):
+ """Returns the wpr archive file path used in this proxy session."""
+ return self._wpr_server.archive_path
+
+ @property
+ def device_proxy_port(self):
+ return self._device_proxy_port
+
+ def GetFlags(self):
+ """Gets the chrome command line flags needed by ChromeProxySession."""
+ extra_flags = []
+
+ extra_flags.append('--ignore-certificate-errors-spki-list=%s' %
+ IGNORE_CERT_ERROR_SPKI_LIST)
+ extra_flags.append('--proxy-server=%s:%s' %
+ (PROXY_SERVER, self._device_proxy_port))
+ return extra_flags
+
+ @staticmethod
+ def SetWPRServerBinary(go_binary_path):
+ """Sets the WPR server go_binary_path."""
+ WPRServer.SetServerBinaryPath(go_binary_path)
+
+ def Start(self, device, wpr_archive_path):
+ """Starts the wpr_server as well as the ts_proxy server and sets up the environment.
+
+ Args:
+ device: A DeviceUtils instance.
+ wpr_archive_path: An absolute path to the wpr archive file.
+
+ """
+ self._wpr_server.StartServer(wpr_archive_path)
+ self._ts_proxy_server.StartServer()
+
+ # Maps device port to host port
+ forwarder.Forwarder.Map(
+ [(self._device_proxy_port, self._ts_proxy_server.port)], device)
+ # Maps tsProxy port to wpr http/https ports
+ self._ts_proxy_server.UpdateOutboundPorts(
+ http_port=self._wpr_server.http_port,
+ https_port=self._wpr_server.https_port)
+ self._ts_proxy_server.UpdateTrafficSettings(
+ round_trip_latency_ms=DEFAULT_ROUND_TRIP_LATENCY_MS,
+ download_bandwidth_kbps=DEFAULT_DOWNLOAD_BANDWIDTH_KBPS,
+ upload_bandwidth_kbps=DEFAULT_UPLOAD_BANDWIDTH_KBPS)
+
+ def Stop(self, device):
+ """Stops the wpr_server and ts_proxy server and tears down the environment.
+
+ Note that Stop does not reset wpr_record_mode, wpr_replay_mode,
+ wpr_archive_path property.
+
+ Args:
+ device: A DeviceUtils instance.
+ """
+ self._wpr_server.StopServer()
+ self._ts_proxy_server.StopServer()
+ forwarder.Forwarder.UnmapDevicePort(self._device_proxy_port, device)
diff --git a/third_party/libwebrtc/build/android/pylib/utils/chrome_proxy_utils_test.py b/third_party/libwebrtc/build/android/pylib/utils/chrome_proxy_utils_test.py
new file mode 100755
index 0000000000..7a52024661
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/chrome_proxy_utils_test.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env vpython3
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Tests for chrome_proxy_utils."""
+
+#pylint: disable=protected-access
+
+import os
+import unittest
+
+from pylib.utils import chrome_proxy_utils
+
+from devil.android import forwarder
+from devil.android import device_utils
+from devil.android.sdk import adb_wrapper
+from py_utils import ts_proxy_server
+from py_utils import webpagereplay_go_server
+
+import mock # pylint: disable=import-error
+
+
+def _DeviceUtilsMock(test_serial, is_ready=True):
+ """Returns a DeviceUtils instance based on given serial."""
+ adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
+ adb.__str__ = mock.Mock(return_value=test_serial)
+ adb.GetDeviceSerial.return_value = test_serial
+ adb.is_ready = is_ready
+ return device_utils.DeviceUtils(adb)
+
+
+class ChromeProxySessionTest(unittest.TestCase):
+ """Unittest for ChromeProxySession."""
+
+ #pylint: disable=no-self-use
+
+ @mock.patch.object(forwarder.Forwarder, 'Map')
+ @mock.patch.object(chrome_proxy_utils.WPRServer, 'StartServer')
+ @mock.patch.object(ts_proxy_server.TsProxyServer, 'StartServer')
+ @mock.patch.object(ts_proxy_server.TsProxyServer, 'UpdateOutboundPorts')
+ @mock.patch.object(ts_proxy_server.TsProxyServer, 'UpdateTrafficSettings')
+ @mock.patch('py_utils.ts_proxy_server.TsProxyServer.port',
+ new_callable=mock.PropertyMock)
+ def test_Start(self, port_mock, traffic_setting_mock, outboundport_mock,
+ start_server_mock, wpr_mock, forwarder_mock):
+ chrome_proxy = chrome_proxy_utils.ChromeProxySession(4)
+ chrome_proxy._wpr_server._host_http_port = 1
+ chrome_proxy._wpr_server._host_https_port = 2
+ port_mock.return_value = 3
+ device = _DeviceUtilsMock('01234')
+ chrome_proxy.Start(device, 'abc')
+
+ forwarder_mock.assert_called_once_with([(4, 3)], device)
+ wpr_mock.assert_called_once_with('abc')
+ start_server_mock.assert_called_once()
+ outboundport_mock.assert_called_once_with(http_port=1, https_port=2)
+ traffic_setting_mock.assert_called_once_with(download_bandwidth_kbps=72000,
+ round_trip_latency_ms=100,
+ upload_bandwidth_kbps=72000)
+ port_mock.assert_called_once()
+
+ @mock.patch.object(forwarder.Forwarder, 'UnmapDevicePort')
+ @mock.patch.object(chrome_proxy_utils.WPRServer, 'StopServer')
+ @mock.patch.object(ts_proxy_server.TsProxyServer, 'StopServer')
+ def test_Stop(self, ts_proxy_mock, wpr_mock, forwarder_mock):
+ chrome_proxy = chrome_proxy_utils.ChromeProxySession(4)
+ device = _DeviceUtilsMock('01234')
+ chrome_proxy.wpr_record_mode = True
+ chrome_proxy._wpr_server._archive_path = 'abc'
+ chrome_proxy.Stop(device)
+
+ forwarder_mock.assert_called_once_with(4, device)
+ wpr_mock.assert_called_once_with()
+ ts_proxy_mock.assert_called_once_with()
+
+ #pylint: enable=no-self-use
+
+ @mock.patch.object(forwarder.Forwarder, 'UnmapDevicePort')
+ @mock.patch.object(webpagereplay_go_server.ReplayServer, 'StopServer')
+ @mock.patch.object(ts_proxy_server.TsProxyServer, 'StopServer')
+ def test_Stop_WithProperties(self, ts_proxy_mock, wpr_mock, forwarder_mock):
+ chrome_proxy = chrome_proxy_utils.ChromeProxySession(4)
+ chrome_proxy._wpr_server._server = webpagereplay_go_server.ReplayServer(
+ os.path.abspath(__file__), chrome_proxy_utils.PROXY_HOST_IP, 0, 0, [])
+ chrome_proxy._wpr_server._archive_path = os.path.abspath(__file__)
+ device = _DeviceUtilsMock('01234')
+ chrome_proxy.wpr_record_mode = True
+ chrome_proxy.Stop(device)
+
+ forwarder_mock.assert_called_once_with(4, device)
+ wpr_mock.assert_called_once_with()
+ ts_proxy_mock.assert_called_once_with()
+ self.assertFalse(chrome_proxy.wpr_replay_mode)
+ self.assertEqual(chrome_proxy.wpr_archive_path, os.path.abspath(__file__))
+
+ def test_SetWPRRecordMode(self):
+ chrome_proxy = chrome_proxy_utils.ChromeProxySession(4)
+ chrome_proxy.wpr_record_mode = True
+ self.assertTrue(chrome_proxy._wpr_server.record_mode)
+ self.assertTrue(chrome_proxy.wpr_record_mode)
+ self.assertFalse(chrome_proxy.wpr_replay_mode)
+
+ chrome_proxy.wpr_record_mode = False
+ self.assertFalse(chrome_proxy._wpr_server.record_mode)
+ self.assertFalse(chrome_proxy.wpr_record_mode)
+ self.assertTrue(chrome_proxy.wpr_replay_mode)
+
+ def test_SetWPRArchivePath(self):
+ chrome_proxy = chrome_proxy_utils.ChromeProxySession(4)
+ chrome_proxy._wpr_server._archive_path = 'abc'
+ self.assertEqual(chrome_proxy.wpr_archive_path, 'abc')
+
+ def test_UseDefaultDeviceProxyPort(self):
+ chrome_proxy = chrome_proxy_utils.ChromeProxySession()
+ expected_flags = [
+ '--ignore-certificate-errors-spki-list='
+ 'PhrPvGIaAMmd29hj8BCZOq096yj7uMpRNHpn5PDxI6I=',
+ '--proxy-server=socks5://localhost:1080'
+ ]
+ self.assertEqual(chrome_proxy.device_proxy_port, 1080)
+ self.assertListEqual(chrome_proxy.GetFlags(), expected_flags)
+
+ def test_UseNewDeviceProxyPort(self):
+ chrome_proxy = chrome_proxy_utils.ChromeProxySession(1)
+ expected_flags = [
+ '--ignore-certificate-errors-spki-list='
+ 'PhrPvGIaAMmd29hj8BCZOq096yj7uMpRNHpn5PDxI6I=',
+ '--proxy-server=socks5://localhost:1'
+ ]
+ self.assertEqual(chrome_proxy.device_proxy_port, 1)
+ self.assertListEqual(chrome_proxy.GetFlags(), expected_flags)
+
+
+class WPRServerTest(unittest.TestCase):
+ @mock.patch('py_utils.webpagereplay_go_server.ReplayServer')
+ def test_StartServer_fresh_replaymode(self, wpr_mock):
+ wpr_server = chrome_proxy_utils.WPRServer()
+ wpr_archive_file = os.path.abspath(__file__)
+ wpr_server.StartServer(wpr_archive_file)
+
+ wpr_mock.assert_called_once_with(wpr_archive_file,
+ '127.0.0.1',
+ http_port=0,
+ https_port=0,
+ replay_options=[])
+
+ self.assertEqual(wpr_server._archive_path, wpr_archive_file)
+ self.assertTrue(wpr_server._server)
+
+ @mock.patch('py_utils.webpagereplay_go_server.ReplayServer')
+ def test_StartServer_fresh_recordmode(self, wpr_mock):
+ wpr_server = chrome_proxy_utils.WPRServer()
+ wpr_server.record_mode = True
+ wpr_server.StartServer(os.path.abspath(__file__))
+ wpr_archive_file = os.path.abspath(__file__)
+
+ wpr_mock.assert_called_once_with(wpr_archive_file,
+ '127.0.0.1',
+ http_port=0,
+ https_port=0,
+ replay_options=['--record'])
+
+ self.assertEqual(wpr_server._archive_path, os.path.abspath(__file__))
+ self.assertTrue(wpr_server._server)
+
+ #pylint: disable=no-self-use
+
+ @mock.patch.object(webpagereplay_go_server.ReplayServer, 'StartServer')
+ def test_StartServer_recordmode(self, start_server_mock):
+ wpr_server = chrome_proxy_utils.WPRServer()
+ start_server_mock.return_value = {'http': 1, 'https': 2}
+ wpr_server.StartServer(os.path.abspath(__file__))
+
+ start_server_mock.assert_called_once()
+ self.assertEqual(wpr_server._host_http_port, 1)
+ self.assertEqual(wpr_server._host_https_port, 2)
+ self.assertEqual(wpr_server._archive_path, os.path.abspath(__file__))
+ self.assertTrue(wpr_server._server)
+
+ @mock.patch.object(webpagereplay_go_server.ReplayServer, 'StartServer')
+ def test_StartServer_reuseServer(self, start_server_mock):
+ wpr_server = chrome_proxy_utils.WPRServer()
+ wpr_server._server = webpagereplay_go_server.ReplayServer(
+ os.path.abspath(__file__),
+ chrome_proxy_utils.PROXY_HOST_IP,
+ http_port=0,
+ https_port=0,
+ replay_options=[])
+ wpr_server._archive_path = os.path.abspath(__file__)
+ wpr_server.StartServer(os.path.abspath(__file__))
+ start_server_mock.assert_not_called()
+
+ @mock.patch.object(webpagereplay_go_server.ReplayServer, 'StartServer')
+ @mock.patch.object(webpagereplay_go_server.ReplayServer, 'StopServer')
+ def test_StartServer_notReuseServer(self, stop_server_mock, start_server_mock):
+ wpr_server = chrome_proxy_utils.WPRServer()
+ wpr_server._server = webpagereplay_go_server.ReplayServer(
+ os.path.abspath(__file__),
+ chrome_proxy_utils.PROXY_HOST_IP,
+ http_port=0,
+ https_port=0,
+ replay_options=[])
+ wpr_server._archive_path = ''
+ wpr_server.StartServer(os.path.abspath(__file__))
+ start_server_mock.assert_called_once()
+ stop_server_mock.assert_called_once()
+
+ #pylint: enable=no-self-use
+
+ @mock.patch.object(webpagereplay_go_server.ReplayServer, 'StopServer')
+ def test_StopServer(self, stop_server_mock):
+ wpr_server = chrome_proxy_utils.WPRServer()
+ wpr_server._server = webpagereplay_go_server.ReplayServer(
+ os.path.abspath(__file__),
+ chrome_proxy_utils.PROXY_HOST_IP,
+ http_port=0,
+ https_port=0,
+ replay_options=[])
+ wpr_server.StopServer()
+ stop_server_mock.assert_called_once()
+ self.assertFalse(wpr_server._server)
+ self.assertFalse(wpr_server._archive_path)
+ self.assertFalse(wpr_server.http_port)
+ self.assertFalse(wpr_server.https_port)
+
+ def test_SetWPRRecordMode(self):
+ wpr_server = chrome_proxy_utils.WPRServer()
+ wpr_server.record_mode = True
+ self.assertTrue(wpr_server.record_mode)
+ wpr_server.record_mode = False
+ self.assertFalse(wpr_server.record_mode)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/utils/decorators.py b/third_party/libwebrtc/build/android/pylib/utils/decorators.py
new file mode 100644
index 0000000000..8eec1d1e58
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/decorators.py
@@ -0,0 +1,37 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import functools
+import logging
+
+
+def Memoize(f):
+ """Decorator to cache return values of function."""
+ memoize_dict = {}
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ key = repr((args, kwargs))
+ if key not in memoize_dict:
+ memoize_dict[key] = f(*args, **kwargs)
+ return memoize_dict[key]
+ return wrapper
+
+
+def NoRaiseException(default_return_value=None, exception_message=''):
+ """Returns decorator that catches and logs uncaught Exceptions.
+
+ Args:
+ default_return_value: Value to return in the case of uncaught Exception.
+ exception_message: Message for uncaught exceptions.
+ """
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except Exception: # pylint: disable=broad-except
+ logging.exception(exception_message)
+ return default_return_value
+ return wrapper
+ return decorator
diff --git a/third_party/libwebrtc/build/android/pylib/utils/decorators_test.py b/third_party/libwebrtc/build/android/pylib/utils/decorators_test.py
new file mode 100755
index 0000000000..5d39846824
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/decorators_test.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env vpython3
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for decorators.py."""
+
+import unittest
+
+from pylib.utils import decorators
+
+
+class NoRaiseExceptionDecoratorTest(unittest.TestCase):
+
+ def testFunctionDoesNotRaiseException(self):
+ """Tests that the |NoRaiseException| decorator catches exception."""
+
+ @decorators.NoRaiseException()
+ def raiseException():
+ raise Exception()
+
+ try:
+ raiseException()
+ except Exception: # pylint: disable=broad-except
+ self.fail('Exception was not caught by |NoRaiseException| decorator')
+
+ def testFunctionReturnsCorrectValues(self):
+ """Tests that the |NoRaiseException| decorator returns correct values."""
+
+ @decorators.NoRaiseException(default_return_value=111)
+ def raiseException():
+ raise Exception()
+
+ @decorators.NoRaiseException(default_return_value=111)
+ def doesNotRaiseException():
+ return 999
+
+ self.assertEqual(raiseException(), 111)
+ self.assertEqual(doesNotRaiseException(), 999)
+
+
+class MemoizeDecoratorTest(unittest.TestCase):
+
+ def testFunctionExceptionNotMemoized(self):
+ """Tests that |Memoize| decorator does not cache exception results."""
+
+ class ExceptionType1(Exception):
+ pass
+
+ class ExceptionType2(Exception):
+ pass
+
+ @decorators.Memoize
+ def raiseExceptions():
+ if raiseExceptions.count == 0:
+ raiseExceptions.count += 1
+ raise ExceptionType1()
+
+ if raiseExceptions.count == 1:
+ raise ExceptionType2()
+ raiseExceptions.count = 0
+
+ with self.assertRaises(ExceptionType1):
+ raiseExceptions()
+ with self.assertRaises(ExceptionType2):
+ raiseExceptions()
+
+ def testFunctionResultMemoized(self):
+ """Tests that |Memoize| decorator caches results."""
+
+ @decorators.Memoize
+ def memoized():
+ memoized.count += 1
+ return memoized.count
+ memoized.count = 0
+
+ def notMemoized():
+ notMemoized.count += 1
+ return notMemoized.count
+ notMemoized.count = 0
+
+ self.assertEqual(memoized(), 1)
+ self.assertEqual(memoized(), 1)
+ self.assertEqual(memoized(), 1)
+
+ self.assertEqual(notMemoized(), 1)
+ self.assertEqual(notMemoized(), 2)
+ self.assertEqual(notMemoized(), 3)
+
+ def testFunctionMemoizedBasedOnArgs(self):
+ """Tests that |Memoize| caches results based on args and kwargs."""
+
+ @decorators.Memoize
+ def returnValueBasedOnArgsKwargs(a, k=0):
+ return a + k
+
+ self.assertEqual(returnValueBasedOnArgsKwargs(1, 1), 2)
+ self.assertEqual(returnValueBasedOnArgsKwargs(1, 2), 3)
+ self.assertEqual(returnValueBasedOnArgsKwargs(2, 1), 3)
+ self.assertEqual(returnValueBasedOnArgsKwargs(3, 3), 6)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/utils/device_dependencies.py b/third_party/libwebrtc/build/android/pylib/utils/device_dependencies.py
new file mode 100644
index 0000000000..9cb5bd892a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/device_dependencies.py
@@ -0,0 +1,136 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+
+from pylib import constants
+
+
+_EXCLUSIONS = [
+ re.compile(r'.*OWNERS'), # Should never be included.
+ re.compile(r'.*\.crx'), # Chrome extension zip files.
+ re.compile(os.path.join('.*',
+ r'\.git.*')), # Any '.git*' directories/files.
+ re.compile(r'.*\.so'), # Libraries packed into .apk.
+ re.compile(r'.*Mojo.*manifest\.json'), # Some source_set()s pull these in.
+ re.compile(r'.*\.py'), # Some test_support targets include python deps.
+ re.compile(r'.*\.apk'), # Should be installed separately.
+ re.compile(r'.*lib.java/.*'), # Never need java intermediates.
+
+ # Test filter files:
+ re.compile(r'.*/testing/buildbot/filters/.*'),
+
+ # Chrome external extensions config file.
+ re.compile(r'.*external_extensions\.json'),
+
+ # Exists just to test the compile, not to be run.
+ re.compile(r'.*jni_generator_tests'),
+
+ # v8's blobs and icu data get packaged into APKs.
+ re.compile(r'.*snapshot_blob.*\.bin'),
+ re.compile(r'.*icudtl\.bin'),
+
+ # Scripts that are needed by swarming, but not on devices:
+ re.compile(r'.*llvm-symbolizer'),
+ re.compile(r'.*md5sum_bin'),
+ re.compile(os.path.join('.*', 'development', 'scripts', 'stack')),
+
+ # Required for java deobfuscation on the host:
+ re.compile(r'.*build/android/stacktrace/.*'),
+ re.compile(r'.*third_party/jdk/.*'),
+ re.compile(r'.*third_party/proguard/.*'),
+
+ # Build artifacts:
+ re.compile(r'.*\.stamp'),
+ re.compile(r'.*\.pak\.info'),
+ re.compile(r'.*\.incremental\.json'),
+]
+
+
+def _FilterDataDeps(abs_host_files):
+ exclusions = _EXCLUSIONS + [
+ re.compile(os.path.join(constants.GetOutDirectory(), 'bin'))
+ ]
+ return [p for p in abs_host_files if not any(r.match(p) for r in exclusions)]
+
+
+def DevicePathComponentsFor(host_path, output_directory):
+ """Returns the device path components for a given host path.
+
+ This returns the device path as a list of joinable path components,
+ with None as the first element to indicate that the path should be
+ rooted at $EXTERNAL_STORAGE.
+
+ e.g., given
+
+ '$RUNTIME_DEPS_ROOT_DIR/foo/bar/baz.txt'
+
+ this would return
+
+ [None, 'foo', 'bar', 'baz.txt']
+
+ This handles a couple classes of paths differently than it otherwise would:
+ - All .pak files get mapped to top-level paks/
+ - All other dependencies get mapped to the top level directory
+ - If a file is not in the output directory then its relative path to
+ the output directory will start with .. strings, so we remove those
+ and then the path gets mapped to the top-level directory
+ - If a file is in the output directory then the relative path to the
+ output directory gets mapped to the top-level directory
+
+ e.g. given
+
+ '$RUNTIME_DEPS_ROOT_DIR/out/Release/icu_fake_dir/icudtl.dat'
+
+ this would return
+
+ [None, 'icu_fake_dir', 'icudtl.dat']
+
+ Args:
+ host_path: The absolute path to the host file.
+ Returns:
+ A list of device path components.
+ """
+ if (host_path.startswith(output_directory) and
+ os.path.splitext(host_path)[1] == '.pak'):
+ return [None, 'paks', os.path.basename(host_path)]
+
+ rel_host_path = os.path.relpath(host_path, output_directory)
+
+ device_path_components = [None]
+ p = rel_host_path
+ while p:
+ p, d = os.path.split(p)
+ # The relative path from the output directory to a file under the runtime
+ # deps root directory may start with multiple .. strings, so they need to
+ # be skipped.
+ if d and d != os.pardir:
+ device_path_components.insert(1, d)
+ return device_path_components
+
+
+def GetDataDependencies(runtime_deps_path):
+ """Returns a list of device data dependencies.
+
+ Args:
+ runtime_deps_path: A str path to the .runtime_deps file.
+ Returns:
+ A list of (host_path, device_path) tuples.
+ """
+ if not runtime_deps_path:
+ return []
+
+ with open(runtime_deps_path, 'r') as runtime_deps_file:
+ rel_host_files = [l.strip() for l in runtime_deps_file if l]
+
+ output_directory = constants.GetOutDirectory()
+ abs_host_files = [
+ os.path.abspath(os.path.join(output_directory, r))
+ for r in rel_host_files]
+ filtered_abs_host_files = _FilterDataDeps(abs_host_files)
+ # TODO(crbug.com/752610): Filter out host executables, and investigate
+ # whether other files could be filtered as well.
+ return [(f, DevicePathComponentsFor(f, output_directory))
+ for f in filtered_abs_host_files]
diff --git a/third_party/libwebrtc/build/android/pylib/utils/device_dependencies_test.py b/third_party/libwebrtc/build/android/pylib/utils/device_dependencies_test.py
new file mode 100755
index 0000000000..35879882b7
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/device_dependencies_test.py
@@ -0,0 +1,52 @@
+#! /usr/bin/env vpython3
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from pylib import constants
+from pylib.utils import device_dependencies
+
+
+class DevicePathComponentsForTest(unittest.TestCase):
+
+ def testCheckedInFile(self):
+ test_path = os.path.join(constants.DIR_SOURCE_ROOT, 'foo', 'bar', 'baz.txt')
+ output_directory = os.path.join(
+ constants.DIR_SOURCE_ROOT, 'out-foo', 'Release')
+ self.assertEqual([None, 'foo', 'bar', 'baz.txt'],
+ device_dependencies.DevicePathComponentsFor(
+ test_path, output_directory))
+
+ def testOutputDirectoryFile(self):
+ test_path = os.path.join(constants.DIR_SOURCE_ROOT, 'out-foo', 'Release',
+ 'icudtl.dat')
+ output_directory = os.path.join(
+ constants.DIR_SOURCE_ROOT, 'out-foo', 'Release')
+ self.assertEqual([None, 'icudtl.dat'],
+ device_dependencies.DevicePathComponentsFor(
+ test_path, output_directory))
+
+ def testOutputDirectorySubdirFile(self):
+ test_path = os.path.join(constants.DIR_SOURCE_ROOT, 'out-foo', 'Release',
+ 'test_dir', 'icudtl.dat')
+ output_directory = os.path.join(
+ constants.DIR_SOURCE_ROOT, 'out-foo', 'Release')
+ self.assertEqual([None, 'test_dir', 'icudtl.dat'],
+ device_dependencies.DevicePathComponentsFor(
+ test_path, output_directory))
+
+ def testOutputDirectoryPakFile(self):
+ test_path = os.path.join(constants.DIR_SOURCE_ROOT, 'out-foo', 'Release',
+ 'foo.pak')
+ output_directory = os.path.join(
+ constants.DIR_SOURCE_ROOT, 'out-foo', 'Release')
+ self.assertEqual([None, 'paks', 'foo.pak'],
+ device_dependencies.DevicePathComponentsFor(
+ test_path, output_directory))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/dexdump.py b/third_party/libwebrtc/build/android/pylib/utils/dexdump.py
new file mode 100644
index 0000000000..f81ac603d4
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/dexdump.py
@@ -0,0 +1,136 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import shutil
+import sys
+import tempfile
+from xml.etree import ElementTree
+
+from devil.utils import cmd_helper
+from pylib import constants
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'gyp'))
+from util import build_utils
+
+DEXDUMP_PATH = os.path.join(constants.ANDROID_SDK_TOOLS, 'dexdump')
+
+
+def Dump(apk_path):
+ """Dumps class and method information from a APK into a dict via dexdump.
+
+ Args:
+ apk_path: An absolute path to an APK file to dump.
+ Returns:
+ A list of dicts, one per dex file in the APK, each in the format:
+ {
+ <package_name>: {
+ 'classes': {
+ <class_name>: {
+ 'methods': [<method_1>, <method_2>]
+ }
+ }
+ }
+ }
+ """
+ try:
+ dexfile_dir = tempfile.mkdtemp()
+ parsed_dex_files = []
+ for dex_file in build_utils.ExtractAll(apk_path,
+ dexfile_dir,
+ pattern='*classes*.dex'):
+ output_xml = cmd_helper.GetCmdOutput(
+ [DEXDUMP_PATH, '-l', 'xml', dex_file])
+ # Dexdump doesn't escape its XML output very well; decode it as utf-8 with
+ # invalid sequences replaced, then remove forbidden characters and
+ # re-encode it (as etree expects a byte string as input so it can figure
+ # out the encoding itself from the XML declaration)
+ BAD_XML_CHARS = re.compile(
+ u'[\x00-\x08\x0b-\x0c\x0e-\x1f\x7f-\x84\x86-\x9f' +
+ u'\ud800-\udfff\ufdd0-\ufddf\ufffe-\uffff]')
+ if sys.version_info[0] < 3:
+ decoded_xml = output_xml.decode('utf-8', 'replace')
+ clean_xml = BAD_XML_CHARS.sub(u'\ufffd', decoded_xml)
+ else:
+ # Line duplicated to avoid pylint redefined-variable-type error.
+ clean_xml = BAD_XML_CHARS.sub(u'\ufffd', output_xml)
+ parsed_dex_files.append(
+ _ParseRootNode(ElementTree.fromstring(clean_xml.encode('utf-8'))))
+ return parsed_dex_files
+ finally:
+ shutil.rmtree(dexfile_dir)
+
+
+def _ParseRootNode(root):
+ """Parses the XML output of dexdump. This output is in the following format.
+
+ This is a subset of the information contained within dexdump output.
+
+ <api>
+ <package name="foo.bar">
+ <class name="Class" extends="foo.bar.SuperClass">
+ <field name="Field">
+ </field>
+ <constructor name="Method">
+ <parameter name="Param" type="int">
+ </parameter>
+ </constructor>
+ <method name="Method">
+ <parameter name="Param" type="int">
+ </parameter>
+ </method>
+ </class>
+ </package>
+ </api>
+ """
+ results = {}
+ for child in root:
+ if child.tag == 'package':
+ package_name = child.attrib['name']
+ parsed_node = _ParsePackageNode(child)
+ if package_name in results:
+ results[package_name]['classes'].update(parsed_node['classes'])
+ else:
+ results[package_name] = parsed_node
+ return results
+
+
+def _ParsePackageNode(package_node):
+ """Parses a <package> node from the dexdump xml output.
+
+ Returns:
+ A dict in the format:
+ {
+ 'classes': {
+ <class_1>: {
+ 'methods': [<method_1>, <method_2>]
+ },
+ <class_2>: {
+ 'methods': [<method_1>, <method_2>]
+ },
+ }
+ }
+ """
+ classes = {}
+ for child in package_node:
+ if child.tag == 'class':
+ classes[child.attrib['name']] = _ParseClassNode(child)
+ return {'classes': classes}
+
+
+def _ParseClassNode(class_node):
+ """Parses a <class> node from the dexdump xml output.
+
+ Returns:
+ A dict in the format:
+ {
+ 'methods': [<method_1>, <method_2>]
+ }
+ """
+ methods = []
+ for child in class_node:
+ if child.tag == 'method':
+ methods.append(child.attrib['name'])
+ return {'methods': methods, 'superclass': class_node.attrib['extends']}
diff --git a/third_party/libwebrtc/build/android/pylib/utils/dexdump_test.py b/third_party/libwebrtc/build/android/pylib/utils/dexdump_test.py
new file mode 100755
index 0000000000..fc2914a4e5
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/dexdump_test.py
@@ -0,0 +1,141 @@
+#! /usr/bin/env vpython3
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+from xml.etree import ElementTree
+
+from pylib.utils import dexdump
+
+# pylint: disable=protected-access
+
+
+class DexdumpXMLParseTest(unittest.TestCase):
+
+ def testParseRootXmlNode(self):
+ example_xml_string = (
+ '<api>'
+ '<package name="com.foo.bar1">'
+ '<class'
+ ' name="Class1"'
+ ' extends="java.lang.Object"'
+ ' abstract="false"'
+ ' static="false"'
+ ' final="true"'
+ ' visibility="public">'
+ '<method'
+ ' name="class1Method1"'
+ ' return="java.lang.String"'
+ ' abstract="false"'
+ ' native="false"'
+ ' synchronized="false"'
+ ' static="false"'
+ ' final="false"'
+ ' visibility="public">'
+ '</method>'
+ '<method'
+ ' name="class1Method2"'
+ ' return="viod"'
+ ' abstract="false"'
+ ' native="false"'
+ ' synchronized="false"'
+ ' static="false"'
+ ' final="false"'
+ ' visibility="public">'
+ '</method>'
+ '</class>'
+ '<class'
+ ' name="Class2"'
+ ' extends="java.lang.Object"'
+ ' abstract="false"'
+ ' static="false"'
+ ' final="true"'
+ ' visibility="public">'
+ '<method'
+ ' name="class2Method1"'
+ ' return="java.lang.String"'
+ ' abstract="false"'
+ ' native="false"'
+ ' synchronized="false"'
+ ' static="false"'
+ ' final="false"'
+ ' visibility="public">'
+ '</method>'
+ '</class>'
+ '</package>'
+ '<package name="com.foo.bar2">'
+ '</package>'
+ '<package name="com.foo.bar3">'
+ '</package>'
+ '</api>')
+
+ actual = dexdump._ParseRootNode(
+ ElementTree.fromstring(example_xml_string))
+
+ expected = {
+ 'com.foo.bar1' : {
+ 'classes': {
+ 'Class1': {
+ 'methods': ['class1Method1', 'class1Method2'],
+ 'superclass': 'java.lang.Object',
+ },
+ 'Class2': {
+ 'methods': ['class2Method1'],
+ 'superclass': 'java.lang.Object',
+ }
+ },
+ },
+ 'com.foo.bar2' : {'classes': {}},
+ 'com.foo.bar3' : {'classes': {}},
+ }
+ self.assertEqual(expected, actual)
+
+ def testParsePackageNode(self):
+ example_xml_string = (
+ '<package name="com.foo.bar">'
+ '<class name="Class1" extends="java.lang.Object">'
+ '</class>'
+ '<class name="Class2" extends="java.lang.Object">'
+ '</class>'
+ '</package>')
+
+
+ actual = dexdump._ParsePackageNode(
+ ElementTree.fromstring(example_xml_string))
+
+ expected = {
+ 'classes': {
+ 'Class1': {
+ 'methods': [],
+ 'superclass': 'java.lang.Object',
+ },
+ 'Class2': {
+ 'methods': [],
+ 'superclass': 'java.lang.Object',
+ },
+ },
+ }
+ self.assertEqual(expected, actual)
+
+ def testParseClassNode(self):
+ example_xml_string = (
+ '<class name="Class1" extends="java.lang.Object">'
+ '<method name="method1">'
+ '</method>'
+ '<method name="method2">'
+ '</method>'
+ '</class>')
+
+ actual = dexdump._ParseClassNode(
+ ElementTree.fromstring(example_xml_string))
+
+ expected = {
+ 'methods': ['method1', 'method2'],
+ 'superclass': 'java.lang.Object',
+ }
+ self.assertEqual(expected, actual)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/gold_utils.py b/third_party/libwebrtc/build/android/pylib/utils/gold_utils.py
new file mode 100644
index 0000000000..0b79a6d7cb
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/gold_utils.py
@@ -0,0 +1,78 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""//build/android implementations of //testing/skia_gold_common.
+
+Used for interacting with the Skia Gold image diffing service.
+"""
+
+import os
+import shutil
+
+from devil.utils import cmd_helper
+from pylib.base.output_manager import Datatype
+from pylib.constants import host_paths
+from pylib.utils import repo_utils
+
+with host_paths.SysPath(host_paths.BUILD_PATH):
+ from skia_gold_common import skia_gold_session
+ from skia_gold_common import skia_gold_session_manager
+ from skia_gold_common import skia_gold_properties
+
+
+class AndroidSkiaGoldSession(skia_gold_session.SkiaGoldSession):
+ def _StoreDiffLinks(self, image_name, output_manager, output_dir):
+ """See SkiaGoldSession._StoreDiffLinks for general documentation.
+
+ |output_manager| must be a build.android.pylib.base.OutputManager instance.
+ """
+ given_path = closest_path = diff_path = None
+ # The directory should contain "input-<hash>.png", "closest-<hash>.png",
+ # and "diff.png".
+ for f in os.listdir(output_dir):
+ filepath = os.path.join(output_dir, f)
+ if f.startswith('input-'):
+ given_path = filepath
+ elif f.startswith('closest-'):
+ closest_path = filepath
+ elif f == 'diff.png':
+ diff_path = filepath
+ results = self._comparison_results.setdefault(image_name,
+ self.ComparisonResults())
+ if given_path:
+ with output_manager.ArchivedTempfile('given_%s.png' % image_name,
+ 'gold_local_diffs',
+ Datatype.PNG) as given_file:
+ shutil.move(given_path, given_file.name)
+ results.local_diff_given_image = given_file.Link()
+ if closest_path:
+ with output_manager.ArchivedTempfile('closest_%s.png' % image_name,
+ 'gold_local_diffs',
+ Datatype.PNG) as closest_file:
+ shutil.move(closest_path, closest_file.name)
+ results.local_diff_closest_image = closest_file.Link()
+ if diff_path:
+ with output_manager.ArchivedTempfile('diff_%s.png' % image_name,
+ 'gold_local_diffs',
+ Datatype.PNG) as diff_file:
+ shutil.move(diff_path, diff_file.name)
+ results.local_diff_diff_image = diff_file.Link()
+
+ @staticmethod
+ def _RunCmdForRcAndOutput(cmd):
+ rc, stdout, _ = cmd_helper.GetCmdStatusOutputAndError(cmd,
+ merge_stderr=True)
+ return rc, stdout
+
+
+class AndroidSkiaGoldSessionManager(
+ skia_gold_session_manager.SkiaGoldSessionManager):
+ @staticmethod
+ def GetSessionClass():
+ return AndroidSkiaGoldSession
+
+
+class AndroidSkiaGoldProperties(skia_gold_properties.SkiaGoldProperties):
+ @staticmethod
+ def _GetGitOriginMasterHeadSha1():
+ return repo_utils.GetGitOriginMasterHeadSHA1(host_paths.DIR_SOURCE_ROOT)
diff --git a/third_party/libwebrtc/build/android/pylib/utils/gold_utils_test.py b/third_party/libwebrtc/build/android/pylib/utils/gold_utils_test.py
new file mode 100755
index 0000000000..cc1da043fc
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/gold_utils_test.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env vpython3
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Tests for gold_utils."""
+
+#pylint: disable=protected-access
+
+import contextlib
+import os
+import tempfile
+import unittest
+
+from pylib.constants import host_paths
+from pylib.utils import gold_utils
+
+with host_paths.SysPath(host_paths.BUILD_PATH):
+ from skia_gold_common import unittest_utils
+
+import mock # pylint: disable=import-error
+from pyfakefs import fake_filesystem_unittest # pylint: disable=import-error
+
+createSkiaGoldArgs = unittest_utils.createSkiaGoldArgs
+
+
+def assertArgWith(test, arg_list, arg, value):
+ i = arg_list.index(arg)
+ test.assertEqual(arg_list[i + 1], value)
+
+
+class AndroidSkiaGoldSessionDiffTest(fake_filesystem_unittest.TestCase):
+ def setUp(self):
+ self.setUpPyfakefs()
+ self._working_dir = tempfile.mkdtemp()
+ self._json_keys = tempfile.NamedTemporaryFile(delete=False).name
+
+ @mock.patch.object(gold_utils.AndroidSkiaGoldSession, '_RunCmdForRcAndOutput')
+ def test_commandCommonArgs(self, cmd_mock):
+ cmd_mock.return_value = (None, None)
+ args = createSkiaGoldArgs(git_revision='a', local_pixel_tests=False)
+ sgp = gold_utils.AndroidSkiaGoldProperties(args)
+ session = gold_utils.AndroidSkiaGoldSession(self._working_dir,
+ sgp,
+ self._json_keys,
+ 'corpus',
+ instance='instance')
+ session.Diff('name', 'png_file', None)
+ call_args = cmd_mock.call_args[0][0]
+ self.assertIn('diff', call_args)
+ assertArgWith(self, call_args, '--corpus', 'corpus')
+ # TODO(skbug.com/10610): Remove the -public once we go back to using the
+ # non-public instance, or add a second test for testing that the correct
+ # instance is chosen if we decide to support both depending on what the
+ # user is authenticated for.
+ assertArgWith(self, call_args, '--instance', 'instance-public')
+ assertArgWith(self, call_args, '--input', 'png_file')
+ assertArgWith(self, call_args, '--test', 'name')
+ # TODO(skbug.com/10611): Re-add this assert and remove the check for the
+ # absence of the directory once we switch back to using the proper working
+ # directory.
+ # assertArgWith(self, call_args, '--work-dir', self._working_dir)
+ self.assertNotIn(self._working_dir, call_args)
+ i = call_args.index('--out-dir')
+ # The output directory should be a subdirectory of the working directory.
+ self.assertIn(self._working_dir, call_args[i + 1])
+
+
+class AndroidSkiaGoldSessionDiffLinksTest(fake_filesystem_unittest.TestCase):
+ class FakeArchivedFile(object):
+ def __init__(self, path):
+ self.name = path
+
+ def Link(self):
+ return 'file://' + self.name
+
+ class FakeOutputManager(object):
+ def __init__(self):
+ self.output_dir = tempfile.mkdtemp()
+
+ @contextlib.contextmanager
+ def ArchivedTempfile(self, image_name, _, __):
+ filepath = os.path.join(self.output_dir, image_name)
+ yield AndroidSkiaGoldSessionDiffLinksTest.FakeArchivedFile(filepath)
+
+ def setUp(self):
+ self.setUpPyfakefs()
+ self._working_dir = tempfile.mkdtemp()
+ self._json_keys = tempfile.NamedTemporaryFile(delete=False).name
+
+ def test_outputManagerUsed(self):
+ args = createSkiaGoldArgs(git_revision='a', local_pixel_tests=True)
+ sgp = gold_utils.AndroidSkiaGoldProperties(args)
+ session = gold_utils.AndroidSkiaGoldSession(self._working_dir, sgp,
+ self._json_keys, None, None)
+ with open(os.path.join(self._working_dir, 'input-inputhash.png'), 'w') as f:
+ f.write('input')
+ with open(os.path.join(self._working_dir, 'closest-closesthash.png'),
+ 'w') as f:
+ f.write('closest')
+ with open(os.path.join(self._working_dir, 'diff.png'), 'w') as f:
+ f.write('diff')
+
+ output_manager = AndroidSkiaGoldSessionDiffLinksTest.FakeOutputManager()
+ session._StoreDiffLinks('foo', output_manager, self._working_dir)
+
+ copied_input = os.path.join(output_manager.output_dir, 'given_foo.png')
+ copied_closest = os.path.join(output_manager.output_dir, 'closest_foo.png')
+ copied_diff = os.path.join(output_manager.output_dir, 'diff_foo.png')
+ with open(copied_input) as f:
+ self.assertEqual(f.read(), 'input')
+ with open(copied_closest) as f:
+ self.assertEqual(f.read(), 'closest')
+ with open(copied_diff) as f:
+ self.assertEqual(f.read(), 'diff')
+
+ self.assertEqual(session.GetGivenImageLink('foo'), 'file://' + copied_input)
+ self.assertEqual(session.GetClosestImageLink('foo'),
+ 'file://' + copied_closest)
+ self.assertEqual(session.GetDiffImageLink('foo'), 'file://' + copied_diff)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/third_party/libwebrtc/build/android/pylib/utils/google_storage_helper.py b/third_party/libwebrtc/build/android/pylib/utils/google_storage_helper.py
new file mode 100644
index 0000000000..94efe33f85
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/google_storage_helper.py
@@ -0,0 +1,129 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper functions to upload data to Google Storage.
+
+Text data should be streamed to logdog using the |logdog_helper| module.
+Because logdog has no image or HTML viewer, such data should instead be uploaded
+to Google Storage directly using this module.
+"""
+
+import logging
+import os
+import sys
+import time
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+from pylib.constants import host_paths
+from pylib.utils import decorators
+
+if host_paths.DEVIL_PATH not in sys.path:
+ sys.path.append(host_paths.DEVIL_PATH)
+from devil.utils import cmd_helper
+
+_GSUTIL_PATH = os.path.join(
+ host_paths.DIR_SOURCE_ROOT, 'third_party', 'catapult',
+ 'third_party', 'gsutil', 'gsutil.py')
+_PUBLIC_URL = 'https://storage.googleapis.com/%s/'
+_AUTHENTICATED_URL = 'https://storage.cloud.google.com/%s/'
+
+
+@decorators.NoRaiseException(default_return_value='')
+def upload(name, filepath, bucket, gs_args=None, command_args=None,
+ content_type=None, authenticated_link=True):
+ """Uploads data to Google Storage.
+
+ Args:
+ name: Name of the file on Google Storage.
+ filepath: Path to file you want to upload.
+ bucket: Bucket to upload file to.
+ content_type: Content type to upload as. If not specified, Google storage
+ will attempt to infer content type from file extension.
+ authenticated_link: Whether to return a link that requires user to
+ authenticate with a Google account. Setting this to false will return
+ a link that does not require user to be signed into Google account but
+ will only work for completely public storage buckets.
+ Returns:
+ Web link to item uploaded to Google Storage bucket.
+ """
+ bucket = _format_bucket_name(bucket)
+
+ gs_path = 'gs://%s/%s' % (bucket, name)
+ logging.info('Uploading %s to %s', filepath, gs_path)
+
+ cmd = [_GSUTIL_PATH, '-q']
+ cmd.extend(gs_args or [])
+ if content_type:
+ cmd.extend(['-h', 'Content-Type:%s' % content_type])
+ cmd.extend(['cp'] + (command_args or []) + [filepath, gs_path])
+
+ cmd_helper.RunCmd(cmd)
+
+ return get_url_link(name, bucket, authenticated_link)
+
+
+@decorators.NoRaiseException(default_return_value='')
+def read_from_link(link):
+ # Note that urlparse returns the path with an initial '/', so we only need to
+ # add one more after the 'gs:'
+ gs_path = 'gs:/%s' % urlparse(link).path
+ cmd = [_GSUTIL_PATH, '-q', 'cat', gs_path]
+ return cmd_helper.GetCmdOutput(cmd)
+
+
+@decorators.NoRaiseException(default_return_value=False)
+def exists(name, bucket):
+ bucket = _format_bucket_name(bucket)
+ gs_path = 'gs://%s/%s' % (bucket, name)
+
+ cmd = [_GSUTIL_PATH, '-q', 'stat', gs_path]
+ return_code = cmd_helper.RunCmd(cmd)
+ return return_code == 0
+
+
+# TODO(jbudorick): Delete this function. Only one user of it.
+def unique_name(basename, suffix='', timestamp=True, device=None):
+ """Helper function for creating a unique name for a file to store in GS.
+
+ Args:
+ basename: Base of the unique filename.
+ suffix: Suffix of filename.
+ timestamp: Whether or not to add a timestamp to name.
+ device: Device to add device serial of to name.
+ """
+ return '%s%s%s%s' % (
+ basename,
+ '_%s' % time.strftime('%Y_%m_%d_T%H_%M_%S-UTC', time.gmtime())
+ if timestamp else '',
+ '_%s' % device.serial if device else '',
+ suffix)
+
+
+def get_url_link(name, bucket, authenticated_link=True):
+ """Get url link before/without uploading.
+
+ Args:
+ name: Name of the file on Google Storage.
+ bucket: Bucket to upload file to.
+ authenticated_link: Whether to return a link that requires user to
+ authenticate with a Google account. Setting this to false will return
+ a link that does not require user to be signed into Google account but
+ will only work for completely public storage buckets.
+ Returns:
+ Web link to item to be uploaded to Google Storage bucket
+ """
+ bucket = _format_bucket_name(bucket)
+ url_template = _AUTHENTICATED_URL if authenticated_link else _PUBLIC_URL
+ return os.path.join(url_template % bucket, name)
+
+
+def _format_bucket_name(bucket):
+ if bucket.startswith('gs://'):
+ bucket = bucket[len('gs://'):]
+ if bucket.endswith('/'):
+ bucket = bucket[:-1]
+ return bucket
diff --git a/third_party/libwebrtc/build/android/pylib/utils/instrumentation_tracing.py b/third_party/libwebrtc/build/android/pylib/utils/instrumentation_tracing.py
new file mode 100644
index 0000000000..f1d03a0dcf
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/instrumentation_tracing.py
@@ -0,0 +1,204 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions to instrument all Python function calls.
+
+This generates a JSON file readable by Chrome's about:tracing. To use it,
+either call start_instrumenting and stop_instrumenting at the appropriate times,
+or use the Instrument context manager.
+
+A function is only traced if it is from a Python module that matches at least
+one regular expression object in to_include, and does not match any in
+to_exclude. In between the start and stop events, every function call of a
+function from such a module will be added to the trace.
+"""
+
+import contextlib
+import functools
+import inspect
+import os
+import re
+import sys
+import threading
+
+from py_trace_event import trace_event
+
+
+# Modules to exclude by default (to avoid problems like infinite loops)
+DEFAULT_EXCLUDE = [r'py_trace_event\..*']
+
+class _TraceArguments(object):
+ def __init__(self):
+ """Wraps a dictionary to ensure safe evaluation of repr()."""
+ self._arguments = {}
+
+ @staticmethod
+ def _safeStringify(item):
+ try:
+ item_str = repr(item)
+ except Exception: # pylint: disable=broad-except
+ try:
+ item_str = str(item)
+ except Exception: # pylint: disable=broad-except
+ item_str = "<ERROR>"
+ return item_str
+
+ def add(self, key, val):
+ key_str = _TraceArguments._safeStringify(key)
+ val_str = _TraceArguments._safeStringify(val)
+
+ self._arguments[key_str] = val_str
+
+ def __repr__(self):
+ return repr(self._arguments)
+
+
+saved_thread_ids = set()
+
+def _shouldTrace(frame, to_include, to_exclude, included, excluded):
+ """
+ Decides whether or not the function called in frame should be traced.
+
+ Args:
+ frame: The Python frame object of this function call.
+ to_include: Set of regex objects for modules which should be traced.
+ to_exclude: Set of regex objects for modules which should not be traced.
+ included: Set of module names we've determined should be traced.
+ excluded: Set of module names we've determined should not be traced.
+ """
+ if not inspect.getmodule(frame):
+ return False
+
+ module_name = inspect.getmodule(frame).__name__
+
+ if module_name in included:
+ includes = True
+ elif to_include:
+ includes = any([pattern.match(module_name) for pattern in to_include])
+ else:
+ includes = True
+
+ if includes:
+ included.add(module_name)
+ else:
+ return False
+
+ # Find the modules of every function in the stack trace.
+ frames = inspect.getouterframes(frame)
+ calling_module_names = [inspect.getmodule(fr[0]).__name__ for fr in frames]
+
+ # Return False for anything with an excluded module's function anywhere in the
+ # stack trace (even if the function itself is in an included module).
+ if to_exclude:
+ for calling_module in calling_module_names:
+ if calling_module in excluded:
+ return False
+ for pattern in to_exclude:
+ if pattern.match(calling_module):
+ excluded.add(calling_module)
+ return False
+
+ return True
+
+def _generate_trace_function(to_include, to_exclude):
+ to_include = {re.compile(item) for item in to_include}
+ to_exclude = {re.compile(item) for item in to_exclude}
+ to_exclude.update({re.compile(item) for item in DEFAULT_EXCLUDE})
+
+ included = set()
+ excluded = set()
+
+ tracing_pid = os.getpid()
+
+ def traceFunction(frame, event, arg):
+ del arg
+
+ # Don't try to trace in subprocesses.
+ if os.getpid() != tracing_pid:
+ sys.settrace(None)
+ return None
+
+ # pylint: disable=unused-argument
+ if event not in ("call", "return"):
+ return None
+
+ function_name = frame.f_code.co_name
+ filename = frame.f_code.co_filename
+ line_number = frame.f_lineno
+
+ if _shouldTrace(frame, to_include, to_exclude, included, excluded):
+ if event == "call":
+ # This function is beginning; we save the thread name (if that hasn't
+ # been done), record the Begin event, and return this function to be
+ # used as the local trace function.
+
+ thread_id = threading.current_thread().ident
+
+ if thread_id not in saved_thread_ids:
+ thread_name = threading.current_thread().name
+
+ trace_event.trace_set_thread_name(thread_name)
+
+ saved_thread_ids.add(thread_id)
+
+ arguments = _TraceArguments()
+ # The function's argument values are stored in the frame's
+ # |co_varnames| as the first |co_argcount| elements. (Following that
+ # are local variables.)
+ for idx in range(frame.f_code.co_argcount):
+ arg_name = frame.f_code.co_varnames[idx]
+ arguments.add(arg_name, frame.f_locals[arg_name])
+ trace_event.trace_begin(function_name, arguments=arguments,
+ module=inspect.getmodule(frame).__name__,
+ filename=filename, line_number=line_number)
+
+ # Return this function, so it gets used as the "local trace function"
+ # within this function's frame (and in particular, gets called for this
+ # function's "return" event).
+ return traceFunction
+
+ if event == "return":
+ trace_event.trace_end(function_name)
+ return None
+
+ return traceFunction
+
+
+def no_tracing(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ trace_func = sys.gettrace()
+ try:
+ sys.settrace(None)
+ threading.settrace(None)
+ return f(*args, **kwargs)
+ finally:
+ sys.settrace(trace_func)
+ threading.settrace(trace_func)
+ return wrapper
+
+
+def start_instrumenting(output_file, to_include=(), to_exclude=()):
+ """Enable tracing of all function calls (from specified modules)."""
+ trace_event.trace_enable(output_file)
+
+ traceFunc = _generate_trace_function(to_include, to_exclude)
+ sys.settrace(traceFunc)
+ threading.settrace(traceFunc)
+
+
+def stop_instrumenting():
+ trace_event.trace_disable()
+
+ sys.settrace(None)
+ threading.settrace(None)
+
+
+@contextlib.contextmanager
+def Instrument(output_file, to_include=(), to_exclude=()):
+ try:
+ start_instrumenting(output_file, to_include, to_exclude)
+ yield None
+ finally:
+ stop_instrumenting()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/local_utils.py b/third_party/libwebrtc/build/android/pylib/utils/local_utils.py
new file mode 100644
index 0000000000..027cca3925
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/local_utils.py
@@ -0,0 +1,19 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Utilities for determining if a test is being run locally or not."""
+
+import os
+
+
+def IsOnSwarming():
+ """Determines whether we are on swarming or not.
+
+ Returns:
+ True if the test is being run on swarming, otherwise False.
+ """
+ # Look for the presence of the SWARMING_SERVER environment variable as a
+ # heuristic to determine whether we're running on a workstation or a bot.
+ # This should always be set on swarming, but would be strange to be set on
+ # a workstation.
+ return 'SWARMING_SERVER' in os.environ
diff --git a/third_party/libwebrtc/build/android/pylib/utils/logdog_helper.py b/third_party/libwebrtc/build/android/pylib/utils/logdog_helper.py
new file mode 100644
index 0000000000..3000a2f7cb
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/logdog_helper.py
@@ -0,0 +1,96 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper functions to upload data to logdog."""
+
+import logging
+import os
+import sys
+
+from pylib import constants
+from pylib.utils import decorators
+
+sys.path.insert(
+ 0,
+ os.path.abspath(
+ os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'logdog')))
+from logdog import bootstrap # pylint: disable=import-error
+
+
+@decorators.NoRaiseException(default_return_value='',
+ exception_message=('Ignore this exception. '
+ 'crbug.com/675666'))
+def text(name, data, content_type=None):
+ """Uploads text to logdog.
+
+ Args:
+ name: Name of the logdog stream.
+ data: String with data you want to upload.
+ content_type: The optional content type of the stream. If None, a
+ default content type will be chosen.
+
+ Returns:
+ Link to view uploaded text in logdog viewer.
+ """
+ logging.info('Writing text to logdog stream, %s', name)
+ with get_logdog_client().text(name, content_type=content_type) as stream:
+ stream.write(data)
+ return stream.get_viewer_url()
+
+
+@decorators.NoRaiseException(default_return_value=None,
+ exception_message=('Ignore this exception. '
+ 'crbug.com/675666'))
+def open_text(name):
+ """Returns a file like object which you can write to.
+
+ Args:
+ name: Name of the logdog stream.
+
+ Returns:
+ A file like object. close() file when done.
+ """
+ logging.info('Opening text logdog stream, %s', name)
+ return get_logdog_client().open_text(name)
+
+
+@decorators.NoRaiseException(default_return_value='',
+ exception_message=('Ignore this exception. '
+ 'crbug.com/675666'))
+def binary(name, binary_path):
+ """Uploads binary to logdog.
+
+ Args:
+ name: Name of the logdog stream.
+ binary_path: Path to binary you want to upload.
+
+ Returns:
+ Link to view uploaded binary in logdog viewer.
+ """
+ logging.info('Writing binary to logdog stream, %s', name)
+ with get_logdog_client().binary(name) as stream:
+ with open(binary_path, 'r') as f:
+ stream.write(f.read())
+ return stream.get_viewer_url()
+
+
+@decorators.NoRaiseException(default_return_value='',
+ exception_message=('Ignore this exception. '
+ 'crbug.com/675666'))
+def get_viewer_url(name):
+ """Get Logdog viewer URL.
+
+ Args:
+ name: Name of the logdog stream.
+
+ Returns:
+ Link to view uploaded binary in logdog viewer.
+ """
+ return get_logdog_client().get_viewer_url(name)
+
+
+@decorators.Memoize
+def get_logdog_client():
+ logging.info('Getting logdog client.')
+ return bootstrap.ButlerBootstrap.probe().stream_client()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/logging_utils.py b/third_party/libwebrtc/build/android/pylib/utils/logging_utils.py
new file mode 100644
index 0000000000..846d336c2c
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/logging_utils.py
@@ -0,0 +1,136 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import logging
+import os
+
+from pylib.constants import host_paths
+
+_COLORAMA_PATH = os.path.join(
+ host_paths.DIR_SOURCE_ROOT, 'third_party', 'colorama', 'src')
+
+with host_paths.SysPath(_COLORAMA_PATH, position=0):
+ import colorama
+
+BACK = colorama.Back
+FORE = colorama.Fore
+STYLE = colorama.Style
+
+
+class _ColorFormatter(logging.Formatter):
+  """Formatter that delegates to a wrapped formatter and adds ANSI color.
+
+  The color depends on the record's level; levels missing from |color_map|
+  (e.g. INFO) are emitted uncolored.
+  """
+  # pylint does not see members added dynamically in the constructor.
+  # pylint: disable=no-member
+  color_map = {
+      logging.DEBUG: (FORE.CYAN),
+      logging.WARNING: (FORE.YELLOW),
+      logging.ERROR: (FORE.RED),
+      logging.CRITICAL: (BACK.RED),
+  }
+
+  def __init__(self, wrapped_formatter=None):
+    """Wraps a |logging.Formatter| and adds color."""
+    # NOTE(review): passing |self| to Formatter.__init__ as the fmt argument
+    # looks unintentional, but is harmless because format() below always
+    # delegates to |_wrapped_formatter| -- confirm before changing.
+    super(_ColorFormatter, self).__init__(self)
+    self._wrapped_formatter = wrapped_formatter or logging.Formatter()
+
+  #override
+  def format(self, record):
+    # Format with the wrapped formatter first, then colorize the whole line.
+    message = self._wrapped_formatter.format(record)
+    return self.Colorize(message, record.levelno)
+
+  def Colorize(self, message, log_level):
+    """Returns |message| wrapped in the color codes for |log_level|."""
+    try:
+      return (''.join(self.color_map[log_level]) + message +
+              colorama.Style.RESET_ALL)
+    except KeyError:
+      # No color configured for this level; emit the message unchanged.
+      return message
+
+
+class ColorStreamHandler(logging.StreamHandler):
+  """Handler that can be used to colorize logging output.
+
+  Example using a specific logger:
+
+    logger = logging.getLogger('my_logger')
+    logger.addHandler(ColorStreamHandler())
+    logger.info('message')
+
+  Example using the root logger:
+
+    ColorStreamHandler.MakeDefault()
+    logging.info('message')
+
+  """
+  def __init__(self, force_color=False):
+    """Args:
+      force_color: If True, colorize even when the stream is not a tty.
+    """
+    super(ColorStreamHandler, self).__init__()
+    self.force_color = force_color
+    # Goes through the setFormatter override below, which decides whether
+    # to wrap the formatter in _ColorFormatter.
+    self.setFormatter(logging.Formatter())
+
+  @property
+  def is_tty(self):
+    # Some stream-like objects (e.g. StringIO) have no isatty(); treat them
+    # as non-tty so their output stays free of ANSI escapes.
+    isatty = getattr(self.stream, 'isatty', None)
+    return isatty and isatty()
+
+  #override
+  def setFormatter(self, formatter):
+    # Only colorize when writing to a terminal (or when explicitly forced),
+    # so redirected/piped output contains no escape codes.
+    if self.force_color or self.is_tty:
+      formatter = _ColorFormatter(formatter)
+    super(ColorStreamHandler, self).setFormatter(formatter)
+
+  @staticmethod
+  def MakeDefault(force_color=False):
+    """
+    Replaces the default logging handlers with a coloring handler. To use
+    a colorizing handler at the same time as others, either register them
+    after this call, or add the ColorStreamHandler on the logger using
+    Logger.addHandler()
+
+    Args:
+      force_color: Set to True to bypass the tty check and always colorize.
+    """
+    # If the existing handlers aren't removed, messages are duplicated
+    logging.getLogger().handlers = []
+    logging.getLogger().addHandler(ColorStreamHandler(force_color))
+
+
+@contextlib.contextmanager
+def OverrideColor(level, color):
+  """Temporarily override the logging color for a specified level.
+
+  Args:
+    level: logging level whose color gets overridden.
+    color: tuple of formats to apply to log lines.
+  """
+  # Save the previous color keyed by formatter so that every _ColorFormatter
+  # attached to the root logger is restored independently.
+  prev_colors = {}
+  for handler in logging.getLogger().handlers:
+    if isinstance(handler.formatter, _ColorFormatter):
+      prev_colors[handler.formatter] = handler.formatter.color_map[level]
+      handler.formatter.color_map[level] = color
+  try:
+    yield
+  finally:
+    # Restore the saved colors even if the with-block raised.
+    for formatter, prev_color in prev_colors.items():
+      formatter.color_map[level] = prev_color
+
+
+@contextlib.contextmanager
+def SuppressLogging(level=logging.ERROR):
+ """Momentarilly suppress logging events from all loggers.
+
+ TODO(jbudorick): This is not thread safe. Log events from other threads might
+ also inadvertently disappear.
+
+ Example:
+
+ with logging_utils.SuppressLogging():
+ # all but CRITICAL logging messages are suppressed
+ logging.info('just doing some thing') # not shown
+ logging.critical('something really bad happened') # still shown
+
+ Args:
+ level: logging events with this or lower levels are suppressed.
+ """
+ logging.disable(level)
+ yield
+ logging.disable(logging.NOTSET)
diff --git a/third_party/libwebrtc/build/android/pylib/utils/maven_downloader.py b/third_party/libwebrtc/build/android/pylib/utils/maven_downloader.py
new file mode 100755
index 0000000000..7247f7c88c
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/maven_downloader.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env vpython3
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import errno
+import logging
+import os
+import shutil
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
+import devil_chromium # pylint: disable=unused-import
+from devil.utils import cmd_helper
+from devil.utils import parallelizer
+
+
+def _MakeDirsIfAbsent(path):
+ try:
+ os.makedirs(path)
+ except OSError as err:
+ if err.errno != errno.EEXIST or not os.path.isdir(path):
+ raise
+
+
+class MavenDownloader(object):
+  '''
+  Downloads and installs the requested artifacts from the Google Maven repo.
+  The artifacts are expected to be specified in the format
+  "group_id:artifact_id:version:file_type", as the default file type is JAR
+  but most Android libraries are provided as AARs, which would otherwise fail
+  downloading. See Install()
+  '''
+
+  # Remote repository to download the artifacts from. The support library and
+  # Google Play service are only distributed there, but third party libraries
+  # could use Maven Central or JCenter for example. The default Maven remote
+  # is Maven Central.
+  _REMOTE_REPO = 'https://maven.google.com'
+
+  # Default Maven repository.
+  _DEFAULT_REPO_PATH = os.path.join(
+      os.path.expanduser('~'), '.m2', 'repository')
+
+  def __init__(self, debug=False):
+    # debug: when True, artifacts are installed serially with mvn output
+    # shown; otherwise downloads run in parallel with output suppressed.
+    self._repo_path = MavenDownloader._DEFAULT_REPO_PATH
+    self._remote_url = MavenDownloader._REMOTE_REPO
+    self._debug = debug
+
+  def Install(self, target_repo, artifacts, include_poms=False):
+    """Downloads |artifacts| and installs them under |target_repo|.
+
+    Args:
+      target_repo: Local directory laid out as a Maven repository.
+      artifacts: List of "group_id:artifact_id:version:file_type" specs.
+      include_poms: If True, also install each artifact's POM file.
+    """
+    logging.info('Installing %d artifacts...', len(artifacts))
+    downloaders = [_SingleArtifactDownloader(self, artifact, target_repo)
+                   for artifact in artifacts]
+    if self._debug:
+      for downloader in downloaders:
+        downloader.Run(include_poms)
+    else:
+      parallelizer.SyncParallelizer(downloaders).Run(include_poms)
+    logging.info('%d artifacts installed to %s', len(artifacts), target_repo)
+
+  @property
+  def repo_path(self):
+    # Local Maven cache that mvn downloads into (~/.m2/repository).
+    return self._repo_path
+
+  @property
+  def remote_url(self):
+    return self._remote_url
+
+  @property
+  def debug(self):
+    return self._debug
+
+
+class _SingleArtifactDownloader(object):
+ '''Handles downloading and installing a single Maven artifact.'''
+
+ _POM_FILE_TYPE = 'pom'
+
+ def __init__(self, download_manager, artifact, target_repo):
+ self._download_manager = download_manager
+ self._artifact = artifact
+ self._target_repo = target_repo
+
+ def Run(self, include_pom=False):
+ parts = self._artifact.split(':')
+ if len(parts) != 4:
+ raise Exception('Artifacts expected as '
+ '"group_id:artifact_id:version:file_type".')
+ group_id, artifact_id, version, file_type = parts
+ self._InstallArtifact(group_id, artifact_id, version, file_type)
+
+ if include_pom and file_type != _SingleArtifactDownloader._POM_FILE_TYPE:
+ self._InstallArtifact(group_id, artifact_id, version,
+ _SingleArtifactDownloader._POM_FILE_TYPE)
+
+ def _InstallArtifact(self, group_id, artifact_id, version, file_type):
+ logging.debug('Processing %s', self._artifact)
+
+ download_relpath = self._DownloadArtifact(
+ group_id, artifact_id, version, file_type)
+ logging.debug('Downloaded.')
+
+ install_path = self._ImportArtifact(download_relpath)
+ logging.debug('Installed %s', os.path.relpath(install_path))
+
+ def _DownloadArtifact(self, group_id, artifact_id, version, file_type):
+ '''
+ Downloads the specified artifact using maven, to its standard location, see
+ MavenDownloader._DEFAULT_REPO_PATH.
+ '''
+ cmd = ['mvn',
+ 'org.apache.maven.plugins:maven-dependency-plugin:RELEASE:get',
+ '-DremoteRepositories={}'.format(self._download_manager.remote_url),
+ '-Dartifact={}:{}:{}:{}'.format(group_id, artifact_id, version,
+ file_type)]
+
+ stdout = None if self._download_manager.debug else open(os.devnull, 'wb')
+
+ try:
+ ret_code = cmd_helper.Call(cmd, stdout=stdout)
+ if ret_code != 0:
+ raise Exception('Command "{}" failed'.format(' '.join(cmd)))
+ except OSError as e:
+ if e.errno == os.errno.ENOENT:
+ raise Exception('mvn command not found. Please install Maven.')
+ raise
+
+ return os.path.join(os.path.join(*group_id.split('.')),
+ artifact_id,
+ version,
+ '{}-{}.{}'.format(artifact_id, version, file_type))
+
+ def _ImportArtifact(self, artifact_path):
+ src_dir = os.path.join(self._download_manager.repo_path, artifact_path)
+ dst_dir = os.path.join(self._target_repo, os.path.dirname(artifact_path))
+
+ _MakeDirsIfAbsent(dst_dir)
+ shutil.copy(src_dir, dst_dir)
+
+ return dst_dir
diff --git a/third_party/libwebrtc/build/android/pylib/utils/proguard.py b/third_party/libwebrtc/build/android/pylib/utils/proguard.py
new file mode 100644
index 0000000000..9d5bae285a
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/proguard.py
@@ -0,0 +1,285 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import tempfile
+
+from devil.utils import cmd_helper
+from pylib import constants
+
+
+_PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$')
+_PROGUARD_SUPERCLASS_RE = re.compile(r'\s*? Superclass:\s*([\S]+)$')
+_PROGUARD_SECTION_RE = re.compile(
+ r'^(Interfaces|Constant Pool|Fields|Methods|Class file attributes) '
+ r'\(count = \d+\):$')
+_PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$')
+_PROGUARD_ANNOTATION_RE = re.compile(r'^(\s*?)- Annotation \[L(\S*);\]:$')
+_ELEMENT_PRIMITIVE = 0
+_ELEMENT_ARRAY = 1
+_ELEMENT_ANNOTATION = 2
+_PROGUARD_ELEMENT_RES = [
+ (_ELEMENT_PRIMITIVE,
+ re.compile(r'^(\s*?)- Constant element value \[(\S*) .*\]$')),
+ (_ELEMENT_ARRAY,
+ re.compile(r'^(\s*?)- Array element value \[(\S*)\]:$')),
+ (_ELEMENT_ANNOTATION,
+ re.compile(r'^(\s*?)- Annotation element value \[(\S*)\]:$'))
+]
+_PROGUARD_INDENT_WIDTH = 2
+_PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'^(\s*?)- \S+? \[(.*)\]$')
+
+
+def _GetProguardPath():
+  """Returns the path to the checked-in proguard jar."""
+  return os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'proguard',
+                      'lib', 'proguard603.jar')
+
+
+def Dump(jar_path):
+ """Dumps class and method information from a JAR into a dict via proguard.
+
+ Args:
+ jar_path: An absolute path to the JAR file to dump.
+ Returns:
+ A dict in the following format:
+ {
+ 'classes': [
+ {
+ 'class': '',
+ 'superclass': '',
+ 'annotations': {/* dict -- see below */},
+ 'methods': [
+ {
+ 'method': '',
+ 'annotations': {/* dict -- see below */},
+ },
+ ...
+ ],
+ },
+ ...
+ ],
+ }
+
+ Annotations dict format:
+ {
+ 'empty-annotation-class-name': None,
+ 'annotation-class-name': {
+ 'field': 'primitive-value',
+ 'field': [ 'array-item-1', 'array-item-2', ... ],
+ 'field': {
+ /* Object value */
+ 'field': 'primitive-value',
+ 'field': [ 'array-item-1', 'array-item-2', ... ],
+ 'field': { /* Object value */ }
+ }
+ }
+ }
+
+ Note that for top-level annotations their class names are used for
+ identification, whereas for any nested annotations the corresponding
+ field names are used.
+
+ One drawback of this approach is that an array containing empty
+ annotation classes will be represented as an array of 'None' values,
+ thus it will not be possible to find out annotation class names.
+ On the other hand, storing both annotation class name and the field name
+ would produce a very complex JSON.
+ """
+
+ with tempfile.NamedTemporaryFile() as proguard_output:
+ cmd_helper.GetCmdStatusAndOutput([
+ 'java',
+ '-jar', _GetProguardPath(),
+ '-injars', jar_path,
+ '-dontshrink', '-dontoptimize', '-dontobfuscate', '-dontpreverify',
+ '-dump', proguard_output.name])
+ return Parse(proguard_output)
+
+class _AnnotationElement(object):
+  """One entry on _ParseState's annotation stack.
+
+  ref holds the parsed value (dict, list, primitive or None) and is filled
+  in lazily; name is the field name (or array index) within the parent;
+  ftype is one of the _ELEMENT_* constants; depth is the element's
+  indentation depth in the proguard dump.
+  """
+  def __init__(self, name, ftype, depth):
+    self.ref = None
+    self.name = name
+    self.ftype = ftype
+    self.depth = depth
+
+class _ParseState(object):
+  """Mutable state machine used by Parse() to build the result dict.
+
+  Tracks the current class, the current method, and a stack of
+  _AnnotationElement objects mirroring the indentation-based nesting of
+  annotations in the proguard dump.
+  """
+  # Constructors for the value of each _ELEMENT_* type, indexed by type.
+  _INITIAL_VALUES = (lambda: None, list, dict)
+  # Empty annotations are represented as 'None', not as an empty dictionary.
+  _LAZY_INITIAL_VALUES = (lambda: None, list, lambda: None)
+
+  def __init__(self):
+    self._class_result = None
+    self._method_result = None
+    self._parse_annotations = False
+    self._annotation_stack = []
+
+  def ResetPerSection(self, section_name):
+    """Resets method/annotation state on entering a new dump section."""
+    self.InitMethod(None)
+    # Annotations are only meaningful in these two sections.
+    self._parse_annotations = (
+        section_name in ['Class file attributes', 'Methods'])
+
+  def ParseAnnotations(self):
+    return self._parse_annotations
+
+  def CreateAndInitClass(self, class_name):
+    """Starts a new class record and returns it."""
+    self.InitMethod(None)
+    self._class_result = {
+        'class': class_name,
+        'superclass': '',
+        'annotations': {},
+        'methods': [],
+    }
+    return self._class_result
+
+  def HasCurrentClass(self):
+    return bool(self._class_result)
+
+  def SetSuperClass(self, superclass):
+    assert self.HasCurrentClass()
+    self._class_result['superclass'] = superclass
+
+  def InitMethod(self, method_name):
+    """Starts a new method record, or clears the current one if None."""
+    self._annotation_stack = []
+    if method_name:
+      self._method_result = {
+          'method': method_name,
+          'annotations': {},
+      }
+      self._class_result['methods'].append(self._method_result)
+    else:
+      self._method_result = None
+
+  def InitAnnotation(self, annotation, depth):
+    """Pushes a (possibly nested) annotation found at |depth|."""
+    if not self._annotation_stack:
+      # Add a fake parent element comprising 'annotations' dictionary,
+      # so we can work uniformly with both top-level and nested annotations.
+      annotations = _AnnotationElement(
+          '<<<top level>>>', _ELEMENT_ANNOTATION, depth - 1)
+      if self._method_result:
+        annotations.ref = self._method_result['annotations']
+      else:
+        annotations.ref = self._class_result['annotations']
+      self._annotation_stack = [annotations]
+    self._BacktrackAnnotationStack(depth)
+    if not self.HasCurrentAnnotation():
+      self._annotation_stack.append(
+          _AnnotationElement(annotation, _ELEMENT_ANNOTATION, depth))
+    self._CreateAnnotationPlaceHolder(self._LAZY_INITIAL_VALUES)
+
+  def HasCurrentAnnotation(self):
+    # The stack always contains the fake top-level parent; anything beyond
+    # it is a real annotation.
+    return len(self._annotation_stack) > 1
+
+  def InitAnnotationField(self, field, field_type, depth):
+    """Pushes a field (primitive/array/annotation) of the open annotation."""
+    self._BacktrackAnnotationStack(depth)
+    # Create the parent representation, if needed. E.g. annotations
+    # are represented with `None`, not with `{}` until they receive the first
+    # field.
+    self._CreateAnnotationPlaceHolder(self._INITIAL_VALUES)
+    if self._annotation_stack[-1].ftype == _ELEMENT_ARRAY:
+      # Nested arrays are not allowed in annotations.
+      assert not field_type == _ELEMENT_ARRAY
+      # Use array index instead of bogus field name.
+      field = len(self._annotation_stack[-1].ref)
+    self._annotation_stack.append(_AnnotationElement(field, field_type, depth))
+    self._CreateAnnotationPlaceHolder(self._LAZY_INITIAL_VALUES)
+
+  def UpdateCurrentAnnotationFieldValue(self, value, depth):
+    """Records the constant value for the field open at |depth|."""
+    self._BacktrackAnnotationStack(depth)
+    self._InitOrUpdateCurrentField(value)
+
+  def _CreateAnnotationPlaceHolder(self, constructors):
+    # Lazily materialize the current element's value and link it into the
+    # parent container.
+    assert self.HasCurrentAnnotation()
+    field = self._annotation_stack[-1]
+    if field.ref is None:
+      field.ref = constructors[field.ftype]()
+      self._InitOrUpdateCurrentField(field.ref)
+
+  def _BacktrackAnnotationStack(self, depth):
+    # Pop elements whose indentation is at or beyond |depth|; they are
+    # siblings or deeper nesting that has ended.
+    stack = self._annotation_stack
+    while len(stack) > 0 and stack[-1].depth >= depth:
+      stack.pop()
+
+  def _InitOrUpdateCurrentField(self, value):
+    assert self.HasCurrentAnnotation()
+    parent = self._annotation_stack[-2]
+    assert not parent.ref is None
+    # There can be no nested constant element values.
+    assert parent.ftype in [_ELEMENT_ARRAY, _ELEMENT_ANNOTATION]
+    field = self._annotation_stack[-1]
+    if isinstance(value, str) and not field.ftype == _ELEMENT_PRIMITIVE:
+      # The value comes from the output parser via
+      # UpdateCurrentAnnotationFieldValue, and should be a value of a constant
+      # element. If it isn't, just skip it.
+      return
+    if parent.ftype == _ELEMENT_ARRAY and field.name >= len(parent.ref):
+      parent.ref.append(value)
+    else:
+      parent.ref[field.name] = value
+
+
+def _GetDepth(prefix):
+  # Each nesting level in the proguard dump is indented by two spaces.
+  return len(prefix) // _PROGUARD_INDENT_WIDTH
+
+def Parse(proguard_output):
+  """Parses proguard -dump output into the dict format described in Dump().
+
+  Args:
+    proguard_output: Iterable of text lines (e.g. an open file object).
+
+  Returns:
+    Dict with a 'classes' list; see Dump() for the full schema.
+  """
+  results = {
+      'classes': [],
+  }
+
+  state = _ParseState()
+
+  for line in proguard_output:
+    line = line.strip('\r\n')
+
+    m = _PROGUARD_CLASS_RE.match(line)
+    if m:
+      results['classes'].append(
+          state.CreateAndInitClass(m.group(1).replace('/', '.')))
+      continue
+
+    if not state.HasCurrentClass():
+      # Ignore everything until the first class header is seen.
+      continue
+
+    m = _PROGUARD_SUPERCLASS_RE.match(line)
+    if m:
+      state.SetSuperClass(m.group(1).replace('/', '.'))
+      continue
+
+    m = _PROGUARD_SECTION_RE.match(line)
+    if m:
+      state.ResetPerSection(m.group(1))
+      continue
+
+    m = _PROGUARD_METHOD_RE.match(line)
+    if m:
+      state.InitMethod(m.group(1))
+      continue
+
+    if not state.ParseAnnotations():
+      # Annotations only matter in the 'Class file attributes' and 'Methods'
+      # sections; skip annotation-like lines elsewhere.
+      continue
+
+    m = _PROGUARD_ANNOTATION_RE.match(line)
+    if m:
+      # Ignore the annotation package.
+      state.InitAnnotation(m.group(2).split('/')[-1], _GetDepth(m.group(1)))
+      continue
+
+    if state.HasCurrentAnnotation():
+      m = None
+      for (element_type, element_re) in _PROGUARD_ELEMENT_RES:
+        m = element_re.match(line)
+        if m:
+          state.InitAnnotationField(
+              m.group(2), element_type, _GetDepth(m.group(1)))
+          break
+      if m:
+        continue
+      m = _PROGUARD_ANNOTATION_VALUE_RE.match(line)
+      if m:
+        state.UpdateCurrentAnnotationFieldValue(
+            m.group(2), _GetDepth(m.group(1)))
+      else:
+        # Unrecognized line ends the current method/annotation context.
+        state.InitMethod(None)
+
+  return results
diff --git a/third_party/libwebrtc/build/android/pylib/utils/proguard_test.py b/third_party/libwebrtc/build/android/pylib/utils/proguard_test.py
new file mode 100755
index 0000000000..775bbbac35
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/proguard_test.py
@@ -0,0 +1,495 @@
+#! /usr/bin/env vpython3
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from pylib.utils import proguard
+
+class TestParse(unittest.TestCase):
+
+ def setUp(self):
+ self.maxDiff = None
+
+ def testClass(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ ' Superclass: java/lang/Object'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': 'java.lang.Object',
+ 'annotations': {},
+ 'methods': []
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testMethod(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Methods (count = 1):',
+ '- Method: <init>()V'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {},
+ 'methods': [
+ {
+ 'method': '<init>',
+ 'annotations': {}
+ }
+ ]
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testClassAnnotation(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Class file attributes (count = 3):',
+ ' - Annotation [Lorg/example/Annotation;]:',
+ ' - Annotation [Lorg/example/AnnotationWithValue;]:',
+ ' - Constant element value [attr \'13\']',
+ ' - Utf8 [val]',
+ ' - Annotation [Lorg/example/AnnotationWithTwoValues;]:',
+ ' - Constant element value [attr1 \'13\']',
+ ' - Utf8 [val1]',
+ ' - Constant element value [attr2 \'13\']',
+ ' - Utf8 [val2]'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {
+ 'Annotation': None,
+ 'AnnotationWithValue': {'attr': 'val'},
+ 'AnnotationWithTwoValues': {'attr1': 'val1', 'attr2': 'val2'}
+ },
+ 'methods': []
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testClassAnnotationWithArrays(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Class file attributes (count = 3):',
+ ' - Annotation [Lorg/example/AnnotationWithEmptyArray;]:',
+ ' - Array element value [arrayAttr]:',
+ ' - Annotation [Lorg/example/AnnotationWithOneElemArray;]:',
+ ' - Array element value [arrayAttr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val]',
+ ' - Annotation [Lorg/example/AnnotationWithTwoElemArray;]:',
+ ' - Array element value [arrayAttr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val1]',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val2]'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {
+ 'AnnotationWithEmptyArray': {'arrayAttr': []},
+ 'AnnotationWithOneElemArray': {'arrayAttr': ['val']},
+ 'AnnotationWithTwoElemArray': {'arrayAttr': ['val1', 'val2']}
+ },
+ 'methods': []
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testNestedClassAnnotations(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Class file attributes (count = 1):',
+ ' - Annotation [Lorg/example/OuterAnnotation;]:',
+ ' - Constant element value [outerAttr \'13\']',
+ ' - Utf8 [outerVal]',
+ ' - Array element value [outerArr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [outerArrVal1]',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [outerArrVal2]',
+ ' - Annotation element value [emptyAnn]:',
+ ' - Annotation [Lorg/example/EmptyAnnotation;]:',
+ ' - Annotation element value [ann]:',
+ ' - Annotation [Lorg/example/InnerAnnotation;]:',
+ ' - Constant element value [innerAttr \'13\']',
+ ' - Utf8 [innerVal]',
+ ' - Array element value [innerArr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [innerArrVal1]',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [innerArrVal2]',
+ ' - Annotation element value [emptyInnerAnn]:',
+ ' - Annotation [Lorg/example/EmptyAnnotation;]:'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {
+ 'OuterAnnotation': {
+ 'outerAttr': 'outerVal',
+ 'outerArr': ['outerArrVal1', 'outerArrVal2'],
+ 'emptyAnn': None,
+ 'ann': {
+ 'innerAttr': 'innerVal',
+ 'innerArr': ['innerArrVal1', 'innerArrVal2'],
+ 'emptyInnerAnn': None
+ }
+ }
+ },
+ 'methods': []
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testClassArraysOfAnnotations(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Class file attributes (count = 1):',
+ ' - Annotation [Lorg/example/OuterAnnotation;]:',
+ ' - Array element value [arrayWithEmptyAnnotations]:',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/EmptyAnnotation;]:',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/EmptyAnnotation;]:',
+ ' - Array element value [outerArray]:',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/InnerAnnotation;]:',
+ ' - Constant element value [innerAttr \'115\']',
+ ' - Utf8 [innerVal]',
+ ' - Array element value [arguments]:',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/InnerAnnotation$Argument;]:',
+ ' - Constant element value [arg1Attr \'115\']',
+ ' - Utf8 [arg1Val]',
+ ' - Array element value [arg1Array]:',
+ ' - Constant element value [(default) \'73\']',
+ ' - Integer [11]',
+ ' - Constant element value [(default) \'73\']',
+ ' - Integer [12]',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/InnerAnnotation$Argument;]:',
+ ' - Constant element value [arg2Attr \'115\']',
+ ' - Utf8 [arg2Val]',
+ ' - Array element value [arg2Array]:',
+ ' - Constant element value [(default) \'73\']',
+ ' - Integer [21]',
+ ' - Constant element value [(default) \'73\']',
+ ' - Integer [22]'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {
+ 'OuterAnnotation': {
+ 'arrayWithEmptyAnnotations': [None, None],
+ 'outerArray': [
+ {
+ 'innerAttr': 'innerVal',
+ 'arguments': [
+ {'arg1Attr': 'arg1Val', 'arg1Array': ['11', '12']},
+ {'arg2Attr': 'arg2Val', 'arg2Array': ['21', '22']}
+ ]
+ }
+ ]
+ }
+ },
+ 'methods': []
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testReadFullClassFileAttributes(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Class file attributes (count = 3):',
+ ' - Source file attribute:',
+ ' - Utf8 [Class.java]',
+ ' - Runtime visible annotations attribute:',
+ ' - Annotation [Lorg/example/IntValueAnnotation;]:',
+ ' - Constant element value [value \'73\']',
+ ' - Integer [19]',
+ ' - Inner classes attribute (count = 1)',
+ ' - InnerClassesInfo:',
+ ' Access flags: 0x9 = public static',
+ ' - Class [org/example/Class1]',
+ ' - Class [org/example/Class2]',
+ ' - Utf8 [OnPageFinishedHelper]'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {
+ 'IntValueAnnotation': {
+ 'value': '19',
+ }
+ },
+ 'methods': []
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testMethodAnnotation(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Methods (count = 1):',
+ '- Method: Test()V',
+ ' - Annotation [Lorg/example/Annotation;]:',
+ ' - Annotation [Lorg/example/AnnotationWithValue;]:',
+ ' - Constant element value [attr \'13\']',
+ ' - Utf8 [val]',
+ ' - Annotation [Lorg/example/AnnotationWithTwoValues;]:',
+ ' - Constant element value [attr1 \'13\']',
+ ' - Utf8 [val1]',
+ ' - Constant element value [attr2 \'13\']',
+ ' - Utf8 [val2]'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {},
+ 'methods': [
+ {
+ 'method': 'Test',
+ 'annotations': {
+ 'Annotation': None,
+ 'AnnotationWithValue': {'attr': 'val'},
+ 'AnnotationWithTwoValues': {'attr1': 'val1', 'attr2': 'val2'}
+ },
+ }
+ ]
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testMethodAnnotationWithArrays(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Methods (count = 1):',
+ '- Method: Test()V',
+ ' - Annotation [Lorg/example/AnnotationWithEmptyArray;]:',
+ ' - Array element value [arrayAttr]:',
+ ' - Annotation [Lorg/example/AnnotationWithOneElemArray;]:',
+ ' - Array element value [arrayAttr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val]',
+ ' - Annotation [Lorg/example/AnnotationWithTwoElemArray;]:',
+ ' - Array element value [arrayAttr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val1]',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val2]'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {},
+ 'methods': [
+ {
+ 'method': 'Test',
+ 'annotations': {
+ 'AnnotationWithEmptyArray': {'arrayAttr': []},
+ 'AnnotationWithOneElemArray': {'arrayAttr': ['val']},
+ 'AnnotationWithTwoElemArray': {'arrayAttr': ['val1', 'val2']}
+ },
+ }
+ ]
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testMethodAnnotationWithPrimitivesAndArrays(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Methods (count = 1):',
+ '- Method: Test()V',
+ ' - Annotation [Lorg/example/AnnotationPrimitiveThenArray;]:',
+ ' - Constant element value [attr \'13\']',
+ ' - Utf8 [val]',
+ ' - Array element value [arrayAttr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val]',
+ ' - Annotation [Lorg/example/AnnotationArrayThenPrimitive;]:',
+ ' - Array element value [arrayAttr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val]',
+ ' - Constant element value [attr \'13\']',
+ ' - Utf8 [val]',
+ ' - Annotation [Lorg/example/AnnotationTwoArrays;]:',
+ ' - Array element value [arrayAttr1]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val1]',
+ ' - Array element value [arrayAttr2]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [val2]'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {},
+ 'methods': [
+ {
+ 'method': 'Test',
+ 'annotations': {
+ 'AnnotationPrimitiveThenArray': {'attr': 'val',
+ 'arrayAttr': ['val']},
+ 'AnnotationArrayThenPrimitive': {'arrayAttr': ['val'],
+ 'attr': 'val'},
+ 'AnnotationTwoArrays': {'arrayAttr1': ['val1'],
+ 'arrayAttr2': ['val2']}
+ },
+ }
+ ]
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testNestedMethodAnnotations(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Methods (count = 1):',
+ '- Method: Test()V',
+ ' - Annotation [Lorg/example/OuterAnnotation;]:',
+ ' - Constant element value [outerAttr \'13\']',
+ ' - Utf8 [outerVal]',
+ ' - Array element value [outerArr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [outerArrVal1]',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [outerArrVal2]',
+ ' - Annotation element value [emptyAnn]:',
+ ' - Annotation [Lorg/example/EmptyAnnotation;]:',
+ ' - Annotation element value [ann]:',
+ ' - Annotation [Lorg/example/InnerAnnotation;]:',
+ ' - Constant element value [innerAttr \'13\']',
+ ' - Utf8 [innerVal]',
+ ' - Array element value [innerArr]:',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [innerArrVal1]',
+ ' - Constant element value [(default) \'13\']',
+ ' - Utf8 [innerArrVal2]',
+ ' - Annotation element value [emptyInnerAnn]:',
+ ' - Annotation [Lorg/example/EmptyAnnotation;]:'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {},
+ 'methods': [
+ {
+ 'method': 'Test',
+ 'annotations': {
+ 'OuterAnnotation': {
+ 'outerAttr': 'outerVal',
+ 'outerArr': ['outerArrVal1', 'outerArrVal2'],
+ 'emptyAnn': None,
+ 'ann': {
+ 'innerAttr': 'innerVal',
+ 'innerArr': ['innerArrVal1', 'innerArrVal2'],
+ 'emptyInnerAnn': None
+ }
+ }
+ },
+ }
+ ]
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+ def testMethodArraysOfAnnotations(self):
+ actual = proguard.Parse(
+ ['- Program class: org/example/Test',
+ 'Methods (count = 1):',
+ '- Method: Test()V',
+ ' - Annotation [Lorg/example/OuterAnnotation;]:',
+ ' - Array element value [arrayWithEmptyAnnotations]:',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/EmptyAnnotation;]:',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/EmptyAnnotation;]:',
+ ' - Array element value [outerArray]:',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/InnerAnnotation;]:',
+ ' - Constant element value [innerAttr \'115\']',
+ ' - Utf8 [innerVal]',
+ ' - Array element value [arguments]:',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/InnerAnnotation$Argument;]:',
+ ' - Constant element value [arg1Attr \'115\']',
+ ' - Utf8 [arg1Val]',
+ ' - Array element value [arg1Array]:',
+ ' - Constant element value [(default) \'73\']',
+ ' - Integer [11]',
+ ' - Constant element value [(default) \'73\']',
+ ' - Integer [12]',
+ ' - Annotation element value [(default)]:',
+ ' - Annotation [Lorg/example/InnerAnnotation$Argument;]:',
+ ' - Constant element value [arg2Attr \'115\']',
+ ' - Utf8 [arg2Val]',
+ ' - Array element value [arg2Array]:',
+ ' - Constant element value [(default) \'73\']',
+ ' - Integer [21]',
+ ' - Constant element value [(default) \'73\']',
+ ' - Integer [22]'])
+ expected = {
+ 'classes': [
+ {
+ 'class': 'org.example.Test',
+ 'superclass': '',
+ 'annotations': {},
+ 'methods': [
+ {
+ 'method': 'Test',
+ 'annotations': {
+ 'OuterAnnotation': {
+ 'arrayWithEmptyAnnotations': [None, None],
+ 'outerArray': [
+ {
+ 'innerAttr': 'innerVal',
+ 'arguments': [
+ {'arg1Attr': 'arg1Val', 'arg1Array': ['11', '12']},
+ {'arg2Attr': 'arg2Val', 'arg2Array': ['21', '22']}
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ self.assertEqual(expected, actual)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/repo_utils.py b/third_party/libwebrtc/build/android/pylib/utils/repo_utils.py
new file mode 100644
index 0000000000..f9d300a214
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/repo_utils.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from devil.utils import cmd_helper
+
+
+def GetGitHeadSHA1(in_directory):
+ """Returns the git hash tag for the given directory.
+
+ Args:
+ in_directory: The directory where git is to be run.
+ """
+ command_line = ['git', 'log', '-1', '--pretty=format:%H']
+ output = cmd_helper.GetCmdOutput(command_line, cwd=in_directory)
+ return output[0:40]
+
+
+def GetGitOriginMasterHeadSHA1(in_directory):
+  """Returns the SHA-1 that origin/master points to in the given directory.
+
+  Args:
+    in_directory: The directory where git is to be run.
+  """
+  command_line = ['git', 'rev-parse', 'origin/master']
+  output = cmd_helper.GetCmdOutput(command_line, cwd=in_directory)
+  # rev-parse output ends with a newline; strip it.
+  return output.strip()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/shared_preference_utils.py b/third_party/libwebrtc/build/android/pylib/utils/shared_preference_utils.py
new file mode 100644
index 0000000000..64c4c3f919
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/shared_preference_utils.py
@@ -0,0 +1,116 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility functions for modifying an app's settings file using JSON."""
+
+import json
+import logging
+
+
+def UnicodeToStr(data):
+  """Recursively converts any Unicode to Python strings.
+
+  Args:
+    data: The data to be converted (dict, list, string, or scalar).
+
+  Returns:
+    A copy of the given data, but with instances of Unicode converted to Python
+    strings. Containers are rebuilt recursively; scalars are returned as-is.
+  """
+  if isinstance(data, dict):
+    return {
+        UnicodeToStr(key): UnicodeToStr(value)
+        for key, value in data.items()
+    }
+  elif isinstance(data, list):
+    return [UnicodeToStr(element) for element in data]
+  try:
+    # Python-2 compatibility: `unicode` only exists on Python 2, where
+    # unicode values are encoded down to byte strings.
+    if isinstance(data, unicode):
+      return data.encode('utf-8')
+  except NameError:
+    # Strings are already unicode in python3, so nothing to convert.
+    pass
+  return data
+
+
+def ExtractSettingsFromJson(filepath):
+  """Extracts the settings data from the given JSON file.
+
+  Args:
+    filepath: The path to the JSON file to read.
+
+  Returns:
+    The data read from the JSON file with strings converted to Python strings.
+  """
+  # json.load() loads strings as unicode, which causes issues when trying
+  # to edit string values in preference files, so convert to Python strings
+  with open(filepath) as prefs_file:
+    return UnicodeToStr(json.load(prefs_file))
+
+
+def ApplySharedPreferenceSetting(shared_pref, setting):
+  """Applies the given app settings to the given device.
+
+  Modifies an installed app's settings by modifying its shared preference
+  settings file. Provided settings data must be a settings dictionary,
+  which are in the following format:
+  {
+    "package": "com.example.package",
+    "filename": "AppSettingsFile.xml",
+    "supports_encrypted_path": true,
+    "set": {
+      "SomeBoolToSet": true,
+      "SomeStringToSet": "StringValue",
+    },
+    "remove": [
+      "list",
+      "of",
+      "keys",
+      "to",
+      "remove",
+    ]
+  }
+
+  Example JSON files that can be read with ExtractSettingsFromJson and passed to
+  this function are in //chrome/android/shared_preference_files/test/.
+
+  Args:
+    shared_pref: The devil SharedPrefs object for the device the settings will
+        be applied to.
+    setting: A settings dictionary to apply.
+
+  Raises:
+    ValueError: If a value in the "set" dict is of an unsupported type.
+  """
+  shared_pref.Load()
+  for key in setting.get('remove', []):
+    try:
+      shared_pref.Remove(key)
+    except KeyError:
+      logging.warning("Attempted to remove non-existent key %s", key)
+  # Dispatch each value to the correctly-typed setter. The bool check must
+  # come first: bool is a subclass of int, so the int/long branch would
+  # otherwise swallow booleans.
+  for key, value in setting.get('set', {}).items():
+    is_set = False
+    if not is_set and isinstance(value, bool):
+      shared_pref.SetBoolean(key, value)
+      is_set = True
+    try:
+      # Python-2 compatibility: basestring/long only exist on Python 2.
+      if not is_set and isinstance(value, basestring):
+        shared_pref.SetString(key, value)
+        is_set = True
+      if not is_set and (isinstance(value, long) or isinstance(value, int)):
+        shared_pref.SetLong(key, value)
+        is_set = True
+    except NameError:
+      # Python 3 path: plain str and int cover the same cases.
+      if not is_set and isinstance(value, str):
+        shared_pref.SetString(key, value)
+        is_set = True
+      if not is_set and isinstance(value, int):
+        shared_pref.SetLong(key, value)
+        is_set = True
+    if not is_set and isinstance(value, list):
+      shared_pref.SetStringSet(key, value)
+      is_set = True
+    if not is_set:
+      raise ValueError("Given invalid value type %s for key %s" % (
+          str(type(value)), key))
+  shared_pref.Commit()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/simpleperf.py b/third_party/libwebrtc/build/android/pylib/utils/simpleperf.py
new file mode 100644
index 0000000000..b3ba00e6c2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/simpleperf.py
@@ -0,0 +1,260 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+from devil import devil_env
+from devil.android import device_signal
+from devil.android.sdk import version_codes
+from pylib import constants
+
+
+def _ProcessType(proc):
+  """Classifies a process by the suffix of its name.
+
+  Args:
+    proc: A process object with a `name` attribute, e.g.
+        'org.chromium.chrome:sandboxed_process0'.
+
+  Returns:
+    'browser' (no ':' suffix), 'renderer', 'gpu', or None for an
+    unrecognized suffix.
+  """
+  _, _, suffix = proc.name.partition(':')
+  if not suffix:
+    return 'browser'
+  if suffix.startswith('sandboxed_process'):
+    return 'renderer'
+  if suffix.startswith('privileged_process'):
+    return 'gpu'
+  return None
+
+
+def _GetSpecifiedPID(device, package_name, process_specifier):
+  """Resolves a process specifier to a single PID on the device.
+
+  The specifier is tried, in order, as a numeric PID, an exact process
+  name, and finally a process type (as returned by _ProcessType).
+
+  Args:
+    device: The devil device to query.
+    package_name: Package whose processes are searched.
+    process_specifier: PID, process name, or process type; may be None.
+
+  Returns:
+    The matching PID, or None if process_specifier is None.
+
+  Raises:
+    RuntimeError: If the specifier matches zero or multiple processes.
+  """
+  if process_specifier is None:
+    return None
+
+  # Check for numeric PID
+  try:
+    pid = int(process_specifier)
+    return pid
+  except ValueError:
+    pass
+
+  # Check for exact process name; can be any of these formats:
+  #   <package>:<process name>, i.e. 'org.chromium.chrome:sandboxed_process0'
+  #   :<process name>, i.e. ':sandboxed_process0'
+  #   <process name>, i.e. 'sandboxed_process0'
+  full_process_name = process_specifier
+  if process_specifier.startswith(':'):
+    full_process_name = package_name + process_specifier
+  elif ':' not in process_specifier:
+    full_process_name = '%s:%s' % (package_name, process_specifier)
+  matching_processes = device.ListProcesses(full_process_name)
+  if len(matching_processes) == 1:
+    return matching_processes[0].pid
+  if len(matching_processes) > 1:
+    raise RuntimeError('Found %d processes with name "%s".' % (
+        len(matching_processes), process_specifier))
+
+  # Check for process type (i.e. 'renderer')
+  package_processes = device.ListProcesses(package_name)
+  matching_processes = [p for p in package_processes if (
+      _ProcessType(p) == process_specifier)]
+  if process_specifier == 'renderer' and len(matching_processes) > 1:
+    raise RuntimeError('Found %d renderer processes; please re-run with only '
+                       'one open tab.' % len(matching_processes))
+  if len(matching_processes) != 1:
+    raise RuntimeError('Found %d processes of type "%s".' % (
+        len(matching_processes), process_specifier))
+  return matching_processes[0].pid
+
+
+def _ThreadsForProcess(device, pid):
+  """Lists the threads of a process via `ps` on the device.
+
+  Args:
+    device: The devil device to query.
+    pid: PID of the process whose threads are listed.
+
+  Returns:
+    A list of (tid, thread_name) tuples, excluding the entry for the
+    process itself (tid == pid).
+  """
+  # The thread list output format for 'ps' is the same regardless of version.
+  # Here's the column headers, and a sample line for a thread belonging to
+  # pid 12345 (note that the last few columns are not aligned with headers):
+  #
+  # USER PID TID PPID VSZ RSS WCHAN ADDR S CMD
+  # u0_i101 12345 24680 567 1357902 97531 futex_wait_queue_me e85acd9c S \
+  #     CrRendererMain
+  if device.build_version_sdk >= version_codes.OREO:
+    # Oreo's toybox ps needs -e plus a grep to filter to one pid; the
+    # regex anchors the pid to the second whitespace-separated column.
+    pid_regex = (
+        r'^[[:graph:]]\{1,\}[[:blank:]]\{1,\}%d[[:blank:]]\{1,\}' % pid)
+    ps_cmd = "ps -T -e | grep '%s'" % pid_regex
+    ps_output_lines = device.RunShellCommand(
+        ps_cmd, shell=True, check_return=True)
+  else:
+    ps_cmd = ['ps', '-p', str(pid), '-t']
+    ps_output_lines = device.RunShellCommand(ps_cmd, check_return=True)
+  result = []
+  for l in ps_output_lines:
+    fields = l.split()
+    # fields[2] is tid, fields[-1] is thread name. Output may include an entry
+    # for the process itself with tid=pid; omit that one.
+    if fields[2] == str(pid):
+      continue
+    result.append((int(fields[2]), fields[-1]))
+  return result
+
+
+def _ThreadType(thread_name):
+  """Maps a Chrome thread name to a coarse thread-type label.
+
+  Args:
+    thread_name: Thread name as reported by ps (may be truncated).
+
+  Returns:
+    One of 'unknown', 'io', 'compositor', 'main', 'render' — or
+    implicitly None when the name matches none of the known prefixes.
+  """
+  if not thread_name:
+    return 'unknown'
+  if (thread_name.startswith('Chrome_ChildIO') or
+      thread_name.startswith('Chrome_IO')):
+    return 'io'
+  if thread_name.startswith('Compositor'):
+    return 'compositor'
+  # 'ChildProcessMai' is 'ChildProcessMain' truncated by ps's name limit.
+  if (thread_name.startswith('ChildProcessMai') or
+      thread_name.startswith('CrGpuMain') or
+      thread_name.startswith('CrRendererMain')):
+    return 'main'
+  if thread_name.startswith('RenderThread'):
+    return 'render'
+
+
+def _GetSpecifiedTID(device, pid, thread_specifier):
+  """Resolves a thread specifier to a single TID on the device.
+
+  Args:
+    device: The devil device to query.
+    pid: PID whose threads are searched; may be None.
+    thread_specifier: Numeric TID or a thread type (see _ThreadType);
+        may be None.
+
+  Returns:
+    The matching TID, or None if thread_specifier (or pid, for a
+    type-based lookup) is None.
+
+  Raises:
+    RuntimeError: If a type specifier matches zero or multiple threads.
+  """
+  if thread_specifier is None:
+    return None
+
+  # Check for numeric TID
+  try:
+    tid = int(thread_specifier)
+    return tid
+  except ValueError:
+    pass
+
+  # Check for thread type
+  if pid is not None:
+    matching_threads = [t for t in _ThreadsForProcess(device, pid) if (
+        _ThreadType(t[1]) == thread_specifier)]
+    if len(matching_threads) != 1:
+      raise RuntimeError('Found %d threads of type "%s".' % (
+          len(matching_threads), thread_specifier))
+    return matching_threads[0][0]
+
+  return None
+
+
+def PrepareDevice(device):
+  """Prepares a device for simpleperf profiling.
+
+  Args:
+    device: The devil device to configure.
+
+  Raises:
+    RuntimeError: If the device runs Android older than N.
+  """
+  if device.build_version_sdk < version_codes.NOUGAT:
+    raise RuntimeError('Simpleperf profiling is only supported on Android N '
+                       'and later.')
+
+  # Necessary for profiling
+  # https://android-review.googlesource.com/c/platform/system/sepolicy/+/234400
+  device.SetProp('security.perf_harden', '0')
+
+
+def InstallSimpleperf(device, package_name):
+  """Pushes the simpleperf binary matching the app's ABI to the device.
+
+  Args:
+    device: The devil device to push to.
+    package_name: Package whose architecture selects the binary; falls
+        back to 'armeabi-v7a' when the architecture cannot be determined.
+
+  Returns:
+    The on-device path of the pushed simpleperf binary.
+
+  Raises:
+    Exception: If no host-side simpleperf binary is configured for the
+        architecture.
+  """
+  package_arch = device.GetPackageArchitecture(package_name) or 'armeabi-v7a'
+  host_simpleperf_path = devil_env.config.LocalPath('simpleperf', package_arch)
+  if not host_simpleperf_path:
+    raise Exception('Could not get path to simpleperf executable on host.')
+  device_simpleperf_path = '/'.join(
+      ('/data/local/tmp/profilers', package_arch, 'simpleperf'))
+  # PushChangedFiles skips the transfer when the binary is already there.
+  device.PushChangedFiles([(host_simpleperf_path, device_simpleperf_path)])
+  return device_simpleperf_path
+
+
+@contextlib.contextmanager
+def RunSimpleperf(device, device_simpleperf_path, package_name,
+                  process_specifier, thread_specifier, profiler_args,
+                  host_out_path):
+  """Context manager that profiles with simpleperf while its body runs.
+
+  On entry, starts `simpleperf record` on the device attached to the
+  specified process/thread. On exit, stops simpleperf (SIGINT) and, if
+  the body completed without an exception, pulls the profile to the host.
+
+  Args:
+    device: The devil device to profile on.
+    device_simpleperf_path: On-device path of the simpleperf binary.
+    package_name: Package of the app being profiled.
+    process_specifier: PID/name/type of the process to profile (see
+        _GetSpecifiedPID).
+    thread_specifier: TID/type of the thread to profile (see
+        _GetSpecifiedTID).
+    profiler_args: Extra arguments for `simpleperf record`; sensible
+        defaults for -g, -f and -o are added when absent.
+    host_out_path: Host path to pull the recorded perf.data to.
+
+  Raises:
+    RuntimeError: If the specified process/thread is not running.
+  """
+  pid = _GetSpecifiedPID(device, package_name, process_specifier)
+  tid = _GetSpecifiedTID(device, pid, thread_specifier)
+  if pid is None and tid is None:
+    raise RuntimeError('Could not find specified process/thread running on '
+                       'device. Make sure the apk is already running before '
+                       'attempting to profile.')
+  # Copy before mutating: defaults below must not leak back to the caller.
+  profiler_args = list(profiler_args)
+  if profiler_args and profiler_args[0] == 'record':
+    profiler_args.pop(0)
+  if '--call-graph' not in profiler_args and '-g' not in profiler_args:
+    profiler_args.append('-g')
+  if '-f' not in profiler_args:
+    profiler_args.extend(('-f', '1000'))
+  device_out_path = '/data/local/tmp/perf.data'
+  if '-o' in profiler_args:
+    device_out_path = profiler_args[profiler_args.index('-o') + 1]
+  else:
+    profiler_args.extend(('-o', device_out_path))
+
+  # A TID takes precedence over a PID when both are available.
+  if tid:
+    profiler_args.extend(('-t', str(tid)))
+  else:
+    profiler_args.extend(('-p', str(pid)))
+
+  adb_shell_simpleperf_process = device.adb.StartShell(
+      [device_simpleperf_path, 'record'] + profiler_args)
+
+  completed = False
+  try:
+    yield
+    completed = True
+
+  finally:
+    # SIGINT makes simpleperf flush and finalize its output file.
+    device.KillAll('simpleperf', signum=device_signal.SIGINT, blocking=True,
+                   quiet=True)
+    if completed:
+      adb_shell_simpleperf_process.wait()
+      device.PullFile(device_out_path, host_out_path)
+
+
+def ConvertSimpleperfToPprof(simpleperf_out_path, build_directory,
+                             pprof_out_path):
+  """Converts a simpleperf profile to pprof format with symbols.
+
+  Args:
+    simpleperf_out_path: Host path of the simpleperf perf.data file.
+    build_directory: Chromium output directory containing lib.unstripped/.
+    pprof_out_path: Host path to write the pprof-format profile to.
+
+  Raises:
+    RuntimeError: If the profile contains no samples in any of the
+        unstripped Chrome libraries.
+  """
+  # The simpleperf scripts require the unstripped libs to be installed in the
+  # same directory structure as the libs on the device. Much of the logic here
+  # is just figuring out and creating the necessary directory structure, and
+  # symlinking the unstripped shared libs.
+
+  # Get the set of libs that we can symbolize
+  unstripped_lib_dir = os.path.join(build_directory, 'lib.unstripped')
+  unstripped_libs = set(
+      f for f in os.listdir(unstripped_lib_dir) if f.endswith('.so'))
+
+  # report.py will show the directory structure above the shared libs;
+  # that is the directory structure we need to recreate on the host.
+  script_dir = devil_env.config.LocalPath('simpleperf_scripts')
+  report_path = os.path.join(script_dir, 'report.py')
+  report_cmd = [sys.executable, report_path, '-i', simpleperf_out_path]
+  device_lib_path = None
+  for line in subprocess.check_output(
+      report_cmd, stderr=subprocess.STDOUT).splitlines():
+    fields = line.split()
+    if len(fields) < 5:
+      continue
+    # fields[4] is the on-device shared-library path in report.py output.
+    shlib_path = fields[4]
+    shlib_dirname, shlib_basename = shlib_path.rpartition('/')[::2]
+    if shlib_basename in unstripped_libs:
+      device_lib_path = shlib_dirname
+      break
+  if not device_lib_path:
+    raise RuntimeError('No chrome-related symbols in profiling data in %s. '
+                       'Either the process was idle for the entire profiling '
+                       'period, or something went very wrong (and you should '
+                       'file a bug at crbug.com/new with component '
+                       'Speed>Tracing, and assign it to szager@chromium.org).'
+                       % simpleperf_out_path)
+
+  # Recreate the directory structure locally, and symlink unstripped libs.
+  processing_dir = tempfile.mkdtemp()
+  try:
+    processing_lib_dir = os.path.join(
+        processing_dir, 'binary_cache', device_lib_path.lstrip('/'))
+    os.makedirs(processing_lib_dir)
+    for lib in unstripped_libs:
+      unstripped_lib_path = os.path.join(unstripped_lib_dir, lib)
+      processing_lib_path = os.path.join(processing_lib_dir, lib)
+      os.symlink(unstripped_lib_path, processing_lib_path)
+
+    # Run the script to annotate symbols and convert from simpleperf format to
+    # pprof format.
+    pprof_converter_script = os.path.join(
+        script_dir, 'pprof_proto_generator.py')
+    pprof_converter_cmd = [
+        sys.executable, pprof_converter_script, '-i', simpleperf_out_path, '-o',
+        os.path.abspath(pprof_out_path), '--ndk_path',
+        constants.ANDROID_NDK_ROOT
+    ]
+    subprocess.check_output(pprof_converter_cmd, stderr=subprocess.STDOUT,
+                            cwd=processing_dir)
+  finally:
+    # Always clean up the temp dir, even if conversion failed.
+    shutil.rmtree(processing_dir, ignore_errors=True)
diff --git a/third_party/libwebrtc/build/android/pylib/utils/test_filter.py b/third_party/libwebrtc/build/android/pylib/utils/test_filter.py
new file mode 100644
index 0000000000..7bafd002e3
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/test_filter.py
@@ -0,0 +1,148 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+
+
+# Matches ' with {...}' / ' without {...}' segments appended to test names;
+# InitializeFilterFromArgs strips these from command-line filters.
+_CMDLINE_NAME_SEGMENT_RE = re.compile(
+    r' with(?:out)? \{[^\}]*\}')
+
+class ConflictingPositiveFiltersException(Exception):
+  """Raised when both filter file and filter argument have positive filters."""
+
+
+def ParseFilterFile(input_lines):
+  """Converts test filter file contents to positive and negative pattern lists.
+
+  See //testing/buildbot/filters/README.md for description of the
+  syntax that |input_lines| are expected to follow.
+
+  See
+  https://github.com/google/googletest/blob/master/docs/advanced.md#running-a-subset-of-the-tests
+  for description of the syntax that --gtest_filter argument should follow.
+
+  Args:
+    input_lines: An iterable (e.g. a list or a file) containing input lines.
+  Returns:
+    tuple containing the lists of positive patterns and negative patterns
+  """
+  # Strip comments and whitespace from each line and filter non-empty lines.
+  stripped_lines = (l.split('#', 1)[0].strip() for l in input_lines)
+  filter_lines = [l for l in stripped_lines if l]
+
+  # Split the tests into positive and negative patterns (gtest treats
+  # every pattern after the first '-' sign as an exclusion).
+  positive_patterns = [l for l in filter_lines if l[0] != '-']
+  negative_patterns = [l[1:] for l in filter_lines if l[0] == '-']
+  return positive_patterns, negative_patterns
+
+
+def AddFilterOptions(parser):
+  """Adds filter command-line options to the provided parser.
+
+  Args:
+    parser: an argparse.ArgumentParser instance.
+  """
+  parser.add_argument(
+      # Deprecated argument.
+      '--gtest-filter-file',
+      # New argument.
+      '--test-launcher-filter-file',
+      action='append',
+      dest='test_filter_files',
+      help='Path to file that contains googletest-style filter strings. '
+      'See also //testing/buildbot/filters/README.md.')
+
+  # --test-filter and --isolated-script-test-filter are mutually exclusive;
+  # InitializeFilterFromArgs normalizes whichever one was given.
+  filter_group = parser.add_mutually_exclusive_group()
+  filter_group.add_argument(
+      '-f', '--test-filter', '--gtest_filter', '--gtest-filter',
+      dest='test_filter',
+      help='googletest-style filter string.',
+      default=os.environ.get('GTEST_FILTER'))
+  filter_group.add_argument(
+      '--isolated-script-test-filter',
+      help='isolated script filter string. '
+      'Like gtest filter strings, but with :: separators instead of :')
+
+
+def AppendPatternsToFilter(test_filter, positive_patterns=None,
+                           negative_patterns=None):
+  """Returns a test-filter string with additional patterns.
+
+  Args:
+    test_filter: test filter string
+    positive_patterns: list of positive patterns to add to string
+    negative_patterns: list of negative patterns to add to string
+
+  Returns:
+    A gtest-style filter string ('pos1:pos2-neg1:neg2') with '#'
+    replaced by '.' in every pattern.
+  """
+  positives = []
+  negatives = []
+  positive = ''
+  negative = ''
+
+  # gtest syntax: everything after the first '-' is the negative section.
+  split_filter = test_filter.split('-', 1)
+  if len(split_filter) == 1:
+    positive = split_filter[0]
+  else:
+    positive, negative = split_filter
+
+  positives += [f for f in positive.split(':') if f]
+  negatives += [f for f in negative.split(':') if f]
+
+  positives += positive_patterns if positive_patterns else []
+  negatives += negative_patterns if negative_patterns else []
+
+  # '#' is the Java-style class#method separator; gtest uses '.'.
+  final_filter = ':'.join([p.replace('#', '.') for p in positives])
+  if negatives:
+    final_filter += '-' + ':'.join([n.replace('#', '.') for n in negatives])
+  return final_filter
+
+
+def HasPositivePatterns(test_filter):
+  """Returns True if test_filter contains a positive pattern, else False.
+
+  Args:
+    test_filter: test-filter style string
+  """
+  # A non-empty filter starting with anything but '-' begins with a
+  # positive pattern.
+  return bool(len(test_filter) > 0 and test_filter[0] != '-')
+
+
+def InitializeFilterFromArgs(args):
+  """Returns a filter string from the command-line option values.
+
+  Args:
+    args: an argparse.Namespace instance resulting from a using parser
+    to which the filter options above were added.
+
+  Returns:
+    A gtest-style filter string combining the command-line filter with
+    the patterns from all given filter files.
+
+  Raises:
+    ConflictingPositiveFiltersException if both filter file and command line
+    specify positive filters.
+  """
+  test_filter = ''
+  if args.isolated_script_test_filter:
+    # Isolated-script filters use '::' as the pattern separator.
+    args.test_filter = args.isolated_script_test_filter.replace('::', ':')
+  if args.test_filter:
+    test_filter = _CMDLINE_NAME_SEGMENT_RE.sub(
+        '', args.test_filter.replace('#', '.'))
+
+  if not args.test_filter_files:
+    return test_filter
+
+  # At this point it's potentially several files, in a list and ; separated
+  for test_filter_file in args.test_filter_files:
+    # At this point it's potentially several files, ; separated
+    # (the loop variable is deliberately reused for the split entries).
+    for test_filter_file in test_filter_file.split(';'):
+      # At this point it's individual files
+      with open(test_filter_file, 'r') as f:
+        positive_file_patterns, negative_file_patterns = ParseFilterFile(f)
+        if positive_file_patterns and HasPositivePatterns(test_filter):
+          raise ConflictingPositiveFiltersException(
+              'Cannot specify positive pattern in both filter file and ' +
+              'filter command line argument')
+        test_filter = AppendPatternsToFilter(
+            test_filter,
+            positive_patterns=positive_file_patterns,
+            negative_patterns=negative_file_patterns)
+
+  return test_filter
diff --git a/third_party/libwebrtc/build/android/pylib/utils/test_filter_test.py b/third_party/libwebrtc/build/android/pylib/utils/test_filter_test.py
new file mode 100755
index 0000000000..3f1f21e4cb
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/test_filter_test.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env vpython3
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import sys
+import tempfile
+import unittest
+
+from pylib.utils import test_filter
+
+class ParseFilterFileTest(unittest.TestCase):
+
+ def testParseFilterFile_commentsAndBlankLines(self):
+ input_lines = [
+ 'positive1',
+ '# comment',
+ 'positive2 # Another comment',
+ ''
+ 'positive3'
+ ]
+ actual = test_filter.ParseFilterFile(input_lines)
+ expected = ['positive1', 'positive2', 'positive3'], []
+ self.assertEqual(expected, actual)
+
+ def testParseFilterFile_onlyPositive(self):
+ input_lines = [
+ 'positive1',
+ 'positive2'
+ ]
+ actual = test_filter.ParseFilterFile(input_lines)
+ expected = ['positive1', 'positive2'], []
+ self.assertEqual(expected, actual)
+
+ def testParseFilterFile_onlyNegative(self):
+ input_lines = [
+ '-negative1',
+ '-negative2'
+ ]
+ actual = test_filter.ParseFilterFile(input_lines)
+ expected = [], ['negative1', 'negative2']
+ self.assertEqual(expected, actual)
+
+ def testParseFilterFile_positiveAndNegative(self):
+ input_lines = [
+ 'positive1',
+ 'positive2',
+ '-negative1',
+ '-negative2'
+ ]
+ actual = test_filter.ParseFilterFile(input_lines)
+ expected = ['positive1', 'positive2'], ['negative1', 'negative2']
+ self.assertEqual(expected, actual)
+
+
+class InitializeFilterFromArgsTest(unittest.TestCase):
+  """Tests for test_filter.InitializeFilterFromArgs."""
+
+  def testInitializeBasicFilter(self):
+    parser = argparse.ArgumentParser()
+    test_filter.AddFilterOptions(parser)
+    args = parser.parse_args([
+        '--test-filter',
+        'FooTest.testFoo:BarTest.testBar'])
+    expected = 'FooTest.testFoo:BarTest.testBar'
+    actual = test_filter.InitializeFilterFromArgs(args)
+    self.assertEqual(actual, expected)
+
+  def testInitializeJavaStyleFilter(self):
+    # '#' (Java class#method) should be normalized to '.'.
+    parser = argparse.ArgumentParser()
+    test_filter.AddFilterOptions(parser)
+    args = parser.parse_args([
+        '--test-filter',
+        'FooTest#testFoo:BarTest#testBar'])
+    expected = 'FooTest.testFoo:BarTest.testBar'
+    actual = test_filter.InitializeFilterFromArgs(args)
+    self.assertEqual(actual, expected)
+
+  def testInitializeBasicIsolatedScript(self):
+    # '::' (isolated-script separator) should be normalized to ':'.
+    parser = argparse.ArgumentParser()
+    test_filter.AddFilterOptions(parser)
+    args = parser.parse_args([
+        '--isolated-script-test-filter',
+        'FooTest.testFoo::BarTest.testBar'])
+    expected = 'FooTest.testFoo:BarTest.testBar'
+    actual = test_filter.InitializeFilterFromArgs(args)
+    self.assertEqual(actual, expected)
+
+  def testFilterArgWithPositiveFilterInFilterFile(self):
+    parser = argparse.ArgumentParser()
+    test_filter.AddFilterOptions(parser)
+    with tempfile.NamedTemporaryFile(mode='w') as tmp_file:
+      tmp_file.write('positive1\npositive2\n-negative2\n-negative3\n')
+      tmp_file.seek(0)
+      args = parser.parse_args([
+          '--test-filter=-negative1',
+          '--test-launcher-filter-file',
+          tmp_file.name])
+      expected = 'positive1:positive2-negative1:negative2:negative3'
+      actual = test_filter.InitializeFilterFromArgs(args)
+      self.assertEqual(actual, expected)
+
+  def testFilterFileWithPositiveFilterInFilterArg(self):
+    parser = argparse.ArgumentParser()
+    test_filter.AddFilterOptions(parser)
+    with tempfile.NamedTemporaryFile(mode='w') as tmp_file:
+      tmp_file.write('-negative2\n-negative3\n')
+      tmp_file.seek(0)
+      args = parser.parse_args([
+          '--test-filter',
+          'positive1:positive2-negative1',
+          '--test-launcher-filter-file',
+          tmp_file.name])
+      expected = 'positive1:positive2-negative1:negative2:negative3'
+      actual = test_filter.InitializeFilterFromArgs(args)
+      self.assertEqual(actual, expected)
+
+  def testPositiveFilterInBothFileAndArg(self):
+    # Positive patterns from both sources must raise.
+    parser = argparse.ArgumentParser()
+    test_filter.AddFilterOptions(parser)
+    with tempfile.NamedTemporaryFile(mode='w') as tmp_file:
+      tmp_file.write('positive1\n')
+      tmp_file.seek(0)
+      args = parser.parse_args([
+          '--test-filter',
+          'positive2',
+          '--test-launcher-filter-file',
+          tmp_file.name])
+      with self.assertRaises(test_filter.ConflictingPositiveFiltersException):
+        test_filter.InitializeFilterFromArgs(args)
+
+  def testFilterArgWithFilterFileAllNegative(self):
+    parser = argparse.ArgumentParser()
+    test_filter.AddFilterOptions(parser)
+    with tempfile.NamedTemporaryFile(mode='w') as tmp_file:
+      tmp_file.write('-negative3\n-negative4\n')
+      tmp_file.seek(0)
+      args = parser.parse_args([
+          '--test-filter=-negative1:negative2',
+          '--test-launcher-filter-file',
+          tmp_file.name])
+      expected = '-negative1:negative2:negative3:negative4'
+      actual = test_filter.InitializeFilterFromArgs(args)
+      self.assertEqual(actual, expected)
+
+
+class AppendPatternsToFilter(unittest.TestCase):
+  """Tests for test_filter.AppendPatternsToFilter."""
+
+  def testAllEmpty(self):
+    expected = ''
+    actual = test_filter.AppendPatternsToFilter('', [], [])
+    self.assertEqual(actual, expected)
+
+  def testAppendOnlyPositiveToEmptyFilter(self):
+    expected = 'positive'
+    actual = test_filter.AppendPatternsToFilter('', ['positive'])
+    self.assertEqual(actual, expected)
+
+  def testAppendOnlyNegativeToEmptyFilter(self):
+    expected = '-negative'
+    actual = test_filter.AppendPatternsToFilter('',
+                                                negative_patterns=['negative'])
+    self.assertEqual(actual, expected)
+
+  def testAppendToEmptyFilter(self):
+    expected = 'positive-negative'
+    actual = test_filter.AppendPatternsToFilter('', ['positive'], ['negative'])
+    self.assertEqual(actual, expected)
+
+  def testAppendToPositiveOnlyFilter(self):
+    expected = 'positive1:positive2-negative'
+    actual = test_filter.AppendPatternsToFilter('positive1', ['positive2'],
+                                                ['negative'])
+    self.assertEqual(actual, expected)
+
+  def testAppendToNegativeOnlyFilter(self):
+    expected = 'positive-negative1:negative2'
+    actual = test_filter.AppendPatternsToFilter('-negative1', ['positive'],
+                                                ['negative2'])
+    self.assertEqual(actual, expected)
+
+  def testAppendPositiveToFilter(self):
+    expected = 'positive1:positive2-negative1'
+    actual = test_filter.AppendPatternsToFilter('positive1-negative1',
+                                                ['positive2'])
+    self.assertEqual(actual, expected)
+
+  def testAppendNegativeToFilter(self):
+    expected = 'positive1-negative1:negative2'
+    actual = test_filter.AppendPatternsToFilter('positive1-negative1',
+                                                negative_patterns=['negative2'])
+    self.assertEqual(actual, expected)
+
+  def testAppendBothToFilter(self):
+    expected = 'positive1:positive2-negative1:negative2'
+    actual = test_filter.AppendPatternsToFilter('positive1-negative1',
+                                                positive_patterns=['positive2'],
+                                                negative_patterns=['negative2'])
+    self.assertEqual(actual, expected)
+
+  def testAppendMultipleToFilter(self):
+    expected = 'positive1:positive2:positive3-negative1:negative2:negative3'
+    actual = test_filter.AppendPatternsToFilter('positive1-negative1',
+                                                ['positive2', 'positive3'],
+                                                ['negative2', 'negative3'])
+    self.assertEqual(actual, expected)
+
+  def testRepeatedAppendToFilter(self):
+    # Appending twice should accumulate, not overwrite.
+    expected = 'positive1:positive2:positive3-negative1:negative2:negative3'
+    filter_string = test_filter.AppendPatternsToFilter('positive1-negative1',
+                                                       ['positive2'],
+                                                       ['negative2'])
+    actual = test_filter.AppendPatternsToFilter(filter_string, ['positive3'],
+                                                ['negative3'])
+    self.assertEqual(actual, expected)
+
+  def testAppendHashSeparatedPatternsToFilter(self):
+    # '#' separators are normalized to '.' in both sections.
+    expected = 'positive.test1:positive.test2-negative.test1:negative.test2'
+    actual = test_filter.AppendPatternsToFilter('positive#test1-negative#test1',
+                                                ['positive#test2'],
+                                                ['negative#test2'])
+    self.assertEqual(actual, expected)
+
+
+class HasPositivePatterns(unittest.TestCase):
+  """Tests for test_filter.HasPositivePatterns."""
+
+  def testEmpty(self):
+    expected = False
+    actual = test_filter.HasPositivePatterns('')
+    self.assertEqual(actual, expected)
+
+  def testHasOnlyPositive(self):
+    expected = True
+    actual = test_filter.HasPositivePatterns('positive')
+    self.assertEqual(actual, expected)
+
+  def testHasOnlyNegative(self):
+    expected = False
+    actual = test_filter.HasPositivePatterns('-negative')
+    self.assertEqual(actual, expected)
+
+  def testHasBoth(self):
+    expected = True
+    actual = test_filter.HasPositivePatterns('positive-negative')
+    self.assertEqual(actual, expected)
+
+
+if __name__ == '__main__':
+ sys.exit(unittest.main())
diff --git a/third_party/libwebrtc/build/android/pylib/utils/time_profile.py b/third_party/libwebrtc/build/android/pylib/utils/time_profile.py
new file mode 100644
index 0000000000..094799c4f2
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/time_profile.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+
+
+class TimeProfile(object):
+  """Class for simple profiling of action, with logging of cost.
+
+  The timer starts automatically on construction; call Stop() or
+  GetDelta() to end the measurement.
+  """
+
+  def __init__(self, description='operation'):
+    # _starttime/_endtime hold time.time() values; _endtime is None while
+    # the timer is still running.
+    self._starttime = None
+    self._endtime = None
+    self._description = description
+    self.Start()
+
+  def Start(self):
+    """(Re)starts the timer, discarding any previous end time."""
+    self._starttime = time.time()
+    self._endtime = None
+
+  def GetDelta(self):
+    """Returns the rounded delta.
+
+    Also stops the timer if Stop() has not already been called.
+    """
+    if self._endtime is None:
+      self.Stop(log=False)
+    delta = self._endtime - self._starttime
+    # More precision for short operations, less noise for long ones.
+    delta = round(delta, 2) if delta < 10 else round(delta, 1)
+    return delta
+
+  def LogResult(self):
+    """Logs the result."""
+    logging.info('%s seconds to perform %s', self.GetDelta(), self._description)
+
+  def Stop(self, log=True):
+    """Stop profiling.
+
+    Args:
+      log: Log the delta (defaults to true).
+    """
+    self._endtime = time.time()
+    if log:
+      self.LogResult()
diff --git a/third_party/libwebrtc/build/android/pylib/utils/xvfb.py b/third_party/libwebrtc/build/android/pylib/utils/xvfb.py
new file mode 100644
index 0000000000..cb9d50e8fd
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/utils/xvfb.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=W0702
+
+import os
+import signal
+import subprocess
+import sys
+import time
+
+
+def _IsLinux():
+  """Return True if on Linux; else False."""
+  return sys.platform.startswith('linux')
+
+
+class Xvfb(object):
+  """Class to start and stop Xvfb if relevant. Nop if not Linux."""
+
+  def __init__(self):
+    # PID of the Xvfb process; 0 means "not running".
+    self._pid = 0
+
+  def Start(self):
+    """Start Xvfb and set an appropriate DISPLAY environment. Linux only.
+
+    Copied from tools/code_coverage/coverage_posix.py
+
+    Raises:
+      Exception: If Xvfb fails to start or never becomes responsive.
+    """
+    if not _IsLinux():
+      return
+    proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
+                             '-ac'],
+                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    self._pid = proc.pid
+    if not self._pid:
+      raise Exception('Could not start Xvfb')
+    os.environ['DISPLAY'] = ':9'
+
+    # Now confirm, giving a chance for it to start if needed.
+    # xdpyinfo exits 0 once the display is up; poll up to 10 times.
+    for _ in range(10):
+      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
+      _, retcode = os.waitpid(proc.pid, 0)
+      if retcode == 0:
+        break
+      time.sleep(0.25)
+    if retcode != 0:
+      raise Exception('Could not confirm Xvfb happiness')
+
+  def Stop(self):
+    """Stop Xvfb if needed. Linux only."""
+    if self._pid:
+      try:
+        os.kill(self._pid, signal.SIGKILL)
+      except:
+        # Best-effort kill; the process may already be gone (W0702 is
+        # disabled at the top of this file).
+        pass
+      del os.environ['DISPLAY']
+      self._pid = 0
diff --git a/third_party/libwebrtc/build/android/pylib/valgrind_tools.py b/third_party/libwebrtc/build/android/pylib/valgrind_tools.py
new file mode 100644
index 0000000000..fec71beaf7
--- /dev/null
+++ b/third_party/libwebrtc/build/android/pylib/valgrind_tools.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=R0201
+
+
+
+
+import logging
+import sys
+
+from devil.android import device_errors
+from devil.android.valgrind_tools import base_tool
+
+
def SetChromeTimeoutScale(device, scale):
  """Sets the timeout scale in /data/local/tmp/chrome_timeout_scale to scale."""
  path = '/data/local/tmp/chrome_timeout_scale'
  if scale and scale != 1.0:
    device.WriteFile(path, '%f' % scale, as_root=True)
  else:
    # None/0.0/1.0 all mean "default"; remove any override file instead of
    # writing the default value.
    device.RemovePath(path, force=True, as_root=True)
+
+
+
class AddressSanitizerTool(base_tool.BaseTool):
  """Tool wrapper that runs device binaries under the ASan runtime."""

  WRAPPER_NAME = '/system/bin/asanwrapper'
  # Disable the memcmp overlap check: some devices ship driver blobs
  # (GL drivers) that call memcmp on overlapping regions, and we cannot
  # fix those.
  EXTRA_OPTIONS = 'strict_memcmp=0,use_sigaltstack=1'

  def __init__(self, device):
    super().__init__()
    self._device = device

  @classmethod
  def CopyFiles(cls, device):
    """Copies ASan tools to the device."""
    # Nothing to push: the wrapper already lives on the system image.
    del device

  def GetTestWrapper(self):
    """Returns the command prefix used to launch tests under ASan."""
    return AddressSanitizerTool.WRAPPER_NAME

  def GetUtilWrapper(self):
    """Returns the wrapper for utilities, such as forwarder.

    AddressSanitizer wrapper must be added to all instrumented binaries,
    including forwarder and the like. This can be removed if such binaries
    were built without instrumentation. """
    return self.GetTestWrapper()

  def GetTimeoutScale(self):
    """Returns the multiplier applied to test timeouts."""
    # Very slow startup under ASan instrumentation.
    return 20.0

  def SetupEnvironment(self):
    """Roots the device (best effort) and applies the ASan timeout scale."""
    try:
      self._device.EnableRoot()
    except device_errors.CommandFailedError as e:
      # Try to set the timeout scale anyway.
      # TODO(jbudorick) Handle this exception appropriately after interface
      # conversions are finished.
      logging.error('%s', e)
    SetChromeTimeoutScale(self._device, self.GetTimeoutScale())

  def CleanUpEnvironment(self):
    """Restores the default timeout scale on the device."""
    SetChromeTimeoutScale(self._device, None)
+
+
# Maps a command-line tool name to the class implementing it.
TOOL_REGISTRY = {'asan': AddressSanitizerTool}
+
+
def CreateTool(tool_name, device):
  """Creates a tool with the specified tool name.

  Args:
    tool_name: Name of the tool to create.
    device: A DeviceUtils instance.
  Returns:
    A tool for the specified tool_name.
  """
  # An empty/None name means "no tool": return the no-op base tool.
  if not tool_name:
    return base_tool.BaseTool()

  tool_class = TOOL_REGISTRY.get(tool_name)
  if not tool_class:
    print('Unknown tool %s, available tools: %s' % (tool_name, ', '.join(
        sorted(TOOL_REGISTRY.keys()))))
    sys.exit(1)
  return tool_class(device)
+
def PushFilesForTool(tool_name, device):
  """Pushes the files required for |tool_name| to |device|.

  Args:
    tool_name: Name of the tool to create.
    device: A DeviceUtils instance.
  """
  # No tool selected: nothing to push.
  if not tool_name:
    return

  tool_class = TOOL_REGISTRY.get(tool_name)
  if not tool_class:
    print('Unknown tool %s, available tools: %s' % (tool_name, ', '.join(
        sorted(TOOL_REGISTRY.keys()))))
    sys.exit(1)
  tool_class.CopyFiles(device)