| field | value | date |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 19:33:14 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 19:33:14 +0000 |
| commit | 36d22d82aa202bb199967e9512281e9a53db42c9 (patch) | |
| tree | 105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/libwebrtc/build/util | |
| parent | Initial commit. (diff) | |
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/build/util')
31 files changed, 3677 insertions, 0 deletions
diff --git a/third_party/libwebrtc/build/util/BUILD.gn b/third_party/libwebrtc/build/util/BUILD.gn new file mode 100644 index 0000000000..2745449ea7 --- /dev/null +++ b/third_party/libwebrtc/build/util/BUILD.gn @@ -0,0 +1,38 @@ +# Copyright (c) 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/util/lastchange.gni") + +action("chromium_git_revision") { + script = "version.py" + + template_file = "chromium_git_revision.h.in" + inputs = [ + lastchange_file, + template_file, + ] + + output_file = "$target_gen_dir/chromium_git_revision.h" + outputs = [ output_file ] + + args = [ + # LASTCHANGE contains "<build hash>-<ref>". The user agent only wants the + # "<build hash>" bit, so chop off everything after it. + "-e", + "LASTCHANGE=LASTCHANGE[:LASTCHANGE.find('-')]", + "-f", + rebase_path(lastchange_file, root_build_dir), + rebase_path(template_file, root_build_dir), + rebase_path(output_file, root_build_dir), + ] +} + +group("test_results") { + data = [ + "//.vpython", + "//.vpython3", + "//build/util/lib/__init__.py", + "//build/util/lib/results/", + ] +} diff --git a/third_party/libwebrtc/build/util/LASTCHANGE.committime b/third_party/libwebrtc/build/util/LASTCHANGE.committime new file mode 100644 index 0000000000..c5633f84f5 --- /dev/null +++ b/third_party/libwebrtc/build/util/LASTCHANGE.committime @@ -0,0 +1 @@ +1597352396
\ No newline at end of file diff --git a/third_party/libwebrtc/build/util/LASTCHANGE.dummy b/third_party/libwebrtc/build/util/LASTCHANGE.dummy new file mode 100644 index 0000000000..21bb3c33c7 --- /dev/null +++ b/third_party/libwebrtc/build/util/LASTCHANGE.dummy @@ -0,0 +1 @@ +LASTCHANGE=0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 diff --git a/third_party/libwebrtc/build/util/PRESUBMIT.py b/third_party/libwebrtc/build/util/PRESUBMIT.py new file mode 100644 index 0000000000..575c806d3c --- /dev/null +++ b/third_party/libwebrtc/build/util/PRESUBMIT.py @@ -0,0 +1,64 @@ +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import re +"""Presubmit for build/util""" + + +USE_PYTHON3 = True + + +def _GetFilesToSkip(input_api): + files_to_skip = [] + affected_files = input_api.change.AffectedFiles() + version_script_change = next( + (f for f in affected_files + if re.search('\\/version\\.py$|\\/version_test\\.py$', f.LocalPath())), + None) + + if version_script_change is None: + files_to_skip.append('version_test\\.py$') + + android_chrome_version_script_change = next( + (f for f in affected_files if re.search( + '\\/android_chrome_version\\.py$|' + '\\/android_chrome_version_test\\.py$', f.LocalPath())), None) + + if android_chrome_version_script_change is None: + files_to_skip.append('android_chrome_version_test\\.py$') + + return files_to_skip + + +def _GetPythonUnitTests(input_api, output_api): + # No need to test if files are unchanged + files_to_skip = _GetFilesToSkip(input_api) + + return input_api.canned_checks.GetUnitTestsRecursively( + input_api, + output_api, + input_api.PresubmitLocalPath(), + files_to_check=['.*_test\\.py$'], + files_to_skip=files_to_skip, + run_on_python2=False, + run_on_python3=True, + skip_shebang_check=True) + + +def CommonChecks(input_api, output_api): + """Presubmit checks run on both upload and commit. + """ + checks = [] + checks.extend(_GetPythonUnitTests(input_api, output_api)) + return input_api.RunTests(checks, False) + + +def CheckChangeOnUpload(input_api, output_api): + """Presubmit checks on CL upload.""" + return CommonChecks(input_api, output_api) + + +def CheckChangeOnCommit(input_api, output_api): + """Presubmit checks on commit.""" + return CommonChecks(input_api, output_api) diff --git a/third_party/libwebrtc/build/util/android_chrome_version.py b/third_party/libwebrtc/build/util/android_chrome_version.py new file mode 100644 index 0000000000..5ec9e48d15 --- /dev/null +++ b/third_party/libwebrtc/build/util/android_chrome_version.py @@ -0,0 +1,214 @@ +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +"""Different build variants of Chrome for Android have different version codes. + +For targets that have the same package name (e.g. Chrome, Chrome Modern, +Monochrome, Trichrome), Play Store considers them the same app and will push the +supported app with the highest version code to devices. Note that Play Store +does not support hosting two different apps with same version code and package +name. + +Each version code generated by this script will be used by one or more APKs. + +Webview channels must have unique version codes for a couple reasons: +a) Play Store does not support having the same version code for different + versions of a package. 
Without unique codes, promoting a beta apk to stable + would require first removing the beta version. +b) Firebase project support (used by official builders) requires unique + [version code + package name]. + We cannot add new webview package names for new channels because webview + packages are allowlisted by Android as webview providers. + +WEBVIEW_STABLE, WEBVIEW_BETA, WEBVIEW_DEV are all used for standalone webview, +whereas the others are used for various chrome APKs. + +Note that a package digit of '3' for Webview is reserved for Trichrome Webview. +The same versionCode is used for both Trichrome Chrome and Trichrome Webview. + +Version code values are constructed like this: + + {full BUILD number}{3 digits: PATCH}{1 digit: package}{1 digit: ABIs}. + +For example: + + Build 3721, patch 0, ChromeModern (1), on ARM64 (5): 372100015 + Build 3721, patch 9, Monochrome (2), on ARM (0): 372100920 + +""" + +# Package name version bits. +_PACKAGE_NAMES = { + 'CHROME': 0, + 'CHROME_MODERN': 10, + 'MONOCHROME': 20, + 'TRICHROME': 30, + 'WEBVIEW_STABLE': 0, + 'WEBVIEW_BETA': 10, + 'WEBVIEW_DEV': 20, +} + +""" "Next" builds get +5 on their package version code digit. + +We choose 5 because it won't conflict with values in _PACKAGE_NAMES. +""" +_NEXT_BUILD_VERSION_CODE_DIFF = 50 + +"""List of version numbers to be created for each build configuration. +Tuple format: + + (version code name), (package name), (supported ABIs) + +Here, (supported ABIs) is referring to the combination of browser ABI and +webview library ABI present in a particular APK. For example, 64_32 implies a +64-bit browser with an extra 32-bit Webview library. See also +_ABIS_TO_BIT_MASK. +""" +_APKS = { + '32': [ + ('CHROME', 'CHROME', '32'), + ('CHROME_MODERN', 'CHROME_MODERN', '32'), + ('MONOCHROME', 'MONOCHROME', '32'), + ('TRICHROME', 'TRICHROME', '32'), + ('WEBVIEW_STABLE', 'WEBVIEW_STABLE', '32'), + ('WEBVIEW_BETA', 'WEBVIEW_BETA', '32'), + ('WEBVIEW_DEV', 'WEBVIEW_DEV', '32'), + ], + '64': [ + ('CHROME', 'CHROME', '64'), + ('CHROME_MODERN', 'CHROME_MODERN', '64'), + ('MONOCHROME', 'MONOCHROME', '32_64'), + ('MONOCHROME_32', 'MONOCHROME', '32'), + ('MONOCHROME_32_64', 'MONOCHROME', '32_64'), + ('MONOCHROME_64_32', 'MONOCHROME', '64_32'), + ('MONOCHROME_64', 'MONOCHROME', '64'), + ('TRICHROME', 'TRICHROME', '32_64'), + ('TRICHROME_32', 'TRICHROME', '32'), + ('TRICHROME_32_64', 'TRICHROME', '32_64'), + ('TRICHROME_64_32', 'TRICHROME', '64_32'), + ('TRICHROME_64', 'TRICHROME', '64'), + ('WEBVIEW_STABLE', 'WEBVIEW_STABLE', '32_64'), + ('WEBVIEW_BETA', 'WEBVIEW_BETA', '32_64'), + ('WEBVIEW_DEV', 'WEBVIEW_DEV', '32_64'), + ('WEBVIEW_32_STABLE', 'WEBVIEW_STABLE', '32'), + ('WEBVIEW_32_BETA', 'WEBVIEW_BETA', '32'), + ('WEBVIEW_32_DEV', 'WEBVIEW_DEV', '32'), + ('WEBVIEW_64_STABLE', 'WEBVIEW_STABLE', '64'), + ('WEBVIEW_64_BETA', 'WEBVIEW_BETA', '64'), + ('WEBVIEW_64_DEV', 'WEBVIEW_DEV', '64'), + ] +} + +# Splits input build config architecture to manufacturer and bitness. +_ARCH_TO_MFG_AND_BITNESS = { + 'arm': ('arm', '32'), + 'arm64': ('arm', '64'), + 'x86': ('intel', '32'), + 'x64': ('intel', '64'), + 'mipsel': ('mipsel', '32'), +} + +# Expose the available choices to other scripts. +ARCH_CHOICES = _ARCH_TO_MFG_AND_BITNESS.keys() +""" +The architecture preference is encoded into the version_code for devices +that support multiple architectures. 
(exploiting play store logic that pushes +apk with highest version code) + +Detail: +Many Android devices support multiple architectures, and can run applications +built for any of them; the Play Store considers all of the supported +architectures compatible and does not, itself, have any preference for which +is "better". The common cases here: + +- All production arm64 devices can also run arm +- All production x64 devices can also run x86 +- Pretty much all production x86/x64 devices can also run arm (via a binary + translator) + +Since the Play Store has no particular preferences, you have to encode your own +preferences into the ordering of the version codes. There's a few relevant +things here: + +- For any android app, it's theoretically preferable to ship a 64-bit version to + 64-bit devices if it exists, because the 64-bit architectures are supposed to + be "better" than their 32-bit predecessors (unfortunately this is not always + true due to the effect on memory usage, but we currently deal with this by + simply not shipping a 64-bit version *at all* on the configurations where we + want the 32-bit version to be used). +- For any android app, it's definitely preferable to ship an x86 version to x86 + devices if it exists instead of an arm version, because running things through + the binary translator is a performance hit. +- For WebView, Monochrome, and Trichrome specifically, they are a special class + of APK called "multiarch" which means that they actually need to *use* more + than one architecture at runtime (rather than simply being compatible with + more than one). The 64-bit builds of these multiarch APKs contain both 32-bit + and 64-bit code, so that Webview is available for both ABIs. If you're + multiarch you *must* have a version that supports both 32-bit and 64-bit + version on a 64-bit device, otherwise it won't work properly. So, the 64-bit + version needs to be a higher versionCode, as otherwise a 64-bit device would + prefer the 32-bit version that does not include any 64-bit code, and fail. +- The relative order of mips isn't important, but it needs to be a *distinct* + value to the other architectures because all builds need unique version codes. +""" +_ABIS_TO_BIT_MASK = { + 'arm': { + '32': 0, + '32_64': 3, + '64_32': 4, + '64': 5, + }, + 'intel': { + '32': 1, + '32_64': 6, + '64_32': 7, + '64': 8, + }, + 'mipsel': { + '32': 2, + } +} + +def GenerateVersionCodes(version_values, arch, is_next_build): + """Build dict of version codes for the specified build architecture. Eg: + + { + 'CHROME_VERSION_CODE': '378100010', + 'MONOCHROME_VERSION_CODE': '378100013', + ... + } + + versionCode values are built like this: + {full BUILD int}{3 digits: PATCH}{1 digit: package}{1 digit: ABIs}. + + MAJOR and MINOR values are not used for generating versionCode. + - MINOR is always 0. It was used for something long ago in Chrome's history + but has not been used since, and has never been nonzero on Android. + - MAJOR is cosmetic and controlled by the release managers. MAJOR and BUILD + always have reasonable sort ordering: for two version codes A and B, it's + always the case that (A.MAJOR < B.MAJOR) implies (A.BUILD < B.BUILD), and + that (A.MAJOR > B.MAJOR) implies (A.BUILD > B.BUILD). This property is just + maintained by the humans who set MAJOR. + + Thus, this method is responsible for the final two digits of versionCode. 
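As a quick, hedged illustration of the layout described above (not part of the patch): the helper below reproduces the {BUILD}{PATCH}{package}{ABI} arithmetic using the worked examples from the module comment; the package and ABI offsets correspond to the _PACKAGE_NAMES and _ABIS_TO_BIT_MASK tables.

```python
# Minimal sketch of the versionCode layout described above; not part of the patch.
def sketch_version_code(build, patch, package_offset, abi_bits):
    # BUILD, then zero-padded 3-digit PATCH, then two digits reserved for
    # the package offset (tens) and the ABI bits (ones).
    base = int('%s%03d00' % (build, patch))
    return base + package_offset + abi_bits

# Build 3721, patch 0, ChromeModern (offset 10), arm64 (ABI bits 5) -> 372100015
assert sketch_version_code('3721', 0, 10, 5) == 372100015
# Build 3721, patch 9, Monochrome (offset 20), 32-bit arm (ABI bits 0) -> 372100920
assert sketch_version_code('3721', 9, 20, 0) == 372100920
```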
+ """ + + base_version_code = int( + '%s%03d00' % (version_values['BUILD'], int(version_values['PATCH']))) + + if is_next_build: + base_version_code += _NEXT_BUILD_VERSION_CODE_DIFF + + mfg, bitness = _ARCH_TO_MFG_AND_BITNESS[arch] + + version_codes = {} + + for apk, package, abis in _APKS[bitness]: + abi_bits = _ABIS_TO_BIT_MASK[mfg][abis] + package_bits = _PACKAGE_NAMES[package] + + version_code_name = apk + '_VERSION_CODE' + version_code_val = base_version_code + abi_bits + package_bits + version_codes[version_code_name] = str(version_code_val) + + return version_codes diff --git a/third_party/libwebrtc/build/util/android_chrome_version_test.py b/third_party/libwebrtc/build/util/android_chrome_version_test.py new file mode 100644 index 0000000000..eed77488cc --- /dev/null +++ b/third_party/libwebrtc/build/util/android_chrome_version_test.py @@ -0,0 +1,308 @@ +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import unittest + +from android_chrome_version import GenerateVersionCodes + + +class _VersionTest(unittest.TestCase): + """Unittests for the android_chrome_version module. + """ + + EXAMPLE_VERSION_VALUES = { + 'MAJOR': '74', + 'MINOR': '0', + 'BUILD': '3720', + 'PATCH': '0', + } + + def testGenerateVersionCodesAndroidChrome(self): + """Assert it gives correct values for standard/example inputs""" + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + + chrome_version_code = output['CHROME_VERSION_CODE'] + + self.assertEqual(chrome_version_code, '372000000') + + def testGenerateVersionCodesAndroidChromeModern(self): + """Assert it gives correct values for standard/example inputs""" + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + + chrome_modern_version_code = output['CHROME_MODERN_VERSION_CODE'] + + self.assertEqual(chrome_modern_version_code, '372000010') + + def testGenerateVersionCodesAndroidMonochrome(self): + """Assert it gives correct values for standard/example inputs""" + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + + monochrome_version_code = output['MONOCHROME_VERSION_CODE'] + + self.assertEqual(monochrome_version_code, '372000020') + + def testGenerateVersionCodesAndroidTrichrome(self): + """Assert it gives correct values for standard/example inputs""" + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + + trichrome_version_code = output['TRICHROME_VERSION_CODE'] + + self.assertEqual(trichrome_version_code, '372000030') + + def testGenerateVersionCodesAndroidWebviewStable(self): + """Assert it gives correct values for standard/example inputs""" + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + + webview_stable_version_code = output['WEBVIEW_STABLE_VERSION_CODE'] + + self.assertEqual(webview_stable_version_code, '372000000') + + def testGenerateVersionCodesAndroidWebviewBeta(self): + """Assert it gives correct values for standard/example inputs""" + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + + webview_beta_version_code = output['WEBVIEW_BETA_VERSION_CODE'] + + self.assertEqual(webview_beta_version_code, '372000010') + + def testGenerateVersionCodesAndroidWebviewDev(self): + """Assert it gives correct values for standard/example inputs""" + output = 
GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + + webview_dev_version_code = output['WEBVIEW_DEV_VERSION_CODE'] + + self.assertEqual(webview_dev_version_code, '372000020') + + def testGenerateVersionCodesAndroidNextBuild(self): + """Assert it handles "next" builds correctly""" + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=True) + + # Get just a sample of values + chrome_version_code = output['CHROME_VERSION_CODE'] + monochrome_version_code = output['MONOCHROME_VERSION_CODE'] + webview_stable_version_code = output['WEBVIEW_STABLE_VERSION_CODE'] + webview_beta_version_code = output['WEBVIEW_BETA_VERSION_CODE'] + + self.assertEqual(chrome_version_code, '372000050') + self.assertEqual(monochrome_version_code, '372000070') + self.assertEqual(webview_stable_version_code, '372000050') + self.assertEqual(webview_beta_version_code, '372000060') + + def testGenerateVersionCodesAndroidArchArm(self): + """Assert it handles different architectures correctly. + + Version codes for different builds need to be distinct and maintain a + certain ordering. + See docs in android_chrome_version._ABIS_TO_BIT_MASK for + reasoning. + """ + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + arch_chrome_version_code = output['CHROME_VERSION_CODE'] + + self.assertEqual(arch_chrome_version_code, '372000000') + + def testGenerateVersionCodesAndroidArchX86(self): + """Assert it handles different architectures correctly. + + Version codes for different builds need to be distinct and maintain a + certain ordering. + See docstring on android_chrome_version._ABIS_TO_BIT_MASK for + reasoning. + """ + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='x86', is_next_build=False) + arch_chrome_version_code = output['CHROME_VERSION_CODE'] + + self.assertEqual(arch_chrome_version_code, '372000001') + + def testGenerateVersionCodesAndroidArchMips(self): + """Assert it handles different architectures correctly. + + Version codes for different builds need to be distinct and maintain a + certain ordering. + See docstring on android_chrome_version._ABIS_TO_BIT_MASK for + reasoning. + """ + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='mipsel', is_next_build=False) + arch_chrome_version_code = output['CHROME_VERSION_CODE'] + + self.assertEqual(arch_chrome_version_code, '372000002') + + def testGenerateVersionCodesAndroidArchArm64(self): + """Assert it handles different architectures correctly. + + Version codes for different builds need to be distinct and maintain a + certain ordering. + See docstring on android_chrome_version._ABIS_TO_BIT_MASK for + reasoning. + """ + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm64', is_next_build=False) + arch_chrome_version_code = output['CHROME_VERSION_CODE'] + + self.assertEqual(arch_chrome_version_code, '372000005') + + def testGenerateVersionCodesAndroidArchArm64Variants(self): + """Assert it handles 64-bit-specific additional version codes correctly. + + Some additional version codes are generated for 64-bit architectures. + See docstring on android_chrome_version.ARCH64_APK_VARIANTS for more info. 
+ """ + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm64', is_next_build=False) + arch_monochrome_version_code = output['MONOCHROME_VERSION_CODE'] + arch_monochrome_32_version_code = output['MONOCHROME_32_VERSION_CODE'] + arch_monochrome_32_64_version_code = output['MONOCHROME_32_64_VERSION_CODE'] + arch_monochrome_64_32_version_code = output['MONOCHROME_64_32_VERSION_CODE'] + arch_monochrome_64_version_code = output['MONOCHROME_64_VERSION_CODE'] + arch_trichrome_version_code = output['TRICHROME_VERSION_CODE'] + arch_trichrome_32_version_code = output['TRICHROME_32_VERSION_CODE'] + arch_trichrome_32_64_version_code = output['TRICHROME_32_64_VERSION_CODE'] + arch_trichrome_64_32_version_code = output['TRICHROME_64_32_VERSION_CODE'] + arch_trichrome_64_version_code = output['TRICHROME_64_VERSION_CODE'] + + self.assertEqual(arch_monochrome_32_version_code, '372000020') + self.assertEqual(arch_monochrome_32_64_version_code, '372000023') + self.assertEqual(arch_monochrome_version_code, '372000023') + self.assertEqual(arch_monochrome_64_32_version_code, '372000024') + self.assertEqual(arch_monochrome_64_version_code, '372000025') + self.assertEqual(arch_trichrome_32_version_code, '372000030') + self.assertEqual(arch_trichrome_32_64_version_code, '372000033') + self.assertEqual(arch_trichrome_version_code, '372000033') + self.assertEqual(arch_trichrome_64_32_version_code, '372000034') + self.assertEqual(arch_trichrome_64_version_code, '372000035') + + def testGenerateVersionCodesAndroidArchX64(self): + """Assert it handles different architectures correctly. + + Version codes for different builds need to be distinct and maintain a + certain ordering. + See docstring on android_chrome_version._ABIS_TO_BIT_MASK for + reasoning. + """ + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='x64', is_next_build=False) + arch_chrome_version_code = output['CHROME_VERSION_CODE'] + + self.assertEqual(arch_chrome_version_code, '372000008') + + def testGenerateVersionCodesAndroidArchX64Variants(self): + """Assert it handles 64-bit-specific additional version codes correctly. + + Some additional version codes are generated for 64-bit architectures. + See docstring on android_chrome_version.ARCH64_APK_VARIANTS for more info. 
+ """ + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='x64', is_next_build=False) + arch_monochrome_32_version_code = output['MONOCHROME_32_VERSION_CODE'] + arch_monochrome_32_64_version_code = output['MONOCHROME_32_64_VERSION_CODE'] + arch_monochrome_version_code = output['MONOCHROME_VERSION_CODE'] + arch_monochrome_64_32_version_code = output['MONOCHROME_64_32_VERSION_CODE'] + arch_monochrome_64_version_code = output['MONOCHROME_64_VERSION_CODE'] + arch_trichrome_32_version_code = output['TRICHROME_32_VERSION_CODE'] + arch_trichrome_32_64_version_code = output['TRICHROME_32_64_VERSION_CODE'] + arch_trichrome_version_code = output['TRICHROME_VERSION_CODE'] + arch_trichrome_64_32_version_code = output['TRICHROME_64_32_VERSION_CODE'] + arch_trichrome_64_version_code = output['TRICHROME_64_VERSION_CODE'] + + self.assertEqual(arch_monochrome_32_version_code, '372000021') + self.assertEqual(arch_monochrome_32_64_version_code, '372000026') + self.assertEqual(arch_monochrome_version_code, '372000026') + self.assertEqual(arch_monochrome_64_32_version_code, '372000027') + self.assertEqual(arch_monochrome_64_version_code, '372000028') + self.assertEqual(arch_trichrome_32_version_code, '372000031') + self.assertEqual(arch_trichrome_32_64_version_code, '372000036') + self.assertEqual(arch_trichrome_version_code, '372000036') + self.assertEqual(arch_trichrome_64_32_version_code, '372000037') + self.assertEqual(arch_trichrome_64_version_code, '372000038') + + def testGenerateVersionCodesAndroidArchOrderArm(self): + """Assert it handles different architectures correctly. + + Version codes for different builds need to be distinct and maintain a + certain ordering. + See docstring on android_chrome_version._ABIS_TO_BIT_MASK for + reasoning. + + Test arm-related values. + """ + arm_output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + arm64_output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm64', is_next_build=False) + + arm_chrome_version_code = arm_output['CHROME_VERSION_CODE'] + arm64_chrome_version_code = arm64_output['CHROME_VERSION_CODE'] + + self.assertLess(arm_chrome_version_code, arm64_chrome_version_code) + + def testGenerateVersionCodesAndroidArchOrderX86(self): + """Assert it handles different architectures correctly. + + Version codes for different builds need to be distinct and maintain a + certain ordering. + See docstring on android_chrome_version._ABIS_TO_BIT_MASK for + reasoning. + + Test x86-related values. + """ + x86_output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='x86', is_next_build=False) + x64_output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='x64', is_next_build=False) + + x86_chrome_version_code = x86_output['CHROME_VERSION_CODE'] + x64_chrome_version_code = x64_output['CHROME_VERSION_CODE'] + + self.assertLess(x86_chrome_version_code, x64_chrome_version_code) + + def testGenerateVersionCodesAndroidWebviewChannelOrderBeta(self): + """Assert webview beta channel is higher than stable. + + The channel-specific version codes for standalone webview needs to follow + the order stable < beta < dev. + + This allows that if a user opts into beta track, they will always have the + beta apk, including any finch experiments targeted at beta users, even when + beta and stable channels are otherwise on the same version. 
+ """ + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + + webview_stable_version_code = output['WEBVIEW_STABLE_VERSION_CODE'] + webview_beta_version_code = output['WEBVIEW_BETA_VERSION_CODE'] + + self.assertGreater(webview_beta_version_code, webview_stable_version_code) + + def testGenerateVersionCodesAndroidWebviewChannelOrderDev(self): + """Assert webview dev channel is higher than beta. + + The channel-specific version codes for standalone webview needs to follow + the order stable < beta < dev. + + This allows that if a user opts into dev track, they will always have the + dev apk, including any finch experiments targeted at dev users, even when + dev and beta channels are otherwise on the same version. + """ + output = GenerateVersionCodes( + self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False) + + webview_beta_version_code = output['WEBVIEW_BETA_VERSION_CODE'] + webview_dev_version_code = output['WEBVIEW_DEV_VERSION_CODE'] + + self.assertGreater(webview_dev_version_code, webview_beta_version_code) + + +if __name__ == '__main__': + unittest.main() diff --git a/third_party/libwebrtc/build/util/branding.gni b/third_party/libwebrtc/build/util/branding.gni new file mode 100644 index 0000000000..aa758e6a0e --- /dev/null +++ b/third_party/libwebrtc/build/util/branding.gni @@ -0,0 +1,45 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This exposes the Chrome branding as GN variables for use in build files. +# +# PREFER NOT TO USE THESE. The GYP build uses this kind of thing extensively. +# However, it is far better to write an action to generate a file at +# build-time with the information you need. This allows better dependency +# checking and GN will run faster. +# +# These values should only be used if you REALLY need to depend on them at +# build-time, for example, in the computation of output file names. + +import("//build/config/chrome_build.gni") + +_branding_dictionary_template = + "full_name = \"@PRODUCT_FULLNAME@\" " + + "short_name = \"@PRODUCT_SHORTNAME@\" " + + "bundle_id = \"@MAC_BUNDLE_ID@\" " + + "creator_code = \"@MAC_CREATOR_CODE@\" " + + "installer_full_name = \"@PRODUCT_INSTALLER_FULLNAME@\" " + + "installer_short_name = \"@PRODUCT_INSTALLER_SHORTNAME@\" " + + "team_id = \"@MAC_TEAM_ID@\" " + +_result = exec_script("version.py", + [ + "-f", + rebase_path(branding_file_path, root_build_dir), + "-t", + _branding_dictionary_template, + ], + "scope", + [ branding_file_path ]) + +chrome_product_full_name = _result.full_name +chrome_product_short_name = _result.short_name +chrome_product_installer_full_name = _result.installer_full_name +chrome_product_installer_short_name = _result.installer_short_name + +if (is_mac) { + chrome_mac_bundle_id = _result.bundle_id + chrome_mac_creator_code = _result.creator_code + chrome_mac_team_id = _result.team_id +} diff --git a/third_party/libwebrtc/build/util/chromium_git_revision.h.in b/third_party/libwebrtc/build/util/chromium_git_revision.h.in new file mode 100644 index 0000000000..41c115979c --- /dev/null +++ b/third_party/libwebrtc/build/util/chromium_git_revision.h.in @@ -0,0 +1,8 @@ +// Copyright 2021 The Chromium Authors. All rights reserved. +// Use of this source is governed by a BSD-style license that can be +// found in the LICENSE file. + +// chromium_git_revision.h is generated from chromium_git_revision.h.in. Edit +// the source! 
+ +#define CHROMIUM_GIT_REVISION "@@LASTCHANGE@" diff --git a/third_party/libwebrtc/build/util/generate_wrapper.gni b/third_party/libwebrtc/build/util/generate_wrapper.gni new file mode 100644 index 0000000000..02e8bca1ff --- /dev/null +++ b/third_party/libwebrtc/build/util/generate_wrapper.gni @@ -0,0 +1,105 @@ +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Wraps a target and any of its arguments to an executable script. +# +# Many executable targets have build-time-constant arguments. This +# template allows those to be wrapped into a single, user- or bot-friendly +# script at build time. +# +# Paths to be wrapped should be relative to root_build_dir and should be +# wrapped in "@WrappedPath(...)"; see Example below. +# +# Variables: +# generator_script: Path to the script to use to perform the wrapping. +# Defaults to //build/util/generate_wrapper.py. Generally should only +# be set by other templates. +# wrapper_script: Output path. +# executable: Path to the executable to wrap. Can be a script or a +# build product. Paths can be relative to the containing gn file +# or source-absolute. +# executable_args: List of arguments to write into the wrapper. +# use_vpython3: If true, invoke the generated wrapper with vpython3 instead +# of vpython. +# +# Example wrapping a checked-in script: +# generate_wrapper("sample_wrapper") { +# executable = "//for/bar/sample.py" +# wrapper_script = "$root_build_dir/bin/run_sample" +# +# _sample_argument_path = "//sample/$target_cpu/lib/sample_lib.so" +# _rebased_sample_argument_path = rebase_path( +# _sample_argument_path, +# root_build_dir) +# executable_args = [ +# "--sample-lib", "@WrappedPath(${_rebased_sample_argument_path})", +# ] +# } +# +# Example wrapping a build product: +# generate_wrapper("sample_wrapper") { +# executable = "$root_build_dir/sample_build_product" +# wrapper_script = "$root_build_dir/bin/run_sample_build_product" +# } +template("generate_wrapper") { + _generator_script = "//build/util/generate_wrapper.py" + if (defined(invoker.generator_script)) { + _generator_script = invoker.generator_script + } + _executable_to_wrap = invoker.executable + _wrapper_script = invoker.wrapper_script + if (is_win) { + _wrapper_script += ".bat" + } + if (defined(invoker.executable_args)) { + _wrapped_arguments = invoker.executable_args + } else { + _wrapped_arguments = [] + } + + action(target_name) { + forward_variables_from(invoker, + TESTONLY_AND_VISIBILITY + [ + "data", + "data_deps", + "deps", + "sources", + ]) + script = _generator_script + if (!defined(data)) { + data = [] + } + data += [ _wrapper_script ] + outputs = [ _wrapper_script ] + + _rebased_executable_to_wrap = + rebase_path(_executable_to_wrap, root_build_dir) + _rebased_wrapper_script = rebase_path(_wrapper_script, root_build_dir) + if (is_win) { + _script_language = "batch" + } else { + _script_language = "bash" + } + args = [ + "--executable", + "@WrappedPath(${_rebased_executable_to_wrap})", + "--wrapper-script", + _rebased_wrapper_script, + "--output-directory", + rebase_path(root_build_dir, root_build_dir), + "--script-language", + _script_language, + ] + + if (defined(invoker.use_vpython3) && invoker.use_vpython3) { + args += [ "--use-vpython3" ] + } + args += [ "--" ] + args += _wrapped_arguments + + if (defined(invoker.write_runtime_deps)) { + write_runtime_deps = invoker.write_runtime_deps + } + } +} diff --git 
a/third_party/libwebrtc/build/util/generate_wrapper.py b/third_party/libwebrtc/build/util/generate_wrapper.py new file mode 100755 index 0000000000..07167e8655 --- /dev/null +++ b/third_party/libwebrtc/build/util/generate_wrapper.py @@ -0,0 +1,217 @@ +#!/usr/bin/env vpython +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Wraps an executable and any provided arguments into an executable script.""" + +import argparse +import os +import sys +import textwrap + + +# The bash template passes the python script into vpython via stdin. +# The interpreter doesn't know about the script, so we have bash +# inject the script location. +BASH_TEMPLATE = textwrap.dedent("""\ + #!/usr/bin/env {vpython} + _SCRIPT_LOCATION = __file__ + {script} + """) + + +# The batch template reruns the batch script with vpython, with the -x +# flag instructing the interpreter to ignore the first line. The interpreter +# knows about the (batch) script in this case, so it can get the file location +# directly. +BATCH_TEMPLATE = textwrap.dedent("""\ + @SETLOCAL ENABLEDELAYEDEXPANSION \ + & {vpython}.bat -x "%~f0" %* \ + & EXIT /B !ERRORLEVEL! + _SCRIPT_LOCATION = __file__ + {script} + """) + + +SCRIPT_TEMPLATES = { + 'bash': BASH_TEMPLATE, + 'batch': BATCH_TEMPLATE, +} + + +PY_TEMPLATE = textwrap.dedent("""\ + import os + import re + import subprocess + import sys + + _WRAPPED_PATH_RE = re.compile(r'@WrappedPath\(([^)]+)\)') + _PATH_TO_OUTPUT_DIR = '{path_to_output_dir}' + _SCRIPT_DIR = os.path.dirname(os.path.realpath(_SCRIPT_LOCATION)) + + + def ExpandWrappedPath(arg): + m = _WRAPPED_PATH_RE.match(arg) + if m: + relpath = os.path.join( + os.path.relpath(_SCRIPT_DIR), _PATH_TO_OUTPUT_DIR, m.group(1)) + npath = os.path.normpath(relpath) + if os.path.sep not in npath: + # If the original path points to something in the current directory, + # returning the normalized version of it can be a problem. + # normpath() strips off the './' part of the path + # ('./foo' becomes 'foo'), which can be a problem if the result + # is passed to something like os.execvp(); in that case + # osexecvp() will search $PATH for the executable, rather than + # just execing the arg directly, and if '.' isn't in $PATH, this + # results in an error. + # + # So, we need to explicitly return './foo' (or '.\\foo' on windows) + # instead of 'foo'. + # + # Hopefully there are no cases where this causes a problem; if + # there are, we will either need to change the interface to + # WrappedPath() somehow to distinguish between the two, or + # somehow ensure that the wrapped executable doesn't hit cases + # like this. + return '.' 
+ os.path.sep + npath + return npath + return arg + + + def ExpandWrappedPaths(args): + for i, arg in enumerate(args): + args[i] = ExpandWrappedPath(arg) + return args + + + def FindIsolatedOutdir(raw_args): + outdir = None + i = 0 + remaining_args = [] + while i < len(raw_args): + if raw_args[i] == '--isolated-outdir' and i < len(raw_args)-1: + outdir = raw_args[i+1] + i += 2 + elif raw_args[i].startswith('--isolated-outdir='): + outdir = raw_args[i][len('--isolated-outdir='):] + i += 1 + else: + remaining_args.append(raw_args[i]) + i += 1 + if not outdir and 'ISOLATED_OUTDIR' in os.environ: + outdir = os.environ['ISOLATED_OUTDIR'] + return outdir, remaining_args + + + def FilterIsolatedOutdirBasedArgs(outdir, args): + rargs = [] + i = 0 + while i < len(args): + if 'ISOLATED_OUTDIR' in args[i]: + if outdir: + # Rewrite the arg. + rargs.append(args[i].replace('${{ISOLATED_OUTDIR}}', + outdir).replace( + '$ISOLATED_OUTDIR', outdir)) + i += 1 + else: + # Simply drop the arg. + i += 1 + elif (not outdir and + args[i].startswith('-') and + '=' not in args[i] and + i < len(args) - 1 and + 'ISOLATED_OUTDIR' in args[i+1]): + # Parsing this case is ambiguous; if we're given + # `--foo $ISOLATED_OUTDIR` we can't tell if $ISOLATED_OUTDIR + # is meant to be the value of foo, or if foo takes no argument + # and $ISOLATED_OUTDIR is the first positional arg. + # + # We assume the former will be much more common, and so we + # need to drop --foo and $ISOLATED_OUTDIR. + i += 2 + else: + rargs.append(args[i]) + i += 1 + return rargs + + + def main(raw_args): + executable_path = ExpandWrappedPath('{executable_path}') + outdir, remaining_args = FindIsolatedOutdir(raw_args) + args = {executable_args} + args = FilterIsolatedOutdirBasedArgs(outdir, args) + executable_args = ExpandWrappedPaths(args) + cmd = [executable_path] + args + remaining_args + if executable_path.endswith('.py'): + cmd = [sys.executable] + cmd + return subprocess.call(cmd) + + + if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) + """) + + +def Wrap(args): + """Writes a wrapped script according to the provided arguments. + + Arguments: + args: an argparse.Namespace object containing command-line arguments + as parsed by a parser returned by CreateArgumentParser. 
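For context on the @WrappedPath(...) convention handled above, here is a stand-alone sketch (not part of the patch) of the expansion the generated wrapper performs; the directory names in the example are invented, and the real template uses the module-level _SCRIPT_DIR and _PATH_TO_OUTPUT_DIR rather than parameters.

```python
import os
import re

_WRAPPED_PATH_RE = re.compile(r'@WrappedPath\(([^)]+)\)')

def expand_wrapped_path(arg, script_dir, path_to_output_dir):
    # Stand-alone version of the template's ExpandWrappedPath, for illustration.
    m = _WRAPPED_PATH_RE.match(arg)
    if not m:
        return arg
    relpath = os.path.join(os.path.relpath(script_dir), path_to_output_dir, m.group(1))
    npath = os.path.normpath(relpath)
    # Keep an explicit './' so a bare filename is exec'd directly rather than
    # searched for on $PATH.
    return npath if os.path.sep in npath else '.' + os.path.sep + npath

# e.g. a wrapper in out/Default/bin whose recorded output directory is '..':
print(expand_wrapped_path('@WrappedPath(obj/sample_lib.so)', 'out/Default/bin', '..'))
# -> out/Default/obj/sample_lib.so (when run from the source root)
```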
+ """ + path_to_output_dir = os.path.relpath( + args.output_directory, + os.path.dirname(args.wrapper_script)) + + with open(args.wrapper_script, 'w') as wrapper_script: + py_contents = PY_TEMPLATE.format( + path_to_output_dir=path_to_output_dir, + executable_path=str(args.executable), + executable_args=str(args.executable_args)) + template = SCRIPT_TEMPLATES[args.script_language] + wrapper_script.write( + template.format(script=py_contents, vpython=args.vpython)) + os.chmod(args.wrapper_script, 0o750) + + return 0 + + +def CreateArgumentParser(): + """Creates an argparse.ArgumentParser instance.""" + parser = argparse.ArgumentParser() + parser.add_argument( + '--executable', + help='Executable to wrap.') + parser.add_argument( + '--wrapper-script', + help='Path to which the wrapper script will be written.') + parser.add_argument( + '--output-directory', + help='Path to the output directory.') + parser.add_argument( + '--script-language', + choices=SCRIPT_TEMPLATES.keys(), + help='Language in which the wrapper script will be written.') + parser.add_argument('--use-vpython3', + dest='vpython', + action='store_const', + const='vpython3', + default='vpython', + help='Use vpython3 instead of vpython') + parser.add_argument( + 'executable_args', nargs='*', + help='Arguments to wrap into the executable.') + return parser + + +def main(raw_args): + parser = CreateArgumentParser() + args = parser.parse_args(raw_args) + return Wrap(args) + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/third_party/libwebrtc/build/util/java_action.gni b/third_party/libwebrtc/build/util/java_action.gni new file mode 100644 index 0000000000..0615b38782 --- /dev/null +++ b/third_party/libwebrtc/build/util/java_action.gni @@ -0,0 +1,99 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +jarrunner = "//build/util/java_action.py" + +# Declare a target that runs a java command a single time. +# +# This target type allows you to run a java command a single time to produce +# one or more output files. If you want to run a java command for each of a +# set of input files, see "java_action_foreach". +# +# See "gn help action" for more information on how to use this target. This +# template is based on the "action" and supports the same variables. +template("java_action") { + assert(defined(invoker.script), + "Need script in $target_name listing the .jar file to run.") + assert(defined(invoker.outputs), + "Need outputs in $target_name listing the generated outputs.") + + jarscript = invoker.script + action(target_name) { + script = jarrunner + + inputs = [ jarscript ] + if (defined(invoker.inputs)) { + inputs += invoker.inputs + } + + args = [ + "-jar", + rebase_path(jarscript, root_build_dir), + ] + if (defined(invoker.args)) { + args += invoker.args + } + + forward_variables_from(invoker, + [ + "console", + "data", + "data_deps", + "depfile", + "deps", + "outputs", + "sources", + "testonly", + "visibility", + ]) + } +} + +# Declare a target that runs a java command over a set of files. +# +# This target type allows you to run a java command once-per-file over a set of +# sources. If you want to run a java command once that takes many files as +# input, see "java_action". +# +# See "gn help action_foreach" for more information on how to use this target. +# This template is based on the "action_foreach" supports the same variables. 
+template("java_action_foreach") { + assert(defined(invoker.script), + "Need script in $target_name listing the .jar file to run.") + assert(defined(invoker.outputs), + "Need outputs in $target_name listing the generated outputs.") + assert(defined(invoker.sources), + "Need sources in $target_name listing the target inputs.") + + jarscript = invoker.script + action_foreach(target_name) { + script = jarrunner + + inputs = [ jarscript ] + if (defined(invoker.inputs)) { + inputs += invoker.inputs + } + + args = [ + "-jar", + rebase_path(jarscript, root_build_dir), + ] + if (defined(invoker.args)) { + args += invoker.args + } + + forward_variables_from(invoker, + [ + "console", + "data", + "data_deps", + "depfile", + "deps", + "outputs", + "sources", + "testonly", + "visibility", + ]) + } +} diff --git a/third_party/libwebrtc/build/util/java_action.py b/third_party/libwebrtc/build/util/java_action.py new file mode 100755 index 0000000000..ed9bb601de --- /dev/null +++ b/third_party/libwebrtc/build/util/java_action.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Wrapper script to run java command as action with gn.""" + +import os +import subprocess +import sys + +EXIT_SUCCESS = 0 +EXIT_FAILURE = 1 + + +def IsExecutable(path): + """Returns whether file at |path| exists and is executable. + + Args: + path: absolute or relative path to test. + + Returns: + True if the file at |path| exists, False otherwise. + """ + return os.path.isfile(path) and os.access(path, os.X_OK) + + +def FindCommand(command): + """Looks up for |command| in PATH. + + Args: + command: name of the command to lookup, if command is a relative or + absolute path (i.e. contains some path separator) then only that + path will be tested. + + Returns: + Full path to command or None if the command was not found. + + On Windows, this respects the PATHEXT environment variable when the + command name does not have an extension. + """ + fpath, _ = os.path.split(command) + if fpath: + if IsExecutable(command): + return command + + if sys.platform == 'win32': + # On Windows, if the command does not have an extension, cmd.exe will + # try all extensions from PATHEXT when resolving the full path. + command, ext = os.path.splitext(command) + if not ext: + exts = os.environ['PATHEXT'].split(os.path.pathsep) + else: + exts = [ext] + else: + exts = [''] + + for path in os.environ['PATH'].split(os.path.pathsep): + for ext in exts: + path = os.path.join(path, command) + ext + if IsExecutable(path): + return path + + return None + + +def main(): + java_path = FindCommand('java') + if not java_path: + sys.stderr.write('java: command not found\n') + sys.exit(EXIT_FAILURE) + + args = sys.argv[1:] + if len(args) < 2 or args[0] != '-jar': + sys.stderr.write('usage: %s -jar JARPATH [java_args]...\n' % sys.argv[0]) + sys.exit(EXIT_FAILURE) + + return subprocess.check_call([java_path] + args) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/third_party/libwebrtc/build/util/lastchange.gni b/third_party/libwebrtc/build/util/lastchange.gni new file mode 100644 index 0000000000..a13295900d --- /dev/null +++ b/third_party/libwebrtc/build/util/lastchange.gni @@ -0,0 +1,16 @@ +# Copyright 2018 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +# This file is used to inject fixed dummy commit for commit independent +# reproducible binaries. + +declare_args() { + use_dummy_lastchange = false +} + +if (use_dummy_lastchange) { + lastchange_file = "//build/util/LASTCHANGE.dummy" +} else { + lastchange_file = "//build/util/LASTCHANGE" +} diff --git a/third_party/libwebrtc/build/util/lastchange.py b/third_party/libwebrtc/build/util/lastchange.py new file mode 100755 index 0000000000..02a36642b1 --- /dev/null +++ b/third_party/libwebrtc/build/util/lastchange.py @@ -0,0 +1,332 @@ +#!/usr/bin/env python +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +lastchange.py -- Chromium revision fetching utility. +""" +from __future__ import print_function + +import argparse +import collections +import datetime +import logging +import os +import subprocess +import sys + +VersionInfo = collections.namedtuple("VersionInfo", + ("revision_id", "revision", "timestamp")) + +class GitError(Exception): + pass + +# This function exists for compatibility with logic outside this +# repository that uses this file as a library. +# TODO(eliribble) remove this function after it has been ported into +# the repositories that depend on it +def RunGitCommand(directory, command): + """ + Launches git subcommand. + + Errors are swallowed. + + Returns: + A process object or None. + """ + command = ['git'] + command + # Force shell usage under cygwin. This is a workaround for + # mysterious loss of cwd while invoking cygwin's git. + # We can't just pass shell=True to Popen, as under win32 this will + # cause CMD to be used, while we explicitly want a cygwin shell. + if sys.platform == 'cygwin': + command = ['sh', '-c', ' '.join(command)] + try: + proc = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=directory, + shell=(sys.platform=='win32')) + return proc + except OSError as e: + logging.error('Command %r failed: %s' % (' '.join(command), e)) + return None + + +def _RunGitCommand(directory, command): + """Launches git subcommand. + + Returns: + The stripped stdout of the git command. + Raises: + GitError on failure, including a nonzero return code. + """ + command = ['git'] + command + # Force shell usage under cygwin. This is a workaround for + # mysterious loss of cwd while invoking cygwin's git. + # We can't just pass shell=True to Popen, as under win32 this will + # cause CMD to be used, while we explicitly want a cygwin shell. + if sys.platform == 'cygwin': + command = ['sh', '-c', ' '.join(command)] + try: + logging.info("Executing '%s' in %s", ' '.join(command), directory) + proc = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=directory, + shell=(sys.platform=='win32')) + stdout, stderr = tuple(x.decode(encoding='utf_8') + for x in proc.communicate()) + stdout = stdout.strip() + logging.debug("returncode: %d", proc.returncode) + logging.debug("stdout: %s", stdout) + logging.debug("stderr: %s", stderr) + if proc.returncode != 0 or not stdout: + raise GitError(( + "Git command '{}' in {} failed: " + "rc={}, stdout='{}' stderr='{}'").format( + " ".join(command), directory, proc.returncode, stdout, stderr)) + return stdout + except OSError as e: + raise GitError("Git command 'git {}' in {} failed: {}".format( + " ".join(command), directory, e)) + + +def GetMergeBase(directory, ref): + """ + Return the merge-base of HEAD and ref. 
+ + Args: + directory: The directory containing the .git directory. + ref: The ref to use to find the merge base. + Returns: + The git commit SHA of the merge-base as a string. + """ + logging.debug("Calculating merge base between HEAD and %s in %s", + ref, directory) + command = ['merge-base', 'HEAD', ref] + return _RunGitCommand(directory, command) + + +def FetchGitRevision(directory, commit_filter, start_commit="HEAD"): + """ + Fetch the Git hash (and Cr-Commit-Position if any) for a given directory. + + Args: + directory: The directory containing the .git directory. + commit_filter: A filter to supply to grep to filter commits + start_commit: A commit identifier. The result of this function + will be limited to only consider commits before the provided + commit. + Returns: + A VersionInfo object. On error all values will be 0. + """ + hash_ = '' + + git_args = ['log', '-1', '--format=%H %ct'] + if commit_filter is not None: + git_args.append('--grep=' + commit_filter) + + git_args.append(start_commit) + + output = _RunGitCommand(directory, git_args) + hash_, commit_timestamp = output.split() + if not hash_: + return VersionInfo('0', '0', 0) + + revision = hash_ + output = _RunGitCommand(directory, ['cat-file', 'commit', hash_]) + for line in reversed(output.splitlines()): + if line.startswith('Cr-Commit-Position:'): + pos = line.rsplit()[-1].strip() + logging.debug("Found Cr-Commit-Position '%s'", pos) + revision = "{}-{}".format(hash_, pos) + break + return VersionInfo(hash_, revision, int(commit_timestamp)) + + +def GetHeaderGuard(path): + """ + Returns the header #define guard for the given file path. + This treats everything after the last instance of "src/" as being a + relevant part of the guard. If there is no "src/", then the entire path + is used. + """ + src_index = path.rfind('src/') + if src_index != -1: + guard = path[src_index + 4:] + else: + guard = path + guard = guard.upper() + return guard.replace('/', '_').replace('.', '_').replace('\\', '_') + '_' + + +def GetHeaderContents(path, define, version): + """ + Returns what the contents of the header file should be that indicate the given + revision. + """ + header_guard = GetHeaderGuard(path) + + header_contents = """/* Generated by lastchange.py, do not edit.*/ + +#ifndef %(header_guard)s +#define %(header_guard)s + +#define %(define)s "%(version)s" + +#endif // %(header_guard)s +""" + header_contents = header_contents % { 'header_guard': header_guard, + 'define': define, + 'version': version } + return header_contents + + +def GetGitTopDirectory(source_dir): + """Get the top git directory - the directory that contains the .git directory. + + Args: + source_dir: The directory to search. + Returns: + The output of "git rev-parse --show-toplevel" as a string + """ + return _RunGitCommand(source_dir, ['rev-parse', '--show-toplevel']) + + +def WriteIfChanged(file_name, contents): + """ + Writes the specified contents to the specified file_name + iff the contents are different than the current contents. + Returns if new data was written. + """ + try: + old_contents = open(file_name, 'r').read() + except EnvironmentError: + pass + else: + if contents == old_contents: + return False + os.unlink(file_name) + open(file_name, 'w').write(contents) + return True + + +def main(argv=None): + if argv is None: + argv = sys.argv + + parser = argparse.ArgumentParser(usage="lastchange.py [options]") + parser.add_argument("-m", "--version-macro", + help=("Name of C #define when using --header. 
Defaults to " + "LAST_CHANGE.")) + parser.add_argument("-o", "--output", metavar="FILE", + help=("Write last change to FILE. " + "Can be combined with --header to write both files.")) + parser.add_argument("--header", metavar="FILE", + help=("Write last change to FILE as a C/C++ header. " + "Can be combined with --output to write both files.")) + parser.add_argument("--merge-base-ref", + default=None, + help=("Only consider changes since the merge " + "base between HEAD and the provided ref")) + parser.add_argument("--revision-id-only", action='store_true', + help=("Output the revision as a VCS revision ID only (in " + "Git, a 40-character commit hash, excluding the " + "Cr-Commit-Position).")) + parser.add_argument("--revision-id-prefix", + metavar="PREFIX", + help=("Adds a string prefix to the VCS revision ID.")) + parser.add_argument("--print-only", action="store_true", + help=("Just print the revision string. Overrides any " + "file-output-related options.")) + parser.add_argument("-s", "--source-dir", metavar="DIR", + help="Use repository in the given directory.") + parser.add_argument("--filter", metavar="REGEX", + help=("Only use log entries where the commit message " + "matches the supplied filter regex. Defaults to " + "'^Change-Id:' to suppress local commits."), + default='^Change-Id:') + + args, extras = parser.parse_known_args(argv[1:]) + + logging.basicConfig(level=logging.WARNING) + + out_file = args.output + header = args.header + commit_filter=args.filter + + while len(extras) and out_file is None: + if out_file is None: + out_file = extras.pop(0) + if extras: + sys.stderr.write('Unexpected arguments: %r\n\n' % extras) + parser.print_help() + sys.exit(2) + + source_dir = args.source_dir or os.path.dirname(os.path.abspath(__file__)) + try: + git_top_dir = GetGitTopDirectory(source_dir) + except GitError as e: + logging.error("Failed to get git top directory from '%s': %s", + source_dir, e) + return 2 + + if args.merge_base_ref: + try: + merge_base_sha = GetMergeBase(git_top_dir, args.merge_base_ref) + except GitError as e: + logging.error("You requested a --merge-base-ref value of '%s' but no " + "merge base could be found between it and HEAD. Git " + "reports: %s", args.merge_base_ref, e) + return 3 + else: + merge_base_sha = 'HEAD' + + try: + version_info = FetchGitRevision(git_top_dir, commit_filter, merge_base_sha) + except GitError as e: + logging.error("Failed to get version info: %s", e) + logging.info(("Falling back to a version of 0.0.0 to allow script to " + "finish. This is normal if you are bootstrapping a new environment " + "or do not have a git repository for any other reason. 
If not, this " + "could represent a serious error.")) + version_info = VersionInfo('0', '0', 0) + + revision_string = version_info.revision + if args.revision_id_only: + revision_string = version_info.revision_id + + if args.revision_id_prefix: + revision_string = args.revision_id_prefix + revision_string + + if args.print_only: + print(revision_string) + else: + lastchange_year = datetime.datetime.utcfromtimestamp( + version_info.timestamp).year + contents_lines = [ + "LASTCHANGE=%s" % revision_string, + "LASTCHANGE_YEAR=%s" % lastchange_year, + ] + contents = '\n'.join(contents_lines) + '\n' + if not out_file and not args.header: + sys.stdout.write(contents) + else: + if out_file: + committime_file = out_file + '.committime' + out_changed = WriteIfChanged(out_file, contents) + if out_changed or not os.path.exists(committime_file): + with open(committime_file, 'w') as timefile: + timefile.write(str(version_info.timestamp)) + if header: + WriteIfChanged(header, + GetHeaderContents(header, args.version_macro, + revision_string)) + + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/third_party/libwebrtc/build/util/lib/__init__.py b/third_party/libwebrtc/build/util/lib/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/__init__.py diff --git a/third_party/libwebrtc/build/util/lib/common/PRESUBMIT.py b/third_party/libwebrtc/build/util/lib/common/PRESUBMIT.py new file mode 100644 index 0000000000..7f280e5f5e --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/common/PRESUBMIT.py @@ -0,0 +1,19 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +USE_PYTHON3 = True + + +def _RunTests(input_api, output_api): + return (input_api.canned_checks.RunUnitTestsInDirectory( + input_api, output_api, '.', files_to_check=[r'.+_test.py$'])) + + +def CheckChangeOnUpload(input_api, output_api): + return _RunTests(input_api, output_api) + + +def CheckChangeOnCommit(input_api, output_api): + return _RunTests(input_api, output_api) diff --git a/third_party/libwebrtc/build/util/lib/common/__init__.py b/third_party/libwebrtc/build/util/lib/common/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/common/__init__.py diff --git a/third_party/libwebrtc/build/util/lib/common/chrome_test_server_spawner.py b/third_party/libwebrtc/build/util/lib/common/chrome_test_server_spawner.py new file mode 100644 index 0000000000..bec81558d1 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/common/chrome_test_server_spawner.py @@ -0,0 +1,503 @@ +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""A "Test Server Spawner" that handles killing/stopping per-test test servers. + +It's used to accept requests from the device to spawn and kill instances of the +chrome test server on the host. 
+""" +# pylint: disable=W0702 + +import json +import logging +import os +import select +import struct +import subprocess +import sys +import threading +import time + +from six.moves import BaseHTTPServer, urllib + + +SERVER_TYPES = { + 'http': '', + 'ftp': '-f', + 'ws': '--websocket', +} + + +_DIR_SOURCE_ROOT = os.path.abspath( + os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, + os.pardir)) + + +_logger = logging.getLogger(__name__) + + +# Path that are needed to import necessary modules when launching a testserver. +os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s' + % (os.path.join(_DIR_SOURCE_ROOT, 'third_party'), + os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'tlslite'), + os.path.join(_DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'))) + + +# The timeout (in seconds) of starting up the Python test server. +_TEST_SERVER_STARTUP_TIMEOUT = 10 + + +def _GetServerTypeCommandLine(server_type): + """Returns the command-line by the given server type. + + Args: + server_type: the server type to be used (e.g. 'http'). + + Returns: + A string containing the command-line argument. + """ + if server_type not in SERVER_TYPES: + raise NotImplementedError('Unknown server type: %s' % server_type) + return SERVER_TYPES[server_type] + + +class PortForwarder: + def Map(self, port_pairs): + pass + + def GetDevicePortForHostPort(self, host_port): + """Returns the device port that corresponds to a given host port.""" + return host_port + + def WaitHostPortAvailable(self, port): + """Returns True if |port| is available.""" + return True + + def WaitPortNotAvailable(self, port): + """Returns True if |port| is not available.""" + return True + + def WaitDevicePortReady(self, port): + """Returns whether the provided port is used.""" + return True + + def Unmap(self, device_port): + """Unmaps specified port""" + pass + + +class TestServerThread(threading.Thread): + """A thread to run the test server in a separate process.""" + + def __init__(self, ready_event, arguments, port_forwarder): + """Initialize TestServerThread with the following argument. + + Args: + ready_event: event which will be set when the test server is ready. + arguments: dictionary of arguments to run the test server. + device: An instance of DeviceUtils. + tool: instance of runtime error detection tool. + """ + threading.Thread.__init__(self) + self.wait_event = threading.Event() + self.stop_event = threading.Event() + self.ready_event = ready_event + self.ready_event.clear() + self.arguments = arguments + self.port_forwarder = port_forwarder + self.test_server_process = None + self.is_ready = False + self.host_port = self.arguments['port'] + self.host_ocsp_port = 0 + assert isinstance(self.host_port, int) + # The forwarder device port now is dynamically allocated. + self.forwarder_device_port = 0 + self.forwarder_ocsp_device_port = 0 + # Anonymous pipe in order to get port info from test server. + self.pipe_in = None + self.pipe_out = None + self.process = None + self.command_line = [] + + def _WaitToStartAndGetPortFromTestServer(self): + """Waits for the Python test server to start and gets the port it is using. + + The port information is passed by the Python test server with a pipe given + by self.pipe_out. It is written as a result to |self.host_port|. + + Returns: + Whether the port used by the test server was successfully fetched. 
+ """ + assert self.host_port == 0 and self.pipe_out and self.pipe_in + (in_fds, _, _) = select.select([self.pipe_in, ], [], [], + _TEST_SERVER_STARTUP_TIMEOUT) + if len(in_fds) == 0: + _logger.error('Failed to wait to the Python test server to be started.') + return False + # First read the data length as an unsigned 4-byte value. This + # is _not_ using network byte ordering since the Python test server packs + # size as native byte order and all Chromium platforms so far are + # configured to use little-endian. + # TODO(jnd): Change the Python test server and local_test_server_*.cc to + # use a unified byte order (either big-endian or little-endian). + data_length = os.read(self.pipe_in, struct.calcsize('=L')) + if data_length: + (data_length,) = struct.unpack('=L', data_length) + assert data_length + if not data_length: + _logger.error('Failed to get length of server data.') + return False + server_data_json = os.read(self.pipe_in, data_length) + if not server_data_json: + _logger.error('Failed to get server data.') + return False + _logger.info('Got port json data: %s', server_data_json) + + parsed_server_data = None + try: + parsed_server_data = json.loads(server_data_json) + except ValueError: + pass + + if not isinstance(parsed_server_data, dict): + _logger.error('Failed to parse server_data: %s' % server_data_json) + return False + + if not isinstance(parsed_server_data.get('port'), int): + _logger.error('Failed to get port information from the server data.') + return False + + self.host_port = parsed_server_data['port'] + self.host_ocsp_port = parsed_server_data.get('ocsp_port', 0) + + return self.port_forwarder.WaitPortNotAvailable(self.host_port) + + def _GenerateCommandLineArguments(self): + """Generates the command line to run the test server. + + Note that all options are processed by following the definitions in + testserver.py. + """ + if self.command_line: + return + + args_copy = dict(self.arguments) + + # Translate the server type. + type_cmd = _GetServerTypeCommandLine(args_copy.pop('server-type')) + if type_cmd: + self.command_line.append(type_cmd) + + # Use a pipe to get the port given by the instance of Python test server + # if the test does not specify the port. + assert self.host_port == args_copy['port'] + if self.host_port == 0: + (self.pipe_in, self.pipe_out) = os.pipe() + self.command_line.append('--startup-pipe=%d' % self.pipe_out) + + # Pass the remaining arguments as-is. + for key, values in args_copy.iteritems(): + if not isinstance(values, list): + values = [values] + for value in values: + if value is None: + self.command_line.append('--%s' % key) + else: + self.command_line.append('--%s=%s' % (key, value)) + + def _CloseUnnecessaryFDsForTestServerProcess(self): + # This is required to avoid subtle deadlocks that could be caused by the + # test server child process inheriting undesirable file descriptors such as + # file lock file descriptors. Note stdin, stdout, and stderr (0-2) are left + # alone and redirected with subprocess.Popen. It is important to leave those + # fds filled, or the test server will accidentally open other fds at those + # numbers. + for fd in xrange(3, 1024): + if fd != self.pipe_out: + try: + os.close(fd) + except: + pass + + def run(self): + _logger.info('Start running the thread!') + self.wait_event.clear() + self._GenerateCommandLineArguments() + # TODO(crbug.com/941669): When this script is ported to Python 3, replace + # 'vpython3' below with sys.executable. 
The call to + # vpython3 -vpython-tool install below can also be removed. + command = [ + 'vpython3', + os.path.join(_DIR_SOURCE_ROOT, 'net', 'tools', 'testserver', + 'testserver.py') + ] + self.command_line + _logger.info('Running: %s', command) + + # Disable PYTHONUNBUFFERED because it has a bad interaction with the + # testserver. Remove once this interaction is fixed. + unbuf = os.environ.pop('PYTHONUNBUFFERED', None) + + # Pass _DIR_SOURCE_ROOT as the child's working directory so that relative + # paths in the arguments are resolved correctly. devnull can be replaced + # with subprocess.DEVNULL in Python 3. + with open(os.devnull, 'r+b') as devnull: + # _WaitToStartAndGetPortFromTestServer has a short timeout. If the + # vpython3 cache is not initialized, launching the test server can take + # some time. Prewarm the cache before running the server. + subprocess.check_call( + [ + 'vpython3', '-vpython-spec', + os.path.join(_DIR_SOURCE_ROOT, '.vpython3'), '-vpython-tool', + 'install' + ], + preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess, + stdin=devnull, + stdout=None, + stderr=None, + cwd=_DIR_SOURCE_ROOT) + + self.process = subprocess.Popen( + command, + preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess, + stdin=devnull, + # Preserve stdout and stderr from the test server. + stdout=None, + stderr=None, + cwd=_DIR_SOURCE_ROOT) + if unbuf: + os.environ['PYTHONUNBUFFERED'] = unbuf + if self.process: + if self.pipe_out: + self.is_ready = self._WaitToStartAndGetPortFromTestServer() + else: + self.is_ready = self.port_forwarder.WaitPortNotAvailable(self.host_port) + + if self.is_ready: + port_map = [(0, self.host_port)] + if self.host_ocsp_port: + port_map.extend([(0, self.host_ocsp_port)]) + self.port_forwarder.Map(port_map) + + self.forwarder_device_port = \ + self.port_forwarder.GetDevicePortForHostPort(self.host_port) + if self.host_ocsp_port: + self.forwarder_ocsp_device_port = \ + self.port_forwarder.GetDevicePortForHostPort(self.host_ocsp_port) + + # Check whether the forwarder is ready on the device. + self.is_ready = self.forwarder_device_port and \ + self.port_forwarder.WaitDevicePortReady(self.forwarder_device_port) + + # Wake up the request handler thread. + self.ready_event.set() + # Keep thread running until Stop() gets called. + self.stop_event.wait() + if self.process.poll() is None: + self.process.kill() + # Wait for process to actually terminate. + # (crbug.com/946475) + self.process.wait() + + self.port_forwarder.Unmap(self.forwarder_device_port) + self.process = None + self.is_ready = False + if self.pipe_out: + os.close(self.pipe_in) + os.close(self.pipe_out) + self.pipe_in = None + self.pipe_out = None + _logger.info('Test-server has died.') + self.wait_event.set() + + def Stop(self): + """Blocks until the loop has finished. + + Note that this must be called in another thread. + """ + if not self.process: + return + self.stop_event.set() + self.wait_event.wait() + + +class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): + """A handler used to process http GET/POST request.""" + + def _SendResponse(self, response_code, response_reason, additional_headers, + contents): + """Generates a response sent to the client from the provided parameters. + + Args: + response_code: number of the response status. + response_reason: string of reason description of the response. + additional_headers: dict of additional headers. Each key is the name of + the header, each value is the content of the header. 
+ contents: string of the contents we want to send to client. + """ + self.send_response(response_code, response_reason) + self.send_header('Content-Type', 'text/html') + # Specify the content-length as without it the http(s) response will not + # be completed properly (and the browser keeps expecting data). + self.send_header('Content-Length', len(contents)) + for header_name in additional_headers: + self.send_header(header_name, additional_headers[header_name]) + self.end_headers() + self.wfile.write(contents) + self.wfile.flush() + + def _StartTestServer(self): + """Starts the test server thread.""" + _logger.info('Handling request to spawn a test server.') + content_type = self.headers.getheader('content-type') + if content_type != 'application/json': + raise Exception('Bad content-type for start request.') + content_length = self.headers.getheader('content-length') + if not content_length: + content_length = 0 + try: + content_length = int(content_length) + except: + raise Exception('Bad content-length for start request.') + _logger.info(content_length) + test_server_argument_json = self.rfile.read(content_length) + _logger.info(test_server_argument_json) + + if len(self.server.test_servers) >= self.server.max_instances: + self._SendResponse(400, 'Invalid request', {}, + 'Too many test servers running') + return + + ready_event = threading.Event() + new_server = TestServerThread(ready_event, + json.loads(test_server_argument_json), + self.server.port_forwarder) + new_server.setDaemon(True) + new_server.start() + ready_event.wait() + if new_server.is_ready: + response = {'port': new_server.forwarder_device_port, + 'message': 'started'}; + if new_server.forwarder_ocsp_device_port: + response['ocsp_port'] = new_server.forwarder_ocsp_device_port + self._SendResponse(200, 'OK', {}, json.dumps(response)) + _logger.info('Test server is running on port %d forwarded to %d.' % + (new_server.forwarder_device_port, new_server.host_port)) + port = new_server.forwarder_device_port + assert port not in self.server.test_servers + self.server.test_servers[port] = new_server + else: + new_server.Stop() + self._SendResponse(500, 'Test Server Error.', {}, '') + _logger.info('Encounter problem during starting a test server.') + + def _KillTestServer(self, params): + """Stops the test server instance.""" + try: + port = int(params['port'][0]) + except ValueError: + port = None + if port == None or port <= 0: + self._SendResponse(400, 'Invalid request.', {}, 'port must be specified') + return + + if port not in self.server.test_servers: + self._SendResponse(400, 'Invalid request.', {}, + "testserver isn't running on port %d" % port) + return + + server = self.server.test_servers.pop(port) + + _logger.info('Handling request to kill a test server on port: %d.', port) + server.Stop() + + # Make sure the status of test server is correct before sending response. + if self.server.port_forwarder.WaitHostPortAvailable(port): + self._SendResponse(200, 'OK', {}, 'killed') + _logger.info('Test server on port %d is killed', port) + else: + # We expect the port to be free, but nothing stops the system from + # binding something else to that port, so don't throw error. + # (crbug.com/946475) + self._SendResponse(200, 'OK', {}, '') + _logger.warn('Port %s is not free after killing test server.' % port) + + def log_message(self, format, *args): + # Suppress the default HTTP logging behavior if the logging level is higher + # than INFO. 
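    # Note: overriding log_message() replaces BaseHTTPRequestHandler's default
    # per-request logging to stderr. The conditional below is effectively a
    # no-op either way; the handler methods log through _logger instead.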
+ if _logger.getEffectiveLevel() <= logging.INFO: + pass + + def do_POST(self): + parsed_path = urllib.parse.urlparse(self.path) + action = parsed_path.path + _logger.info('Action for POST method is: %s.', action) + if action == '/start': + self._StartTestServer() + else: + self._SendResponse(400, 'Unknown request.', {}, '') + _logger.info('Encounter unknown request: %s.', action) + + def do_GET(self): + parsed_path = urllib.parse.urlparse(self.path) + action = parsed_path.path + params = urllib.parse.parse_qs(parsed_path.query, keep_blank_values=1) + _logger.info('Action for GET method is: %s.', action) + for param in params: + _logger.info('%s=%s', param, params[param][0]) + if action == '/kill': + self._KillTestServer(params) + elif action == '/ping': + # The ping handler is used to check whether the spawner server is ready + # to serve the requests. We don't need to test the status of the test + # server when handling ping request. + self._SendResponse(200, 'OK', {}, 'ready') + _logger.info('Handled ping request and sent response.') + else: + self._SendResponse(400, 'Unknown request', {}, '') + _logger.info('Encounter unknown request: %s.', action) + + +class SpawningServer(object): + """The class used to start/stop a http server.""" + + def __init__(self, test_server_spawner_port, port_forwarder, max_instances): + self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port), + SpawningServerRequestHandler) + self.server_port = self.server.server_port + _logger.info('Started test server spawner on port: %d.', self.server_port) + + self.server.port_forwarder = port_forwarder + self.server.test_servers = {} + self.server.max_instances = max_instances + + def _Listen(self): + _logger.info('Starting test server spawner.') + self.server.serve_forever() + + def Start(self): + """Starts the test server spawner.""" + listener_thread = threading.Thread(target=self._Listen) + listener_thread.setDaemon(True) + listener_thread.start() + + def Stop(self): + """Stops the test server spawner. + + Also cleans the server state. + """ + self.CleanupState() + self.server.shutdown() + + def CleanupState(self): + """Cleans up the spawning server state. + + This should be called if the test server spawner is reused, + to avoid sharing the test server instance. + """ + if self.server.test_servers: + _logger.warning('Not all test servers were stopped.') + for port in self.server.test_servers: + _logger.warning('Stopping test server on port %d' % port) + self.server.test_servers[port].Stop() + self.server.test_servers = {} diff --git a/third_party/libwebrtc/build/util/lib/common/perf_result_data_type.py b/third_party/libwebrtc/build/util/lib/common/perf_result_data_type.py new file mode 100644 index 0000000000..67b550a46c --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/common/perf_result_data_type.py @@ -0,0 +1,20 @@ +# Copyright 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
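# These data types select the output prefix used by
# perf_tests_results_helper.RESULT_TYPES (e.g. DEFAULT -> '*RESULT ',
# UNIMPORTANT -> 'RESULT ', HISTOGRAM -> '*HISTOGRAM ').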
+ +DEFAULT = 'default' +UNIMPORTANT = 'unimportant' +HISTOGRAM = 'histogram' +UNIMPORTANT_HISTOGRAM = 'unimportant-histogram' +INFORMATIONAL = 'informational' + +ALL_TYPES = [DEFAULT, UNIMPORTANT, HISTOGRAM, UNIMPORTANT_HISTOGRAM, + INFORMATIONAL] + + +def IsValidType(datatype): + return datatype in ALL_TYPES + + +def IsHistogram(datatype): + return (datatype == HISTOGRAM or datatype == UNIMPORTANT_HISTOGRAM) diff --git a/third_party/libwebrtc/build/util/lib/common/perf_tests_results_helper.py b/third_party/libwebrtc/build/util/lib/common/perf_tests_results_helper.py new file mode 100644 index 0000000000..153886dce5 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/common/perf_tests_results_helper.py @@ -0,0 +1,202 @@ +# Copyright 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +from __future__ import print_function + +import re +import sys + +import json +import logging +import math + +import perf_result_data_type + + +# Mapping from result type to test output +RESULT_TYPES = {perf_result_data_type.UNIMPORTANT: 'RESULT ', + perf_result_data_type.DEFAULT: '*RESULT ', + perf_result_data_type.INFORMATIONAL: '', + perf_result_data_type.UNIMPORTANT_HISTOGRAM: 'HISTOGRAM ', + perf_result_data_type.HISTOGRAM: '*HISTOGRAM '} + + +def _EscapePerfResult(s): + """Escapes |s| for use in a perf result.""" + return re.sub('[\:|=/#&,]', '_', s) + + +def FlattenList(values): + """Returns a simple list without sub-lists.""" + ret = [] + for entry in values: + if isinstance(entry, list): + ret.extend(FlattenList(entry)) + else: + ret.append(entry) + return ret + + +def GeomMeanAndStdDevFromHistogram(histogram_json): + histogram = json.loads(histogram_json) + # Handle empty histograms gracefully. + if not 'buckets' in histogram: + return 0.0, 0.0 + count = 0 + sum_of_logs = 0 + for bucket in histogram['buckets']: + if 'high' in bucket: + bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0 + else: + bucket['mean'] = bucket['low'] + if bucket['mean'] > 0: + sum_of_logs += math.log(bucket['mean']) * bucket['count'] + count += bucket['count'] + + if count == 0: + return 0.0, 0.0 + + sum_of_squares = 0 + geom_mean = math.exp(sum_of_logs / count) + for bucket in histogram['buckets']: + if bucket['mean'] > 0: + sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count'] + return geom_mean, math.sqrt(sum_of_squares / count) + + +def _ValueToString(v): + # Special case for floats so we don't print using scientific notation. + if isinstance(v, float): + return '%f' % v + else: + return str(v) + + +def _MeanAndStdDevFromList(values): + avg = None + sd = None + if len(values) > 1: + try: + value = '[%s]' % ','.join([_ValueToString(v) for v in values]) + avg = sum([float(v) for v in values]) / len(values) + sqdiffs = [(float(v) - avg) ** 2 for v in values] + variance = sum(sqdiffs) / (len(values) - 1) + sd = math.sqrt(variance) + except ValueError: + value = ', '.join(values) + else: + value = values[0] + return value, avg, sd + + +def PrintPages(page_list): + """Prints list of pages to stdout in the format required by perf tests.""" + print('Pages: [%s]' % ','.join([_EscapePerfResult(p) for p in page_list])) + + +def PrintPerfResult(measurement, trace, values, units, + result_type=perf_result_data_type.DEFAULT, + print_to_stdout=True): + """Prints numerical data to stdout in the format required by perf tests. 
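    For example (illustrative), with the default result_type:

        PrintPerfResult('vm_peak', 'reference', [1024], 'bytes')

    prints a line of the form:

        *RESULT vm_peak: reference= 1024 bytes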
+ + The string args may be empty but they must not contain any colons (:) or + equals signs (=). + This is parsed by the buildbot using: + http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/process_log_utils.py + + Args: + measurement: A description of the quantity being measured, e.g. "vm_peak". + On the dashboard, this maps to a particular graph. Mandatory. + trace: A description of the particular data point, e.g. "reference". + On the dashboard, this maps to a particular "line" in the graph. + Mandatory. + values: A list of numeric measured values. An N-dimensional list will be + flattened and treated as a simple list. + units: A description of the units of measure, e.g. "bytes". + result_type: Accepts values of perf_result_data_type.ALL_TYPES. + print_to_stdout: If True, prints the output in stdout instead of returning + the output to caller. + + Returns: + String of the formated perf result. + """ + assert perf_result_data_type.IsValidType(result_type), \ + 'result type: %s is invalid' % result_type + + trace_name = _EscapePerfResult(trace) + + if (result_type == perf_result_data_type.UNIMPORTANT or + result_type == perf_result_data_type.DEFAULT or + result_type == perf_result_data_type.INFORMATIONAL): + assert isinstance(values, list) + assert '/' not in measurement + flattened_values = FlattenList(values) + assert len(flattened_values) + value, avg, sd = _MeanAndStdDevFromList(flattened_values) + output = '%s%s: %s%s%s %s' % ( + RESULT_TYPES[result_type], + _EscapePerfResult(measurement), + trace_name, + # Do not show equal sign if the trace is empty. Usually it happens when + # measurement is enough clear to describe the result. + '= ' if trace_name else '', + value, + units) + else: + assert perf_result_data_type.IsHistogram(result_type) + assert isinstance(values, list) + # The histograms can only be printed individually, there's no computation + # across different histograms. + assert len(values) == 1 + value = values[0] + output = '%s%s: %s= %s %s' % ( + RESULT_TYPES[result_type], + _EscapePerfResult(measurement), + trace_name, + value, + units) + avg, sd = GeomMeanAndStdDevFromHistogram(value) + + if avg: + output += '\nAvg %s: %f%s' % (measurement, avg, units) + if sd: + output += '\nSd %s: %f%s' % (measurement, sd, units) + if print_to_stdout: + print(output) + sys.stdout.flush() + return output + + +def ReportPerfResult(chart_data, graph_title, trace_title, value, units, + improvement_direction='down', important=True): + """Outputs test results in correct format. + + If chart_data is None, it outputs data in old format. If chart_data is a + dictionary, formats in chartjson format. If any other format defaults to + old format. + + Args: + chart_data: A dictionary corresponding to perf results in the chartjson + format. + graph_title: A string containing the name of the chart to add the result + to. + trace_title: A string containing the name of the trace within the chart + to add the result to. + value: The value of the result being reported. + units: The units of the value being reported. + improvement_direction: A string denoting whether higher or lower is + better for the result. Either 'up' or 'down'. + important: A boolean denoting whether the result is important or not. 
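    Example (illustrative): with chart_data = {'charts': {}}, calling
    ReportPerfResult(chart_data, 'warm_times', 'page_load', 123, 'ms')
    leaves

        chart_data['charts']['warm_times']['page_load'] == {
            'type': 'scalar', 'value': 123, 'units': 'ms',
            'improvement_direction': 'down', 'important': True}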
+ """ + if chart_data and isinstance(chart_data, dict): + chart_data['charts'].setdefault(graph_title, {}) + chart_data['charts'][graph_title][trace_title] = { + 'type': 'scalar', + 'value': value, + 'units': units, + 'improvement_direction': improvement_direction, + 'important': important + } + else: + PrintPerfResult(graph_title, trace_title, [value], units) diff --git a/third_party/libwebrtc/build/util/lib/common/unittest_util.py b/third_party/libwebrtc/build/util/lib/common/unittest_util.py new file mode 100644 index 0000000000..d6ff7f6c22 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/common/unittest_util.py @@ -0,0 +1,155 @@ +# Copyright 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Utilities for dealing with the python unittest module.""" + +import fnmatch +import re +import sys +import unittest + + +class _TextTestResult(unittest._TextTestResult): + """A test result class that can print formatted text results to a stream. + + Results printed in conformance with gtest output format, like: + [ RUN ] autofill.AutofillTest.testAutofillInvalid: "test desc." + [ OK ] autofill.AutofillTest.testAutofillInvalid + [ RUN ] autofill.AutofillTest.testFillProfile: "test desc." + [ OK ] autofill.AutofillTest.testFillProfile + [ RUN ] autofill.AutofillTest.testFillProfileCrazyCharacters: "Test." + [ OK ] autofill.AutofillTest.testFillProfileCrazyCharacters + """ + def __init__(self, stream, descriptions, verbosity): + unittest._TextTestResult.__init__(self, stream, descriptions, verbosity) + self._fails = set() + + def _GetTestURI(self, test): + return '%s.%s.%s' % (test.__class__.__module__, + test.__class__.__name__, + test._testMethodName) + + def getDescription(self, test): + return '%s: "%s"' % (self._GetTestURI(test), test.shortDescription()) + + def startTest(self, test): + unittest.TestResult.startTest(self, test) + self.stream.writeln('[ RUN ] %s' % self.getDescription(test)) + + def addSuccess(self, test): + unittest.TestResult.addSuccess(self, test) + self.stream.writeln('[ OK ] %s' % self._GetTestURI(test)) + + def addError(self, test, err): + unittest.TestResult.addError(self, test, err) + self.stream.writeln('[ ERROR ] %s' % self._GetTestURI(test)) + self._fails.add(self._GetTestURI(test)) + + def addFailure(self, test, err): + unittest.TestResult.addFailure(self, test, err) + self.stream.writeln('[ FAILED ] %s' % self._GetTestURI(test)) + self._fails.add(self._GetTestURI(test)) + + def getRetestFilter(self): + return ':'.join(self._fails) + + +class TextTestRunner(unittest.TextTestRunner): + """Test Runner for displaying test results in textual format. + + Results are displayed in conformance with google test output. 
+ """ + + def __init__(self, verbosity=1): + unittest.TextTestRunner.__init__(self, stream=sys.stderr, + verbosity=verbosity) + + def _makeResult(self): + return _TextTestResult(self.stream, self.descriptions, self.verbosity) + + +def GetTestsFromSuite(suite): + """Returns all the tests from a given test suite.""" + tests = [] + for x in suite: + if isinstance(x, unittest.TestSuite): + tests += GetTestsFromSuite(x) + else: + tests += [x] + return tests + + +def GetTestNamesFromSuite(suite): + """Returns a list of every test name in the given suite.""" + return map(lambda x: GetTestName(x), GetTestsFromSuite(suite)) + + +def GetTestName(test): + """Gets the test name of the given unittest test.""" + return '.'.join([test.__class__.__module__, + test.__class__.__name__, + test._testMethodName]) + + +def FilterTestSuite(suite, gtest_filter): + """Returns a new filtered tests suite based on the given gtest filter. + + See https://github.com/google/googletest/blob/master/docs/advanced.md + for gtest_filter specification. + """ + return unittest.TestSuite(FilterTests(GetTestsFromSuite(suite), gtest_filter)) + + +def FilterTests(all_tests, gtest_filter): + """Filter a list of tests based on the given gtest filter. + + Args: + all_tests: List of tests (unittest.TestSuite) + gtest_filter: Filter to apply. + + Returns: + Filtered subset of the given list of tests. + """ + test_names = [GetTestName(test) for test in all_tests] + filtered_names = FilterTestNames(test_names, gtest_filter) + return [test for test in all_tests if GetTestName(test) in filtered_names] + + +def FilterTestNames(all_tests, gtest_filter): + """Filter a list of test names based on the given gtest filter. + + See https://github.com/google/googletest/blob/master/docs/advanced.md + for gtest_filter specification. + + Args: + all_tests: List of test names. + gtest_filter: Filter to apply. + + Returns: + Filtered subset of the given list of test names. + """ + pattern_groups = gtest_filter.split('-') + positive_patterns = ['*'] + if pattern_groups[0]: + positive_patterns = pattern_groups[0].split(':') + negative_patterns = [] + if len(pattern_groups) > 1: + negative_patterns = pattern_groups[1].split(':') + + neg_pats = None + if negative_patterns: + neg_pats = re.compile('|'.join(fnmatch.translate(p) for p in + negative_patterns)) + + tests = [] + test_set = set() + for pattern in positive_patterns: + pattern_tests = [ + test for test in all_tests + if (fnmatch.fnmatch(test, pattern) + and not (neg_pats and neg_pats.match(test)) + and test not in test_set)] + tests.extend(pattern_tests) + test_set.update(pattern_tests) + return tests diff --git a/third_party/libwebrtc/build/util/lib/common/unittest_util_test.py b/third_party/libwebrtc/build/util/lib/common/unittest_util_test.py new file mode 100755 index 0000000000..1514c9b6d4 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/common/unittest_util_test.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +# pylint: disable=protected-access + +import logging +import sys +import unittest +import unittest_util + + +class FilterTestNamesTest(unittest.TestCase): + + possible_list = ["Foo.One", + "Foo.Two", + "Foo.Three", + "Bar.One", + "Bar.Two", + "Bar.Three", + "Quux.One", + "Quux.Two", + "Quux.Three"] + + def testMatchAll(self): + x = unittest_util.FilterTestNames(self.possible_list, "*") + self.assertEquals(x, self.possible_list) + + def testMatchPartial(self): + x = unittest_util.FilterTestNames(self.possible_list, "Foo.*") + self.assertEquals(x, ["Foo.One", "Foo.Two", "Foo.Three"]) + + def testMatchFull(self): + x = unittest_util.FilterTestNames(self.possible_list, "Foo.Two") + self.assertEquals(x, ["Foo.Two"]) + + def testMatchTwo(self): + x = unittest_util.FilterTestNames(self.possible_list, "Bar.*:Foo.*") + self.assertEquals(x, ["Bar.One", + "Bar.Two", + "Bar.Three", + "Foo.One", + "Foo.Two", + "Foo.Three"]) + + def testMatchWithNegative(self): + x = unittest_util.FilterTestNames(self.possible_list, "Bar.*:Foo.*-*.Three") + self.assertEquals(x, ["Bar.One", + "Bar.Two", + "Foo.One", + "Foo.Two"]) + + def testMatchOverlapping(self): + x = unittest_util.FilterTestNames(self.possible_list, "Bar.*:*.Two") + self.assertEquals(x, ["Bar.One", + "Bar.Two", + "Bar.Three", + "Foo.Two", + "Quux.Two"]) + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.DEBUG) + unittest.main(verbosity=2) diff --git a/third_party/libwebrtc/build/util/lib/common/util.py b/third_party/libwebrtc/build/util/lib/common/util.py new file mode 100644 index 0000000000..a415b1f534 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/common/util.py @@ -0,0 +1,151 @@ +# Copyright 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Generic utilities for all python scripts.""" + +import atexit +import httplib +import os +import signal +import stat +import subprocess +import sys +import tempfile +import urlparse + + +def GetPlatformName(): + """Return a string to be used in paths for the platform.""" + if IsWindows(): + return 'win' + if IsMac(): + return 'mac' + if IsLinux(): + return 'linux' + raise NotImplementedError('Unknown platform "%s".' % sys.platform) + + +def IsWindows(): + return sys.platform == 'cygwin' or sys.platform.startswith('win') + + +def IsLinux(): + return sys.platform.startswith('linux') + + +def IsMac(): + return sys.platform.startswith('darwin') + + +def _DeleteDir(path): + """Deletes a directory recursively, which must exist.""" + # Don't use shutil.rmtree because it can't delete read-only files on Win. + for root, dirs, files in os.walk(path, topdown=False): + for name in files: + filename = os.path.join(root, name) + os.chmod(filename, stat.S_IWRITE) + os.remove(filename) + for name in dirs: + os.rmdir(os.path.join(root, name)) + os.rmdir(path) + + +def Delete(path): + """Deletes the given file or directory (recursively), which must exist.""" + if os.path.isdir(path): + _DeleteDir(path) + else: + os.remove(path) + + +def MaybeDelete(path): + """Deletes the given file or directory (recurisvely), if it exists.""" + if os.path.exists(path): + Delete(path) + + +def MakeTempDir(parent_dir=None): + """Creates a temporary directory and returns an absolute path to it. + + The temporary directory is automatically deleted when the python interpreter + exits normally. + + Args: + parent_dir: the directory to create the temp dir in. If None, the system + temp dir is used. 
+ + Returns: + The absolute path to the temporary directory. + """ + path = tempfile.mkdtemp(dir=parent_dir) + atexit.register(MaybeDelete, path) + return path + + +def Unzip(zip_path, output_dir): + """Unzips the given zip file using a system installed unzip tool. + + Args: + zip_path: zip file to unzip. + output_dir: directory to unzip the contents of the zip file. The directory + must exist. + + Raises: + RuntimeError if the unzip operation fails. + """ + if IsWindows(): + unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y'] + else: + unzip_cmd = ['unzip', '-o'] + unzip_cmd += [zip_path] + if RunCommand(unzip_cmd, output_dir) != 0: + raise RuntimeError('Unable to unzip %s to %s' % (zip_path, output_dir)) + + +def Kill(pid): + """Terminate the given pid.""" + if IsWindows(): + subprocess.call(['taskkill.exe', '/T', '/F', '/PID', str(pid)]) + else: + os.kill(pid, signal.SIGTERM) + + +def RunCommand(cmd, cwd=None): + """Runs the given command and returns the exit code. + + Args: + cmd: list of command arguments. + cwd: working directory to execute the command, or None if the current + working directory should be used. + + Returns: + The exit code of the command. + """ + process = subprocess.Popen(cmd, cwd=cwd) + process.wait() + return process.returncode + + +def DoesUrlExist(url): + """Determines whether a resource exists at the given URL. + + Args: + url: URL to be verified. + + Returns: + True if url exists, otherwise False. + """ + parsed = urlparse.urlparse(url) + try: + conn = httplib.HTTPConnection(parsed.netloc) + conn.request('HEAD', parsed.path) + response = conn.getresponse() + except (socket.gaierror, socket.error): + return False + finally: + conn.close() + # Follow both permanent (301) and temporary (302) redirects. + if response.status == 302 or response.status == 301: + return DoesUrlExist(response.getheader('location')) + return response.status == 200 diff --git a/third_party/libwebrtc/build/util/lib/results/__init__.py b/third_party/libwebrtc/build/util/lib/results/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/results/__init__.py diff --git a/third_party/libwebrtc/build/util/lib/results/result_sink.py b/third_party/libwebrtc/build/util/lib/results/result_sink.py new file mode 100644 index 0000000000..7d5ce6f980 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/results/result_sink.py @@ -0,0 +1,180 @@ +# Copyright 2020 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +from __future__ import absolute_import +import base64 +import json +import os + +import six + +import requests # pylint: disable=import-error +from lib.results import result_types + +# Maps result_types to the luci test-result.proto. +# https://godoc.org/go.chromium.org/luci/resultdb/proto/v1#TestStatus +RESULT_MAP = { + result_types.UNKNOWN: 'ABORT', + result_types.PASS: 'PASS', + result_types.FAIL: 'FAIL', + result_types.CRASH: 'CRASH', + result_types.TIMEOUT: 'ABORT', + result_types.SKIP: 'SKIP', + result_types.NOTRUN: 'SKIP', +} + + +def TryInitClient(): + """Tries to initialize a result_sink_client object. + + Assumes that rdb stream is already running. + + Returns: + A ResultSinkClient for the result_sink server else returns None. 
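    The LUCI_CONTEXT file is expected to contain a 'result_sink' section of
    roughly this shape (values are illustrative):

        {"result_sink": {"address": "localhost:1234",
                         "auth_token": "some-auth-token"}}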
+ """ + try: + with open(os.environ['LUCI_CONTEXT']) as f: + sink = json.load(f)['result_sink'] + return ResultSinkClient(sink) + except KeyError: + return None + + +class ResultSinkClient(object): + """A class to store the sink's post configurations and make post requests. + + This assumes that the rdb stream has been called already and that the + server is listening. + """ + + def __init__(self, context): + base_url = 'http://%s/prpc/luci.resultsink.v1.Sink' % context['address'] + self.test_results_url = base_url + '/ReportTestResults' + self.report_artifacts_url = base_url + '/ReportInvocationLevelArtifacts' + + self.headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json', + 'Authorization': 'ResultSink %s' % context['auth_token'], + } + + def Post(self, + test_id, + status, + duration, + test_log, + test_file, + artifacts=None, + failure_reason=None): + """Uploads the test result to the ResultSink server. + + This assumes that the rdb stream has been called already and that + server is ready listening. + + Args: + test_id: A string representing the test's name. + status: A string representing if the test passed, failed, etc... + duration: An int representing time in ms. + test_log: A string representing the test's output. + test_file: A string representing the file location of the test. + artifacts: An optional dict of artifacts to attach to the test. + failure_reason: An optional string with the reason why the test failed. + Should be None if the test did not fail. + + Returns: + N/A + """ + assert status in RESULT_MAP + expected = status in (result_types.PASS, result_types.SKIP) + result_db_status = RESULT_MAP[status] + + tr = { + 'expected': + expected, + 'status': + result_db_status, + 'tags': [ + { + 'key': 'test_name', + 'value': test_id, + }, + { + # Status before getting mapped to result_db statuses. + 'key': 'raw_status', + 'value': status, + } + ], + 'testId': + test_id, + 'testMetadata': { + 'name': test_id, + } + } + + artifacts = artifacts or {} + if test_log: + # Upload the original log without any modifications. + b64_log = six.ensure_str(base64.b64encode(six.ensure_binary(test_log))) + artifacts.update({'Test Log': {'contents': b64_log}}) + tr['summaryHtml'] = '<text-artifact artifact-id="Test Log" />' + if artifacts: + tr['artifacts'] = artifacts + if failure_reason: + tr['failureReason'] = { + 'primaryErrorMessage': _TruncateToUTF8Bytes(failure_reason, 1024) + } + + if duration is not None: + # Duration must be formatted to avoid scientific notation in case + # number is too small or too large. Result_db takes seconds, not ms. + # Need to use float() otherwise it does substitution first then divides. + tr['duration'] = '%.9fs' % float(duration / 1000.0) + + if test_file and str(test_file).startswith('//'): + tr['testMetadata']['location'] = { + 'file_name': test_file, + 'repo': 'https://chromium.googlesource.com/chromium/src', + } + + res = requests.post(url=self.test_results_url, + headers=self.headers, + data=json.dumps({'testResults': [tr]})) + res.raise_for_status() + + def ReportInvocationLevelArtifacts(self, artifacts): + """Uploads invocation-level artifacts to the ResultSink server. + + This is for artifacts that don't apply to a single test but to the test + invocation as a whole (eg: system logs). + + Args: + artifacts: A dict of artifacts to attach to the invocation. 
+ """ + req = {'artifacts': artifacts} + res = requests.post(url=self.report_artifacts_url, + headers=self.headers, + data=json.dumps(req)) + res.raise_for_status() + + +def _TruncateToUTF8Bytes(s, length): + """ Truncates a string to a given number of bytes when encoded as UTF-8. + + Ensures the given string does not take more than length bytes when encoded + as UTF-8. Adds trailing ellipsis (...) if truncation occurred. A truncated + string may end up encoding to a length slightly shorter than length because + only whole Unicode codepoints are dropped. + + Args: + s: The string to truncate. + length: the length (in bytes) to truncate to. + """ + encoded = s.encode('utf-8') + if len(encoded) > length: + # Truncate, leaving space for trailing ellipsis (...). + encoded = encoded[:length - 3] + # Truncating the string encoded as UTF-8 may have left the final codepoint + # only partially present. Pass 'ignore' to acknowledge and ensure this is + # dropped. + return encoded.decode('utf-8', 'ignore') + "..." + return s diff --git a/third_party/libwebrtc/build/util/lib/results/result_sink_test.py b/third_party/libwebrtc/build/util/lib/results/result_sink_test.py new file mode 100755 index 0000000000..3486ad90d1 --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/results/result_sink_test.py @@ -0,0 +1,102 @@ +#!/usr/bin/env vpython3 +# Copyright 2021 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import json +import os +import sys +import unittest + +# The following non-std imports are fetched via vpython. See the list at +# //.vpython3 +import mock # pylint: disable=import-error +import six + +_BUILD_UTIL_PATH = os.path.abspath( + os.path.join(os.path.dirname(__file__), '..', '..')) +if _BUILD_UTIL_PATH not in sys.path: + sys.path.insert(0, _BUILD_UTIL_PATH) + +from lib.results import result_sink +from lib.results import result_types + + +class InitClientTest(unittest.TestCase): + @mock.patch.dict(os.environ, {}, clear=True) + def testEmptyClient(self): + # No LUCI_CONTEXT env var should prevent a client from being created. 
+ client = result_sink.TryInitClient() + self.assertIsNone(client) + + @mock.patch.dict(os.environ, {'LUCI_CONTEXT': 'some-file.json'}) + def testBasicClient(self): + luci_context_json = { + 'result_sink': { + 'address': 'some-ip-address', + 'auth_token': 'some-auth-token', + }, + } + if six.PY2: + open_builtin_path = '__builtin__.open' + else: + open_builtin_path = 'builtins.open' + with mock.patch(open_builtin_path, + mock.mock_open(read_data=json.dumps(luci_context_json))): + client = result_sink.TryInitClient() + self.assertEqual( + client.test_results_url, + 'http://some-ip-address/prpc/luci.resultsink.v1.Sink/ReportTestResults') + self.assertEqual(client.headers['Authorization'], + 'ResultSink some-auth-token') + + +class ClientTest(unittest.TestCase): + def setUp(self): + context = { + 'address': 'some-ip-address', + 'auth_token': 'some-auth-token', + } + self.client = result_sink.ResultSinkClient(context) + + @mock.patch('requests.post') + def testPostPassingTest(self, mock_post): + self.client.Post('some-test', result_types.PASS, 0, 'some-test-log', None) + self.assertEqual( + mock_post.call_args[1]['url'], + 'http://some-ip-address/prpc/luci.resultsink.v1.Sink/ReportTestResults') + data = json.loads(mock_post.call_args[1]['data']) + self.assertEqual(data['testResults'][0]['testId'], 'some-test') + self.assertEqual(data['testResults'][0]['status'], 'PASS') + + @mock.patch('requests.post') + def testPostFailingTest(self, mock_post): + self.client.Post('some-test', + result_types.FAIL, + 0, + 'some-test-log', + None, + failure_reason='omg test failure') + data = json.loads(mock_post.call_args[1]['data']) + self.assertEqual(data['testResults'][0]['status'], 'FAIL') + self.assertEqual(data['testResults'][0]['testMetadata']['name'], + 'some-test') + self.assertEqual( + data['testResults'][0]['failureReason']['primaryErrorMessage'], + 'omg test failure') + + @mock.patch('requests.post') + def testPostWithTestFile(self, mock_post): + self.client.Post('some-test', result_types.PASS, 0, 'some-test-log', + '//some/test.cc') + data = json.loads(mock_post.call_args[1]['data']) + self.assertEqual( + data['testResults'][0]['testMetadata']['location']['file_name'], + '//some/test.cc') + self.assertEqual(data['testResults'][0]['testMetadata']['name'], + 'some-test') + self.assertIsNotNone(data['testResults'][0]['summaryHtml']) + + +if __name__ == '__main__': + unittest.main() diff --git a/third_party/libwebrtc/build/util/lib/results/result_types.py b/third_party/libwebrtc/build/util/lib/results/result_types.py new file mode 100644 index 0000000000..48ba88cdbe --- /dev/null +++ b/third_party/libwebrtc/build/util/lib/results/result_types.py @@ -0,0 +1,25 @@ +# Copyright 2021 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +"""Module containing base test results classes.""" + +# The test passed. +PASS = 'SUCCESS' + +# The test was intentionally skipped. +SKIP = 'SKIPPED' + +# The test failed. +FAIL = 'FAILURE' + +# The test caused the containing process to crash. +CRASH = 'CRASH' + +# The test timed out. +TIMEOUT = 'TIMEOUT' + +# The test ran, but we couldn't determine what happened. +UNKNOWN = 'UNKNOWN' + +# The test did not run. 
+NOTRUN = 'NOTRUN' diff --git a/third_party/libwebrtc/build/util/process_version.gni b/third_party/libwebrtc/build/util/process_version.gni new file mode 100644 index 0000000000..591e0a4a9b --- /dev/null +++ b/third_party/libwebrtc/build/util/process_version.gni @@ -0,0 +1,128 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/util/lastchange.gni") + +# Runs the version processing script over the given template file to produce +# an output file. This is used for generating various forms of files that +# incorporate the product name and version. +# +# Unlike GYP, this will actually compile the resulting file, so you don't need +# to add it separately to the sources, just depend on the target. +# +# In GYP this is a rule that runs once per ".ver" file. In GN this just +# processes one file per invocation of the template so you may have to have +# multiple targets. +# +# Parameters: +# sources (optional): +# List of file names to read. When converting a GYP target, this should +# list the 'source' (see above) as well as any extra_variable_files. +# The files will be passed to version.py in the order specified here. +# +# output: +# File name of file to write. In GYP this is unspecified and it will +# make up a file name for you based on the input name, and tack on +# "_version.rc" to the end. But in GN you need to specify the full name. +# +# template_file (optional): +# Template file to use (not a list). Most Windows users that want to use +# this to process a .rc template should use process_version_rc_template(), +# defined in //chrome/process_version_rc_template.gni, instead. +# +# extra_args (optional): +# Extra arguments to pass to version.py. Any "-f <filename>" args should +# use sources instead. +# +# process_only (optional, defaults to false) +# Set to generate only one action that processes the version file and +# doesn't attempt to link the result into a source set. This is for if +# you are processing the version as data only. +# +# visibility (optional) +# +# Example: +# process_version("myversion") { +# sources = [ +# "//chrome/VERSION" +# "myfile.h.in" +# ] +# output = "$target_gen_dir/myfile.h" +# extra_args = [ "-e", "FOO=42" ] +# } +template("process_version") { + assert(defined(invoker.output), "Output must be defined for $target_name") + + process_only = defined(invoker.process_only) && invoker.process_only + + if (process_only) { + action_name = target_name + } else { + action_name = target_name + "_action" + source_set_name = target_name + } + + action(action_name) { + script = "//build/util/version.py" + + inputs = [ lastchange_file ] + if (defined(invoker.inputs)) { + inputs += invoker.inputs + } + if (defined(invoker.template_file)) { + inputs += [ invoker.template_file ] + } + + outputs = [ invoker.output ] + + args = [] + + if (is_official_build) { + args += [ "--official" ] + } + + if (defined(invoker.sources)) { + inputs += invoker.sources + foreach(i, invoker.sources) { + args += [ + "-f", + rebase_path(i, root_build_dir), + ] + } + } + + if (defined(invoker.extra_args)) { + args += invoker.extra_args + } + args += [ + "-o", + rebase_path(invoker.output, root_build_dir), + ] + if (defined(invoker.template_file)) { + args += [ rebase_path(invoker.template_file, root_build_dir) ] + } + + forward_variables_from(invoker, [ "deps" ]) + + if (process_only) { + # When processing only, visibility gets applied to this target. 
+ forward_variables_from(invoker, [ "visibility" ]) + } else { + # When linking the result, only the source set can depend on the action. + visibility = [ ":$source_set_name" ] + } + } + + if (!process_only) { + source_set(source_set_name) { + forward_variables_from(invoker, + [ + "visibility", + "deps", + ]) + sources = get_target_outputs(":$action_name") + public_deps = [ ":$action_name" ] + } + } +} diff --git a/third_party/libwebrtc/build/util/version.gni b/third_party/libwebrtc/build/util/version.gni new file mode 100644 index 0000000000..998ce077d2 --- /dev/null +++ b/third_party/libwebrtc/build/util/version.gni @@ -0,0 +1,155 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This exposes the Chrome version as GN variables for use in build files. +# This also generates the various version codes used for builds of chrome for +# android. +# +# PREFER NOT TO USE THESE. The GYP build uses this kind of thing extensively. +# However, it is far better to write an action (or use the process_version +# wrapper in build/util/version.gni) to generate a file at build-time with the +# information you need. This allows better dependency checking and GN will +# run faster. +# +# These values should only be used if you REALLY need to depend on them at +# build-time, for example, in the computation of output file names. + +# Give version.py a pattern that will expand to a GN scope consisting of +# all values we need at once. +_version_dictionary_template = "full = \"@MAJOR@.@MINOR@.@BUILD@.@PATCH@\" " + + "major = \"@MAJOR@\" minor = \"@MINOR@\" " + + "build = \"@BUILD@\" patch = \"@PATCH@\" " + +# The file containing the Chrome version number. 
+chrome_version_file = "//chrome/VERSION" + +_script_arguments = [] + +if (is_mac) { + _version_dictionary_template += "patch_hi = @PATCH_HI@ patch_lo = @PATCH_LO@ " + + _script_arguments += [ + "-e", + "PATCH_HI=int(PATCH)//256", + "-e", + "PATCH_LO=int(PATCH)%256", + ] +} else if (target_os == "android") { + import("//build/config/android/config.gni") + + _version_dictionary_template += + "chrome_version_code = " + "\"@CHROME_VERSION_CODE@\" " + + "chrome_modern_version_code = \"@CHROME_MODERN_VERSION_CODE@\" " + + "monochrome_version_code = \"@MONOCHROME_VERSION_CODE@\" " + + "trichrome_version_code = \"@TRICHROME_VERSION_CODE@\" " + + "webview_stable_version_code = \"@WEBVIEW_STABLE_VERSION_CODE@\" " + + "webview_beta_version_code = \"@WEBVIEW_BETA_VERSION_CODE@\" " + + "webview_dev_version_code = \"@WEBVIEW_DEV_VERSION_CODE@\" " + + if (target_cpu == "arm64" || target_cpu == "x64") { + _version_dictionary_template += "monochrome_32_version_code = \"@MONOCHROME_32_VERSION_CODE@\" " + "monochrome_32_64_version_code = \"@MONOCHROME_32_64_VERSION_CODE@\" " + "monochrome_64_32_version_code = \"@MONOCHROME_64_32_VERSION_CODE@\" " + "monochrome_64_version_code = \"@MONOCHROME_64_VERSION_CODE@\" " + "trichrome_32_version_code = \"@TRICHROME_32_VERSION_CODE@\" " + "trichrome_32_64_version_code = \"@TRICHROME_32_64_VERSION_CODE@\" " + "trichrome_64_32_version_code = \"@TRICHROME_64_32_VERSION_CODE@\" " + "trichrome_64_version_code = \"@TRICHROME_64_VERSION_CODE@\" " + "webview_32_stable_version_code = \"@WEBVIEW_32_STABLE_VERSION_CODE@\" " + "webview_32_beta_version_code = \"@WEBVIEW_32_BETA_VERSION_CODE@\" " + "webview_32_dev_version_code = \"@WEBVIEW_32_DEV_VERSION_CODE@\" " + "webview_64_stable_version_code = \"@WEBVIEW_64_STABLE_VERSION_CODE@\" " + "webview_64_beta_version_code = \"@WEBVIEW_64_BETA_VERSION_CODE@\" " + "webview_64_dev_version_code = \"@WEBVIEW_64_DEV_VERSION_CODE@\" " + } + + _script_arguments += [ + "-a", + target_cpu, + ] + + if (defined(final_android_sdk) && !final_android_sdk) { + _script_arguments += [ "--next" ] + } +} + +_script_arguments += [ + "-f", + rebase_path(chrome_version_file, root_build_dir), + "-t", + _version_dictionary_template, + "--os", + target_os, +] + +_result = exec_script("version.py", + _script_arguments, + "scope", + [ + chrome_version_file, + "android_chrome_version.py", + ]) + +# Full version. For example "45.0.12321.0" +chrome_version_full = _result.full + +# The consituent parts of the full version. 
+chrome_version_major = _result.major +chrome_version_minor = _result.minor +chrome_version_build = _result.build +chrome_version_patch = _result.patch + +if (is_mac) { + chrome_version_patch_hi = _result.patch_hi + chrome_version_patch_lo = _result.patch_lo + + chrome_dylib_version = "$chrome_version_build.$chrome_version_patch_hi" + + ".$chrome_version_patch_lo" +} else if (target_os == "android") { + forward_variables_from(_result, + [ + "chrome_modern_version_code", + "chrome_version_code", + "monochrome_version_code", + "monochrome_32_version_code", + "monochrome_32_64_version_code", + "monochrome_64_32_version_code", + "monochrome_64_version_code", + "trichrome_version_code", + "trichrome_32_version_code", + "trichrome_32_64_version_code", + "trichrome_64_32_version_code", + "trichrome_64_version_code", + "webview_beta_version_code", + "webview_dev_version_code", + "webview_stable_version_code", + "webview_32_beta_version_code", + "webview_32_dev_version_code", + "webview_32_stable_version_code", + "webview_64_beta_version_code", + "webview_64_dev_version_code", + "webview_64_stable_version_code", + ]) + + chrome_version_name = chrome_version_full + + lines_to_write = [ + "VersionName: $chrome_version_name", + "Chrome: $chrome_version_code", + "ChromeModern: $chrome_modern_version_code", + "Monochrome: $monochrome_version_code", + "TrichromeChrome: $trichrome_version_code", + "AndroidWebviewStable: $webview_stable_version_code", + "AndroidWebviewBeta: $webview_beta_version_code", + "AndroidWebviewDev: $webview_dev_version_code", + ] + + if (target_cpu == "arm64" || target_cpu == "x64") { + lines_to_write += [ + "Monochrome32: $monochrome_32_version_code", + "Monochrome3264: $monochrome_32_64_version_code", + "Monochrome6432: $monochrome_64_32_version_code", + "Monochrome64: $monochrome_64_version_code", + "TrichromeChrome32: $trichrome_32_version_code", + "TrichromeChrome3264: $trichrome_32_64_version_code", + "TrichromeChrome6432: $trichrome_64_32_version_code", + "TrichromeChrome64: $trichrome_64_version_code", + "AndroidWebview32Stable: $webview_32_stable_version_code", + "AndroidWebview32Beta: $webview_32_beta_version_code", + "AndroidWebview32Dev: $webview_32_dev_version_code", + "AndroidWebview64Stable: $webview_64_stable_version_code", + "AndroidWebview64Beta: $webview_64_beta_version_code", + "AndroidWebview64Dev: $webview_64_dev_version_code", + ] + } + + write_file("$root_out_dir/android_chrome_versions.txt", lines_to_write) +} diff --git a/third_party/libwebrtc/build/util/version.py b/third_party/libwebrtc/build/util/version.py new file mode 100755 index 0000000000..e93cfe40dc --- /dev/null +++ b/third_party/libwebrtc/build/util/version.py @@ -0,0 +1,268 @@ +#!/usr/bin/env python +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +version.py -- Chromium version string substitution utility. +""" + +from __future__ import print_function + +import argparse +import os +import sys + +import android_chrome_version + + +def FetchValuesFromFile(values_dict, file_name): + """ + Fetches KEYWORD=VALUE settings from the specified file. + + Everything to the left of the first '=' is the keyword, + everything to the right is the value. No stripping of + white space, so beware. + + The file must exist, otherwise you get the Python exception from open(). 
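    For example (illustrative), a VERSION file containing

        MAJOR=74
        MINOR=0
        BUILD=3720
        PATCH=0

    adds {'MAJOR': '74', 'MINOR': '0', 'BUILD': '3720', 'PATCH': '0'} to
    values_dict.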
+ """ + for line in open(file_name, 'r').readlines(): + key, val = line.rstrip('\r\n').split('=', 1) + values_dict[key] = val + + +def FetchValues(file_list, is_official_build=None): + """ + Returns a dictionary of values to be used for substitution. + + Populates the dictionary with KEYWORD=VALUE settings from the files in + 'file_list'. + + Explicitly adds the following value from internal calculations: + + OFFICIAL_BUILD + """ + CHROME_BUILD_TYPE = os.environ.get('CHROME_BUILD_TYPE') + if CHROME_BUILD_TYPE == '_official' or is_official_build: + official_build = '1' + else: + official_build = '0' + + values = dict( + OFFICIAL_BUILD = official_build, + ) + + for file_name in file_list: + FetchValuesFromFile(values, file_name) + + script_dirname = os.path.dirname(os.path.realpath(__file__)) + lastchange_filename = os.path.join(script_dirname, "LASTCHANGE") + lastchange_values = {} + FetchValuesFromFile(lastchange_values, lastchange_filename) + + for placeholder_key, placeholder_value in values.items(): + values[placeholder_key] = SubstTemplate(placeholder_value, + lastchange_values) + + return values + + +def SubstTemplate(contents, values): + """ + Returns the template with substituted values from the specified dictionary. + + Keywords to be substituted are surrounded by '@': @KEYWORD@. + + No attempt is made to avoid recursive substitution. The order + of evaluation is random based on the order of the keywords returned + by the Python dictionary. So do NOT substitute a value that + contains any @KEYWORD@ strings expecting them to be recursively + substituted, okay? + """ + for key, val in values.items(): + try: + contents = contents.replace('@' + key + '@', val) + except TypeError: + print(repr(key), repr(val)) + return contents + + +def SubstFile(file_name, values): + """ + Returns the contents of the specified file_name with substituted values. + + Substituted values come from the specified dictionary. + + This is like SubstTemplate, except it operates on a file. + """ + template = open(file_name, 'r').read() + return SubstTemplate(template, values) + + +def WriteIfChanged(file_name, contents): + """ + Writes the specified contents to the specified file_name. + + Does nothing if the contents aren't different than the current contents. + """ + try: + old_contents = open(file_name, 'r').read() + except EnvironmentError: + pass + else: + if contents == old_contents: + return + os.unlink(file_name) + open(file_name, 'w').write(contents) + + +def BuildParser(): + """Build argparse parser, with added arguments.""" + parser = argparse.ArgumentParser() + parser.add_argument('-f', '--file', action='append', default=[], + help='Read variables from FILE.') + parser.add_argument('-i', '--input', default=None, + help='Read strings to substitute from FILE.') + parser.add_argument('-o', '--output', default=None, + help='Write substituted strings to FILE.') + parser.add_argument('-t', '--template', default=None, + help='Use TEMPLATE as the strings to substitute.') + parser.add_argument( + '-e', + '--eval', + action='append', + default=[], + help='Evaluate VAL after reading variables. Can be used ' + 'to synthesize variables. e.g. 
-e \'PATCH_HI=int(' + 'PATCH)//256.') + parser.add_argument( + '-a', + '--arch', + default=None, + choices=android_chrome_version.ARCH_CHOICES, + help='Set which cpu architecture the build is for.') + parser.add_argument('--os', default=None, help='Set the target os.') + parser.add_argument('--official', action='store_true', + help='Whether the current build should be an official ' + 'build, used in addition to the environment ' + 'variable.') + parser.add_argument( + '--next', + action='store_true', + help='Whether the current build should be a "next" ' + 'build, which targets pre-release versions of ' + 'Android') + parser.add_argument('args', nargs=argparse.REMAINDER, + help='For compatibility: INPUT and OUTPUT can be ' + 'passed as positional arguments.') + return parser + + +def BuildEvals(options, parser): + """Construct a dict of passed '-e' arguments for evaluating.""" + evals = {} + for expression in options.eval: + try: + evals.update(dict([expression.split('=', 1)])) + except ValueError: + parser.error('-e requires VAR=VAL') + return evals + + +def ModifyOptionsCompat(options, parser): + """Support compatibility with old versions. + + Specifically, for old versions that considered the first two + positional arguments shorthands for --input and --output. + """ + while len(options.args) and (options.input is None or options.output is None): + if options.input is None: + options.input = options.args.pop(0) + elif options.output is None: + options.output = options.args.pop(0) + if options.args: + parser.error('Unexpected arguments: %r' % options.args) + + +def GenerateValues(options, evals): + """Construct a dict of raw values used to generate output. + + e.g. this could return a dict like + { + 'BUILD': 74, + } + + which would be used to resolve a template like + 'build = "@BUILD@"' into 'build = "74"' + + """ + values = FetchValues(options.file, options.official) + + for key, val in evals.items(): + values[key] = str(eval(val, globals(), values)) + + if options.os == 'android': + android_chrome_version_codes = android_chrome_version.GenerateVersionCodes( + values, options.arch, options.next) + values.update(android_chrome_version_codes) + + return values + + +def GenerateOutputContents(options, values): + """Construct output string (e.g. from template). + + Arguments: + options -- argparse parsed arguments + values -- dict with raw values used to resolve the keywords in a template + string + """ + + if options.template is not None: + return SubstTemplate(options.template, values) + elif options.input: + return SubstFile(options.input, values) + else: + # Generate a default set of version information. + return """MAJOR=%(MAJOR)s +MINOR=%(MINOR)s +BUILD=%(BUILD)s +PATCH=%(PATCH)s +LASTCHANGE=%(LASTCHANGE)s +OFFICIAL_BUILD=%(OFFICIAL_BUILD)s +""" % values + + +def BuildOutput(args): + """Gets all input and output values needed for writing output.""" + # Build argparse parser with arguments + parser = BuildParser() + options = parser.parse_args(args) + + # Get dict of passed '-e' arguments for evaluating + evals = BuildEvals(options, parser) + # For compatibility with interface that considered first two positional + # arguments shorthands for --input and --output. 
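  # A minimal end-to-end sketch (the path and template are illustrative, and a
  # LASTCHANGE file is assumed to exist next to version.py):
  #
  #   out = BuildOutput(['-f', 'chrome/VERSION',
  #                      '-t', 'full = "@MAJOR@.@MINOR@.@BUILD@.@PATCH@"'])
  #
  # out['contents'] would then be 'full = "74.0.3720.0"' for a VERSION file
  # with MAJOR=74, MINOR=0, BUILD=3720, PATCH=0.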
+  ModifyOptionsCompat(options, parser)
+
+  # Get the raw values that will be used to generate the output
+  values = GenerateValues(options, evals)
+  # Get the output string
+  contents = GenerateOutputContents(options, values)
+
+  return {'options': options, 'contents': contents}
+
+
+def main():
+  output = BuildOutput(sys.argv[1:])
+
+  if output['options'].output is not None:
+    WriteIfChanged(output['options'].output, output['contents'])
+  else:
+    print(output['contents'])
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/third_party/libwebrtc/build/util/version_test.py b/third_party/libwebrtc/build/util/version_test.py
new file mode 100644
index 0000000000..2a65ddc716
--- /dev/null
+++ b/third_party/libwebrtc/build/util/version_test.py
@@ -0,0 +1,174 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+import mock
+import version
+
+
+def _ReplaceArgs(args, *replacements):
+  new_args = args[:]
+  for flag, val in replacements:
+    flag_index = args.index(flag)
+    new_args[flag_index + 1] = val
+  return new_args
+
+
+class _VersionTest(unittest.TestCase):
+  """Unittests for the version module.
+  """
+
+  _CHROME_VERSION_FILE = os.path.join(
+      os.path.dirname(__file__), os.pardir, os.pardir, 'chrome', 'VERSION')
+
+  _SCRIPT = os.path.join(os.path.dirname(__file__), 'version.py')
+
+  _EXAMPLE_VERSION = {
+      'MAJOR': '74',
+      'MINOR': '0',
+      'BUILD': '3720',
+      'PATCH': '0',
+  }
+
+  _EXAMPLE_TEMPLATE = (
+      'full = "@MAJOR@.@MINOR@.@BUILD@.@PATCH@" '
+      'major = "@MAJOR@" minor = "@MINOR@" '
+      'build = "@BUILD@" patch = "@PATCH@" version_id = @VERSION_ID@ ')
+
+  _ANDROID_CHROME_VARS = [
+      'chrome_version_code',
+      'chrome_modern_version_code',
+      'monochrome_version_code',
+      'trichrome_version_code',
+      'webview_stable_version_code',
+      'webview_beta_version_code',
+      'webview_dev_version_code',
+  ]
+
+  _EXAMPLE_ANDROID_TEMPLATE = (
+      _EXAMPLE_TEMPLATE + ''.join(
+          ['%s = "@%s@" ' % (el, el.upper()) for el in _ANDROID_CHROME_VARS]))
+
+  _EXAMPLE_ARGS = [
+      '-f',
+      _CHROME_VERSION_FILE,
+      '-t',
+      _EXAMPLE_TEMPLATE,
+  ]
+
+  _EXAMPLE_ANDROID_ARGS = _ReplaceArgs(_EXAMPLE_ARGS,
+                                       ['-t', _EXAMPLE_ANDROID_TEMPLATE]) + [
+      '-a',
+      'arm',
+      '--os',
+      'android',
+  ]
+
+  @staticmethod
+  def _RunBuildOutput(new_version_values={},
+                      get_new_args=lambda old_args: old_args):
+    """Parameterized helper method for running the main testable method in
+    version.py.
+
+    Keyword arguments:
+    new_version_values -- dict used to update _EXAMPLE_VERSION
+    get_new_args -- lambda for updating _EXAMPLE_ANDROID_ARGS
+    """
+
+    with mock.patch('version.FetchValuesFromFile') as \
+        fetch_values_from_file_mock:
+
+      fetch_values_from_file_mock.side_effect = (lambda values, file :
+          values.update(
+              dict(_VersionTest._EXAMPLE_VERSION, **new_version_values)))
+
+      new_args = get_new_args(_VersionTest._EXAMPLE_ARGS)
+      return version.BuildOutput(new_args)
+
+  def testFetchValuesFromFile(self):
+    """It returns a dict in correct format - { <str>: <str> }, to verify
+    assumption of other tests that mock this function
+    """
+    result = {}
+    version.FetchValuesFromFile(result, self._CHROME_VERSION_FILE)
+
+    for key, val in result.items():
+      self.assertIsInstance(key, str)
+      self.assertIsInstance(val, str)
+
+  def testBuildOutputAndroid(self):
+    """Assert it includes assignments of expected variables"""
+    output = self._RunBuildOutput(
+        get_new_args=lambda args: self._EXAMPLE_ANDROID_ARGS)
+    contents = output['contents']
+
+    self.assertRegexpMatches(contents, r'\bchrome_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents,
+                             r'\bchrome_modern_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents, r'\bmonochrome_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents, r'\btrichrome_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents,
+                             r'\bwebview_stable_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents, r'\bwebview_beta_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents, r'\bwebview_dev_version_code = "\d+"\s')
+
+  def testBuildOutputAndroidArchVariantsArm64(self):
+    """Assert 64-bit-specific version codes"""
+    new_template = (
+        self._EXAMPLE_ANDROID_TEMPLATE +
+        "monochrome_64_32_version_code = \"@MONOCHROME_64_32_VERSION_CODE@\" "
+        "monochrome_64_version_code = \"@MONOCHROME_64_VERSION_CODE@\" "
+        "trichrome_64_32_version_code = \"@TRICHROME_64_32_VERSION_CODE@\" "
+        "trichrome_64_version_code = \"@TRICHROME_64_VERSION_CODE@\" ")
+    args_with_template = _ReplaceArgs(self._EXAMPLE_ANDROID_ARGS,
+                                      ['-t', new_template])
+    new_args = _ReplaceArgs(args_with_template, ['-a', 'arm64'])
+    output = self._RunBuildOutput(get_new_args=lambda args: new_args)
+    contents = output['contents']
+
+    self.assertRegexpMatches(contents,
+                             r'\bmonochrome_64_32_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents,
+                             r'\bmonochrome_64_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents,
+                             r'\btrichrome_64_32_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents,
+                             r'\btrichrome_64_version_code = "\d+"\s')
+
+  def testBuildOutputAndroidArchVariantsX64(self):
+    """Assert 64-bit-specific version codes"""
+    new_template = (
+        self._EXAMPLE_ANDROID_TEMPLATE +
+        "monochrome_64_32_version_code = \"@MONOCHROME_64_32_VERSION_CODE@\" "
+        "monochrome_64_version_code = \"@MONOCHROME_64_VERSION_CODE@\" "
+        "trichrome_64_32_version_code = \"@TRICHROME_64_32_VERSION_CODE@\" "
+        "trichrome_64_version_code = \"@TRICHROME_64_VERSION_CODE@\" ")
+    args_with_template = _ReplaceArgs(self._EXAMPLE_ANDROID_ARGS,
+                                      ['-t', new_template])
+    new_args = _ReplaceArgs(args_with_template, ['-a', 'x64'])
+    output = self._RunBuildOutput(get_new_args=lambda args: new_args)
+    contents = output['contents']
+
+    self.assertRegexpMatches(contents,
+                             r'\bmonochrome_64_32_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents,
+                             r'\bmonochrome_64_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents,
+                             r'\btrichrome_64_32_version_code = "\d+"\s')
+    self.assertRegexpMatches(contents,
+                             r'\btrichrome_64_version_code = "\d+"\s')
+
+  def testBuildOutputAndroidChromeArchInput(self):
+    """Assert it raises an exception when using an invalid architecture input"""
+    new_args = _ReplaceArgs(self._EXAMPLE_ANDROID_ARGS, ['-a', 'foobar'])
+    with self.assertRaises(SystemExit) as cm:
+      self._RunBuildOutput(get_new_args=lambda args: new_args)
+
+    self.assertEqual(cm.exception.code, 2)
+
+
+if __name__ == '__main__':
+  unittest.main()