path: root/third_party/libwebrtc/build/fuchsia
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit    43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree      620249daf56c0258faa40cbdcf9cfba06de2a846 /third_party/libwebrtc/build/fuchsia
parent    Initial commit. (diff)
download  firefox-43a97878ce14b72f0981164f87f2e35e14151312.tar.xz
          firefox-43a97878ce14b72f0981164f87f2e35e14151312.zip
Adding upstream version 110.0.1. (upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/build/fuchsia')
-rw-r--r--  third_party/libwebrtc/build/fuchsia/DIR_METADATA  5
-rw-r--r--  third_party/libwebrtc/build/fuchsia/OWNERS  9
-rw-r--r--  third_party/libwebrtc/build/fuchsia/PRESUBMIT.py  35
-rw-r--r--  third_party/libwebrtc/build/fuchsia/__init__.py  0
-rw-r--r--  third_party/libwebrtc/build/fuchsia/aemu_target.py  116
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/binary_sizes.py  556
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/binary_sizes_test.py  61
-rw-r--r--  third_party/libwebrtc/build/fuchsia/boot_data.py  120
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/boot_data_test.py  46
-rw-r--r--  third_party/libwebrtc/build/fuchsia/common.py  140
-rw-r--r--  third_party/libwebrtc/build/fuchsia/common_args.py  173
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/deploy_to_pkg_repo.py  67
-rw-r--r--  third_party/libwebrtc/build/fuchsia/device_target.py  266
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/device_target_test.py  103
-rw-r--r--  third_party/libwebrtc/build/fuchsia/emu_target.py  145
-rw-r--r--  third_party/libwebrtc/build/fuchsia/fvdl_target.py  204
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/fvdl_target_test.py  102
-rw-r--r--  third_party/libwebrtc/build/fuchsia/generic_x64_target.py  99
-rw-r--r--  third_party/libwebrtc/build/fuchsia/linux.sdk.sha1  1
-rw-r--r--  third_party/libwebrtc/build/fuchsia/mac.sdk.sha1  1
-rw-r--r--  third_party/libwebrtc/build/fuchsia/net_test_server.py  90
-rw-r--r--  third_party/libwebrtc/build/fuchsia/pkg_repo.py  209
-rw-r--r--  third_party/libwebrtc/build/fuchsia/qemu_image.py  75
-rw-r--r--  third_party/libwebrtc/build/fuchsia/qemu_target.py  243
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/qemu_target_test.py  58
-rw-r--r--  third_party/libwebrtc/build/fuchsia/remote_cmd.py  131
-rw-r--r--  third_party/libwebrtc/build/fuchsia/run_test_package.py  278
-rw-r--r--  third_party/libwebrtc/build/fuchsia/runner_exceptions.py  78
-rw-r--r--  third_party/libwebrtc/build/fuchsia/runner_logs.py  96
-rw-r--r--  third_party/libwebrtc/build/fuchsia/sdk-bucket.txt  0
-rw-r--r--  third_party/libwebrtc/build/fuchsia/sdk-hash-files.list  1
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/start_emulator.py  84
-rw-r--r--  third_party/libwebrtc/build/fuchsia/symbolizer.py  70
-rw-r--r--  third_party/libwebrtc/build/fuchsia/target.py  336
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/test_runner.py  264
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/update_images.py  142
-rwxr-xr-x  third_party/libwebrtc/build/fuchsia/update_sdk.py  168
37 files changed, 4572 insertions, 0 deletions
diff --git a/third_party/libwebrtc/build/fuchsia/DIR_METADATA b/third_party/libwebrtc/build/fuchsia/DIR_METADATA
new file mode 100644
index 0000000000..fe8198aeaa
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/DIR_METADATA
@@ -0,0 +1,5 @@
+monorail {
+ component: "Fuchsia"
+}
+
+team_email: "cr-fuchsia@chromium.org"
diff --git a/third_party/libwebrtc/build/fuchsia/OWNERS b/third_party/libwebrtc/build/fuchsia/OWNERS
new file mode 100644
index 0000000000..cd7c5cf64d
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/OWNERS
@@ -0,0 +1,9 @@
+ddorwin@chromium.org
+fdegans@chromium.org
+kmarshall@chromium.org
+qsr@chromium.org
+sergeyu@chromium.org
+wez@chromium.org
+
+per-file linux.sdk.sha1=chromium-autoroll@skia-public.iam.gserviceaccount.com
+per-file mac.sdk.sha1=chromium-autoroll@skia-public.iam.gserviceaccount.com
diff --git a/third_party/libwebrtc/build/fuchsia/PRESUBMIT.py b/third_party/libwebrtc/build/fuchsia/PRESUBMIT.py
new file mode 100644
index 0000000000..4cdd4d021a
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/PRESUBMIT.py
@@ -0,0 +1,35 @@
+# Copyright 2021 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Presubmit script for Fuchsia.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
+details on the presubmit API built into depot_tools.
+"""
+
+
+def CommonChecks(input_api, output_api):
+ build_fuchsia_dir = input_api.PresubmitLocalPath()
+
+ def J(*dirs):
+ """Returns a path relative to presubmit directory."""
+ return input_api.os_path.join(build_fuchsia_dir, *dirs)
+
+ tests = []
+ tests.extend(
+ input_api.canned_checks.GetUnitTests(
+ input_api,
+ output_api,
+ unit_tests=[J('boot_data_test.py'),
+ J('fvdl_target_test.py')],
+ run_on_python2=False,
+ run_on_python3=True))
+ return input_api.RunTests(tests)
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ return CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ return CommonChecks(input_api, output_api)
diff --git a/third_party/libwebrtc/build/fuchsia/__init__.py b/third_party/libwebrtc/build/fuchsia/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/__init__.py
diff --git a/third_party/libwebrtc/build/fuchsia/aemu_target.py b/third_party/libwebrtc/build/fuchsia/aemu_target.py
new file mode 100644
index 0000000000..6717005815
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/aemu_target.py
@@ -0,0 +1,116 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Implements commands for running and interacting with Fuchsia on AEMU."""
+
+import emu_target
+import os
+import platform
+import qemu_target
+import logging
+
+from common import GetEmuRootForPlatform
+
+
+def GetTargetType():
+ return AemuTarget
+
+
+class AemuTarget(qemu_target.QemuTarget):
+ EMULATOR_NAME = 'aemu'
+
+ def __init__(self, out_dir, target_cpu, system_log_file, cpu_cores,
+ require_kvm, ram_size_mb, enable_graphics, hardware_gpu):
+ super(AemuTarget, self).__init__(out_dir, target_cpu, system_log_file,
+ cpu_cores, require_kvm, ram_size_mb)
+
+ self._enable_graphics = enable_graphics
+ self._hardware_gpu = hardware_gpu
+
+ @staticmethod
+ def CreateFromArgs(args):
+ return AemuTarget(args.out_dir, args.target_cpu, args.system_log_file,
+ args.cpu_cores, args.require_kvm, args.ram_size_mb,
+ args.enable_graphics, args.hardware_gpu)
+
+ @staticmethod
+ def RegisterArgs(arg_parser):
+ aemu_args = arg_parser.add_argument_group('aemu', 'AEMU arguments')
+ aemu_args.add_argument('--enable-graphics',
+ action='store_true',
+ default=False,
+ help='Start AEMU with graphics instead of '\
+ 'headless.')
+ aemu_args.add_argument('--hardware-gpu',
+ action='store_true',
+ default=False,
+ help='Use local GPU hardware instead of '\
+ 'Swiftshader.')
+
+ def _EnsureEmulatorExists(self, path):
+ assert os.path.exists(path), \
+ 'This checkout is missing %s.' % (self.EMULATOR_NAME)
+
+ def _BuildCommand(self):
+ aemu_folder = GetEmuRootForPlatform(self.EMULATOR_NAME)
+
+ self._EnsureEmulatorExists(aemu_folder)
+ aemu_path = os.path.join(aemu_folder, 'emulator')
+
+ # `VirtioInput` is needed for touch input device support on Fuchsia.
+ # `RefCountPipe` is needed for proper cleanup of resources when a process
+ # that uses Vulkan dies inside the guest
+ aemu_features = 'VirtioInput,RefCountPipe'
+
+ # Configure the CPU to emulate.
+ # On Linux, we can enable lightweight virtualization (KVM) if the host and
+ # guest architectures are the same.
+ if self._IsKvmEnabled():
+ aemu_features += ',KVM,GLDirectMem,Vulkan'
+ else:
+ if self._target_cpu != 'arm64':
+ aemu_features += ',-GLDirectMem'
+
+ # Use Swiftshader for Vulkan if requested
+ gpu_target = 'swiftshader_indirect'
+ if self._hardware_gpu:
+ gpu_target = 'host'
+
+ aemu_command = [aemu_path]
+ if not self._enable_graphics:
+ aemu_command.append('-no-window')
+    # All args after the -fuchsia flag get passed to QEMU.
+ aemu_command.extend([
+ '-feature', aemu_features, '-window-size', '1024x600', '-gpu',
+ gpu_target, '-verbose', '-fuchsia'
+ ])
+
+ aemu_command.extend(self._BuildQemuConfig())
+
+ aemu_command.extend([
+ '-vga', 'none',
+ '-device', 'virtio-keyboard-pci',
+ '-device', 'virtio_input_multi_touch_pci_1',
+ '-device', 'ich9-ahci,id=ahci'])
+ if platform.machine() == 'x86_64':
+ aemu_command.extend(['-device', 'isa-debug-exit,iobase=0xf4,iosize=0x04'])
+
+ logging.info(' '.join(aemu_command))
+ return aemu_command
+
+ def _GetVulkanIcdFile(self):
+ return os.path.join(GetEmuRootForPlatform(self.EMULATOR_NAME), 'lib64',
+ 'vulkan', 'vk_swiftshader_icd.json')
+
+ def _SetEnv(self):
+ env = os.environ.copy()
+ aemu_logging_env = {
+ "ANDROID_EMU_VK_NO_CLEANUP": "1",
+ "ANDROID_EMUGL_LOG_PRINT": "1",
+ "ANDROID_EMUGL_VERBOSE": "1",
+ "VK_ICD_FILENAMES": self._GetVulkanIcdFile(),
+ "VK_LOADER_DEBUG": "info,error",
+ }
+ env.update(aemu_logging_env)
+ return env
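
The feature list that _BuildCommand() assembles above depends on whether KVM is
available. A condensed, standalone sketch of just that branch (BuildAemuFeatures
is a hypothetical name; the logic mirrors the method above):

    def BuildAemuFeatures(kvm_enabled, target_cpu):
      # VirtioInput enables touch input; RefCountPipe ensures Vulkan resource
      # cleanup when a guest process dies (see the comments in the file above).
      features = 'VirtioInput,RefCountPipe'
      if kvm_enabled:
        features += ',KVM,GLDirectMem,Vulkan'
      elif target_cpu != 'arm64':
        features += ',-GLDirectMem'
      return features

    assert BuildAemuFeatures(True, 'x64') == \
        'VirtioInput,RefCountPipe,KVM,GLDirectMem,Vulkan'
    assert BuildAemuFeatures(False, 'arm64') == 'VirtioInput,RefCountPipe'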
diff --git a/third_party/libwebrtc/build/fuchsia/binary_sizes.py b/third_party/libwebrtc/build/fuchsia/binary_sizes.py
new file mode 100755
index 0000000000..52d05999d0
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/binary_sizes.py
@@ -0,0 +1,556 @@
+#!/usr/bin/env python2
+#
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+'''Implements Chrome-Fuchsia package binary size checks.'''
+
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import collections
+import copy
+import json
+import logging
+import math
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+import uuid
+
+from common import GetHostToolPathFromPlatform, GetHostArchFromPlatform
+from common import SDK_ROOT, DIR_SOURCE_ROOT
+
+# Structure representing the compressed and uncompressed sizes for a Fuchsia
+# package.
+PackageSizes = collections.namedtuple('PackageSizes',
+ ['compressed', 'uncompressed'])
+
+# Structure representing a Fuchsia package blob and its compressed and
+# uncompressed sizes.
+Blob = collections.namedtuple(
+ 'Blob', ['name', 'hash', 'compressed', 'uncompressed', 'is_counted'])
+
+
+def CreateSizesExternalDiagnostic(sizes_guid):
+ """Creates a histogram external sizes diagnostic."""
+
+ benchmark_diagnostic = {
+ 'type': 'GenericSet',
+ 'guid': str(sizes_guid),
+ 'values': ['sizes'],
+ }
+
+ return benchmark_diagnostic
+
+
+def CreateSizesHistogramItem(name, size, sizes_guid):
+ """Create a performance dashboard histogram from the histogram template and
+ binary size data."""
+
+ # Chromium performance dashboard histogram containing binary size data.
+ histogram = {
+ 'name': name,
+ 'unit': 'sizeInBytes_smallerIsBetter',
+ 'diagnostics': {
+ 'benchmarks': str(sizes_guid),
+ },
+ 'sampleValues': [size],
+ 'running': [1, size, math.log(size), size, size, size, 0],
+ 'description': 'chrome-fuchsia package binary sizes',
+ 'summaryOptions': {
+ 'avg': True,
+ 'count': False,
+ 'max': False,
+ 'min': False,
+ 'std': False,
+ 'sum': False,
+ },
+ }
+
+ return histogram
+
+
+def CreateSizesHistogram(package_sizes):
+ """Create a performance dashboard histogram from binary size data."""
+
+ sizes_guid = uuid.uuid1()
+ histogram = [CreateSizesExternalDiagnostic(sizes_guid)]
+ for name, size in package_sizes.items():
+ histogram.append(
+ CreateSizesHistogramItem('%s_%s' % (name, 'compressed'),
+ size.compressed, sizes_guid))
+ histogram.append(
+ CreateSizesHistogramItem('%s_%s' % (name, 'uncompressed'),
+ size.uncompressed, sizes_guid))
+ return histogram
+
+
+def CreateTestResults(test_status, timestamp):
+ """Create test results data to write to JSON test results file.
+
+ The JSON data format is defined in
+ https://chromium.googlesource.com/chromium/src/+/main/docs/testing/json_test_results_format.md
+ """
+
+ results = {
+ 'tests': {},
+ 'interrupted': False,
+ 'path_delimiter': '.',
+ 'version': 3,
+ 'seconds_since_epoch': timestamp,
+ }
+
+ num_failures_by_type = {result: 0 for result in ['FAIL', 'PASS', 'CRASH']}
+ for metric in test_status:
+ actual_status = test_status[metric]
+ num_failures_by_type[actual_status] += 1
+ results['tests'][metric] = {
+ 'expected': 'PASS',
+ 'actual': actual_status,
+ }
+ results['num_failures_by_type'] = num_failures_by_type
+
+ return results
+
+
+def GetTestStatus(package_sizes, sizes_config, test_completed):
+ """Checks package sizes against size limits.
+
+ Returns a tuple of overall test pass/fail status and a dictionary mapping size
+ limit checks to PASS/FAIL/CRASH status."""
+
+ if not test_completed:
+ test_status = {'binary_sizes': 'CRASH'}
+ else:
+ test_status = {}
+ for metric, limit in sizes_config['size_limits'].items():
+ # Strip the "_compressed" suffix from |metric| if it exists.
+ match = re.match(r'(?P<name>\w+)_compressed', metric)
+ package_name = match.group('name') if match else metric
+ if package_name not in package_sizes:
+ raise Exception('package "%s" not in sizes "%s"' %
+ (package_name, str(package_sizes)))
+ if package_sizes[package_name].compressed <= limit:
+ test_status[metric] = 'PASS'
+ else:
+ test_status[metric] = 'FAIL'
+
+ all_tests_passed = all(status == 'PASS' for status in test_status.values())
+
+ return all_tests_passed, test_status
+
+
+def WriteSimpleTestResults(results_path, test_completed):
+ """Writes simplified test results file.
+
+ Used when test status is not available.
+ """
+
+ simple_isolated_script_output = {
+ 'valid': test_completed,
+ 'failures': [],
+ 'version': 'simplified',
+ }
+ with open(results_path, 'w') as output_file:
+ json.dump(simple_isolated_script_output, output_file)
+
+
+def WriteTestResults(results_path, test_completed, test_status, timestamp):
+ """Writes test results file containing test PASS/FAIL/CRASH statuses."""
+
+ if test_status:
+ test_results = CreateTestResults(test_status, timestamp)
+ with open(results_path, 'w') as results_file:
+ json.dump(test_results, results_file)
+ else:
+ WriteSimpleTestResults(results_path, test_completed)
+
+
+def WriteGerritPluginSizeData(output_path, package_sizes):
+ """Writes a package size dictionary in json format for the Gerrit binary
+ sizes plugin."""
+
+ with open(output_path, 'w') as sizes_file:
+ sizes_data = {name: size.compressed for name, size in package_sizes.items()}
+ json.dump(sizes_data, sizes_file)
+
+
+def WritePackageBlobsJson(json_path, package_blobs):
+ """Writes package blob information in human-readable JSON format.
+
+ The json data is an array of objects containing these keys:
+ 'path': string giving blob location in the local file system
+ 'merkle': the blob's Merkle hash
+    'bytes': the number of uncompressed bytes in the blob
+ 'size': the size of the compressed blob in bytes. A multiple of the blobfs
+ block size (8192)
+    'is_counted': true if the blob counts towards the package budget, or false
+ if not (for ICU blobs or blobs distributed in the SDK)"""
+
+ formatted_blob_stats_per_package = {}
+ for package in package_blobs:
+ blob_data = []
+ for blob_name in package_blobs[package]:
+ blob = package_blobs[package][blob_name]
+ blob_data.append({
+ 'path': blob.name,
+ 'merkle': blob.hash,
+ 'bytes': blob.uncompressed,
+ 'size': blob.compressed,
+ 'is_counted': blob.is_counted
+ })
+ formatted_blob_stats_per_package[package] = blob_data
+
+  with open(json_path, 'w') as json_file:
+ json.dump(formatted_blob_stats_per_package, json_file, indent=2)
+
+
+def GetCompressedSize(file_path):
+ """Measures file size after blobfs compression."""
+
+ compressor_path = GetHostToolPathFromPlatform('blobfs-compression')
+ try:
+ temp_dir = tempfile.mkdtemp()
+ compressed_file_path = os.path.join(temp_dir, os.path.basename(file_path))
+ compressor_cmd = [
+ compressor_path,
+ '--source_file=%s' % file_path,
+ '--compressed_file=%s' % compressed_file_path
+ ]
+ proc = subprocess.Popen(compressor_cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ proc.wait()
+ compressor_output = proc.stdout.read()
+ if proc.returncode != 0:
+ print(compressor_output, file=sys.stderr)
+ raise Exception('Error while running %s' % compressor_path)
+ finally:
+ shutil.rmtree(temp_dir)
+
+ # Match a compressed bytes total from blobfs-compression output like
+ # Wrote 360830 bytes (40% compression)
+ blobfs_compressed_bytes_re = r'Wrote\s+(?P<bytes>\d+)\s+bytes'
+
+ match = re.search(blobfs_compressed_bytes_re, compressor_output)
+ if not match:
+ print(compressor_output, file=sys.stderr)
+ raise Exception('Could not get compressed bytes for %s' % file_path)
+
+ # Round the compressed file size up to an integer number of blobfs blocks.
+ BLOBFS_BLOCK_SIZE = 8192 # Fuchsia's blobfs file system uses 8KiB blocks.
+ blob_bytes = int(match.group('bytes'))
+ return int(math.ceil(blob_bytes / BLOBFS_BLOCK_SIZE)) * BLOBFS_BLOCK_SIZE
+
+
+def ExtractFarFile(file_path, extract_dir):
+ """Extracts contents of a Fuchsia archive file to the specified directory."""
+
+ far_tool = GetHostToolPathFromPlatform('far')
+
+ if not os.path.isfile(far_tool):
+ raise Exception('Could not find FAR host tool "%s".' % far_tool)
+ if not os.path.isfile(file_path):
+ raise Exception('Could not find FAR file "%s".' % file_path)
+
+ subprocess.check_call([
+ far_tool, 'extract',
+ '--archive=%s' % file_path,
+ '--output=%s' % extract_dir
+ ])
+
+
+def GetBlobNameHashes(meta_dir):
+ """Returns mapping from Fuchsia pkgfs paths to blob hashes. The mapping is
+ read from the extracted meta.far archive contained in an extracted package
+ archive."""
+
+ blob_name_hashes = {}
+ contents_path = os.path.join(meta_dir, 'meta', 'contents')
+ with open(contents_path) as lines:
+ for line in lines:
+ (pkgfs_path, blob_hash) = line.strip().split('=')
+ blob_name_hashes[pkgfs_path] = blob_hash
+ return blob_name_hashes
+
+
+# Compiled regular expression matching strings like *.so, *.so.1, *.so.2, ...
+SO_FILENAME_REGEXP = re.compile(r'\.so(\.\d+)?$')
+
+
+def GetSdkModules():
+ """Finds shared objects (.so) under the Fuchsia SDK arch directory in dist or
+ lib subdirectories.
+
+ Returns a set of shared objects' filenames.
+ """
+
+ # Fuchsia SDK arch directory path (contains all shared object files).
+ sdk_arch_dir = os.path.join(SDK_ROOT, 'arch')
+ # Leaf subdirectories containing shared object files.
+ sdk_so_leaf_dirs = ['dist', 'lib']
+ # Match a shared object file name.
+ sdk_so_filename_re = r'\.so(\.\d+)?$'
+
+ lib_names = set()
+ for dirpath, _, file_names in os.walk(sdk_arch_dir):
+ if os.path.basename(dirpath) in sdk_so_leaf_dirs:
+ for name in file_names:
+ if SO_FILENAME_REGEXP.search(name):
+ lib_names.add(name)
+ return lib_names
+
+
+def FarBaseName(name):
+ _, name = os.path.split(name)
+ name = re.sub(r'\.far$', '', name)
+ return name
+
+
+def GetPackageMerkleRoot(far_file_path):
+ """Returns a package's Merkle digest."""
+
+ # The digest is the first word on the first line of the merkle tool's output.
+ merkle_tool = GetHostToolPathFromPlatform('merkleroot')
+ output = subprocess.check_output([merkle_tool, far_file_path])
+ return output.splitlines()[0].split()[0]
+
+
+def GetBlobs(far_file, build_out_dir):
+ """Calculates compressed and uncompressed blob sizes for specified FAR file.
+ Marks ICU blobs and blobs from SDK libraries as not counted."""
+
+ base_name = FarBaseName(far_file)
+
+ extract_dir = tempfile.mkdtemp()
+
+ # Extract files and blobs from the specified Fuchsia archive.
+ far_file_path = os.path.join(build_out_dir, far_file)
+ far_extract_dir = os.path.join(extract_dir, base_name)
+ ExtractFarFile(far_file_path, far_extract_dir)
+
+ # Extract the meta.far archive contained in the specified Fuchsia archive.
+ meta_far_file_path = os.path.join(far_extract_dir, 'meta.far')
+ meta_far_extract_dir = os.path.join(extract_dir, '%s_meta' % base_name)
+ ExtractFarFile(meta_far_file_path, meta_far_extract_dir)
+
+ # Map Linux filesystem blob names to blob hashes.
+ blob_name_hashes = GetBlobNameHashes(meta_far_extract_dir)
+
+ # "System" files whose sizes are not charged against component size budgets.
+ # Fuchsia SDK modules and the ICU icudtl.dat file sizes are not counted.
+ system_files = GetSdkModules() | set(['icudtl.dat'])
+
+ # Add the meta.far file blob.
+ blobs = {}
+ meta_name = 'meta.far'
+ meta_hash = GetPackageMerkleRoot(meta_far_file_path)
+ compressed = GetCompressedSize(meta_far_file_path)
+ uncompressed = os.path.getsize(meta_far_file_path)
+ blobs[meta_name] = Blob(meta_name, meta_hash, compressed, uncompressed, True)
+
+ # Add package blobs.
+ for blob_name, blob_hash in blob_name_hashes.items():
+ extracted_blob_path = os.path.join(far_extract_dir, blob_hash)
+ compressed = GetCompressedSize(extracted_blob_path)
+ uncompressed = os.path.getsize(extracted_blob_path)
+ is_counted = os.path.basename(blob_name) not in system_files
+ blobs[blob_name] = Blob(blob_name, blob_hash, compressed, uncompressed,
+ is_counted)
+
+ shutil.rmtree(extract_dir)
+
+ return blobs
+
+
+def GetPackageBlobs(far_files, build_out_dir):
+ """Returns dictionary mapping package names to blobs contained in the package.
+
+ Prints package blob size statistics."""
+
+ package_blobs = {}
+ for far_file in far_files:
+ package_name = FarBaseName(far_file)
+ if package_name in package_blobs:
+ raise Exception('Duplicate FAR file base name "%s".' % package_name)
+ package_blobs[package_name] = GetBlobs(far_file, build_out_dir)
+
+ # Print package blob sizes (does not count sharing).
+ for package_name in sorted(package_blobs.keys()):
+ print('Package blob sizes: %s' % package_name)
+ print('%-64s %12s %12s %s' %
+ ('blob hash', 'compressed', 'uncompressed', 'path'))
+ print('%s %s %s %s' % (64 * '-', 12 * '-', 12 * '-', 20 * '-'))
+ for blob_name in sorted(package_blobs[package_name].keys()):
+ blob = package_blobs[package_name][blob_name]
+ if blob.is_counted:
+ print('%64s %12d %12d %s' %
+ (blob.hash, blob.compressed, blob.uncompressed, blob.name))
+
+ return package_blobs
+
+
+def GetPackageSizes(package_blobs):
+ """Calculates compressed and uncompressed package sizes from blob sizes."""
+
+ # TODO(crbug.com/1126177): Use partial sizes for blobs shared by
+ # non Chrome-Fuchsia packages.
+
+ # Count number of packages sharing blobs (a count of 1 is not shared).
+ blob_counts = collections.defaultdict(int)
+ for package_name in package_blobs:
+ for blob_name in package_blobs[package_name]:
+ blob_counts[blob_name] += 1
+
+ # Package sizes are the sum of blob sizes divided by their share counts.
+ package_sizes = {}
+ for package_name in package_blobs:
+ compressed_total = 0
+ uncompressed_total = 0
+ for blob_name in package_blobs[package_name]:
+ blob = package_blobs[package_name][blob_name]
+ if blob.is_counted:
+ count = blob_counts[blob_name]
+ compressed_total += blob.compressed // count
+ uncompressed_total += blob.uncompressed // count
+ package_sizes[package_name] = PackageSizes(compressed_total,
+ uncompressed_total)
+
+ return package_sizes
+
+
+def GetBinarySizesAndBlobs(args, sizes_config):
+ """Get binary size data and contained blobs for packages specified in args.
+
+ If "total_size_name" is set, then computes a synthetic package size which is
+ the aggregated sizes across all packages."""
+
+ # Calculate compressed and uncompressed package sizes.
+ package_blobs = GetPackageBlobs(sizes_config['far_files'], args.build_out_dir)
+ package_sizes = GetPackageSizes(package_blobs)
+
+ # Optionally calculate total compressed and uncompressed package sizes.
+ if 'far_total_name' in sizes_config:
+ compressed = sum([a.compressed for a in package_sizes.values()])
+ uncompressed = sum([a.uncompressed for a in package_sizes.values()])
+ package_sizes[sizes_config['far_total_name']] = PackageSizes(
+ compressed, uncompressed)
+
+ for name, size in package_sizes.items():
+ print('%s: compressed size %d, uncompressed size %d' %
+ (name, size.compressed, size.uncompressed))
+
+ return package_sizes, package_blobs
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--build-out-dir',
+ '--output-directory',
+ type=os.path.realpath,
+ required=True,
+ help='Location of the build artifacts.',
+ )
+ parser.add_argument(
+ '--isolated-script-test-output',
+ type=os.path.realpath,
+ help='File to which simplified JSON results will be written.')
+ parser.add_argument(
+ '--size-plugin-json-path',
+ help='Optional path for json size data for the Gerrit binary size plugin',
+ )
+ parser.add_argument(
+ '--sizes-path',
+ default=os.path.join('fuchsia', 'release', 'size_tests',
+ 'fyi_sizes.json'),
+ help='path to package size limits json file. The path is relative to '
+ 'the workspace src directory')
+ parser.add_argument('--verbose',
+ '-v',
+ action='store_true',
+ help='Enable verbose output')
+ # Accepted to conform to the isolated script interface, but ignored.
+ parser.add_argument('--isolated-script-test-filter', help=argparse.SUPPRESS)
+ parser.add_argument('--isolated-script-test-perf-output',
+ help=argparse.SUPPRESS)
+ args = parser.parse_args()
+
+ if args.verbose:
+ print('Fuchsia binary sizes')
+ print('Working directory', os.getcwd())
+ print('Args:')
+ for var in vars(args):
+ print(' {}: {}'.format(var, getattr(args, var) or ''))
+
+ if not os.path.isdir(args.build_out_dir):
+ raise Exception('Could not find build output directory "%s".' %
+ args.build_out_dir)
+
+ with open(os.path.join(DIR_SOURCE_ROOT, args.sizes_path)) as sizes_file:
+ sizes_config = json.load(sizes_file)
+
+ if args.verbose:
+ print('Sizes Config:')
+ print(json.dumps(sizes_config))
+
+ for far_rel_path in sizes_config['far_files']:
+ far_abs_path = os.path.join(args.build_out_dir, far_rel_path)
+ if not os.path.isfile(far_abs_path):
+ raise Exception('Could not find FAR file "%s".' % far_abs_path)
+
+ test_name = 'sizes'
+ timestamp = time.time()
+ test_completed = False
+ all_tests_passed = False
+ test_status = {}
+ package_sizes = {}
+ package_blobs = {}
+ sizes_histogram = []
+
+ results_directory = None
+ if args.isolated_script_test_output:
+ results_directory = os.path.join(
+ os.path.dirname(args.isolated_script_test_output), test_name)
+ if not os.path.exists(results_directory):
+ os.makedirs(results_directory)
+
+ try:
+ package_sizes, package_blobs = GetBinarySizesAndBlobs(args, sizes_config)
+ sizes_histogram = CreateSizesHistogram(package_sizes)
+ test_completed = True
+ except:
+ _, value, trace = sys.exc_info()
+ traceback.print_tb(trace)
+ print(str(value))
+ finally:
+ all_tests_passed, test_status = GetTestStatus(package_sizes, sizes_config,
+ test_completed)
+
+ if results_directory:
+ WriteTestResults(os.path.join(results_directory, 'test_results.json'),
+ test_completed, test_status, timestamp)
+ with open(os.path.join(results_directory, 'perf_results.json'), 'w') as f:
+ json.dump(sizes_histogram, f)
+ WritePackageBlobsJson(
+ os.path.join(results_directory, 'package_blobs.json'), package_blobs)
+
+ if args.isolated_script_test_output:
+ WriteTestResults(args.isolated_script_test_output, test_completed,
+ test_status, timestamp)
+
+ if args.size_plugin_json_path:
+ WriteGerritPluginSizeData(args.size_plugin_json_path, package_sizes)
+
+ return 0 if all_tests_passed else 1
+
+
+if __name__ == '__main__':
+ sys.exit(main())
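
Two size conventions in binary_sizes.py are easy to miss: GetCompressedSize()
rounds every compressed blob up to whole 8192-byte blobfs blocks, and
GetPackageSizes() charges a blob shared by N packages only 1/N of its size to
each package. A standalone Python 3 sketch of both rules (the helper name is
illustrative):

    import math

    BLOBFS_BLOCK_SIZE = 8192  # blobfs allocates space in 8 KiB blocks.

    def RoundUpToBlobfsBlocks(blob_bytes):
      return int(math.ceil(blob_bytes / BLOBFS_BLOCK_SIZE)) * BLOBFS_BLOCK_SIZE

    # The "Wrote 360830 bytes" example from the comments above occupies
    # 45 blocks, i.e. 368640 bytes, on a blobfs volume.
    assert RoundUpToBlobfsBlocks(360830) == 45 * BLOBFS_BLOCK_SIZE

    # A 90000-byte blob shared by three packages charges 30000 bytes to each,
    # mirroring the share-count division in GetPackageSizes().
    assert 90000 // 3 == 30000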
diff --git a/third_party/libwebrtc/build/fuchsia/binary_sizes_test.py b/third_party/libwebrtc/build/fuchsia/binary_sizes_test.py
new file mode 100755
index 0000000000..962e4c9123
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/binary_sizes_test.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import copy
+import math
+import os
+import shutil
+import subprocess
+import tempfile
+import time
+import unittest
+
+import binary_sizes
+
+from common import DIR_SOURCE_ROOT
+
+
+class TestBinarySizes(unittest.TestCase):
+ tmpdir = None
+
+ @classmethod
+ def setUpClass(cls):
+ cls.tmpdir = tempfile.mkdtemp()
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.tmpdir)
+
+ # TODO(crbug.com/1145648): Add tests covering FAR file input and histogram
+ # output.
+
+ def testCommitFromBuildProperty(self):
+ commit_position = binary_sizes.CommitPositionFromBuildProperty(
+ 'refs/heads/master@{#819458}')
+ self.assertEqual(commit_position, 819458)
+
+ def testCompressedSize(self):
+ """Verifies that the compressed file size can be extracted from the
+ blobfs-compression output."""
+
+    uncompressed_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
+    for _ in range(200):
+ uncompressed_file.write(
+ 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. '
+ 'Sed eleifend')
+ uncompressed_file.close()
+ compressed_path = uncompressed_file.name + '.compressed'
+ compressor_path = os.path.join(DIR_SOURCE_ROOT, 'third_party',
+ 'fuchsia-sdk', 'sdk', 'tools', 'x64',
+ 'blobfs-compression')
+ subprocess.call([compressor_path, uncompressed_file.name, compressed_path])
+ self.assertEqual(binary_sizes.CompressedSize(uncompressed_file.name),
+ os.path.getsize(compressed_path))
+ os.remove(uncompressed_file.name)
+ os.remove(compressed_path)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/fuchsia/boot_data.py b/third_party/libwebrtc/build/fuchsia/boot_data.py
new file mode 100644
index 0000000000..f59d1974c8
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/boot_data.py
@@ -0,0 +1,120 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions used to provision Fuchsia boot images."""
+
+import common
+import logging
+import os
+import subprocess
+import tempfile
+import time
+import uuid
+
+_SSH_CONFIG_TEMPLATE = """
+Host *
+ CheckHostIP no
+ StrictHostKeyChecking no
+ ForwardAgent no
+ ForwardX11 no
+ User fuchsia
+ IdentitiesOnly yes
+ IdentityFile {identity}
+ ServerAliveInterval 2
+ ServerAliveCountMax 5
+ ControlMaster auto
+ ControlPersist 1m
+ ControlPath /tmp/ssh-%r@%h:%p
+ ConnectTimeout 5
+ """
+
+# Specifies boot files intended for use by an emulator.
+TARGET_TYPE_QEMU = 'qemu'
+
+# Specifies boot files intended for use by anything (incl. physical devices).
+TARGET_TYPE_GENERIC = 'generic'
+
+# Defaults used by Fuchsia SDK
+_SSH_DIR = os.path.expanduser('~/.ssh')
+_SSH_CONFIG_DIR = os.path.expanduser('~/.fuchsia')
+
+
+def _GetPubKeyPath():
+ """Returns a path to the generated SSH public key."""
+
+ return os.path.join(_SSH_DIR, 'fuchsia_ed25519.pub')
+
+
+def ProvisionSSH():
+ """Generates a key pair and config file for SSH."""
+
+ fuchsia_authorized_keys_path = os.path.join(_SSH_DIR,
+ 'fuchsia_authorized_keys')
+ id_key_path = os.path.join(_SSH_DIR, 'fuchsia_ed25519')
+ _GetPubKeyPath()
+
+ logging.debug('Generating SSH credentials.')
+
+ if not os.path.isfile(id_key_path):
+ subprocess.check_output([
+ 'ssh-keygen', '-P', '', '-t', 'ed25519', '-f', id_key_path, '-C',
+ 'generated by FEMU Start testing step'
+ ])
+
+ if not os.path.isfile(fuchsia_authorized_keys_path):
+ result = subprocess.check_output(['ssh-keygen', '-y', '-f', id_key_path])
+ with open(fuchsia_authorized_keys_path, 'w') as out:
+ out.write(result.decode('utf-8'))
+
+ if not os.path.exists(_SSH_CONFIG_DIR):
+ os.mkdir(_SSH_CONFIG_DIR)
+ elif not os.path.isdir(_SSH_CONFIG_DIR):
+ raise Exception(_SSH_CONFIG_DIR + ' is not a directory.')
+ ssh_config_path = os.path.join(_SSH_CONFIG_DIR, 'ssh_config')
+ with open(ssh_config_path, "w") as ssh_config:
+ ssh_config.write(
+ _SSH_CONFIG_TEMPLATE.format(identity=id_key_path))
+
+
+def GetTargetFile(filename, target_arch, target_type):
+ """Computes a path to |filename| in the Fuchsia boot image directory specific
+ to |target_type| and |target_arch|."""
+
+ assert target_type == TARGET_TYPE_QEMU or target_type == TARGET_TYPE_GENERIC
+
+ return os.path.join(common.IMAGES_ROOT, target_arch, target_type, filename)
+
+
+def GetSSHConfigPath():
+ return os.path.join(_SSH_CONFIG_DIR, 'ssh_config')
+
+
+def GetBootImage(output_dir, target_arch, target_type):
+  """Gets a path to the Zircon boot image, with the SSH client public key
+ added."""
+ ProvisionSSH()
+ pubkey_path = _GetPubKeyPath()
+ zbi_tool = common.GetHostToolPathFromPlatform('zbi')
+ image_source_path = GetTargetFile('zircon-a.zbi', target_arch, target_type)
+ image_dest_path = os.path.join(output_dir, 'gen', 'fuchsia-with-keys.zbi')
+
+ cmd = [ zbi_tool, '-o', image_dest_path, image_source_path,
+ '-e', 'data/ssh/authorized_keys=' + pubkey_path ]
+ subprocess.check_call(cmd)
+
+ return image_dest_path
+
+
+def GetKernelArgs(output_dir):
+ return ['devmgr.epoch=%d' % time.time()]
+
+
+def AssertBootImagesExist(arch, platform):
+ assert os.path.exists(GetTargetFile('zircon-a.zbi', arch, platform)), \
+ 'This checkout is missing the files necessary for\n' \
+ 'booting this configuration of Fuchsia.\n' \
+ 'To check out the files, add this entry to the "custom_vars"\n' \
+ 'section of your .gclient file:\n\n' \
+ ' "checkout_fuchsia_boot_images": "%s.%s"\n\n' % \
+ (platform, arch)
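
The artifacts that ProvisionSSH() writes are consumed by handing the generated
config to ssh via -F. A hedged usage sketch (the device address is a
placeholder, not a real target):

    import subprocess

    import boot_data

    boot_data.ProvisionSSH()
    # '192.168.42.64' stands in for a real device address; the generated
    # config already selects the 'fuchsia' user and the generated identity.
    subprocess.check_call(
        ['ssh', '-F', boot_data.GetSSHConfigPath(), '192.168.42.64', 'echo ok'])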
diff --git a/third_party/libwebrtc/build/fuchsia/boot_data_test.py b/third_party/libwebrtc/build/fuchsia/boot_data_test.py
new file mode 100755
index 0000000000..aa652eb198
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/boot_data_test.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+# Copyright 2021 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import boot_data
+import os
+import unittest
+from boot_data import _SSH_CONFIG_DIR, _SSH_DIR
+
+
+class TestBootData(unittest.TestCase):
+ def testProvisionSSHGeneratesFiles(self):
+ fuchsia_authorized_keys_path = os.path.join(_SSH_DIR,
+ 'fuchsia_authorized_keys')
+ fuchsia_id_key_path = os.path.join(_SSH_DIR, 'fuchsia_ed25519')
+ pub_keys_path = os.path.join(_SSH_DIR, 'fuchsia_ed25519.pub')
+ ssh_config_path = os.path.join(_SSH_CONFIG_DIR, 'ssh_config')
+    # Record which keys and config files already exist before provisioning.
+    # Files that did not exist beforehand are deleted afterwards, just
+    # before asserting that ProvisionSSH created them.
+ authorized_key_before = os.path.exists(fuchsia_authorized_keys_path)
+ id_keys_before = os.path.exists(fuchsia_id_key_path)
+ pub_keys_before = os.path.exists(pub_keys_path)
+ ssh_config_before = os.path.exists(ssh_config_path)
+ ssh_dir_before = os.path.exists(_SSH_CONFIG_DIR)
+ boot_data.ProvisionSSH()
+ authorized_key_after = os.path.exists(fuchsia_authorized_keys_path)
+ id_keys_after = os.path.exists(fuchsia_id_key_path)
+ ssh_config_after = os.path.exists(ssh_config_path)
+ if not authorized_key_before:
+ os.remove(fuchsia_authorized_keys_path)
+ if not id_keys_before:
+ os.remove(fuchsia_id_key_path)
+ if not pub_keys_before:
+ os.remove(pub_keys_path)
+ if not ssh_config_before:
+ os.remove(ssh_config_path)
+ if not ssh_dir_before:
+ os.rmdir(_SSH_CONFIG_DIR)
+    self.assertTrue(authorized_key_after)
+    self.assertTrue(id_keys_after)
+    self.assertTrue(ssh_config_after)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/fuchsia/common.py b/third_party/libwebrtc/build/fuchsia/common.py
new file mode 100644
index 0000000000..99ced81ee9
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/common.py
@@ -0,0 +1,140 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import platform
+import signal
+import socket
+import subprocess
+import sys
+import time
+import threading
+
+DIR_SOURCE_ROOT = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
+IMAGES_ROOT = os.path.join(
+ DIR_SOURCE_ROOT, 'third_party', 'fuchsia-sdk', 'images')
+SDK_ROOT = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'fuchsia-sdk', 'sdk')
+
+def EnsurePathExists(path):
+ """Checks that the file |path| exists on the filesystem and returns the path
+ if it does, raising an exception otherwise."""
+
+ if not os.path.exists(path):
+ raise IOError('Missing file: ' + path)
+
+ return path
+
+def GetHostOsFromPlatform():
+ host_platform = sys.platform
+ if host_platform.startswith('linux'):
+ return 'linux'
+ elif host_platform.startswith('darwin'):
+ return 'mac'
+ raise Exception('Unsupported host platform: %s' % host_platform)
+
+def GetHostArchFromPlatform():
+ host_arch = platform.machine()
+ if host_arch == 'x86_64':
+ return 'x64'
+ elif host_arch == 'aarch64':
+ return 'arm64'
+ raise Exception('Unsupported host architecture: %s' % host_arch)
+
+def GetHostToolPathFromPlatform(tool):
+ host_arch = platform.machine()
+ return os.path.join(SDK_ROOT, 'tools', GetHostArchFromPlatform(), tool)
+
+
+def GetEmuRootForPlatform(emulator):
+ return os.path.join(
+ DIR_SOURCE_ROOT, 'third_party', '{0}-{1}-{2}'.format(
+ emulator, GetHostOsFromPlatform(), GetHostArchFromPlatform()))
+
+
+def ConnectPortForwardingTask(target, local_port, remote_port = 0):
+ """Establishes a port forwarding SSH task to a localhost TCP endpoint hosted
+ at port |local_port|. Blocks until port forwarding is established.
+
+ Returns the remote port number."""
+
+ forwarding_flags = ['-O', 'forward', # Send SSH mux control signal.
+ '-R', '%d:localhost:%d' % (remote_port, local_port),
+ '-v', # Get forwarded port info from stderr.
+ '-NT'] # Don't execute command; don't allocate terminal.
+
+ if remote_port != 0:
+ # Forward to a known remote port.
+ task = target.RunCommand([], ssh_args=forwarding_flags)
+ if task.returncode != 0:
+ raise Exception('Could not establish a port forwarding connection.')
+ return
+
+ task = target.RunCommandPiped([],
+ ssh_args=forwarding_flags,
+ stdout=subprocess.PIPE,
+ stderr=open('/dev/null'))
+ output = task.stdout.readlines()
+ task.wait()
+ if task.returncode != 0:
+ raise Exception('Got an error code when requesting port forwarding: %d' %
+ task.returncode)
+
+ parsed_port = int(output[0].strip())
+ logging.debug('Port forwarding established (local=%d, device=%d)' %
+ (local_port, parsed_port))
+ return parsed_port
+
+
+def GetAvailableTcpPort():
+ """Finds a (probably) open port by opening and closing a listen socket."""
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.bind(("", 0))
+ port = sock.getsockname()[1]
+ sock.close()
+ return port
+
+
+def SubprocessCallWithTimeout(command, silent=False, timeout_secs=None):
+ """Helper function for running a command.
+
+ Args:
+ command: The command to run.
+ silent: If true, stdout and stderr of the command will not be printed.
+ timeout_secs: Maximum amount of time allowed for the command to finish.
+
+ Returns:
+ A tuple of (return code, stdout, stderr) of the command. Raises
+ an exception if the subprocess times out.
+ """
+
+ if silent:
+ devnull = open(os.devnull, 'w')
+ process = subprocess.Popen(command, stdout=devnull, stderr=devnull)
+ else:
+ process = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ timeout_timer = None
+ if timeout_secs:
+
+ def interrupt_process():
+ process.send_signal(signal.SIGKILL)
+
+ timeout_timer = threading.Timer(timeout_secs, interrupt_process)
+
+ # Ensure that keyboard interrupts are handled properly (crbug/1198113).
+ timeout_timer.daemon = True
+
+ timeout_timer.start()
+
+ out, err = process.communicate()
+ if timeout_timer:
+ timeout_timer.cancel()
+
+ if process.returncode == -9:
+ raise Exception('Timeout when executing \"%s\".' % ' '.join(command))
+
+ return process.returncode, out, err
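
A short usage sketch for two of the helpers above (the echoed command is an
arbitrary stand-in):

    import common

    # Reserve a (probably) free TCP port, e.g. for a local test server.
    port = common.GetAvailableTcpPort()

    # Run a command, capturing output and killing it after five seconds if it
    # has not finished; returns (returncode, stdout, stderr).
    returncode, out, err = common.SubprocessCallWithTimeout(
        ['echo', 'hello'], timeout_secs=5)
    assert returncode == 0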
diff --git a/third_party/libwebrtc/build/fuchsia/common_args.py b/third_party/libwebrtc/build/fuchsia/common_args.py
new file mode 100644
index 0000000000..691fad67e6
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/common_args.py
@@ -0,0 +1,173 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import importlib
+import logging
+import os
+import sys
+
+from common import GetHostArchFromPlatform
+
+BUILTIN_TARGET_NAMES = ['aemu', 'qemu', 'device', 'fvdl']
+
+
+def _AddTargetSpecificationArgs(arg_parser):
+ """Returns a parser that handles the target type used for the test run."""
+
+ device_args = arg_parser.add_argument_group(
+ 'target',
+ 'Arguments specifying the Fuchsia target type. To see a list of '
+ 'arguments available for a specific target type, specify the desired '
+ 'target to use and add the --help flag.')
+ device_args.add_argument('--target-cpu',
+ default=GetHostArchFromPlatform(),
+ help='GN target_cpu setting for the build. Defaults '
+                           'to the same architecture as the host CPU.')
+ device_args.add_argument('--device',
+ default=None,
+ choices=BUILTIN_TARGET_NAMES + ['custom'],
+ help='Choose to run on aemu|qemu|device. '
+ 'By default, Fuchsia will run on AEMU on x64 '
+ 'hosts and QEMU on arm64 hosts. Alternatively, '
+ 'setting to custom will require specifying the '
+ 'subclass of Target class used via the '
+ '--custom-device-target flag.')
+ device_args.add_argument('-d',
+ action='store_const',
+ dest='device',
+ const='device',
+ help='Run on device instead of emulator.')
+ device_args.add_argument('--custom-device-target',
+ default=None,
+ help='Specify path to file that contains the '
+ 'subclass of Target that will be used. Only '
+ 'needed if device specific operations such as '
+                           'paving are required.')
+
+
+def _GetPathToBuiltinTarget(target_name):
+ return '%s_target' % target_name
+
+
+def _LoadTargetClass(target_path):
+ try:
+ loaded_target = importlib.import_module(target_path)
+ except ImportError:
+ logging.error(
+ 'Cannot import from %s. Make sure that --custom-device-target '
+ 'is pointing to a file containing a target '
+ 'module.' % target_path)
+ raise
+ return loaded_target.GetTargetType()
+
+
+def AddCommonArgs(arg_parser):
+ """Adds command line arguments to |arg_parser| for options which are shared
+ across test and executable target types.
+
+ Args:
+ arg_parser: an ArgumentParser object."""
+
+ common_args = arg_parser.add_argument_group('common', 'Common arguments')
+ common_args.add_argument('--runner-logs-dir',
+ help='Directory to write test runner logs to.')
+ common_args.add_argument('--exclude-system-logs',
+ action='store_false',
+ dest='include_system_logs',
+ help='Do not show system log data.')
+ common_args.add_argument('--verbose',
+ '-v',
+ default=False,
+ action='store_true',
+ help='Enable debug-level logging.')
+ common_args.add_argument(
+ '--out-dir',
+ type=os.path.realpath,
+ help=('Path to the directory in which build files are located. '
+ 'Defaults to current directory.'))
+ common_args.add_argument('--system-log-file',
+ help='File to write system logs to. Specify '
+ '\'-\' to log to stdout.')
+ common_args.add_argument('--fuchsia-out-dir',
+ help='Path to a Fuchsia build output directory. '
+ 'Setting the GN arg '
+ '"default_fuchsia_build_dir_for_installation" '
+ 'will cause it to be passed here.')
+
+ package_args = arg_parser.add_argument_group('package', 'Fuchsia Packages')
+ package_args.add_argument(
+ '--package',
+ action='append',
+ help='Paths of the packages to install, including '
+ 'all dependencies.')
+ package_args.add_argument(
+ '--package-name',
+      help='Name of the package to execute, defined in package metadata.')
+
+ emu_args = arg_parser.add_argument_group('emu', 'General emulator arguments')
+ emu_args.add_argument('--cpu-cores',
+ type=int,
+ default=4,
+ help='Sets the number of CPU cores to provide.')
+ emu_args.add_argument('--ram-size-mb',
+ type=int,
+ default=8192,
+                        help='Sets the emulated RAM size (MB).')
+ emu_args.add_argument('--allow-no-kvm',
+ action='store_false',
+ dest='require_kvm',
+ default=True,
+ help='Do not require KVM acceleration for '
+ 'emulators.')
+
+
+# Register the arguments for all known target types and the optional custom
+# target type (specified on the commandline).
+def AddTargetSpecificArgs(arg_parser):
+ # Parse the minimal set of arguments to determine if custom targets need to
+ # be loaded so that their arguments can be registered.
+ target_spec_parser = argparse.ArgumentParser(add_help=False)
+ _AddTargetSpecificationArgs(target_spec_parser)
+ target_spec_args, _ = target_spec_parser.parse_known_args()
+ _AddTargetSpecificationArgs(arg_parser)
+
+ for target in BUILTIN_TARGET_NAMES:
+ _LoadTargetClass(_GetPathToBuiltinTarget(target)).RegisterArgs(arg_parser)
+ if target_spec_args.custom_device_target:
+ _LoadTargetClass(
+ target_spec_args.custom_device_target).RegisterArgs(arg_parser)
+
+
+def ConfigureLogging(args):
+ """Configures the logging level based on command line |args|."""
+
+ logging.basicConfig(level=(logging.DEBUG if args.verbose else logging.INFO),
+ format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
+
+ # The test server spawner is too noisy with INFO level logging, so tweak
+ # its verbosity a bit by adjusting its logging level.
+ logging.getLogger('chrome_test_server_spawner').setLevel(
+ logging.DEBUG if args.verbose else logging.WARN)
+
+ # Verbose SCP output can be useful at times but oftentimes is just too noisy.
+ # Only enable it if -vv is passed.
+ logging.getLogger('ssh').setLevel(
+ logging.DEBUG if args.verbose else logging.WARN)
+
+
+def GetDeploymentTargetForArgs(args):
+ """Constructs a deployment target object using command line arguments.
+ If needed, an additional_args dict can be used to supplement the
+ command line arguments."""
+
+ if args.device == 'custom':
+ return _LoadTargetClass(args.custom_device_target).CreateFromArgs(args)
+
+ if args.device:
+ device = args.device
+ else:
+ device = 'aemu' if args.target_cpu == 'x64' else 'qemu'
+
+ return _LoadTargetClass(_GetPathToBuiltinTarget(device)).CreateFromArgs(args)
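
Tying the helpers above together, a runner script built on common_args.py
typically looks like the following sketch (the real flow lives in
test_runner.py, later in this diff):

    import argparse

    import common_args

    parser = argparse.ArgumentParser()
    common_args.AddCommonArgs(parser)
    common_args.AddTargetSpecificArgs(parser)
    args = parser.parse_args()
    common_args.ConfigureLogging(args)

    # Resolves to an AemuTarget, QemuTarget, FvdlTarget or DeviceTarget.
    with common_args.GetDeploymentTargetForArgs(args) as target:
      target.Start()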
diff --git a/third_party/libwebrtc/build/fuchsia/deploy_to_pkg_repo.py b/third_party/libwebrtc/build/fuchsia/deploy_to_pkg_repo.py
new file mode 100755
index 0000000000..57635ee051
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/deploy_to_pkg_repo.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+#
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Deploys Fuchsia packages to a package repository in a Fuchsia
+build output directory."""
+
+import pkg_repo
+import argparse
+import os
+import sys
+
+
+# Populates the GDB-standard symbol directory structure |build_ids_path| with
+# the files and build IDs specified in |ids_txt_path|.
+def InstallSymbols(ids_txt_path, build_ids_path):
+ for entry in open(ids_txt_path, 'r'):
+ build_id, binary_relpath = entry.strip().split(' ')
+ binary_abspath = os.path.abspath(
+ os.path.join(os.path.dirname(ids_txt_path), binary_relpath))
+ symbol_dir = os.path.join(build_ids_path, build_id[:2])
+ symbol_file = os.path.join(symbol_dir, build_id[2:] + '.debug')
+
+ if not os.path.exists(symbol_dir):
+ os.makedirs(symbol_dir)
+
+ if os.path.islink(symbol_file) or os.path.exists(symbol_file):
+ # Clobber the existing entry to ensure that the symlink's target is
+ # up to date.
+ os.unlink(symbol_file)
+
+ os.symlink(os.path.relpath(binary_abspath, symbol_dir), symbol_file)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--package',
+ action='append',
+ required=True,
+ help='Paths to packages to install.')
+ parser.add_argument('--fuchsia-out-dir',
+ required=True,
+ help='Path to a Fuchsia build output directory. '
+ 'Setting the GN arg '
+ '"default_fuchsia_build_dir_for_installation" '
+ 'will cause it to be passed here.')
+ args = parser.parse_args()
+ assert args.package
+
+ fuchsia_out_dir = os.path.expanduser(args.fuchsia_out_dir)
+ repo = pkg_repo.ExternalPkgRepo(os.path.join(fuchsia_out_dir, 'amber-files'))
+ print('Installing packages and symbols in package repo %s...' %
+ repo.GetPath())
+
+ for package in args.package:
+ repo.PublishPackage(package)
+ InstallSymbols(os.path.join(os.path.dirname(package), 'ids.txt'),
+ os.path.join(fuchsia_out_dir, '.build-id'))
+
+ print('Installation success.')
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
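
InstallSymbols() lays symbols out in the GDB-standard .build-id scheme: the
first two hex digits of a build ID name a subdirectory, and the remaining
digits name a .debug symlink inside it. A worked example with hypothetical
values:

    # One ids.txt entry (hypothetical build ID and binary path):
    #   01a2b3c4d5e6f708 ./exe.unstripped/my_binary
    # produces the symlink:
    #   <build_ids_path>/01/a2b3c4d5e6f708.debug -> .../exe.unstripped/my_binary
    build_id = '01a2b3c4d5e6f708'
    assert (build_id[:2], build_id[2:] + '.debug') == (
        '01', 'a2b3c4d5e6f708.debug')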
diff --git a/third_party/libwebrtc/build/fuchsia/device_target.py b/third_party/libwebrtc/build/fuchsia/device_target.py
new file mode 100644
index 0000000000..6905a623dd
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/device_target.py
@@ -0,0 +1,266 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Implements commands for running and interacting with Fuchsia on devices."""
+
+import boot_data
+import logging
+import os
+import pkg_repo
+import re
+import subprocess
+import target
+import time
+
+from common import EnsurePathExists, GetHostToolPathFromPlatform
+
+# The maximum times to attempt mDNS resolution when connecting to a freshly
+# booted Fuchsia instance before aborting.
+BOOT_DISCOVERY_ATTEMPTS = 30
+
+# Number of failed connection attempts before redirecting system logs to stdout.
+CONNECT_RETRY_COUNT_BEFORE_LOGGING = 10
+
+# Number of seconds between each device discovery.
+BOOT_DISCOVERY_DELAY_SECS = 4
+
+# Time between a reboot command is issued and when connection attempts from the
+# host begin.
+_REBOOT_SLEEP_PERIOD = 20
+
+
+def GetTargetType():
+ return DeviceTarget
+
+
+class DeviceTarget(target.Target):
+ """Prepares a device to be used as a deployment target. Depending on the
+  command line parameters, it automatically handles a number of preparatory
+ steps relating to address resolution.
+
+ If |_node_name| is unset:
+ If there is one running device, use it for deployment and execution.
+
+    If there is more than one running device, then abort and instruct the
+    user to re-run the command with |_node_name| set.
+
+ If |_node_name| is set:
+ If there is a running device with a matching nodename, then it is used
+ for deployment and execution.
+
+ If |_host| is set:
+ Deploy to a device at the host IP address as-is."""
+
+ def __init__(self,
+ out_dir,
+ target_cpu,
+ host=None,
+ node_name=None,
+ port=None,
+ ssh_config=None,
+ fuchsia_out_dir=None,
+ os_check='update',
+ system_log_file=None):
+ """out_dir: The directory which will contain the files that are
+ generated to support the deployment.
+ target_cpu: The CPU architecture of the deployment target. Can be
+ "x64" or "arm64".
+ host: The address of the deployment target device.
+ node_name: The node name of the deployment target device.
+ port: The port of the SSH service on the deployment target device.
+ ssh_config: The path to SSH configuration data.
+ fuchsia_out_dir: The path to a Fuchsia build output directory, for
+ deployments to devices paved with local Fuchsia builds.
+ os_check: If 'check', the target's SDK version must match.
+ If 'update', the target will be repaved if the SDK versions
+ mismatch.
+ If 'ignore', the target's SDK version is ignored."""
+
+ super(DeviceTarget, self).__init__(out_dir, target_cpu)
+
+ self._system_log_file = system_log_file
+ self._host = host
+ self._port = port
+ self._fuchsia_out_dir = None
+ self._node_name = node_name
+ self._os_check = os_check
+ self._pkg_repo = None
+
+ if self._host and self._node_name:
+ raise Exception('Only one of "--host" or "--name" can be specified.')
+
+ if fuchsia_out_dir:
+ if ssh_config:
+ raise Exception('Only one of "--fuchsia-out-dir" or "--ssh_config" can '
+ 'be specified.')
+
+ self._fuchsia_out_dir = os.path.expanduser(fuchsia_out_dir)
+ # Use SSH keys from the Fuchsia output directory.
+ self._ssh_config_path = os.path.join(self._fuchsia_out_dir, 'ssh-keys',
+ 'ssh_config')
+ self._os_check = 'ignore'
+
+ elif ssh_config:
+ # Use the SSH config provided via the commandline.
+ self._ssh_config_path = os.path.expanduser(ssh_config)
+
+ else:
+ # Default to using an automatically generated SSH config and keys.
+ boot_data.ProvisionSSH()
+ self._ssh_config_path = boot_data.GetSSHConfigPath()
+
+ @staticmethod
+ def CreateFromArgs(args):
+ return DeviceTarget(args.out_dir, args.target_cpu, args.host,
+ args.node_name, args.port, args.ssh_config,
+ args.fuchsia_out_dir, args.os_check,
+ args.system_log_file)
+
+ @staticmethod
+ def RegisterArgs(arg_parser):
+ device_args = arg_parser.add_argument_group(
+ 'device', 'External device deployment arguments')
+ device_args.add_argument('--host',
+ help='The IP of the target device. Optional.')
+ device_args.add_argument('--node-name',
+ help='The node-name of the device to boot or '
+ 'deploy to. Optional, will use the first '
+ 'discovered device if omitted.')
+ device_args.add_argument('--port',
+ '-p',
+ type=int,
+ default=None,
+ help='The port of the SSH service running on the '
+ 'device. Optional.')
+ device_args.add_argument('--ssh-config',
+ '-F',
+ help='The path to the SSH configuration used for '
+ 'connecting to the target device.')
+ device_args.add_argument(
+ '--os-check',
+ choices=['check', 'update', 'ignore'],
+ default='update',
+ help="Sets the OS version enforcement policy. If 'check', then the "
+ "deployment process will halt if the target\'s version doesn\'t "
+ "match. If 'update', then the target device will automatically "
+ "be repaved. If 'ignore', then the OS version won\'t be checked.")
+
+ def _ProvisionDeviceIfNecessary(self):
+ if self._Discover():
+ self._WaitUntilReady()
+ else:
+ raise Exception('Could not find device. If the device is connected '
+ 'to the host remotely, make sure that --host flag is '
+ 'set and that remote serving is set up.')
+
+ def _Discover(self):
+ """Queries mDNS for the IP address of a booted Fuchsia instance whose name
+ matches |_node_name| on the local area network. If |_node_name| isn't
+ specified, and there is only one device on the network, then returns the
+    IP address of that device.
+
+    Sets |_host| and returns True if the device was found, or returns False
+    if it couldn't be found."""
+
+ dev_finder_path = GetHostToolPathFromPlatform('device-finder')
+
+ if self._node_name:
+ command = [
+ dev_finder_path,
+ 'resolve',
+ '-device-limit',
+ '1', # Exit early as soon as a host is found.
+ self._node_name
+ ]
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'))
+ else:
+ proc = self.RunFFXCommand(['target', 'list', '-f', 'simple'],
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'))
+
+ output = set(proc.communicate()[0].strip().split('\n'))
+ if proc.returncode != 0:
+ return False
+
+ if self._node_name:
+ # Handle the result of "device-finder resolve".
+ self._host = output.pop().strip()
+ else:
+ name_host_pairs = [x.strip().split(' ') for x in output]
+
+ if len(name_host_pairs) > 1:
+ logging.info('More than one device was discovered on the network. '
+ 'Use --node-name <name> to specify the device to use.')
+ logging.info('List of devices:')
+ logging.info(output)
+ raise Exception('Ambiguous target device specification.')
+ assert len(name_host_pairs) == 1
+ # Check if device has both address and name.
+ if len(name_host_pairs[0]) < 2:
+ return False
+ self._host, self._node_name = name_host_pairs[0]
+
+ logging.info('Found device "%s" at address %s.' % (self._node_name,
+ self._host))
+
+ return True
+
+ def Start(self):
+ if self._host:
+ self._WaitUntilReady()
+ else:
+ self._ProvisionDeviceIfNecessary()
+
+ def GetPkgRepo(self):
+ if not self._pkg_repo:
+ if self._fuchsia_out_dir:
+ # Deploy to an already-booted device running a local Fuchsia build.
+ self._pkg_repo = pkg_repo.ExternalPkgRepo(
+ os.path.join(self._fuchsia_out_dir, 'amber-files'))
+ else:
+ # Create an ephemeral package repository, then start both "pm serve" as
+ # well as the bootserver.
+ self._pkg_repo = pkg_repo.ManagedPkgRepo(self)
+
+ return self._pkg_repo
+
+ def _ParseNodename(self, output):
+ # Parse the nodename from bootserver stdout.
+ m = re.search(r'.*Proceeding with nodename (?P<nodename>.*)$', output,
+ re.MULTILINE)
+ if not m:
+ raise Exception('Couldn\'t parse nodename from bootserver output.')
+ self._node_name = m.groupdict()['nodename']
+ logging.info('Booted device "%s".' % self._node_name)
+
+    # Repeatedly search for a device for |BOOT_DISCOVERY_ATTEMPTS|
+ # number of attempts. If a device isn't found, wait
+ # |BOOT_DISCOVERY_DELAY_SECS| before searching again.
+ logging.info('Waiting for device to join network.')
+ for _ in xrange(BOOT_DISCOVERY_ATTEMPTS):
+ if self._Discover():
+ break
+ time.sleep(BOOT_DISCOVERY_DELAY_SECS)
+
+ if not self._host:
+ raise Exception('Device %s couldn\'t be discovered via mDNS.' %
+ self._node_name)
+
+    self._WaitUntilReady()
+
+ def _GetEndpoint(self):
+ return (self._host, self._port)
+
+ def _GetSshConfigPath(self):
+ return self._ssh_config_path
+
+ def Restart(self):
+ """Restart the device."""
+
+ self.RunCommandPiped('dm reboot')
+ time.sleep(_REBOOT_SLEEP_PERIOD)
+ self.Start()
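+
+
+# Illustrative sketch, not part of the upstream file: how one line of
+# "ffx target list -f simple" output maps to the (host, node name) pair
+# consumed by _Discover() above. The sample output string is hypothetical.
+def _ExampleParseTargetList(output='192.168.42.17 fuchsia-node'):
+  name_host_pairs = [x.strip().split(' ') for x in output.strip().split('\n')]
+  assert len(name_host_pairs) == 1 and len(name_host_pairs[0]) == 2
+  host, node_name = name_host_pairs[0]
+  return host, node_name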
diff --git a/third_party/libwebrtc/build/fuchsia/device_target_test.py b/third_party/libwebrtc/build/fuchsia/device_target_test.py
new file mode 100755
index 0000000000..52ead22495
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/device_target_test.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env vpython3
+# Copyright 2021 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Tests scenarios with number of devices and invalid devices"""
+import subprocess
+import unittest
+import unittest.mock as mock
+from argparse import Namespace
+from device_target import DeviceTarget
+from target import Target
+
+
+class TestDiscoverDeviceTarget(unittest.TestCase):
+ def setUp(self):
+ self.args = Namespace(out_dir='out/fuchsia',
+ target_cpu='x64',
+ host=None,
+ node_name=None,
+ port=None,
+ ssh_config=None,
+ fuchsia_out_dir=None,
+ os_check='update',
+ system_log_file=None)
+
+ def testNoNodeNameOneDeviceReturnNoneCheckNameAndAddress(self):
+ with (DeviceTarget.CreateFromArgs(self.args)) as device_target_instance:
+ with mock.patch.object(DeviceTarget, 'RunFFXCommand') as mock_ffx:
+ mock_spec_popen = mock.create_autospec(subprocess.Popen, instance=True)
+ mock_spec_popen.communicate.return_value = ('address device_name', '')
+ mock_spec_popen.returncode = 0
+ mock_ffx.return_value = mock_spec_popen
+ with mock.patch.object(Target,
+ '_WaitUntilReady') as mock_waituntilready:
+ mock_waituntilready.return_value = True
+ self.assertIsNone(device_target_instance.Start())
+ self.assertEqual(device_target_instance._node_name, 'device_name')
+ self.assertEqual(device_target_instance._host, 'address')
+
+ def testNoNodeNameTwoDevicesRaiseExceptionAmbiguousTarget(self):
+ with (DeviceTarget.CreateFromArgs(self.args)) as device_target_instance:
+ with mock.patch.object(DeviceTarget, 'RunFFXCommand') as mock_ffx:
+ mock_spec_popen = mock.create_autospec(subprocess.Popen, instance=True)
+ mock_spec_popen.communicate.return_value = ('address1 device_name1\n'
+ 'address2 device_name2', '')
+ mock_spec_popen.returncode = 0
+ mock_spec_popen.stdout = ''
+ mock_ffx.return_value = mock_spec_popen
+ with self.assertRaisesRegex(Exception,
+ 'Ambiguous target device specification.'):
+ device_target_instance.Start()
+ self.assertIsNone(device_target_instance._node_name)
+ self.assertIsNone(device_target_instance._host)
+
+ def testNoNodeNameDeviceDoesntHaveNameRaiseExceptionCouldNotFind(self):
+ with (DeviceTarget.CreateFromArgs(self.args)) as device_target_instance:
+ with mock.patch.object(DeviceTarget, 'RunFFXCommand') as mock_ffx:
+ mock_spec_popen = mock.create_autospec(subprocess.Popen, instance=True)
+ mock_spec_popen.communicate.return_value = ('address', '')
+ mock_spec_popen.returncode = 0
+ mock_ffx.return_value = mock_spec_popen
+ with self.assertRaisesRegex(Exception, 'Could not find device'):
+ device_target_instance.Start()
+ self.assertIsNone(device_target_instance._node_name)
+ self.assertIsNone(device_target_instance._host)
+
+ def testNodeNameDefinedDeviceFoundReturnNoneCheckNameAndHost(self):
+ self.args.node_name = 'device_name'
+ with (DeviceTarget.CreateFromArgs(self.args)) as device_target_instance:
+      with mock.patch('subprocess.Popen') as mock_popen:
+        mock_popen.return_value.communicate.return_value = ('address', '')
+        mock_popen.return_value.returncode = 0
+ with mock.patch.object(Target,
+ '_WaitUntilReady') as mock_waituntilready:
+ mock_waituntilready.return_value = True
+ self.assertIsNone(device_target_instance.Start())
+ self.assertEqual(device_target_instance._node_name, 'device_name')
+ self.assertEqual(device_target_instance._host, 'address')
+
+ def testNodeNameDefinedDeviceNotFoundRaiseExceptionCouldNotFind(self):
+ self.args.node_name = 'wrong_device_name'
+ with (DeviceTarget.CreateFromArgs(self.args)) as device_target_instance:
+      with mock.patch('subprocess.Popen') as mock_popen:
+        mock_popen.return_value.communicate.return_value = ('', '')
+        mock_popen.return_value.returncode = 1
+ with self.assertRaisesRegex(Exception, 'Could not find device'):
+ device_target_instance.Start()
+ self.assertIsNone(device_target_instance._node_name)
+ self.assertIsNone(device_target_instance._host)
+
+ def testNoDevicesFoundRaiseExceptionCouldNotFind(self):
+ with (DeviceTarget.CreateFromArgs(self.args)) as device_target_instance:
+ with mock.patch.object(DeviceTarget, 'RunFFXCommand') as mock_ffx:
+ mock_spec_popen = mock.create_autospec(subprocess.Popen, instance=True)
+ mock_spec_popen.communicate.return_value = ('', '')
+ mock_spec_popen.returncode = 0
+ mock_ffx.return_value = mock_spec_popen
+ with self.assertRaisesRegex(Exception, 'Could not find device'):
+ device_target_instance.Start()
+ self.assertIsNone(device_target_instance._node_name)
+ self.assertIsNone(device_target_instance._host)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/fuchsia/emu_target.py b/third_party/libwebrtc/build/fuchsia/emu_target.py
new file mode 100644
index 0000000000..335f5418eb
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/emu_target.py
@@ -0,0 +1,145 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Implements commands for running/interacting with Fuchsia on an emulator."""
+
+import pkg_repo
+import boot_data
+import logging
+import os
+import runner_logs
+import subprocess
+import sys
+import target
+import tempfile
+
+
+class EmuTarget(target.Target):
+ def __init__(self, out_dir, target_cpu, system_log_file):
+ """out_dir: The directory which will contain the files that are
+ generated to support the emulator deployment.
+ target_cpu: The emulated target CPU architecture.
+ Can be 'x64' or 'arm64'."""
+
+ super(EmuTarget, self).__init__(out_dir, target_cpu)
+ self._emu_process = None
+ self._system_log_file = system_log_file
+ self._pkg_repo = None
+
+ def __enter__(self):
+ return self
+
+  def _BuildCommand(self):
+    """Build the command that will be run to start Fuchsia in the emulator."""
+    raise NotImplementedError()
+
+ def _SetEnv(self):
+ return os.environ.copy()
+
+ # Used by the context manager to ensure that the emulator is killed when
+ # the Python process exits.
+ def __exit__(self, exc_type, exc_val, exc_tb):
+    self.Shutdown()
+
+ def Start(self):
+ emu_command = self._BuildCommand()
+
+ # We pass a separate stdin stream. Sharing stdin across processes
+ # leads to flakiness due to the OS prematurely killing the stream and the
+ # Python script panicking and aborting.
+ # The precise root cause is still nebulous, but this fix works.
+ # See crbug.com/741194.
+ logging.debug('Launching %s.' % (self.EMULATOR_NAME))
+ logging.debug(' '.join(emu_command))
+
+ # Zircon sends debug logs to serial port (see kernel.serial=legacy flag
+ # above). Serial port is redirected to a file through emulator stdout.
+ # If runner_logs are not enabled, we output the kernel serial log
+ # to a temporary file, and print that out if we are unable to connect to
+ # the emulator guest, to make it easier to diagnose connectivity issues.
+ temporary_log_file = None
+ if runner_logs.IsEnabled():
+ stdout = runner_logs.FileStreamFor('serial_log')
+ else:
+ temporary_log_file = tempfile.NamedTemporaryFile('w')
+ stdout = temporary_log_file
+
+ LogProcessStatistics('proc_stat_start_log')
+ LogSystemStatistics('system_statistics_start_log')
+
+ self._emu_process = subprocess.Popen(emu_command,
+ stdin=open(os.devnull),
+ stdout=stdout,
+ stderr=subprocess.STDOUT,
+ env=self._SetEnv())
+
+ try:
+ self._WaitUntilReady()
+ LogProcessStatistics('proc_stat_ready_log')
+ except target.FuchsiaTargetException:
+ if temporary_log_file:
+ logging.info('Kernel logs:\n' +
+ open(temporary_log_file.name, 'r').read())
+ raise
+
+ def GetPkgRepo(self):
+ if not self._pkg_repo:
+ self._pkg_repo = pkg_repo.ManagedPkgRepo(self)
+
+ return self._pkg_repo
+
+ def Shutdown(self):
+ if not self._emu_process:
+ logging.error('%s did not start' % (self.EMULATOR_NAME))
+ return
+ returncode = self._emu_process.poll()
+    if returncode is None:
+ logging.info('Shutting down %s' % (self.EMULATOR_NAME))
+ self._emu_process.kill()
+ elif returncode == 0:
+ logging.info('%s quit unexpectedly without errors' % self.EMULATOR_NAME)
+ elif returncode < 0:
+ logging.error('%s was terminated by signal %d' %
+ (self.EMULATOR_NAME, -returncode))
+ else:
+ logging.error('%s quit unexpectedly with exit code %d' %
+ (self.EMULATOR_NAME, returncode))
+
+ LogProcessStatistics('proc_stat_end_log')
+ LogSystemStatistics('system_statistics_end_log')
+
+
+ def _IsEmuStillRunning(self):
+ if not self._emu_process:
+ return False
+ return os.waitpid(self._emu_process.pid, os.WNOHANG)[0] == 0
+
+ def _GetEndpoint(self):
+ if not self._IsEmuStillRunning():
+ raise Exception('%s quit unexpectedly.' % (self.EMULATOR_NAME))
+ return ('localhost', self._host_ssh_port)
+
+ def _GetSshConfigPath(self):
+ return boot_data.GetSSHConfigPath()
+
+
+def LogSystemStatistics(log_file_name):
+ statistics_log = runner_logs.FileStreamFor(log_file_name)
+ # Log the cpu load and process information.
+ subprocess.call(['top', '-b', '-n', '1'],
+ stdin=open(os.devnull),
+ stdout=statistics_log,
+ stderr=subprocess.STDOUT)
+ subprocess.call(['ps', '-ax'],
+ stdin=open(os.devnull),
+ stdout=statistics_log,
+ stderr=subprocess.STDOUT)
+
+
+def LogProcessStatistics(log_file_name):
+ statistics_log = runner_logs.FileStreamFor(log_file_name)
+ subprocess.call(['cat', '/proc/stat'],
+ stdin=open(os.devnull),
+ stdout=statistics_log,
+ stderr=subprocess.STDOUT)
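+
+
+# Hypothetical usage sketch, not part of the upstream file: EmuTarget is
+# abstract, so |target| would be a concrete subclass instance such as
+# QemuTarget or FvdlTarget.
+def _ExampleEmulatorSession(target):
+  with target:  # __exit__() guarantees Shutdown(), even on error.
+    target.Start()  # Boots the emulator and blocks until SSH is reachable.
+    target.GetPkgRepo()  # Lazily creates an ephemeral package repo.
+  # The emulator process is killed once the 'with' block exits.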
diff --git a/third_party/libwebrtc/build/fuchsia/fvdl_target.py b/third_party/libwebrtc/build/fuchsia/fvdl_target.py
new file mode 100644
index 0000000000..fa4fe4f004
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/fvdl_target.py
@@ -0,0 +1,204 @@
+# Copyright 2021 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Implements commands for running and interacting with Fuchsia on FVDL."""
+
+import boot_data
+import common
+import emu_target
+import logging
+import os
+import re
+import subprocess
+import tempfile
+
+_SSH_KEY_DIR = os.path.expanduser('~/.ssh')
+_DEFAULT_SSH_PORT = 22
+_DEVICE_PROTO_TEMPLATE = """
+device_spec: {{
+ horizontal_resolution: 1024
+ vertical_resolution: 600
+ vm_heap: 192
+ ram: {ramsize}
+ cache: 32
+ screen_density: 240
+}}
+"""
+
+
+def GetTargetType():
+ return FvdlTarget
+
+
+class EmulatorNetworkNotFoundError(Exception):
+ """Raised when emulator's address cannot be found"""
+ pass
+
+
+class FvdlTarget(emu_target.EmuTarget):
+ EMULATOR_NAME = 'aemu'
+ _FVDL_PATH = os.path.join(common.SDK_ROOT, 'tools', 'x64', 'fvdl')
+
+ def __init__(self, out_dir, target_cpu, system_log_file, require_kvm,
+ enable_graphics, hardware_gpu, with_network, ram_size_mb):
+ super(FvdlTarget, self).__init__(out_dir, target_cpu, system_log_file)
+ self._require_kvm = require_kvm
+ self._enable_graphics = enable_graphics
+ self._hardware_gpu = hardware_gpu
+ self._with_network = with_network
+ self._ram_size_mb = ram_size_mb
+
+ self._host = None
+ self._pid = None
+
+ # Use a temp file for vdl output.
+ self._vdl_output_file = tempfile.NamedTemporaryFile()
+
+ # Use a temp file for the device proto and write the ram size.
+ self._device_proto_file = tempfile.NamedTemporaryFile()
+ with open(self._device_proto_file.name, 'w') as file:
+ file.write(_DEVICE_PROTO_TEMPLATE.format(ramsize=self._ram_size_mb))
+
+ @staticmethod
+ def CreateFromArgs(args):
+ return FvdlTarget(args.out_dir, args.target_cpu, args.system_log_file,
+ args.require_kvm, args.enable_graphics, args.hardware_gpu,
+ args.with_network, args.ram_size_mb)
+
+ @staticmethod
+ def RegisterArgs(arg_parser):
+ fvdl_args = arg_parser.add_argument_group('fvdl', 'FVDL arguments')
+ fvdl_args.add_argument('--with-network',
+ action='store_true',
+ default=False,
+ help='Run emulator with emulated nic via tun/tap.')
+
+ def _BuildCommand(self):
+ boot_data.ProvisionSSH()
+ self._host_ssh_port = common.GetAvailableTcpPort()
+ kernel_image = common.EnsurePathExists(
+ boot_data.GetTargetFile('qemu-kernel.kernel', self._GetTargetSdkArch(),
+ boot_data.TARGET_TYPE_QEMU))
+ zbi_image = common.EnsurePathExists(
+ boot_data.GetTargetFile('zircon-a.zbi', self._GetTargetSdkArch(),
+ boot_data.TARGET_TYPE_QEMU))
+ fvm_image = common.EnsurePathExists(
+ boot_data.GetTargetFile('storage-full.blk', self._GetTargetSdkArch(),
+ boot_data.TARGET_TYPE_QEMU))
+ aemu_path = common.EnsurePathExists(
+ os.path.join(common.GetEmuRootForPlatform(self.EMULATOR_NAME),
+ 'emulator'))
+
+ emu_command = [
+ self._FVDL_PATH,
+ '--sdk',
+ 'start',
+ '--nopackageserver',
+ '--nointeractive',
+
+ # Host port mapping for user-networking mode.
+ '--port-map',
+ 'hostfwd=tcp::{}-:22'.format(self._host_ssh_port),
+
+ # no-interactive requires a --vdl-output flag to shutdown the emulator.
+ '--vdl-output',
+ self._vdl_output_file.name,
+
+ # Use existing images instead of downloading new ones.
+ '--kernel-image',
+ kernel_image,
+ '--zbi-image',
+ zbi_image,
+ '--fvm-image',
+ fvm_image,
+ '--image-architecture',
+ self._target_cpu,
+
+ # Use an existing emulator checked out by Chromium.
+ '--aemu-path',
+ aemu_path,
+
+ # Use this flag and temp file to define ram size.
+ '--device-proto',
+ self._device_proto_file.name
+ ]
+
+ if not self._require_kvm:
+ emu_command.append('--noacceleration')
+ if not self._enable_graphics:
+ emu_command.append('--headless')
+ if self._hardware_gpu:
+ emu_command.append('--host-gpu')
+ if self._with_network:
+ emu_command.append('-N')
+
+ logging.info('FVDL command: ' + ' '.join(emu_command))
+
+ return emu_command
+
+ def _WaitUntilReady(self):
+ # Indicates the FVDL command finished running.
+ self._emu_process.communicate()
+ super(FvdlTarget, self)._WaitUntilReady()
+
+ def _IsEmuStillRunning(self):
+ if not self._pid:
+ try:
+ with open(self._vdl_output_file.name) as vdl_file:
+ for line in vdl_file:
+ if 'pid' in line:
+ match = re.match(r'.*pid:\s*(\d*).*', line)
+ if match:
+ self._pid = match.group(1)
+ except IOError:
+ logging.error('vdl_output file no longer found. '
+ 'Cannot get emulator pid.')
+ return False
+    try:
+      if subprocess.check_output(['ps', '-p', self._pid, 'o', 'comm=']):
+        return True
+    except subprocess.CalledProcessError:
+      pass
+    logging.error('Emulator pid no longer found. Emulator must be down.')
+    return False
+
+ def _GetEndpoint(self):
+ if self._with_network:
+ return self._GetNetworkAddress()
+ return ('localhost', self._host_ssh_port)
+
+ def _GetNetworkAddress(self):
+ if self._host:
+ return (self._host, _DEFAULT_SSH_PORT)
+ try:
+ with open(self._vdl_output_file.name) as vdl_file:
+ for line in vdl_file:
+ if 'network_address' in line:
+ address = re.match(r'.*network_address:\s*"\[(.*)\]".*', line)
+ if address:
+ self._host = address.group(1)
+ return (self._host, _DEFAULT_SSH_PORT)
+ logging.error('Network address not found.')
+ raise EmulatorNetworkNotFoundError()
+    except IOError:
+ logging.error('vdl_output file not found. Cannot get network address.')
+ raise
+
+ def Shutdown(self):
+ if not self._emu_process:
+ logging.error('%s did not start' % (self.EMULATOR_NAME))
+ return
+ femu_command = [
+ self._FVDL_PATH, '--sdk', 'kill', '--launched-proto',
+ self._vdl_output_file.name
+ ]
+ femu_process = subprocess.Popen(femu_command)
+ returncode = femu_process.wait()
+ if returncode == 0:
+      logging.info('FVDL shut down successfully')
+ else:
+ logging.info('FVDL kill returned error status {}'.format(returncode))
+ emu_target.LogProcessStatistics('proc_stat_end_log')
+ emu_target.LogSystemStatistics('system_statistics_end_log')
+ self._vdl_output_file.close()
+ self._device_proto_file.close()
+
+ def _GetSshConfigPath(self):
+ return boot_data.GetSSHConfigPath()
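+
+
+# Illustrative sketch, not part of the upstream file: the pid extraction
+# performed by _IsEmuStillRunning() above, applied to a single hypothetical
+# line of --vdl-output.
+def _ExampleParseVdlPid(line='  pid: 12345'):
+  match = re.match(r'.*pid:\s*(\d*).*', line)
+  return match.group(1) if match else None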
diff --git a/third_party/libwebrtc/build/fuchsia/fvdl_target_test.py b/third_party/libwebrtc/build/fuchsia/fvdl_target_test.py
new file mode 100755
index 0000000000..de5e52f0a9
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/fvdl_target_test.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+# Copyright 2021 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests different flags to see if they are being used correctly"""
+
+import boot_data
+import common
+import os
+import unittest
+import unittest.mock as mock
+
+from argparse import Namespace
+from fvdl_target import FvdlTarget, _SSH_KEY_DIR
+
+
+class TestBuildCommandFvdlTarget(unittest.TestCase):
+ def setUp(self):
+ self.args = Namespace(out_dir='outdir',
+ system_log_file=None,
+ target_cpu='x64',
+ require_kvm=True,
+ enable_graphics=False,
+ hardware_gpu=False,
+ with_network=False,
+ ram_size_mb=8192)
+
+ def testBasicEmuCommand(self):
+ with FvdlTarget.CreateFromArgs(self.args) as target:
+ target.Shutdown = mock.MagicMock()
+ common.EnsurePathExists = mock.MagicMock(return_value='image')
+ with mock.patch.object(boot_data, 'ProvisionSSH') as provision_mock:
+ build_command = target._BuildCommand()
+ self.assertIn(target._FVDL_PATH, build_command)
+ self.assertIn('--sdk', build_command)
+ self.assertIn('start', build_command)
+ self.assertNotIn('--noacceleration', target._BuildCommand())
+ self.assertIn('--headless', target._BuildCommand())
+ self.assertNotIn('--host-gpu', target._BuildCommand())
+ self.assertNotIn('-N', target._BuildCommand())
+ self.assertIn('--device-proto', target._BuildCommand())
+ self.assertTrue(os.path.exists(target._device_proto_file.name))
+ correct_ram_amount = False
+ with open(target._device_proto_file.name) as file:
+ for line in file:
+ if line.strip() == 'ram: 8192':
+ correct_ram_amount = True
+ break
+ self.assertTrue(correct_ram_amount)
+
+ def testBuildCommandCheckIfNotRequireKVMSetNoAcceleration(self):
+ self.args.require_kvm = False
+ with FvdlTarget.CreateFromArgs(self.args) as target:
+ target.Shutdown = mock.MagicMock()
+ common.EnsurePathExists = mock.MagicMock(return_value='image')
+ with mock.patch.object(boot_data, 'ProvisionSSH') as provision_mock:
+ self.assertIn('--noacceleration', target._BuildCommand())
+
+ def testBuildCommandCheckIfNotEnableGraphicsSetHeadless(self):
+ self.args.enable_graphics = True
+ with FvdlTarget.CreateFromArgs(self.args) as target:
+ target.Shutdown = mock.MagicMock()
+ common.EnsurePathExists = mock.MagicMock(return_value='image')
+ with mock.patch.object(boot_data, 'ProvisionSSH') as provision_mock:
+ self.assertNotIn('--headless', target._BuildCommand())
+
+ def testBuildCommandCheckIfHardwareGpuSetHostGPU(self):
+ self.args.hardware_gpu = True
+ with FvdlTarget.CreateFromArgs(self.args) as target:
+ target.Shutdown = mock.MagicMock()
+ common.EnsurePathExists = mock.MagicMock(return_value='image')
+ with mock.patch.object(boot_data, 'ProvisionSSH') as provision_mock:
+ self.assertIn('--host-gpu', target._BuildCommand())
+
+ def testBuildCommandCheckIfWithNetworkSetTunTap(self):
+ self.args.with_network = True
+ with FvdlTarget.CreateFromArgs(self.args) as target:
+ target.Shutdown = mock.MagicMock()
+ common.EnsurePathExists = mock.MagicMock(return_value='image')
+ with mock.patch.object(boot_data, 'ProvisionSSH') as provision_mock:
+ self.assertIn('-N', target._BuildCommand())
+
+ def testBuildCommandCheckRamSizeNot8192SetRamSize(self):
+ self.args.ram_size_mb = 4096
+ with FvdlTarget.CreateFromArgs(self.args) as target:
+ target.Shutdown = mock.MagicMock()
+ common.EnsurePathExists = mock.MagicMock(return_value='image')
+ with mock.patch.object(boot_data, 'ProvisionSSH') as provision_mock:
+ self.assertIn('--device-proto', target._BuildCommand())
+ self.assertTrue(os.path.exists(target._device_proto_file.name))
+ correct_ram_amount = False
+ with open(target._device_proto_file.name) as file:
+ for line in file:
+ if line.strip() == 'ram: 4096':
+ correct_ram_amount = True
+ break
+ self.assertTrue(correct_ram_amount)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/fuchsia/generic_x64_target.py b/third_party/libwebrtc/build/fuchsia/generic_x64_target.py
new file mode 100644
index 0000000000..5fece127d9
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/generic_x64_target.py
@@ -0,0 +1,99 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Implements commands for running and interacting with Fuchsia generic
+build on devices."""
+
+import boot_data
+import device_target
+import filecmp
+import logging
+import os
+import subprocess
+import tempfile
+
+from common import SDK_ROOT, EnsurePathExists, \
+ GetHostToolPathFromPlatform, SubprocessCallWithTimeout
+
+
+def GetTargetType():
+ return GenericX64PavedDeviceTarget
+
+
+class GenericX64PavedDeviceTarget(device_target.DeviceTarget):
+ """In addition to the functionality provided by DeviceTarget, this class
+ automatically handles paving of x64 devices that use generic Fuchsia build.
+
+ If there are no running devices, then search for a device running Zedboot
+ and pave it.
+
+ If there's only one running device, or |_node_name| is set, then the
+ device's SDK version is checked unless --os-check=ignore is set.
+ If --os-check=update is set, then the target device is repaved if the SDK
+ version doesn't match."""
+
+ TARGET_HASH_FILE_PATH = '/data/.hash'
+
+ def _SDKHashMatches(self):
+ """Checks if /data/.hash on the device matches SDK_ROOT/.hash.
+
+ Returns True if the files are identical, or False otherwise.
+ """
+
+ with tempfile.NamedTemporaryFile() as tmp:
+ # TODO: Avoid using an exception for when file is unretrievable.
+ try:
+        self.GetFile(self.TARGET_HASH_FILE_PATH, tmp.name)
+ except subprocess.CalledProcessError:
+ # If the file is unretrievable for whatever reason, assume mismatch.
+ return False
+
+ return filecmp.cmp(tmp.name, os.path.join(SDK_ROOT, '.hash'), False)
+
+ def _ProvisionDeviceIfNecessary(self):
+ should_provision = False
+
+ if self._Discover():
+ self._WaitUntilReady()
+
+ if self._os_check != 'ignore':
+        if not self._SDKHashMatches():
+ if self._os_check == 'update':
+ logging.info('SDK hash does not match; rebooting and repaving.')
+ self.RunCommand(['dm', 'reboot'])
+ should_provision = True
+ elif self._os_check == 'check':
+ raise Exception('Target device SDK version does not match.')
+ else:
+ should_provision = True
+
+ if should_provision:
+ self._ProvisionDevice()
+
+ def _ProvisionDevice(self):
+ """Pave a device with a generic image of Fuchsia."""
+
+ bootserver_path = GetHostToolPathFromPlatform('bootserver')
+ bootserver_command = [
+ bootserver_path, '-1', '--fvm',
+ EnsurePathExists(
+ boot_data.GetTargetFile('storage-sparse.blk',
+ self._GetTargetSdkArch(),
+ boot_data.TARGET_TYPE_GENERIC)),
+ EnsurePathExists(
+ boot_data.GetBootImage(self._out_dir, self._GetTargetSdkArch(),
+ boot_data.TARGET_TYPE_GENERIC))
+ ]
+
+ if self._node_name:
+ bootserver_command += ['-n', self._node_name]
+
+ bootserver_command += ['--']
+ bootserver_command += boot_data.GetKernelArgs(self._out_dir)
+
+ logging.debug(' '.join(bootserver_command))
+ _, stdout = SubprocessCallWithTimeout(bootserver_command,
+ silent=False,
+ timeout_secs=300)
+
+ self._ParseNodename(stdout)
+
+ # Update the target's hash to match the current tree's.
+    self.PutFile(os.path.join(SDK_ROOT, '.hash'), self.TARGET_HASH_FILE_PATH)
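+
+
+# Illustrative sketch, not part of the upstream file: the byte-for-byte
+# comparison used by _SDKHashMatches() above. Both path arguments are
+# placeholders.
+def _ExampleHashFilesMatch(device_hash_copy, local_hash_path):
+  # shallow=False forces a content comparison instead of a stat() comparison.
+  return filecmp.cmp(device_hash_copy, local_hash_path, shallow=False)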
diff --git a/third_party/libwebrtc/build/fuchsia/linux.sdk.sha1 b/third_party/libwebrtc/build/fuchsia/linux.sdk.sha1
new file mode 100644
index 0000000000..c7cd76a58d
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/linux.sdk.sha1
@@ -0,0 +1 @@
+6.20210918.3.1
diff --git a/third_party/libwebrtc/build/fuchsia/mac.sdk.sha1 b/third_party/libwebrtc/build/fuchsia/mac.sdk.sha1
new file mode 100644
index 0000000000..c7cd76a58d
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/mac.sdk.sha1
@@ -0,0 +1 @@
+6.20210918.3.1
diff --git a/third_party/libwebrtc/build/fuchsia/net_test_server.py b/third_party/libwebrtc/build/fuchsia/net_test_server.py
new file mode 100644
index 0000000000..56005cf12c
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/net_test_server.py
@@ -0,0 +1,90 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import common
+import json
+import logging
+import os
+import re
+import socket
+import sys
+import subprocess
+import tempfile
+
+DIR_SOURCE_ROOT = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
+sys.path.append(os.path.join(DIR_SOURCE_ROOT, 'build', 'util', 'lib', 'common'))
+import chrome_test_server_spawner
+
+
+# Implementation of chrome_test_server_spawner.PortForwarder that uses SSH's
+# remote port forwarding feature to forward ports.
+class SSHPortForwarder(chrome_test_server_spawner.PortForwarder):
+ def __init__(self, target):
+ self._target = target
+
+ # Maps the host (server) port to the device port number.
+ self._port_mapping = {}
+
+ def Map(self, port_pairs):
+ for p in port_pairs:
+ _, host_port = p
+ self._port_mapping[host_port] = \
+ common.ConnectPortForwardingTask(self._target, host_port)
+
+ def GetDevicePortForHostPort(self, host_port):
+ return self._port_mapping[host_port]
+
+ def Unmap(self, device_port):
+    for host_port, entry in self._port_mapping.items():
+ if entry == device_port:
+ forwarding_args = [
+ '-NT', '-O', 'cancel', '-R', '0:localhost:%d' % host_port]
+ task = self._target.RunCommandPiped([],
+ ssh_args=forwarding_args,
+ stdout=open(os.devnull, 'w'),
+ stderr=subprocess.PIPE)
+ task.wait()
+ if task.returncode != 0:
+ raise Exception(
+ 'Error %d when unmapping port %d' % (task.returncode,
+ device_port))
+ del self._port_mapping[host_port]
+ return
+
+ raise Exception('Unmap called for unknown port: %d' % device_port)
+
+
+def SetupTestServer(target, test_concurrency, for_package, for_realms=[]):
+ """Provisions a forwarding test server and configures |target| to use it.
+
+  Returns the SpawningServer instance for the test server."""
+
+ logging.debug('Starting test server.')
+ # The TestLauncher can launch more jobs than the limit specified with
+ # --test-launcher-jobs so the max number of spawned test servers is set to
+ # twice that limit here. See https://crbug.com/913156#c19.
+ spawning_server = chrome_test_server_spawner.SpawningServer(
+ 0, SSHPortForwarder(target), test_concurrency * 2)
+ forwarded_port = common.ConnectPortForwardingTask(
+ target, spawning_server.server_port)
+ spawning_server.Start()
+
+ logging.debug('Test server listening for connections (port=%d)' %
+ spawning_server.server_port)
+ logging.debug('Forwarded port is %d' % forwarded_port)
+
+  config_file = tempfile.NamedTemporaryFile('w', delete=True)
+
+ config_file.write(json.dumps({
+ 'spawner_url_base': 'http://localhost:%d' % forwarded_port
+ }))
+
+ config_file.flush()
+ target.PutFile(config_file.name,
+ '/tmp/net-test-server-config',
+ for_package=for_package,
+ for_realms=for_realms)
+
+ return spawning_server
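+
+
+# Hypothetical usage sketch, not part of the upstream file: mapping one host
+# port through SSHPortForwarder. The port number is a placeholder and |target|
+# must be a connected Target.
+def _ExampleMapPort(target, host_port=8000):
+  forwarder = SSHPortForwarder(target)
+  forwarder.Map([(0, host_port)])  # Pairs are (device_port, host_port).
+  return forwarder.GetDevicePortForHostPort(host_port)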
diff --git a/third_party/libwebrtc/build/fuchsia/pkg_repo.py b/third_party/libwebrtc/build/fuchsia/pkg_repo.py
new file mode 100644
index 0000000000..3e635e9ec0
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/pkg_repo.py
@@ -0,0 +1,209 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import common
+import json
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+import time
+
+from six.moves import urllib
+
+# Maximum amount of time to block while waiting for "pm serve" to come up.
+_PM_SERVE_LIVENESS_TIMEOUT_SECS = 10
+
+_MANAGED_REPO_NAME = 'chrome-runner'
+
+
+class PkgRepo(object):
+ """Abstract interface for a repository used to serve packages to devices."""
+
+ def __init__(self, target):
+ self._target = target
+
+ def PublishPackage(self, package_path):
+ pm_tool = common.GetHostToolPathFromPlatform('pm')
+ # Flags for `pm publish`:
+ # https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/src/sys/pkg/bin/pm/cmd/pm/publish/publish.go
+ # https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/src/sys/pkg/bin/pm/repo/config.go
+ # -a: Publish archived package
+ # -f <path>: Path to packages
+ # -r <path>: Path to repository
+ # -vt: Repo versioning based on time rather than monotonic version number
+ # increase
+ # -v: Verbose output
+ subprocess.check_call([
+ pm_tool, 'publish', '-a', '-f', package_path, '-r',
+ self.GetPath(), '-vt', '-v'
+ ],
+ stderr=subprocess.STDOUT)
+
+  def GetPath(self):
+    raise NotImplementedError()
+
+
+class ManagedPkgRepo(PkgRepo):
+ """Creates and serves packages from an ephemeral repository."""
+
+ def __init__(self, target):
+ PkgRepo.__init__(self, target)
+ self._with_count = 0
+
+ self._pkg_root = tempfile.mkdtemp()
+ pm_tool = common.GetHostToolPathFromPlatform('pm')
+ subprocess.check_call([pm_tool, 'newrepo', '-repo', self._pkg_root])
+ logging.info('Creating and serving temporary package root: {}.'.format(
+ self._pkg_root))
+
+ serve_port = common.GetAvailableTcpPort()
+    # Flags for `pm serve`:
+    # https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/src/sys/pkg/bin/pm/cmd/pm/serve/serve.go
+    # -d <path>: Directory of the repository to serve
+    # -l <port>: Port to listen on
+    # -c 2: Use config.json format v2, the default for pkgctl
+    # -q: Don't print out information about requests
+ self._pm_serve_task = subprocess.Popen([
+ pm_tool, 'serve', '-d',
+ os.path.join(self._pkg_root, 'repository'), '-l',
+ ':%d' % serve_port, '-c', '2', '-q'
+ ])
+
+ # Block until "pm serve" starts serving HTTP traffic at |serve_port|.
+ timeout = time.time() + _PM_SERVE_LIVENESS_TIMEOUT_SECS
+ while True:
+ try:
+ urllib.request.urlopen('http://localhost:%d' % serve_port,
+ timeout=1).read()
+ break
+ except urllib.error.URLError:
+ logging.info('Waiting until \'pm serve\' is up...')
+
+ if time.time() >= timeout:
+ raise Exception('Timed out while waiting for \'pm serve\'.')
+
+ time.sleep(1)
+
+ remote_port = common.ConnectPortForwardingTask(target, serve_port, 0)
+ self._RegisterPkgRepository(self._pkg_root, remote_port)
+
+ def __enter__(self):
+ self._with_count += 1
+ return self
+
+ def __exit__(self, type, value, tb):
+    # Allows the repository to delete itself when it leaves the scope of a
+    # 'with' block.
+ self._with_count -= 1
+ if self._with_count > 0:
+ return
+
+ self._UnregisterPkgRepository()
+ self._pm_serve_task.kill()
+ self._pm_serve_task = None
+
+ logging.info('Cleaning up package root: ' + self._pkg_root)
+ shutil.rmtree(self._pkg_root)
+ self._pkg_root = None
+
+ def GetPath(self):
+ return self._pkg_root
+
+ def _RegisterPkgRepository(self, tuf_repo, remote_port):
+ """Configures a device to use a local TUF repository as an installation
+ source for packages.
+ |tuf_repo|: The host filesystem path to the TUF repository.
+ |remote_port|: The reverse-forwarded port used to connect to instance of
+ `pm serve` that is serving the contents of |tuf_repo|."""
+
+ # Extract the public signing key for inclusion in the config file.
+ root_keys = []
+ root_json_path = os.path.join(tuf_repo, 'repository', 'root.json')
+ root_json = json.load(open(root_json_path, 'r'))
+ for root_key_id in root_json['signed']['roles']['root']['keyids']:
+ root_keys.append({
+ 'type':
+ root_json['signed']['keys'][root_key_id]['keytype'],
+ 'value':
+ root_json['signed']['keys'][root_key_id]['keyval']['public']
+ })
+
+ # "pm serve" can automatically generate a "config.json" file at query time,
+ # but the file is unusable because it specifies URLs with port
+ # numbers that are unreachable from across the port forwarding boundary.
+ # So instead, we generate our own config file with the forwarded port
+ # numbers instead.
+ config_file = open(os.path.join(tuf_repo, 'repository', 'repo_config.json'),
+ 'w')
+ json.dump(
+ {
+ 'repo_url':
+ "fuchsia-pkg://%s" % _MANAGED_REPO_NAME,
+ 'root_keys':
+ root_keys,
+ 'mirrors': [{
+ "mirror_url": "http://127.0.0.1:%d" % remote_port,
+ "subscribe": True
+ }],
+ 'root_threshold':
+ 1,
+ 'root_version':
+ 1
+ }, config_file)
+ config_file.close()
+
+ # Register the repo.
+ return_code = self._target.RunCommand([
+ ('pkgctl repo rm fuchsia-pkg://%s; ' +
+ 'pkgctl repo add url http://127.0.0.1:%d/repo_config.json; ') %
+ (_MANAGED_REPO_NAME, remote_port)
+ ])
+ if return_code != 0:
+ raise Exception('Error code %d when running pkgctl repo add.' %
+ return_code)
+
+ rule_template = """'{"version":"1","content":[{"host_match":"fuchsia.com","host_replacement":"%s","path_prefix_match":"/","path_prefix_replacement":"/"}]}'"""
+ return_code = self._target.RunCommand([
+ ('pkgctl rule replace json %s') % (rule_template % (_MANAGED_REPO_NAME))
+ ])
+ if return_code != 0:
+ raise Exception('Error code %d when running pkgctl rule replace.' %
+ return_code)
+
+ def _UnregisterPkgRepository(self):
+ """Unregisters the package repository."""
+
+ logging.debug('Unregistering package repository.')
+ self._target.RunCommand(
+ ['pkgctl', 'repo', 'rm',
+ 'fuchsia-pkg://%s' % (_MANAGED_REPO_NAME)])
+
+ # Re-enable 'devhost' repo if it's present. This is useful for devices that
+ # were booted with 'fx serve'.
+ self._target.RunCommand([
+ 'pkgctl', 'rule', 'replace', 'json',
+ """'{"version":"1","content":[{"host_match":"fuchsia.com","host_replacement":"devhost","path_prefix_match":"/","path_prefix_replacement":"/"}]}'"""
+ ],
+ silent=True)
+
+
+class ExternalPkgRepo(PkgRepo):
+ """Publishes packages to a package repository located and served externally
+ (ie. located under a Fuchsia build directory and served by "fx serve"."""
+
+ def __init__(self, pkg_root):
+ self._pkg_root = pkg_root
+ logging.info('Using existing package root: {}'.format(pkg_root))
+ logging.info(
+ 'ATTENTION: This will not start a package server. Please run "fx serve" manually.'
+ )
+
+ def GetPath(self):
+ return self._pkg_root
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ pass
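+
+
+# Hypothetical usage sketch, not part of the upstream file: publishing a .far
+# archive through an ephemeral repository. |target| must be a started Target
+# and the archive path is a placeholder.
+def _ExamplePublish(target, far_path='out/fuchsia/app.far'):
+  with ManagedPkgRepo(target) as repo:  # Starts and registers 'pm serve'.
+    repo.PublishPackage(far_path)  # Runs 'pm publish -a -f <far> -r <repo>'.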
diff --git a/third_party/libwebrtc/build/fuchsia/qemu_image.py b/third_party/libwebrtc/build/fuchsia/qemu_image.py
new file mode 100644
index 0000000000..ab5e040acb
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/qemu_image.py
@@ -0,0 +1,75 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Workaround for qemu-img bug on arm64 platforms with multiple cores.
+
+Runs qemu-img command with timeout and retries the command if it hangs.
+
+See:
+crbug.com/1046861 QEMU is out of date; current version of qemu-img
+is unstable
+
+https://bugs.launchpad.net/qemu/+bug/1805256 qemu-img hangs on
+rcu_call_ready_event logic in Aarch64 when converting images
+
+TODO(crbug.com/1046861): Remove this workaround when the bug is fixed.
+"""
+
+import logging
+import subprocess
+import tempfile
+import time
+
+
+# qemu-img p99 run time on Cavium ThunderX2 servers is 26 seconds.
+# Using 2x the p99 time as the timeout.
+QEMU_IMG_TIMEOUT_SEC = 52
+
+
+def _ExecQemuImgWithTimeout(command):
+ """Execute qemu-img command in subprocess with timeout.
+
+ Returns: None if command timed out or return code if command completed.
+ """
+
+ logging.info('qemu-img starting')
+ command_output_file = tempfile.NamedTemporaryFile('w')
+ p = subprocess.Popen(command, stdout=command_output_file,
+ stderr=subprocess.STDOUT)
+ start_sec = time.time()
+ while p.poll() is None and time.time() - start_sec < QEMU_IMG_TIMEOUT_SEC:
+ time.sleep(1)
+ stop_sec = time.time()
+ logging.info('qemu-img duration: %f' % float(stop_sec - start_sec))
+
+ if p.poll() is None:
+ returncode = None
+ p.kill()
+ p.wait()
+ else:
+ returncode = p.returncode
+
+ log_level = logging.WARN if returncode else logging.DEBUG
+ for line in open(command_output_file.name, 'r'):
+ logging.log(log_level, 'qemu-img stdout: ' + line.strip())
+
+ return returncode
+
+
+def ExecQemuImgWithRetry(command):
+ """ Execute qemu-img command in subprocess with 2 retries.
+
+ Raises CalledProcessError if command does not complete successfully.
+ """
+
+ tries = 0
+ status = None
+ while status is None and tries <= 2:
+ tries += 1
+ status = _ExecQemuImgWithTimeout(command)
+
+ if status is None:
+ raise subprocess.CalledProcessError(-1, command)
+ if status:
+ raise subprocess.CalledProcessError(status, command)
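+
+
+# Hypothetical usage sketch, not part of the upstream file: the raw-to-qcow2
+# conversion this wrapper exists for (see qemu_target.py). The paths are
+# placeholders and 'qemu-img' must be resolvable on PATH.
+def _ExampleConvert(raw_path='blobstore.bin', qcow_path='blobstore.qcow'):
+  ExecQemuImgWithRetry(
+      ['qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', raw_path, qcow_path])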
diff --git a/third_party/libwebrtc/build/fuchsia/qemu_target.py b/third_party/libwebrtc/build/fuchsia/qemu_target.py
new file mode 100644
index 0000000000..529b1cc443
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/qemu_target.py
@@ -0,0 +1,243 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Implements commands for running and interacting with Fuchsia on QEMU."""
+
+import boot_data
+import common
+import emu_target
+import hashlib
+import logging
+import os
+import platform
+import qemu_image
+import shutil
+import subprocess
+import sys
+import tempfile
+
+from common import GetHostArchFromPlatform, GetEmuRootForPlatform
+from common import EnsurePathExists
+from qemu_image import ExecQemuImgWithRetry
+from target import FuchsiaTargetException
+
+
+# Virtual networking configuration data for QEMU.
+HOST_IP_ADDRESS = '10.0.2.2'
+GUEST_MAC_ADDRESS = '52:54:00:63:5e:7b'
+
+# Capacity of the system's blobstore volume.
+EXTENDED_BLOBSTORE_SIZE = 1073741824 # 1GB
+
+
+def GetTargetType():
+ return QemuTarget
+
+
+class QemuTarget(emu_target.EmuTarget):
+ EMULATOR_NAME = 'qemu'
+
+ def __init__(self, out_dir, target_cpu, system_log_file, cpu_cores,
+ require_kvm, ram_size_mb):
+ super(QemuTarget, self).__init__(out_dir, target_cpu, system_log_file)
+    self._cpu_cores = cpu_cores
+    self._require_kvm = require_kvm
+    self._ram_size_mb = ram_size_mb
+
+ @staticmethod
+ def CreateFromArgs(args):
+ return QemuTarget(args.out_dir, args.target_cpu, args.system_log_file,
+ args.cpu_cores, args.require_kvm, args.ram_size_mb)
+
+ def _IsKvmEnabled(self):
+ kvm_supported = sys.platform.startswith('linux') and \
+ os.access('/dev/kvm', os.R_OK | os.W_OK)
+ same_arch = \
+ (self._target_cpu == 'arm64' and platform.machine() == 'aarch64') or \
+ (self._target_cpu == 'x64' and platform.machine() == 'x86_64')
+ if kvm_supported and same_arch:
+ return True
+ elif self._require_kvm:
+ if same_arch:
+ if not os.path.exists('/dev/kvm'):
+ kvm_error = 'File /dev/kvm does not exist. Please install KVM first.'
+ else:
+ kvm_error = 'To use KVM acceleration, add user to the kvm group '\
+ 'with "sudo usermod -a -G kvm $USER". Log out and back '\
+ 'in for the change to take effect.'
+ raise FuchsiaTargetException(kvm_error)
+ else:
+ raise FuchsiaTargetException('KVM unavailable when CPU architecture '\
+ 'of host is different from that of'\
+ ' target. See --allow-no-kvm.')
+ else:
+ return False
+
+ def _BuildQemuConfig(self):
+ boot_data.AssertBootImagesExist(self._GetTargetSdkArch(), 'qemu')
+
+ emu_command = [
+ '-kernel',
+ EnsurePathExists(
+ boot_data.GetTargetFile('qemu-kernel.kernel',
+ self._GetTargetSdkArch(),
+ boot_data.TARGET_TYPE_QEMU)),
+ '-initrd',
+ EnsurePathExists(
+ boot_data.GetBootImage(self._out_dir, self._GetTargetSdkArch(),
+ boot_data.TARGET_TYPE_QEMU)),
+ '-m',
+ str(self._ram_size_mb),
+ '-smp',
+ str(self._cpu_cores),
+
+ # Attach the blobstore and data volumes. Use snapshot mode to discard
+ # any changes.
+ '-snapshot',
+ '-drive',
+ 'file=%s,format=qcow2,if=none,id=blobstore,snapshot=on' %
+ _EnsureBlobstoreQcowAndReturnPath(self._out_dir,
+ self._GetTargetSdkArch()),
+ '-device',
+ 'virtio-blk-pci,drive=blobstore',
+
+ # Use stdio for the guest OS only; don't attach the QEMU interactive
+ # monitor.
+ '-serial',
+ 'stdio',
+ '-monitor',
+ 'none',
+ ]
+
+ # Configure the machine to emulate, based on the target architecture.
+ if self._target_cpu == 'arm64':
+ emu_command.extend([
+ '-machine','virt,gic_version=3',
+ ])
+ else:
+ emu_command.extend([
+ '-machine', 'q35',
+ ])
+
+ # Configure virtual network.
+ netdev_type = 'virtio-net-pci'
+ netdev_config = 'type=user,id=net0,restrict=off'
+
+ self._host_ssh_port = common.GetAvailableTcpPort()
+ netdev_config += ",hostfwd=tcp::%s-:22" % self._host_ssh_port
+ emu_command.extend([
+ '-netdev', netdev_config,
+ '-device', '%s,netdev=net0,mac=%s' % (netdev_type, GUEST_MAC_ADDRESS),
+ ])
+
+ # Configure the CPU to emulate.
+ # On Linux, we can enable lightweight virtualization (KVM) if the host and
+ # guest architectures are the same.
+ if self._IsKvmEnabled():
+ kvm_command = ['-enable-kvm', '-cpu']
+ if self._target_cpu == 'arm64':
+ kvm_command.append('host')
+ else:
+ kvm_command.append('host,migratable=no,+invtsc')
+ else:
+ logging.warning('Unable to launch %s with KVM acceleration. '
+ 'The guest VM will be slow.' % (self.EMULATOR_NAME))
+ if self._target_cpu == 'arm64':
+ kvm_command = ['-cpu', 'cortex-a53']
+ else:
+ kvm_command = ['-cpu', 'Haswell,+smap,-check,-fsgsbase']
+
+ emu_command.extend(kvm_command)
+
+ kernel_args = boot_data.GetKernelArgs(self._out_dir)
+
+ # TERM=dumb tells the guest OS to not emit ANSI commands that trigger
+ # noisy ANSI spew from the user's terminal emulator.
+ kernel_args.append('TERM=dumb')
+
+ # Construct kernel cmd line
+ kernel_args.append('kernel.serial=legacy')
+
+ # Don't 'reboot' the emulator if the kernel crashes
+ kernel_args.append('kernel.halt-on-panic=true')
+
+ emu_command.extend(['-append', ' '.join(kernel_args)])
+
+ return emu_command
+
+ def _BuildCommand(self):
+ if self._target_cpu == 'arm64':
+ qemu_exec = 'qemu-system-' + 'aarch64'
+ elif self._target_cpu == 'x64':
+ qemu_exec = 'qemu-system-' + 'x86_64'
+ else:
+      raise Exception('Unknown target_cpu: %s' % self._target_cpu)
+
+ qemu_command = [
+ os.path.join(GetEmuRootForPlatform(self.EMULATOR_NAME), 'bin',
+ qemu_exec)
+ ]
+ qemu_command.extend(self._BuildQemuConfig())
+ qemu_command.append('-nographic')
+ return qemu_command
+
+
+def _ComputeFileHash(filename):
+ hasher = hashlib.md5()
+ with open(filename, 'rb') as f:
+ buf = f.read(4096)
+ while buf:
+ hasher.update(buf)
+ buf = f.read(4096)
+
+ return hasher.hexdigest()
+
+
+def _EnsureBlobstoreQcowAndReturnPath(out_dir, target_arch):
+ """Returns a file containing the Fuchsia blobstore in a QCOW format,
+ with extra buffer space added for growth."""
+
+ qimg_tool = os.path.join(common.GetEmuRootForPlatform('qemu'),
+ 'bin', 'qemu-img')
+ fvm_tool = common.GetHostToolPathFromPlatform('fvm')
+ blobstore_path = boot_data.GetTargetFile('storage-full.blk', target_arch,
+ 'qemu')
+ qcow_path = os.path.join(out_dir, 'gen', 'blobstore.qcow')
+
+ # Check a hash of the blobstore to determine if we can re-use an existing
+ # extended version of it.
+ blobstore_hash_path = os.path.join(out_dir, 'gen', 'blobstore.hash')
+ current_blobstore_hash = _ComputeFileHash(blobstore_path)
+
+ if os.path.exists(blobstore_hash_path) and os.path.exists(qcow_path):
+ if current_blobstore_hash == open(blobstore_hash_path, 'r').read():
+ return qcow_path
+
+ # Add some extra room for growth to the Blobstore volume.
+ # Fuchsia is unable to automatically extend FVM volumes at runtime so the
+ # volume enlargement must be performed prior to QEMU startup.
+
+ # The 'fvm' tool only supports extending volumes in-place, so make a
+ # temporary copy of 'blobstore.bin' before it's mutated.
+ extended_blobstore = tempfile.NamedTemporaryFile()
+ shutil.copyfile(blobstore_path, extended_blobstore.name)
+ subprocess.check_call([fvm_tool, extended_blobstore.name, 'extend',
+ '--length', str(EXTENDED_BLOBSTORE_SIZE),
+ blobstore_path])
+
+ # Construct a QCOW image from the extended, temporary FVM volume.
+ # The result will be retained in the build output directory for re-use.
+ qemu_img_cmd = [qimg_tool, 'convert', '-f', 'raw', '-O', 'qcow2',
+ '-c', extended_blobstore.name, qcow_path]
+ # TODO(crbug.com/1046861): Remove arm64 call with retries when bug is fixed.
+ if common.GetHostArchFromPlatform() == 'arm64':
+ qemu_image.ExecQemuImgWithRetry(qemu_img_cmd)
+ else:
+ subprocess.check_call(qemu_img_cmd)
+
+ # Write out a hash of the original blobstore file, so that subsequent runs
+ # can trivially check if a cached extended FVM volume is available for reuse.
+ with open(blobstore_hash_path, 'w') as blobstore_hash_file:
+ blobstore_hash_file.write(current_blobstore_hash)
+
+ return qcow_path
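+
+
+# Illustrative sketch, not part of the upstream file: the hash-based cache
+# check performed by _EnsureBlobstoreQcowAndReturnPath() above (the qcow
+# existence test is omitted). Paths are placeholders.
+def _ExampleCachedBlobstoreIsCurrent(blobstore_path, blobstore_hash_path):
+  if not os.path.exists(blobstore_hash_path):
+    return False
+  with open(blobstore_hash_path, 'r') as hash_file:
+    return hash_file.read() == _ComputeFileHash(blobstore_path)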
diff --git a/third_party/libwebrtc/build/fuchsia/qemu_target_test.py b/third_party/libwebrtc/build/fuchsia/qemu_target_test.py
new file mode 100755
index 0000000000..44b3802909
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/qemu_target_test.py
@@ -0,0 +1,58 @@
+#!/usr/bin/python2
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import qemu_target
+import shutil
+import subprocess
+import tempfile
+import time
+import unittest
+
+TEST_PAYLOAD = "Let's get this payload across the finish line!"
+
+tmpdir = tempfile.mkdtemp()
+
+# Register the target with the context manager so that it always gets
+# torn down on process exit. Otherwise there might be lingering QEMU instances
+# if Python crashes or is interrupted.
+# The extra constructor arguments use illustrative defaults (assumed to
+# mirror common_args); QemuTarget requires them in addition to out_dir and
+# target_cpu.
+with qemu_target.QemuTarget(tmpdir, 'x64', system_log_file=None,
+                            cpu_cores=4, require_kvm=True,
+                            ram_size_mb=8192) as target:
+ class TestQemuTarget(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ target.Start()
+
+ @classmethod
+ def tearDownClass(cls):
+ target.Shutdown()
+ shutil.rmtree(tmpdir)
+
+ def testCopyBidirectional(self):
+ tmp_path = tmpdir + "/payload"
+ with open(tmp_path, "w") as tmpfile:
+ tmpfile.write(TEST_PAYLOAD)
+ target.PutFile(tmp_path, '/tmp/payload')
+
+ tmp_path_roundtrip = tmp_path + ".roundtrip"
+ target.GetFile('/tmp/payload', tmp_path_roundtrip)
+ with open(tmp_path_roundtrip) as roundtrip:
+ self.assertEqual(TEST_PAYLOAD, roundtrip.read())
+
+ def testRunCommand(self):
+ self.assertEqual(0, target.RunCommand(['true']))
+ self.assertEqual(1, target.RunCommand(['false']))
+
+ def testRunCommandPiped(self):
+ proc = target.RunCommandPiped(['cat'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ proc.stdin.write(TEST_PAYLOAD)
+ proc.stdin.flush()
+ proc.stdin.close()
+ self.assertEqual(TEST_PAYLOAD, proc.stdout.readline())
+ proc.kill()
+
+
+ if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/libwebrtc/build/fuchsia/remote_cmd.py b/third_party/libwebrtc/build/fuchsia/remote_cmd.py
new file mode 100644
index 0000000000..56aa8b1721
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/remote_cmd.py
@@ -0,0 +1,131 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import subprocess
+import threading
+
+from common import SubprocessCallWithTimeout
+
+_SSH = ['ssh']
+_SCP = ['scp', '-C'] # Use gzip compression.
+_SSH_LOGGER = logging.getLogger('ssh')
+
+COPY_TO_TARGET = 0
+COPY_FROM_TARGET = 1
+
+
+def _IsLinkLocalIPv6(hostname):
+ return hostname.startswith('fe80::')
+
+def _EscapeIfIPv6Address(address):
+ if ':' in address:
+ return '[' + address + ']'
+ else:
+ return address
+
+class CommandRunner(object):
+ """Helper class used to execute commands on a remote host over SSH."""
+
+ def __init__(self, config_path, host, port):
+ """Creates a CommandRunner that connects to the specified |host| and |port|
+ using the ssh config at the specified |config_path|.
+
+ config_path: Full path to SSH configuration.
+ host: The hostname or IP address of the remote host.
+ port: The port to connect to."""
+
+ self._config_path = config_path
+ self._host = host
+ self._port = port
+
+ def _GetSshCommandLinePrefix(self):
+ cmd_prefix = _SSH + ['-F', self._config_path, self._host]
+ if self._port:
+ cmd_prefix += ['-p', str(self._port)]
+ return cmd_prefix
+
+ def RunCommand(self, command, silent, timeout_secs=None):
+ """Executes an SSH command on the remote host and blocks until completion.
+
+ command: A list of strings containing the command and its arguments.
+ silent: If true, suppresses all output from 'ssh'.
+ timeout_secs: If set, limits the amount of time that |command| may run.
+ Commands which exceed the timeout are killed.
+
+ Returns the exit code from the remote command."""
+
+ ssh_command = self._GetSshCommandLinePrefix() + command
+ _SSH_LOGGER.debug('ssh exec: ' + ' '.join(ssh_command))
+ retval, _, _ = SubprocessCallWithTimeout(ssh_command, silent, timeout_secs)
+ return retval
+
+
+ def RunCommandPiped(self, command, stdout, stderr, ssh_args = None, **kwargs):
+ """Executes an SSH command on the remote host and returns a process object
+ with access to the command's stdio streams. Does not block.
+
+ command: A list of strings containing the command and its arguments.
+ stdout: subprocess stdout. Must not be None.
+ stderr: subprocess stderr. Must not be None.
+ ssh_args: Arguments that will be passed to SSH.
+ kwargs: A dictionary of parameters to be passed to subprocess.Popen().
+ The parameters can be used to override stdin and stdout, for
+ example.
+
+ Returns a Popen object for the command."""
+
+ if not stdout or not stderr:
+ raise Exception('Stdout/stderr must be specified explicitly')
+
+ if not ssh_args:
+ ssh_args = []
+
+ ssh_command = self._GetSshCommandLinePrefix() + ssh_args + ['--'] + command
+ _SSH_LOGGER.debug(' '.join(ssh_command))
+ return subprocess.Popen(ssh_command, stdout=stdout, stderr=stderr, **kwargs)
+
+
+ def RunScp(self, sources, dest, direction, recursive=False):
+ """Copies a file to or from a remote host using SCP and blocks until
+ completion.
+
+ sources: Paths of the files to be copied.
+ dest: The path that |source| will be copied to.
+ direction: Indicates whether the file should be copied to
+ or from the remote side.
+ Valid values are COPY_TO_TARGET or COPY_FROM_TARGET.
+ recursive: If true, performs a recursive copy.
+
+    Raises subprocess.CalledProcessError if the copy fails."""
+
+ scp_command = _SCP[:]
+ if _SSH_LOGGER.getEffectiveLevel() == logging.DEBUG:
+ scp_command.append('-v')
+ if recursive:
+ scp_command.append('-r')
+
+ host = _EscapeIfIPv6Address(self._host)
+
+ if direction == COPY_TO_TARGET:
+ dest = "%s:%s" % (host, dest)
+ else:
+ sources = ["%s:%s" % (host, source) for source in sources]
+
+ scp_command += ['-F', self._config_path]
+ if self._port:
+ scp_command += ['-P', str(self._port)]
+ scp_command += sources
+ scp_command += [dest]
+
+ _SSH_LOGGER.debug(' '.join(scp_command))
+ try:
+      subprocess.check_output(scp_command, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as error:
+ _SSH_LOGGER.info(error.output)
+ raise
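+
+
+# Hypothetical usage sketch, not part of the upstream file: running a one-shot
+# remote command. The config path, host and port values are placeholders.
+def _ExampleRunEcho(config_path='/path/to/ssh_config', host='192.168.42.17',
+                    port=22):
+  runner = CommandRunner(config_path, host, port)
+  return runner.RunCommand(['echo', 'hello'], silent=True, timeout_secs=30)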
diff --git a/third_party/libwebrtc/build/fuchsia/run_test_package.py b/third_party/libwebrtc/build/fuchsia/run_test_package.py
new file mode 100644
index 0000000000..7e93461027
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/run_test_package.py
@@ -0,0 +1,278 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Contains a helper function for deploying and executing a packaged
+executable on a Target."""
+
+from __future__ import print_function
+
+import common
+import hashlib
+import logging
+import multiprocessing
+import os
+import re
+import select
+import subprocess
+import sys
+import threading
+import uuid
+
+from symbolizer import BuildIdsPaths, RunSymbolizer, SymbolizerFilter
+
+FAR = common.GetHostToolPathFromPlatform('far')
+
+# Amount of time to wait for the termination of the system log output thread.
+_JOIN_TIMEOUT_SECS = 5
+
+
+def _AttachKernelLogReader(target):
+ """Attaches a kernel log reader as a long-running SSH task."""
+
+ logging.info('Attaching kernel logger.')
+ return target.RunCommandPiped(['dlog', '-f'],
+ stdin=open(os.devnull, 'r'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
+
+class SystemLogReader(object):
+ """Collects and symbolizes Fuchsia system log to a file."""
+
+ def __init__(self):
+ self._listener_proc = None
+ self._symbolizer_proc = None
+ self._system_log = None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Stops the system logging processes and closes the output file."""
+ if self._symbolizer_proc:
+ self._symbolizer_proc.kill()
+ if self._listener_proc:
+ self._listener_proc.kill()
+ if self._system_log:
+ self._system_log.close()
+
+ def Start(self, target, package_paths, system_log_file):
+ """Start a system log reader as a long-running SSH task."""
+ logging.debug('Writing fuchsia system log to %s' % system_log_file)
+
+ self._listener_proc = target.RunCommandPiped(['log_listener'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
+ self._system_log = open(system_log_file, 'w', buffering=1)
+ self._symbolizer_proc = RunSymbolizer(self._listener_proc.stdout,
+ self._system_log,
+ BuildIdsPaths(package_paths))
+
+
+class MergedInputStream(object):
+ """Merges a number of input streams into a UNIX pipe on a dedicated thread.
+ Terminates when the file descriptor of the primary stream (the first in
+ the sequence) is closed."""
+
+ def __init__(self, streams):
+ assert len(streams) > 0
+ self._streams = streams
+ self._output_stream = None
+ self._thread = None
+
+ def Start(self):
+ """Returns a pipe to the merged output stream."""
+
+ read_pipe, write_pipe = os.pipe()
+
+ self._output_stream = os.fdopen(write_pipe, 'wb', 1)
+ self._thread = threading.Thread(target=self._Run)
+ self._thread.start()
+
+ return os.fdopen(read_pipe, 'r')
+
+ def _Run(self):
+ streams_by_fd = {}
+ primary_fd = self._streams[0].fileno()
+ for s in self._streams:
+ streams_by_fd[s.fileno()] = s
+
+ # Set when the primary FD is closed. Input from other FDs will continue to
+ # be processed until select() runs dry.
+ flush = False
+
+ # The lifetime of the MergedInputStream is bound to the lifetime of
+ # |primary_fd|.
+ while primary_fd:
+ # When not flushing: block until data is read or an exception occurs.
+ rlist, _, xlist = select.select(streams_by_fd, [], streams_by_fd)
+
+ if len(rlist) == 0 and flush:
+ break
+
+ for fileno in xlist:
+ del streams_by_fd[fileno]
+ if fileno == primary_fd:
+ primary_fd = None
+
+ for fileno in rlist:
+ line = streams_by_fd[fileno].readline()
+ if line:
+ self._output_stream.write(line)
+ else:
+ del streams_by_fd[fileno]
+ if fileno == primary_fd:
+ primary_fd = None
+
+ # Flush the streams by executing nonblocking reads from the input file
+ # descriptors until no more data is available, or all the streams are
+ # closed.
+ while streams_by_fd:
+ rlist, _, _ = select.select(streams_by_fd, [], [], 0)
+
+ if not rlist:
+ break
+
+ for fileno in rlist:
+ line = streams_by_fd[fileno].readline()
+ if line:
+ self._output_stream.write(line)
+ else:
+ del streams_by_fd[fileno]
+
+
+def _GetComponentUri(package_name):
+ return 'fuchsia-pkg://fuchsia.com/%s#meta/%s.cmx' % (package_name,
+ package_name)
+
+
+class RunTestPackageArgs:
+ """RunTestPackage() configuration arguments structure.
+
+ code_coverage: If set, the test package will be run via 'runtests', and the
+ output will be saved to /tmp folder on the device.
+ system_logging: If set, connects a system log reader to the target.
+ test_realm_label: Specifies the realm name that run-test-component should use.
+ This must be specified if a filter file is to be set, or a results summary
+ file fetched after the test suite has run.
+ use_run_test_component: If True then the test package will be run hermetically
+ via 'run-test-component', rather than using 'run'.
+ """
+
+ def __init__(self):
+ self.code_coverage = False
+ self.system_logging = False
+ self.test_realm_label = None
+ self.use_run_test_component = False
+
+ @staticmethod
+ def FromCommonArgs(args):
+ run_test_package_args = RunTestPackageArgs()
+ run_test_package_args.code_coverage = args.code_coverage
+ run_test_package_args.system_logging = args.include_system_logs
+ return run_test_package_args
+
+
+def _DrainStreamToStdout(stream, quit_event):
+ """Outputs the contents of |stream| until |quit_event| is set."""
+
+ while not quit_event.is_set():
+ rlist, _, _ = select.select([stream], [], [], 0.1)
+ if rlist:
+ line = rlist[0].readline()
+ if not line:
+ return
+ print(line.rstrip())
+
+
+def RunTestPackage(output_dir, target, package_paths, package_name,
+ package_args, args):
+ """Installs the Fuchsia package at |package_path| on the target,
+ executes it with |package_args|, and symbolizes its output.
+
+ output_dir: The path containing the build output files.
+ target: The deployment Target object that will run the package.
+ package_paths: The paths to the .far packages to be installed.
+ package_name: The name of the primary package to run.
+ package_args: The arguments which will be passed to the Fuchsia process.
+ args: RunTestPackageArgs instance configuring how the package will be run.
+
+ Returns the exit code of the remote package process."""
+
+ system_logger = (_AttachKernelLogReader(target)
+ if args.system_logging else None)
+ try:
+ if system_logger:
+ # Spin up a thread to asynchronously dump the system log to stdout
+ # for easier diagnoses of early, pre-execution failures.
+ log_output_quit_event = multiprocessing.Event()
+ log_output_thread = threading.Thread(target=lambda: _DrainStreamToStdout(
+ system_logger.stdout, log_output_quit_event))
+ log_output_thread.daemon = True
+ log_output_thread.start()
+
+ with target.GetPkgRepo():
+ target.InstallPackage(package_paths)
+
+ if system_logger:
+ log_output_quit_event.set()
+ log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)
+
+ logging.info('Running application.')
+
+ # TODO(crbug.com/1156768): Deprecate runtests.
+ if args.code_coverage:
+ # runtests requires specifying an output directory and a double dash
+ # before the argument list.
+ command = ['runtests', '-o', '/tmp', _GetComponentUri(package_name)]
+ if args.test_realm_label:
+ command += ['--realm-label', args.test_realm_label]
+ command += ['--']
+ elif args.use_run_test_component:
+ command = ['run-test-component']
+ if args.test_realm_label:
+ command += ['--realm-label=%s' % args.test_realm_label]
+ command.append(_GetComponentUri(package_name))
+ else:
+ command = ['run', _GetComponentUri(package_name)]
+
+ command.extend(package_args)
+
+ process = target.RunCommandPiped(command,
+ stdin=open(os.devnull, 'r'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
+ if system_logger:
+ output_stream = MergedInputStream(
+ [process.stdout, system_logger.stdout]).Start()
+ else:
+ output_stream = process.stdout
+
+ # Run the log data through the symbolizer process.
+ output_stream = SymbolizerFilter(output_stream,
+ BuildIdsPaths(package_paths))
+
+ for next_line in output_stream:
+ # TODO(crbug/1198733): Switch to having stream encode to utf-8 directly
+ # once we drop Python 2 support.
+ print(next_line.encode('utf-8').rstrip())
+
+ process.wait()
+ if process.returncode == 0:
+ logging.info('Process exited normally with status code 0.')
+ else:
+ # The test runner returns an error status code if *any* tests fail,
+ # so we should proceed anyway.
+ logging.warning('Process exited with status code %d.' %
+ process.returncode)
+
+ finally:
+ if system_logger:
+ logging.info('Terminating kernel log reader.')
+ log_output_quit_event.set()
+ log_output_thread.join()
+ system_logger.kill()
+
+ return process.returncode
diff --git a/third_party/libwebrtc/build/fuchsia/runner_exceptions.py b/third_party/libwebrtc/build/fuchsia/runner_exceptions.py
new file mode 100644
index 0000000000..03f872e453
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/runner_exceptions.py
@@ -0,0 +1,78 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Converts exceptions to return codes and prints error messages.
+
+This makes it easier to query build tables for particular error types as
+exit codes are visible to queries while exception stack traces are not."""
+
+import errno
+import fcntl
+import logging
+import os
+import subprocess
+import sys
+import traceback
+
+from target import FuchsiaTargetException
+
+def _PrintException(value, trace):
+ """Prints stack trace and error message for the current exception."""
+
+ traceback.print_tb(trace)
+ print(str(value))
+
+
+def IsStdoutBlocking():
+ """Returns True if sys.stdout is blocking or False if non-blocking.
+
+ sys.stdout should always be blocking. Non-blocking is associated with
+ intermittent IOErrors (crbug.com/1080858).
+ """
+
+ nonblocking = fcntl.fcntl(sys.stdout, fcntl.F_GETFL) & os.O_NONBLOCK
+ return not nonblocking
+
+
+def HandleExceptionAndReturnExitCode():
+ """Maps the current exception to a return code and prints error messages.
+
+ Mapped exception types are assigned blocks of 8 return codes starting at 64.
+ The choice of 64 as the starting code is based on the Advanced Bash-Scripting
+ Guide (http://tldp.org/LDP/abs/html/exitcodes.html).
+
+ A generic exception is mapped to the start of the block. More specific
+ exceptions are mapped to numbers inside the block. For example, a
+ FuchsiaTargetException is mapped to return code 64, unless it involves SSH
+ in which case it is mapped to return code 65.
+
+ Exceptions not specifically mapped go to return code 1.
+
+ Returns the mapped return code."""
+
+  (exc_type, value, trace) = sys.exc_info()
+ _PrintException(value, trace)
+
+  if exc_type is FuchsiaTargetException:
+ if 'ssh' in str(value).lower():
+ print('Error: FuchsiaTargetException: SSH to Fuchsia target failed.')
+ return 65
+ return 64
+  elif exc_type is IOError:
+ if value.errno == errno.EAGAIN:
+ logging.info('Python print to sys.stdout probably failed')
+ if not IsStdoutBlocking():
+        logging.warning('sys.stdout is non-blocking')
+ return 73
+ return 72
+  elif exc_type is subprocess.CalledProcessError:
+ if os.path.basename(value.cmd[0]) == 'scp':
+ print('Error: scp operation failed - %s' % str(value))
+ return 81
+ if os.path.basename(value.cmd[0]) == 'qemu-img':
+ print('Error: qemu-img fuchsia image generation failed.')
+ return 82
+ return 80
+ else:
+ return 1
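+
+
+# Intended usage, mirroring test_runner.py: call from a bare 'except:' clause
+# so that sys.exc_info() still refers to the in-flight exception.
+# (RunTestsAndReturnExitCode is a hypothetical entry point.)
+#
+#   try:
+#     return RunTestsAndReturnExitCode()
+#   except:
+#     return HandleExceptionAndReturnExitCode()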
diff --git a/third_party/libwebrtc/build/fuchsia/runner_logs.py b/third_party/libwebrtc/build/fuchsia/runner_logs.py
new file mode 100644
index 0000000000..20ab6b227d
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/runner_logs.py
@@ -0,0 +1,96 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Creates and manages test runner log file objects.
+
+Provides a context manager object for use in a with statement
+and a module level FileStreamFor function for use by clients.
+"""
+
+import collections
+import multiprocessing
+import os
+
+from symbolizer import RunSymbolizer
+
+SYMBOLIZED_SUFFIX = '.symbolized'
+
+_RunnerLogEntry = collections.namedtuple(
+ '_RunnerLogEntry', ['name', 'log_file', 'path', 'symbolize'])
+
+# Module singleton variable.
+_instance = None
+
+
+class RunnerLogManager(object):
+ """ Runner logs object for use in a with statement."""
+
+ def __init__(self, log_dir, build_ids_files):
+ global _instance
+ if _instance:
+ raise Exception('Only one RunnerLogManager can be instantiated')
+
+ self._log_dir = log_dir
+ self._build_ids_files = build_ids_files
+ self._runner_logs = []
+
+ if self._log_dir and not os.path.isdir(self._log_dir):
+ os.makedirs(self._log_dir)
+
+ _instance = self
+
+ def __enter__(self):
+ return self
+
+  def __exit__(self, exc_type, exc_value, traceback):
+    global _instance
+    pool = multiprocessing.Pool(4)
+    for log_entry in self._runner_logs:
+      pool.apply_async(_FinalizeLog, (log_entry, self._build_ids_files))
+    pool.close()
+    pool.join()
+    _instance = None
+
+ def _FileStreamFor(self, name, symbolize):
+ if any(elem.name == name for elem in self._runner_logs):
+ raise Exception('RunnerLogManager can only open "%s" once' % name)
+
+ path = os.path.join(self._log_dir, name) if self._log_dir else os.devnull
+ log_file = open(path, 'w')
+
+ self._runner_logs.append(_RunnerLogEntry(name, log_file, path, symbolize))
+
+ return log_file
+
+
+def _FinalizeLog(log_entry, build_ids_files):
+ log_entry.log_file.close()
+
+ if log_entry.symbolize:
+ input_file = open(log_entry.path, 'r')
+ output_file = open(log_entry.path + SYMBOLIZED_SUFFIX, 'w')
+ proc = RunSymbolizer(input_file, output_file, build_ids_files)
+ proc.wait()
+ output_file.close()
+ input_file.close()
+
+
+def IsEnabled():
+ """Returns True if the RunnerLogManager has been created, or False if not."""
+
+ return _instance is not None and _instance._log_dir is not None
+
+
+def FileStreamFor(name, symbolize=False):
+ """Opens a test runner file stream in the test runner log directory.
+
+ If no test runner log directory is specified, output is discarded.
+
+ name: log file name
+ symbolize: if True, make a symbolized copy of the log after closing it.
+
+ Returns an opened log file object."""
+
+ return _instance._FileStreamFor(name, symbolize) if IsEnabled() else open(
+ os.devnull, 'w')
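+
+
+# A minimal usage sketch (the log directory and |build_ids_files| are
+# illustrative); the log is closed, and optionally symbolized, when the
+# manager's 'with' block exits:
+#
+#   with RunnerLogManager('/tmp/runner_logs', build_ids_files):
+#     log = runner_logs.FileStreamFor('ssh_diagnostic_log', symbolize=True)
+#     log.write('...')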
diff --git a/third_party/libwebrtc/build/fuchsia/sdk-bucket.txt b/third_party/libwebrtc/build/fuchsia/sdk-bucket.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/sdk-bucket.txt
diff --git a/third_party/libwebrtc/build/fuchsia/sdk-hash-files.list b/third_party/libwebrtc/build/fuchsia/sdk-hash-files.list
new file mode 100644
index 0000000000..6f37bcd9f7
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/sdk-hash-files.list
@@ -0,0 +1 @@
+{platform}.sdk.sha1
diff --git a/third_party/libwebrtc/build/fuchsia/start_emulator.py b/third_party/libwebrtc/build/fuchsia/start_emulator.py
new file mode 100755
index 0000000000..c7edead3fe
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/start_emulator.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# Copyright 2021 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Starts up a long running emulator for unit testing and developer use."""
+
+import argparse
+import common_args
+import logging
+import time
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description='Launches a long-running emulator that can '
+ 'be re-used for multiple test runs.')
+ AddLongRunningArgs(parser)
+ args = parser.parse_args()
+ args.out_dir = None
+ args.device = 'fvdl'
+ common_args.ConfigureLogging(args)
+ with common_args.GetDeploymentTargetForArgs(args) as fvdl_target:
+ if fvdl_target._with_network:
+ logging.info('If you haven\'t set up tuntap, you may be prompted '
+ 'for your sudo password to set up tuntap.')
+ fvdl_target.Start()
+    logging.info('Emulator successfully started up! If you are running '
+                 'multiple fuchsia devices, specify this emulator\'s IP '
+                 'address via the --host flag.')
+ if fvdl_target._with_network:
+ logging.info('You can now use the "-d" flag when running '
+ 'Chrome Fuchsia tests to target this emulator.')
+    while fvdl_target._IsEmuStillRunning():
+      time.sleep(10)
+
+
+def AddLongRunningArgs(arg_parser):
+ arg_parser.add_argument('-v',
+ '--verbose',
+ default=False,
+ action='store_true',
+ help='Enable debug-level logging.')
+ fvdl_args = arg_parser.add_argument_group('FVDL arguments')
+ fvdl_args.add_argument('--target-cpu',
+ default=common_args.GetHostArchFromPlatform(),
+ help='Set target_cpu for the emulator. Defaults '
+ 'to the same architecture as host cpu.')
+ fvdl_args.add_argument('--system-log-file',
+ help='File to write system logs to. Specify '
+ '\'-\' to log to stdout.')
+ fvdl_args.add_argument('--allow-no-kvm',
+ action='store_false',
+ dest='require_kvm',
+ default=True,
+ help='Disables KVM acceleration for the emulator.')
+ fvdl_args.add_argument('--enable-graphics',
+ action='store_true',
+ default=False,
+ help='Start FVDL with graphics instead of '\
+ 'headless.')
+ fvdl_args.add_argument('--hardware-gpu',
+ action='store_true',
+ default=False,
+ help='Use local GPU hardware instead of '\
+ 'Swiftshader.')
+ fvdl_args.add_argument('--without-network',
+ action='store_false',
+ dest='with_network',
+ default=True,
+ help='Run emulator without emulated nic via tun/tap.')
+ fvdl_args.add_argument('--ram-size-mb',
+ type=int,
+ default=8192,
+ help='Set the ram size amount for the emulator.')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/libwebrtc/build/fuchsia/symbolizer.py b/third_party/libwebrtc/build/fuchsia/symbolizer.py
new file mode 100644
index 0000000000..8469d11046
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/symbolizer.py
@@ -0,0 +1,70 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import subprocess
+
+from common import SDK_ROOT
+from common import GetHostArchFromPlatform
+from common import GetHostToolPathFromPlatform
+
+
+def BuildIdsPaths(package_paths):
+ """Generates build ids paths for symbolizer processes."""
+
+  return [
+      os.path.join(os.path.dirname(package_path), 'ids.txt')
+      for package_path in package_paths
+  ]
+
+
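+# For example, BuildIdsPaths(['/out/gen/foo/foo.far']) yields
+# ['/out/gen/foo/ids.txt'] (paths illustrative).
+
+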
+def RunSymbolizer(input_file, output_file, build_ids_files):
+ """Starts a symbolizer process.
+
+ input_file: Input file to be symbolized.
+ output_file: Output file for symbolizer stdout and stderr.
+  build_ids_files: Paths to ids.txt files which map build IDs to
+  unstripped binaries on the filesystem.
+ Returns a Popen object for the started process."""
+
+ symbolizer = GetHostToolPathFromPlatform('symbolizer')
+ symbolizer_cmd = [
+ symbolizer, '--build-id-dir',
+ os.path.join(SDK_ROOT, '.build-id')
+ ]
+ for build_ids_file in build_ids_files:
+ symbolizer_cmd.extend(['--ids-txt', build_ids_file])
+
+ logging.info('Running "%s".' % ' '.join(symbolizer_cmd))
+ return subprocess.Popen(symbolizer_cmd, stdin=input_file, stdout=output_file,
+ stderr=subprocess.STDOUT, close_fds=True)
+
+
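+# A usage sketch mirroring SystemLogReader in run_test_package.py (|listener|
+# is a hypothetical Popen with stdout=subprocess.PIPE, |log_file| an open
+# file object):
+#
+#   proc = RunSymbolizer(listener.stdout, log_file, BuildIdsPaths(packages))
+#   proc.wait()
+
+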
+def SymbolizerFilter(input_file, build_ids_files):
+ """Symbolizes an output stream from a process.
+
+ input_file: Input file to be symbolized.
+  build_ids_files: Paths to ids.txt files which map build IDs to
+  unstripped binaries on the filesystem.
+ Returns a generator that yields symbolized process output."""
+
+ symbolizer_proc = RunSymbolizer(input_file, subprocess.PIPE, build_ids_files)
+
+ while True:
+ # TODO(chonggu): Switch to encoding='utf-8' once we drop Python 2
+ # support.
+ line = symbolizer_proc.stdout.readline().decode('utf-8')
+ if not line:
+ break
+
+ # Skip spam emitted by the symbolizer that obscures the symbolized output.
+ # TODO(https://crbug.com/1069446): Fix the symbolizer and remove this.
+ if '[[[ELF ' in line:
+ continue
+
+ yield line
+
+ symbolizer_proc.wait()
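+
+
+# A usage sketch (|process| is a hypothetical Popen with
+# stdout=subprocess.PIPE):
+#
+#   for line in SymbolizerFilter(process.stdout, BuildIdsPaths(packages)):
+#     sys.stdout.write(line)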
diff --git a/third_party/libwebrtc/build/fuchsia/target.py b/third_party/libwebrtc/build/fuchsia/target.py
new file mode 100644
index 0000000000..7a8628266a
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/target.py
@@ -0,0 +1,336 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import os
+import subprocess
+import time
+
+import common
+import remote_cmd
+import runner_logs
+
+
+_SHUTDOWN_CMD = ['dm', 'poweroff']
+_ATTACH_RETRY_INTERVAL = 1
+_ATTACH_RETRY_SECONDS = 120
+
+# Amount of time to wait for a complete package installation, as a
+# mitigation against hangs due to pkg/network-related failures.
+_INSTALL_TIMEOUT_SECS = 10 * 60
+
+
+def _GetPackageUri(package_name):
+ """Returns the URI for the specified package name."""
+ return 'fuchsia-pkg://fuchsia.com/%s' % (package_name)
+
+
+def _GetPackageInfo(package_path):
+ """Returns a tuple with the name and version of a package."""
+
+ # Query the metadata file which resides next to the package file.
+ package_info = json.load(
+ open(os.path.join(os.path.dirname(package_path), 'package')))
+ return package_info['name'], package_info['version'],
+
+
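+# The 'package' metadata file is JSON of the form (values illustrative):
+#
+#   {"name": "base_unittests", "version": "0"}
+#
+# so _GetPackageInfo('/out/gen/base_unittests.far') would return
+# ('base_unittests', '0').
+
+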
+class _MapIsolatedPathsForPackage:
+ """Callable object which remaps /data and /tmp paths to their component-
+ specific locations, based on the package name and test realm path."""
+
+ def __init__(self, package_name, package_version, realms):
+ realms_path_fragment = '/r/'.join(['r/sys'] + realms)
+ package_sub_path = '{2}/fuchsia.com:{0}:{1}#meta:{0}.cmx/'.format(
+ package_name, package_version, realms_path_fragment)
+ self.isolated_format = '{0}' + package_sub_path + '{1}'
+
+ def __call__(self, path):
+    for isolated_directory in ['/data/', '/tmp/']:
+ if (path+'/').startswith(isolated_directory):
+ return self.isolated_format.format(isolated_directory,
+ path[len(isolated_directory):])
+ return path
+
+
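+# For example, with no extra realms,
+# _MapIsolatedPathsForPackage('base_unittests', 0, [])('/data/foo') returns
+# '/data/r/sys/fuchsia.com:base_unittests:0#meta:base_unittests.cmx/foo'
+# (package name illustrative).
+
+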
+class FuchsiaTargetException(Exception):
+ def __init__(self, message):
+ super(FuchsiaTargetException, self).__init__(message)
+
+
+class Target(object):
+ """Base class representing a Fuchsia deployment target."""
+
+ def __init__(self, out_dir, target_cpu):
+ self._out_dir = out_dir
+ self._started = False
+ self._dry_run = False
+ self._target_cpu = target_cpu
+ self._command_runner = None
+ self._ffx_path = os.path.join(common.SDK_ROOT, 'tools',
+ common.GetHostArchFromPlatform(), 'ffx')
+
+ @staticmethod
+ def CreateFromArgs(args):
+ raise NotImplementedError()
+
+ @staticmethod
+ def RegisterArgs(arg_parser):
+ pass
+
+ # Functions used by the Python context manager for teardown.
+ def __enter__(self):
+ return self
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return
+
+ def Start(self):
+ """Handles the instantiation and connection process for the Fuchsia
+ target instance."""
+
+ def IsStarted(self):
+ """Returns True if the Fuchsia target instance is ready to accept
+ commands."""
+ return self._started
+
+ def IsNewInstance(self):
+ """Returns True if the connected target instance is newly provisioned."""
+ return True
+
+ def GetCommandRunner(self):
+ """Returns CommandRunner that can be used to execute commands on the
+ target. Most clients should prefer RunCommandPiped() and RunCommand()."""
+ self._AssertIsStarted()
+
+ if self._command_runner is None:
+ host, port = self._GetEndpoint()
+ self._command_runner = \
+ remote_cmd.CommandRunner(self._GetSshConfigPath(), host, port)
+
+ return self._command_runner
+
+ def RunCommandPiped(self, command, **kwargs):
+ """Starts a remote command and immediately returns a Popen object for the
+ command. The caller may interact with the streams, inspect the status code,
+ wait on command termination, etc.
+
+ command: A list of strings representing the command and arguments.
+ kwargs: A dictionary of parameters to be passed to subprocess.Popen().
+ The parameters can be used to override stdin and stdout, for
+ example.
+
+ Returns: a Popen object.
+
+ Note: method does not block.
+ """
+ logging.debug('running (non-blocking) \'%s\'.', ' '.join(command))
+ return self.GetCommandRunner().RunCommandPiped(command, **kwargs)
+
+ def RunCommand(self, command, silent=False, timeout_secs=None):
+ """Executes a remote command and waits for it to finish executing.
+
+ Returns the exit code of the command.
+ """
+ logging.debug('running \'%s\'.', ' '.join(command))
+ return self.GetCommandRunner().RunCommand(command, silent,
+ timeout_secs=timeout_secs)
+
+ def EnsureIsolatedPathsExist(self, for_package, for_realms):
+ """Ensures that the package's isolated /data and /tmp exist."""
+ for isolated_directory in ['/data', '/tmp']:
+ self.RunCommand([
+ 'mkdir', '-p',
+ _MapIsolatedPathsForPackage(for_package, 0,
+ for_realms)(isolated_directory)
+ ])
+
+ def PutFile(self,
+ source,
+ dest,
+ recursive=False,
+ for_package=None,
+ for_realms=()):
+ """Copies a file from the local filesystem to the target filesystem.
+
+ source: The path of the file being copied.
+ dest: The path on the remote filesystem which will be copied to.
+ recursive: If true, performs a recursive copy.
+ for_package: If specified, isolated paths in the |dest| are mapped to their
+                 absolute paths for the package, on the target. This currently
+ affects the /data and /tmp directories.
+ for_realms: If specified, identifies the sub-realm of 'sys' under which
+ isolated paths (see |for_package|) are stored.
+ """
+ assert type(source) is str
+ self.PutFiles([source], dest, recursive, for_package, for_realms)
+
+ def PutFiles(self,
+ sources,
+ dest,
+ recursive=False,
+ for_package=None,
+ for_realms=()):
+ """Copies files from the local filesystem to the target filesystem.
+
+ sources: List of local file paths to copy from, or a single path.
+ dest: The path on the remote filesystem which will be copied to.
+ recursive: If true, performs a recursive copy.
+ for_package: If specified, /data in the |dest| is mapped to the package's
+ isolated /data location.
+ for_realms: If specified, identifies the sub-realm of 'sys' under which
+ isolated paths (see |for_package|) are stored.
+ """
+ assert type(sources) is tuple or type(sources) is list
+ if for_package:
+ self.EnsureIsolatedPathsExist(for_package, for_realms)
+ dest = _MapIsolatedPathsForPackage(for_package, 0, for_realms)(dest)
+ logging.debug('copy local:%s => remote:%s', sources, dest)
+ self.GetCommandRunner().RunScp(sources, dest, remote_cmd.COPY_TO_TARGET,
+ recursive)
+
+ def GetFile(self,
+ source,
+ dest,
+ for_package=None,
+ for_realms=(),
+ recursive=False):
+ """Copies a file from the target filesystem to the local filesystem.
+
+ source: The path of the file being copied.
+ dest: The path on the local filesystem which will be copied to.
+ for_package: If specified, /data in paths in |sources| is mapped to the
+ package's isolated /data location.
+ for_realms: If specified, identifies the sub-realm of 'sys' under which
+ isolated paths (see |for_package|) are stored.
+ recursive: If true, performs a recursive copy.
+ """
+ assert type(source) is str
+ self.GetFiles([source], dest, for_package, for_realms, recursive)
+
+ def GetFiles(self,
+ sources,
+ dest,
+ for_package=None,
+ for_realms=(),
+ recursive=False):
+ """Copies files from the target filesystem to the local filesystem.
+
+ sources: List of remote file paths to copy.
+ dest: The path on the local filesystem which will be copied to.
+ for_package: If specified, /data in paths in |sources| is mapped to the
+ package's isolated /data location.
+ for_realms: If specified, identifies the sub-realm of 'sys' under which
+ isolated paths (see |for_package|) are stored.
+ recursive: If true, performs a recursive copy.
+ """
+ assert type(sources) is tuple or type(sources) is list
+ self._AssertIsStarted()
+ if for_package:
+      sources = [
+          _MapIsolatedPathsForPackage(for_package, 0, for_realms)(source)
+          for source in sources
+      ]
+ logging.debug('copy remote:%s => local:%s', sources, dest)
+ return self.GetCommandRunner().RunScp(sources, dest,
+ remote_cmd.COPY_FROM_TARGET,
+ recursive)
+
+ def _GetEndpoint(self):
+ """Returns a (host, port) tuple for the SSH connection to the target."""
+ raise NotImplementedError()
+
+ def _GetTargetSdkArch(self):
+ """Returns the Fuchsia SDK architecture name for the target CPU."""
+ if self._target_cpu == 'arm64' or self._target_cpu == 'x64':
+ return self._target_cpu
+ raise FuchsiaTargetException('Unknown target_cpu:' + self._target_cpu)
+
+ def _AssertIsStarted(self):
+ assert self.IsStarted()
+
+ def _WaitUntilReady(self):
+ logging.info('Connecting to Fuchsia using SSH.')
+
+ host, port = self._GetEndpoint()
+ end_time = time.time() + _ATTACH_RETRY_SECONDS
+ ssh_diagnostic_log = runner_logs.FileStreamFor('ssh_diagnostic_log')
+ while time.time() < end_time:
+ runner = remote_cmd.CommandRunner(self._GetSshConfigPath(), host, port)
+ ssh_proc = runner.RunCommandPiped(['true'],
+ ssh_args=['-v'],
+ stdout=ssh_diagnostic_log,
+ stderr=subprocess.STDOUT)
+ if ssh_proc.wait() == 0:
+ logging.info('Connected!')
+ self._started = True
+ return True
+ time.sleep(_ATTACH_RETRY_INTERVAL)
+
+ logging.error('Timeout limit reached.')
+
+ raise FuchsiaTargetException('Couldn\'t connect using SSH.')
+
+  def _GetSshConfigPath(self):
+ raise NotImplementedError()
+
+ def GetPkgRepo(self):
+ """Returns an PkgRepo instance which serves packages for this Target.
+ Callers should typically call GetPkgRepo() in a |with| statement, and
+ install and execute commands inside the |with| block, so that the returned
+ PkgRepo can teardown correctly, if necessary.
+ """
+ raise NotImplementedError()
+
+ def InstallPackage(self, package_paths):
+ """Installs a package and it's dependencies on the device. If the package is
+ already installed then it will be updated to the new version.
+
+ package_paths: Paths to the .far files to install.
+ """
+ with self.GetPkgRepo() as pkg_repo:
+ # Publish all packages to the serving TUF repository under |tuf_root|.
+ for package_path in package_paths:
+ pkg_repo.PublishPackage(package_path)
+
+ # Resolve all packages, to have them pulled into the device/VM cache.
+ for package_path in package_paths:
+ package_name, package_version = _GetPackageInfo(package_path)
+ logging.info('Resolving %s into cache.', package_name)
+ return_code = self.RunCommand(
+ ['pkgctl', 'resolve',
+ _GetPackageUri(package_name), '>/dev/null'],
+ timeout_secs=_INSTALL_TIMEOUT_SECS)
+ if return_code != 0:
+ raise Exception(
+ 'Error {} while resolving {}.'.format(return_code, package_name))
+
+ # Verify that the newly resolved versions of packages are reported.
+ for package_path in package_paths:
+ # Use pkgctl get-hash to determine which version will be resolved.
+ package_name, package_version = _GetPackageInfo(package_path)
+ pkgctl = self.RunCommandPiped(
+ ['pkgctl', 'get-hash',
+ _GetPackageUri(package_name)],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+      pkgctl_out, pkgctl_err = pkgctl.communicate()
+      pkgctl_out = pkgctl_out.strip()
+
+      # Read the expected version from the meta.far Merkle hash file alongside
+      # the package's FAR.
+      meta_far_path = os.path.join(os.path.dirname(package_path), 'meta.far')
+      meta_far_merkle = subprocess.check_output(
+          [common.GetHostToolPathFromPlatform('merkleroot'),
+           meta_far_path]).split()[0]
+      if pkgctl_out != meta_far_merkle:
+        raise Exception('Hash mismatch for %s after resolve (%s vs %s).' %
+                        (package_name, pkgctl_out, meta_far_merkle))
+
+ def RunFFXCommand(self, ffx_args, **kwargs):
+ """Automatically gets the FFX path and runs FFX based on the
+ arguments provided. Extra args can be added to be used with Popen.
+
+ ffx_args: The arguments for a ffx command.
+ kwargs: A dictionary of parameters to be passed to subprocess.Popen().
+
+ Returns a Popen object for the command."""
+ command = [self._ffx_path] + ffx_args
+ return subprocess.Popen(command, **kwargs)
diff --git a/third_party/libwebrtc/build/fuchsia/test_runner.py b/third_party/libwebrtc/build/fuchsia/test_runner.py
new file mode 100755
index 0000000000..2ccbec9593
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/test_runner.py
@@ -0,0 +1,264 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Deploys and runs a test package on a Fuchsia target."""
+
+import argparse
+import os
+import runner_logs
+import sys
+import tempfile
+
+from common_args import AddCommonArgs, AddTargetSpecificArgs, \
+ ConfigureLogging, GetDeploymentTargetForArgs
+from net_test_server import SetupTestServer
+from run_test_package import RunTestPackage, RunTestPackageArgs, SystemLogReader
+from runner_exceptions import HandleExceptionAndReturnExitCode
+from runner_logs import RunnerLogManager
+from symbolizer import BuildIdsPaths
+
+DEFAULT_TEST_SERVER_CONCURRENCY = 4
+
+TEST_DATA_DIR = '/tmp'
+TEST_FILTER_PATH = TEST_DATA_DIR + '/test_filter.txt'
+TEST_LLVM_PROFILE_PATH = TEST_DATA_DIR + '/llvm-profile'
+TEST_PERF_RESULT_PATH = TEST_DATA_DIR + '/test_perf_summary.json'
+TEST_RESULT_PATH = TEST_DATA_DIR + '/test_summary.json'
+
+TEST_REALM_NAME = 'chromium_tests'
+
+
+def AddTestExecutionArgs(arg_parser):
+ test_args = arg_parser.add_argument_group('testing',
+ 'Test execution arguments')
+ test_args.add_argument('--gtest_filter',
+ help='GTest filter to use in place of any default.')
+ test_args.add_argument(
+ '--gtest_repeat',
+ help='GTest repeat value to use. This also disables the '
+ 'test launcher timeout.')
+ test_args.add_argument(
+ '--test-launcher-retry-limit',
+ help='Number of times that test suite will retry failing '
+ 'tests. This is multiplicative with --gtest_repeat.')
+ test_args.add_argument('--test-launcher-shard-index',
+ type=int,
+ default=os.environ.get('GTEST_SHARD_INDEX'),
+ help='Index of this instance amongst swarming shards.')
+ test_args.add_argument('--test-launcher-total-shards',
+ type=int,
+ default=os.environ.get('GTEST_TOTAL_SHARDS'),
+ help='Total number of swarming shards of this suite.')
+ test_args.add_argument('--gtest_break_on_failure',
+ action='store_true',
+ default=False,
+ help='Should GTest break on failure; useful with '
+ '--gtest_repeat.')
+ test_args.add_argument('--single-process-tests',
+ action='store_true',
+ default=False,
+ help='Runs the tests and the launcher in the same '
+ 'process. Useful for debugging.')
+ test_args.add_argument('--test-launcher-batch-limit',
+ type=int,
+ help='Sets the limit of test batch to run in a single '
+ 'process.')
+ # --test-launcher-filter-file is specified relative to --out-dir,
+ # so specifying type=os.path.* will break it.
+ test_args.add_argument(
+ '--test-launcher-filter-file',
+ default=None,
+ help='Filter file(s) passed to target test process. Use ";" to separate '
+ 'multiple filter files ')
+ test_args.add_argument('--test-launcher-jobs',
+ type=int,
+ help='Sets the number of parallel test jobs.')
+ test_args.add_argument('--test-launcher-summary-output',
+ help='Where the test launcher will output its json.')
+ test_args.add_argument('--enable-test-server',
+ action='store_true',
+ default=False,
+ help='Enable Chrome test server spawner.')
+ test_args.add_argument(
+ '--test-launcher-bot-mode',
+ action='store_true',
+ default=False,
+      help='Informs the TestLauncher that it should enable '
+ 'special allowances for running on a test bot.')
+ test_args.add_argument('--isolated-script-test-output',
+ help='If present, store test results on this path.')
+ test_args.add_argument(
+ '--isolated-script-test-perf-output',
+ help='If present, store chartjson results on this path.')
+ test_args.add_argument('--use-run-test-component',
+ default=False,
+ action='store_true',
+ help='Run the test package hermetically using '
+ 'run-test-component, rather than run.')
+ test_args.add_argument(
+ '--code-coverage',
+ default=False,
+ action='store_true',
+ help='Gather code coverage information and place it in '
+ 'the output directory.')
+ test_args.add_argument('--code-coverage-dir',
+ default=os.getcwd(),
+ help='Directory to place code coverage information. '
+ 'Only relevant when --code-coverage set to true. '
+ 'Defaults to current directory.')
+ test_args.add_argument('--child-arg',
+ action='append',
+ help='Arguments for the test process.')
+ test_args.add_argument('child_args',
+ nargs='*',
+ help='Arguments for the test process.')
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ AddTestExecutionArgs(parser)
+ AddCommonArgs(parser)
+ AddTargetSpecificArgs(parser)
+ args = parser.parse_args()
+
+ # Flag out_dir is required for tests launched with this script.
+ if not args.out_dir:
+ raise ValueError("out-dir must be specified.")
+
+ # Code coverage uses runtests, which calls run_test_component.
+ if args.code_coverage:
+ args.use_run_test_component = True
+
+ ConfigureLogging(args)
+
+ child_args = []
+  if args.test_launcher_shard_index is not None:
+ child_args.append(
+ '--test-launcher-shard-index=%d' % args.test_launcher_shard_index)
+  if args.test_launcher_total_shards is not None:
+ child_args.append(
+ '--test-launcher-total-shards=%d' % args.test_launcher_total_shards)
+ if args.single_process_tests:
+ child_args.append('--single-process-tests')
+ if args.test_launcher_bot_mode:
+ child_args.append('--test-launcher-bot-mode')
+ if args.test_launcher_batch_limit:
+ child_args.append('--test-launcher-batch-limit=%d' %
+ args.test_launcher_batch_limit)
+
+ # Only set --test-launcher-jobs if the caller specifies it, in general.
+ # If the caller enables the test-server then we need to launch the right
+ # number of instances to match the maximum number of parallel test jobs, so
+ # in that case we set --test-launcher-jobs based on the number of CPU cores
+ # specified for the emulator to use.
+ test_concurrency = None
+ if args.test_launcher_jobs:
+ test_concurrency = args.test_launcher_jobs
+ elif args.enable_test_server:
+ if args.device == 'device':
+ test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
+ else:
+ test_concurrency = args.cpu_cores
+ if test_concurrency:
+ child_args.append('--test-launcher-jobs=%d' % test_concurrency)
+
+ if args.gtest_filter:
+ child_args.append('--gtest_filter=' + args.gtest_filter)
+ if args.gtest_repeat:
+ child_args.append('--gtest_repeat=' + args.gtest_repeat)
+ child_args.append('--test-launcher-timeout=-1')
+ if args.test_launcher_retry_limit:
+ child_args.append(
+ '--test-launcher-retry-limit=' + args.test_launcher_retry_limit)
+ if args.gtest_break_on_failure:
+ child_args.append('--gtest_break_on_failure')
+ if args.test_launcher_summary_output:
+ child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)
+ if args.isolated_script_test_output:
+ child_args.append('--isolated-script-test-output=' + TEST_RESULT_PATH)
+ if args.isolated_script_test_perf_output:
+ child_args.append('--isolated-script-test-perf-output=' +
+ TEST_PERF_RESULT_PATH)
+
+ if args.child_arg:
+ child_args.extend(args.child_arg)
+ if args.child_args:
+ child_args.extend(args.child_args)
+
+ test_realms = []
+ if args.use_run_test_component:
+ test_realms = [TEST_REALM_NAME]
+
+ try:
+ with GetDeploymentTargetForArgs(args) as target, \
+ SystemLogReader() as system_logger, \
+ RunnerLogManager(args.runner_logs_dir, BuildIdsPaths(args.package)):
+ target.Start()
+
+ if args.system_log_file and args.system_log_file != '-':
+ system_logger.Start(target, args.package, args.system_log_file)
+
+ if args.test_launcher_filter_file:
+ test_launcher_filter_files = args.test_launcher_filter_file.split(';')
+ with tempfile.NamedTemporaryFile('a+b') as combined_filter_file:
+ for filter_file in test_launcher_filter_files:
+ with open(filter_file, 'r') as f:
+ combined_filter_file.write(f.read())
+ combined_filter_file.seek(0)
+ target.PutFile(combined_filter_file.name,
+ TEST_FILTER_PATH,
+ for_package=args.package_name,
+ for_realms=test_realms)
+ child_args.append('--test-launcher-filter-file=' + TEST_FILTER_PATH)
+
+ test_server = None
+ if args.enable_test_server:
+ assert test_concurrency
+ test_server = SetupTestServer(target, test_concurrency,
+ args.package_name, test_realms)
+
+ run_package_args = RunTestPackageArgs.FromCommonArgs(args)
+ if args.use_run_test_component:
+ run_package_args.test_realm_label = TEST_REALM_NAME
+ run_package_args.use_run_test_component = True
+ returncode = RunTestPackage(args.out_dir, target, args.package,
+ args.package_name, child_args,
+ run_package_args)
+
+ if test_server:
+ test_server.Stop()
+
+ if args.code_coverage:
+ # Copy all the files in the profile directory. /* is used instead
+ # of recursively copying due to permission issues for the latter.
+ target.GetFile(TEST_LLVM_PROFILE_PATH + '/*', args.code_coverage_dir)
+
+ if args.test_launcher_summary_output:
+ target.GetFile(TEST_RESULT_PATH,
+ args.test_launcher_summary_output,
+ for_package=args.package_name,
+ for_realms=test_realms)
+
+ if args.isolated_script_test_output:
+ target.GetFile(TEST_RESULT_PATH,
+ args.isolated_script_test_output,
+ for_package=args.package_name,
+ for_realms=test_realms)
+
+ if args.isolated_script_test_perf_output:
+ target.GetFile(TEST_PERF_RESULT_PATH,
+ args.isolated_script_test_perf_output,
+ for_package=args.package_name,
+ for_realms=test_realms)
+
+ return returncode
+
+ except:
+ return HandleExceptionAndReturnExitCode()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/third_party/libwebrtc/build/fuchsia/update_images.py b/third_party/libwebrtc/build/fuchsia/update_images.py
new file mode 100755
index 0000000000..79b8e49d86
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/update_images.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Updates the Fuchsia SDK to the given revision. Should be used in a 'hooks_os'
+entry so that it only runs when .gclient's target_os includes 'fuchsia'."""
+
+import argparse
+import itertools
+import logging
+import os
+import subprocess
+import sys
+
+from common import GetHostOsFromPlatform, GetHostArchFromPlatform, \
+ DIR_SOURCE_ROOT, IMAGES_ROOT
+from update_sdk import DownloadAndUnpackFromCloudStorage, \
+ GetOverrideCloudStorageBucket, GetSdkHash, \
+ MakeCleanDirectory, SDK_SIGNATURE_FILE
+
+
+def GetSdkSignature(sdk_hash, boot_images):
+ return 'gn:{sdk_hash}:{boot_images}:'.format(sdk_hash=sdk_hash,
+ boot_images=boot_images)
+
+
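+# For example, GetSdkSignature('0123abc', 'qemu.x64') returns
+# 'gn:0123abc:qemu.x64:' (values illustrative).
+
+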
+def GetAllImages(boot_image_names):
+ if not boot_image_names:
+    return set()
+
+ all_device_types = ['generic', 'qemu']
+ all_archs = ['x64', 'arm64']
+
+ images_to_download = set()
+
+ for boot_image in boot_image_names.split(','):
+ components = boot_image.split('.')
+ if len(components) != 2:
+ continue
+
+ device_type, arch = components
+ device_images = all_device_types if device_type == '*' else [device_type]
+ arch_images = all_archs if arch == '*' else [arch]
+ images_to_download.update(itertools.product(device_images, arch_images))
+ return images_to_download
+
+
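+# For example, GetAllImages('qemu.*') expands the arch wildcard and returns
+# {('qemu', 'x64'), ('qemu', 'arm64')}.
+
+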
+def DownloadSdkBootImages(bucket, sdk_hash, boot_image_names, image_root_dir):
+ images_to_download = GetAllImages(boot_image_names)
+ for image_to_download in images_to_download:
+ device_type = image_to_download[0]
+ arch = image_to_download[1]
+ image_output_dir = os.path.join(image_root_dir, arch, device_type)
+ if os.path.exists(image_output_dir):
+ continue
+
+ logging.info('Downloading Fuchsia boot images for %s.%s...' %
+ (device_type, arch))
+ if bucket == 'fuchsia-sdk':
+ images_tarball_url = 'gs://{bucket}/development/{sdk_hash}/images/'\
+ '{device_type}.{arch}.tgz'.format(
+ bucket=bucket, sdk_hash=sdk_hash,
+ device_type=device_type, arch=arch)
+ else:
+ images_tarball_url = 'gs://{bucket}/development/{sdk_hash}/images/'\
+ '{device_type}-{arch}.tgz'.format(
+ bucket=bucket, sdk_hash=sdk_hash,
+ device_type=device_type, arch=arch)
+ DownloadAndUnpackFromCloudStorage(images_tarball_url, image_output_dir)
+
+
+def GetNewSignature(sdk_hash, boot_images):
+ return GetSdkSignature(sdk_hash, boot_images)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--verbose',
+ '-v',
+ action='store_true',
+ help='Enable debug-level logging.')
+ parser.add_argument(
+ '--boot-images',
+ type=str,
+ required=True,
+ help='List of boot images to download, represented as a comma separated '
+ 'list. Wildcards are allowed. ')
+ parser.add_argument(
+ '--default-bucket',
+ type=str,
+ default='fuchsia',
+ help='The Google Cloud Storage bucket in which the Fuchsia images are '
+ 'stored. Entry in sdk-bucket.txt will override this flag.')
+ parser.add_argument(
+ '--image-root-dir',
+ default=IMAGES_ROOT,
+ help='Specify the root directory of the downloaded images. Optional')
+ args = parser.parse_args()
+
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
+ # If no boot images need to be downloaded, exit.
+ if not args.boot_images:
+ return 0
+
+ # Check whether there's SDK support for this platform.
+ GetHostOsFromPlatform()
+
+ # Use the bucket in sdk-bucket.txt if an entry exists.
+ # Otherwise use the default bucket.
+ bucket = GetOverrideCloudStorageBucket() or args.default_bucket
+
+ sdk_hash = GetSdkHash(bucket)
+ if not sdk_hash:
+ return 1
+
+ signature_filename = os.path.join(args.image_root_dir, SDK_SIGNATURE_FILE)
+ current_signature = (open(signature_filename, 'r').read().strip()
+ if os.path.exists(signature_filename) else '')
+ new_signature = GetNewSignature(sdk_hash, args.boot_images)
+ if current_signature != new_signature:
+ logging.info('Downloading Fuchsia images %s...' % sdk_hash)
+ MakeCleanDirectory(args.image_root_dir)
+
+ try:
+ DownloadSdkBootImages(bucket, sdk_hash, args.boot_images,
+ args.image_root_dir)
+ with open(signature_filename, 'w') as f:
+ f.write(new_signature)
+
+    except subprocess.CalledProcessError as e:
+      logging.error(("command '%s' failed with status %d.%s"), " ".join(e.cmd),
+                    e.returncode, " Details: " + e.output if e.output else "")
+      return 1
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/third_party/libwebrtc/build/fuchsia/update_sdk.py b/third_party/libwebrtc/build/fuchsia/update_sdk.py
new file mode 100755
index 0000000000..a1c9621fac
--- /dev/null
+++ b/third_party/libwebrtc/build/fuchsia/update_sdk.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Updates the Fuchsia SDK to the given revision. Should be used in a 'hooks_os'
+entry so that it only runs when .gclient's target_os includes 'fuchsia'."""
+
+import argparse
+import logging
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tarfile
+
+from common import GetHostOsFromPlatform, GetHostArchFromPlatform, \
+ DIR_SOURCE_ROOT, SDK_ROOT
+
+sys.path.append(os.path.join(DIR_SOURCE_ROOT, 'build'))
+import find_depot_tools
+
+SDK_SIGNATURE_FILE = '.hash'
+SDK_TARBALL_PATH_TEMPLATE = (
+ 'gs://{bucket}/development/{sdk_hash}/sdk/{platform}-amd64/gn.tar.gz')
+
+
+def ReadFile(filename):
+ with open(os.path.join(os.path.dirname(__file__), filename), 'r') as f:
+ return f.read()
+
+
+# TODO(crbug.com/1138433): Investigate whether we can deprecate
+# use of sdk-bucket.txt.
+def GetOverrideCloudStorageBucket():
+  """Reads the bucket entry from sdk-bucket.txt."""
+ return ReadFile('sdk-bucket.txt').strip()
+
+
+def GetSdkHash(bucket):
+ hashes = GetSdkHashList()
+ return (max(hashes, key=lambda sdk: GetSdkGeneration(bucket, sdk))
+ if hashes else None)
+
+
+def GetSdkHashList():
+ """Read filename entries from sdk-hash-files.list (one per line), substitute
+ {platform} in each entry if present, and read from each filename."""
+ platform = GetHostOsFromPlatform()
+ filenames = [
+ line.strip() for line in ReadFile('sdk-hash-files.list').replace(
+ '{platform}', platform).splitlines()
+ ]
+ sdk_hashes = [ReadFile(filename).strip() for filename in filenames]
+ return sdk_hashes
+
+
+def GetSdkGeneration(bucket, sdk_hash):
+  if not sdk_hash:
+    return None
+
+  sdk_path = GetSdkTarballPath(bucket, sdk_hash)
+ cmd = [
+ os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'), 'ls', '-L',
+ sdk_path
+ ]
+ logging.debug("Running '%s'", " ".join(cmd))
+ sdk_details = subprocess.check_output(cmd).decode('utf-8')
+  m = re.search(r'Generation:\s*(\d*)', sdk_details)
+ if not m:
+ raise RuntimeError('Could not find SDK generation for {sdk_path}'.format(
+ sdk_path=sdk_path))
+ return int(m.group(1))
+
+
+def GetSdkTarballPath(bucket, sdk_hash):
+ return SDK_TARBALL_PATH_TEMPLATE.format(
+ bucket=bucket, sdk_hash=sdk_hash, platform=GetHostOsFromPlatform())
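+
+
+# For example, on Linux with the default bucket this yields
+# 'gs://fuchsia/development/<sdk_hash>/sdk/linux-amd64/gn.tar.gz'.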
+
+
+# Updates the modification timestamps of |SDK_ROOT| and its contents to the
+# current time.
+def UpdateTimestampsRecursive():
+ for root, dirs, files in os.walk(SDK_ROOT):
+ for f in files:
+ os.utime(os.path.join(root, f), None)
+ for d in dirs:
+ os.utime(os.path.join(root, d), None)
+
+
+# Fetches a tarball from GCS and uncompresses it to |output_dir|.
+def DownloadAndUnpackFromCloudStorage(url, output_dir):
+ # Pass the compressed stream directly to 'tarfile'; don't bother writing it
+ # to disk first.
+ cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'),
+ 'cp', url, '-']
+ logging.debug('Running "%s"', ' '.join(cmd))
+ task = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+ try:
+ tarfile.open(mode='r|gz', fileobj=task.stdout).extractall(path=output_dir)
+ except tarfile.ReadError:
+ task.wait()
+ stderr = task.stderr.read()
+ raise subprocess.CalledProcessError(task.returncode, cmd,
+ "Failed to read a tarfile from gsutil.py.{}".format(
+ stderr if stderr else ""))
+ task.wait()
+ if task.returncode:
+ raise subprocess.CalledProcessError(task.returncode, cmd,
+ task.stderr.read())
+
+
+def MakeCleanDirectory(directory_name):
+  if os.path.exists(directory_name):
+ shutil.rmtree(directory_name)
+ os.mkdir(directory_name)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--verbose', '-v',
+ action='store_true',
+ help='Enable debug-level logging.')
+ parser.add_argument(
+ '--default-bucket',
+ type=str,
+ default='fuchsia',
+ help='The Google Cloud Storage bucket in which the Fuchsia SDK is '
+ 'stored. Entry in sdk-bucket.txt will override this flag.')
+ args = parser.parse_args()
+
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
+ # Quietly exit if there's no SDK support for this platform.
+ try:
+ GetHostOsFromPlatform()
+  except Exception:
+ return 0
+
+ # Use the bucket in sdk-bucket.txt if an entry exists.
+ # Otherwise use the default bucket.
+ bucket = GetOverrideCloudStorageBucket() or args.default_bucket
+
+ sdk_hash = GetSdkHash(bucket)
+ if not sdk_hash:
+ return 1
+
+ signature_filename = os.path.join(SDK_ROOT, SDK_SIGNATURE_FILE)
+ current_signature = (open(signature_filename, 'r').read().strip()
+ if os.path.exists(signature_filename) else '')
+ if current_signature != sdk_hash:
+ logging.info('Downloading GN SDK %s...' % sdk_hash)
+
+ MakeCleanDirectory(SDK_ROOT)
+ DownloadAndUnpackFromCloudStorage(GetSdkTarballPath(bucket, sdk_hash),
+ SDK_ROOT)
+
+ with open(signature_filename, 'w') as f:
+ f.write(sdk_hash)
+
+ UpdateTimestampsRecursive()
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())