author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 09:22:09 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 09:22:09 +0000
commit    | 43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree      | 620249daf56c0258faa40cbdcf9cfba06de2a846 /third_party/libwebrtc/tools_webrtc/perf
parent    | Initial commit. (diff)
Adding upstream version 110.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/tools_webrtc/perf')
third_party/libwebrtc/tools_webrtc/perf/BUILD.gn                      |  17
third_party/libwebrtc/tools_webrtc/perf/catapult_uploader.py          | 310
third_party/libwebrtc/tools_webrtc/perf/catapult_uploader_test.py     | 122
third_party/libwebrtc/tools_webrtc/perf/process_perf_results.py       | 123
third_party/libwebrtc/tools_webrtc/perf/process_perf_results_py2.py   |  25
third_party/libwebrtc/tools_webrtc/perf/process_perf_results_test.py  |  70
6 files changed, 667 insertions, 0 deletions
diff --git a/third_party/libwebrtc/tools_webrtc/perf/BUILD.gn b/third_party/libwebrtc/tools_webrtc/perf/BUILD.gn
new file mode 100644
index 0000000000..484f9565b5
--- /dev/null
+++ b/third_party/libwebrtc/tools_webrtc/perf/BUILD.gn
@@ -0,0 +1,17 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+if (rtc_enable_protobuf) {
+  group("webrtc_dashboard_upload") {
+    data = [ "process_perf_results.py" ]
+    data_deps =
+        [ "//third_party/catapult/tracing/tracing/proto:histogram_proto" ]
+  }
+}
diff --git a/third_party/libwebrtc/tools_webrtc/perf/catapult_uploader.py b/third_party/libwebrtc/tools_webrtc/perf/catapult_uploader.py
new file mode 100644
index 0000000000..d07c287f28
--- /dev/null
+++ b/third_party/libwebrtc/tools_webrtc/perf/catapult_uploader.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env vpython3
+
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import datetime
+import json
+import subprocess
+import time
+import zlib
+
+from typing import Optional
+import dataclasses
+import httplib2
+
+from tracing.value import histogram
+from tracing.value import histogram_set
+from tracing.value.diagnostics import generic_set
+from tracing.value.diagnostics import reserved_infos
+
+
+@dataclasses.dataclass
+class UploaderOptions():
+  """Required information to upload perf metrics.
+
+  Attributes:
+    perf_dashboard_machine_group: The "master" the bots are grouped under.
+      This string is the group in the perf dashboard path
+      group/bot/perf_id/metric/subtest.
+    bot: The bot running the test (e.g. webrtc-win-large-tests).
+    test_suite: The key for the test in the dashboard (i.e. what you select
+      in the top-level test suite selector in the dashboard).
+    webrtc_git_hash: webrtc.googlesource.com commit hash.
+    commit_position: Commit position corresponding to the git hash.
+    build_page_url: URL to the build page for this build.
+    dashboard_url: Which dashboard to use.
+    input_results_file: A HistogramSet proto file coming from WebRTC tests.
+    output_json_file: Where to write the output (for debugging).
+    wait_timeout_sec: Maximum amount of time in seconds that the script will
+      wait for the confirmation.
+    wait_polling_period_sec: Status will be requested from the Dashboard
+      every wait_polling_period_sec seconds.
+ """ + perf_dashboard_machine_group: str + bot: str + test_suite: str + webrtc_git_hash: str + commit_position: int + build_page_url: str + dashboard_url: str + input_results_file: str + output_json_file: Optional[str] = None + wait_timeout_sec: datetime.timedelta = datetime.timedelta(seconds=1200) + wait_polling_period_sec: datetime.timedelta = datetime.timedelta(seconds=120) + + +def _GenerateOauthToken(): + args = ['luci-auth', 'token'] + p = subprocess.Popen(args, + universal_newlines=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + if p.wait() == 0: + output = p.stdout.read() + return output.strip() + raise RuntimeError( + 'Error generating authentication token.\nStdout: %s\nStderr:%s' % + (p.stdout.read(), p.stderr.read())) + + +def _CreateHeaders(oauth_token): + return {'Authorization': 'Bearer %s' % oauth_token} + + +def _SendHistogramSet(url, histograms): + """Make a HTTP POST with the given JSON to the Performance Dashboard. + + Args: + url: URL of Performance Dashboard instance, e.g. + "https://chromeperf.appspot.com". + histograms: a histogram set object that contains the data to be sent. + """ + headers = _CreateHeaders(_GenerateOauthToken()) + + serialized = json.dumps(_ApplyHacks(histograms.AsDicts()), indent=4) + + if url.startswith('http://localhost'): + # The catapult server turns off compression in developer mode. + data = serialized + else: + data = zlib.compress(serialized.encode('utf-8')) + + print('Sending %d bytes to %s.' % (len(data), url + '/add_histograms')) + + http = httplib2.Http() + response, content = http.request(url + '/add_histograms', + method='POST', + body=data, + headers=headers) + return response, content + + +def _WaitForUploadConfirmation(url, upload_token, wait_timeout, + wait_polling_period): + """Make a HTTP GET requests to the Performance Dashboard untill upload + status is known or the time is out. + + Args: + url: URL of Performance Dashboard instance, e.g. + "https://chromeperf.appspot.com". + upload_token: String that identifies Performance Dashboard and can be used + for the status check. + wait_timeout: (datetime.timedelta) Maximum time to wait for the + confirmation. + wait_polling_period: (datetime.timedelta) Performance Dashboard will be + polled every wait_polling_period amount of time. + """ + assert wait_polling_period <= wait_timeout + + headers = _CreateHeaders(_GenerateOauthToken()) + http = httplib2.Http() + + oauth_refreshed = False + response = None + resp_json = None + current_time = datetime.datetime.now() + end_time = current_time + wait_timeout + next_poll_time = current_time + wait_polling_period + while datetime.datetime.now() < end_time: + current_time = datetime.datetime.now() + if next_poll_time > current_time: + time.sleep((next_poll_time - current_time).total_seconds()) + next_poll_time = datetime.datetime.now() + wait_polling_period + + response, content = http.request(url + '/uploads/' + upload_token, + method='GET', + headers=headers) + + print('Upload state polled. Response: %r.' % content) + + if not oauth_refreshed and response.status == 403: + print('Oauth token refreshed. Continue polling.') + headers = _CreateHeaders(_GenerateOauthToken()) + oauth_refreshed = True + continue + + if response.status != 200: + break + + resp_json = json.loads(content) + if resp_json['state'] == 'COMPLETED' or resp_json['state'] == 'FAILED': + break + + return response, resp_json + + +# Because of an issues on the Dashboard side few measurements over a large set +# can fail to upload. 
+# Because of an issue on the Dashboard side, a few measurements out of a large
+# set can fail to upload. That would lead to the whole upload being marked as
+# failed. Check it, so it doesn't increase flakiness of our tests.
+# TODO(crbug.com/1145904): Remove check after fixed.
+def _CheckFullUploadInfo(url, upload_token,
+                         min_measurements_amount=50,
+                         max_failed_measurements_percent=0.03):
+  """Make an HTTP GET request to the Performance Dashboard to get full info
+  about the upload (including measurements). Checks if the upload is correct
+  despite not having status "COMPLETED".
+
+  Args:
+    url: URL of Performance Dashboard instance, e.g.
+        "https://chromeperf.appspot.com".
+    upload_token: String that identifies the upload and can be used for the
+      status check.
+    min_measurements_amount: minimal amount of measurements that the upload
+      should have to start tolerating failures in particular measurements.
+    max_failed_measurements_percent: maximal percent of failed measurements
+      to tolerate.
+  """
+  headers = _CreateHeaders(_GenerateOauthToken())
+  http = httplib2.Http()
+
+  response, content = http.request(url + '/uploads/' + upload_token +
+                                   '?additional_info=measurements',
+                                   method='GET',
+                                   headers=headers)
+
+  if response.status != 200:
+    print('Failed to reach the dashboard to get full upload info.')
+    return False
+
+  resp_json = json.loads(content)
+  print('Full upload info: %s.' % json.dumps(resp_json, indent=4))
+
+  if 'measurements' in resp_json:
+    measurements_cnt = len(resp_json['measurements'])
+    not_completed_state_cnt = len(
+        [m for m in resp_json['measurements'] if m['state'] != 'COMPLETED'])
+
+    if (measurements_cnt >= min_measurements_amount
+        and (not_completed_state_cnt /
+             (measurements_cnt * 1.0) <= max_failed_measurements_percent)):
+      print(('Not all measurements were confirmed to upload. '
+             'Measurements count: %d, failed to upload or timed out: %d' %
+             (measurements_cnt, not_completed_state_cnt)))
+      return True
+
+  return False
+
+
+# TODO(https://crbug.com/1029452): HACKHACK
+# Remove once we have doubles in the proto and handle -infinity correctly.
+def _ApplyHacks(dicts):
+  def _NoInf(value):
+    if value == float('inf'):
+      return histogram.JS_MAX_VALUE
+    if value == float('-inf'):
+      return -histogram.JS_MAX_VALUE
+    return value
+
+  for d in dicts:
+    if 'running' in d:
+      d['running'] = [_NoInf(value) for value in d['running']]
+    if 'sampleValues' in d:
+      d['sampleValues'] = [_NoInf(value) for value in d['sampleValues']]
+
+  return dicts
+
+
+def _LoadHistogramSetFromProto(options):
+  hs = histogram_set.HistogramSet()
+  with open(options.input_results_file, 'rb') as f:
+    hs.ImportProto(f.read())
+
+  return hs
+
+
+def _AddBuildInfo(histograms, options):
+  common_diagnostics = {
+      reserved_infos.MASTERS: options.perf_dashboard_machine_group,
+      reserved_infos.BOTS: options.bot,
+      reserved_infos.POINT_ID: options.commit_position,
+      reserved_infos.BENCHMARKS: options.test_suite,
+      reserved_infos.WEBRTC_REVISIONS: str(options.webrtc_git_hash),
+      reserved_infos.BUILD_URLS: options.build_page_url,
+  }
+
+  for k, v in list(common_diagnostics.items()):
+    histograms.AddSharedDiagnosticToAllHistograms(k.name,
+                                                  generic_set.GenericSet([v]))
+
+
+def _DumpOutput(histograms, output_file):
+  with open(output_file, 'w') as f:
+    json.dump(_ApplyHacks(histograms.AsDicts()), f, indent=4)
+
+
+def UploadToDashboardImpl(options):
+  histograms = _LoadHistogramSetFromProto(options)
+  _AddBuildInfo(histograms, options)
+
+  if options.output_json_file:
+    _DumpOutput(histograms, options.output_json_file)
+
+  response, content = _SendHistogramSet(options.dashboard_url, histograms)
+
+  if response.status != 200:
+    print(('Upload failed with %d: %s\n\n%s' %
+           (response.status, response.reason, content)))
+    return 1
+
+  upload_token = json.loads(content).get('token')
+  if not upload_token:
+    print(('Received 200 from dashboard. ',
+           'Not waiting for the upload status confirmation.'))
+    return 0
+
+  response, resp_json = _WaitForUploadConfirmation(
+      options.dashboard_url, upload_token, options.wait_timeout_sec,
+      options.wait_polling_period_sec)
+
+  if ((resp_json and resp_json['state'] == 'COMPLETED')
+      or _CheckFullUploadInfo(options.dashboard_url, upload_token)):
+    print('Upload completed.')
+    return 0
+
+  if response.status != 200:
+    print(('Upload status poll failed with %d: %s' %
+           (response.status, response.reason)))
+    return 1
+
+  if resp_json['state'] == 'FAILED':
+    print('Upload failed.')
+    return 1
+
+  print(('Upload wasn\'t completed in a given time: %s seconds.' %
+         options.wait_timeout_sec))
+  return 1
+
+
+def UploadToDashboard(options):
+  try:
+    exit_code = UploadToDashboardImpl(options)
+  except RuntimeError as e:
+    print(e)
+    return 1
+  return exit_code
diff --git a/third_party/libwebrtc/tools_webrtc/perf/catapult_uploader_test.py b/third_party/libwebrtc/tools_webrtc/perf/catapult_uploader_test.py
new file mode 100644
index 0000000000..ba42554412
--- /dev/null
+++ b/third_party/libwebrtc/tools_webrtc/perf/catapult_uploader_test.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env vpython3
+
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import os
+import sys
+import unittest
+
+from unittest.mock import MagicMock
+
+
+# This test requires the webrtc_dashboard_upload target to be built before
+# running it.
+def _ConfigurePythonPath():
+  # We just yank the python scripts we require into the PYTHONPATH. You could
+  # also imagine a solution where we use for instance
+  # protobuf:py_proto_runtime to copy catapult and protobuf code to out/.
+  # This is the convention in Chromium and WebRTC python scripts. We do need
+  # to build histogram_pb2 however, so that's why we add out/ to sys.path
+  # below.
+  #
+  # It would be better if there was an equivalent to py_binary in GN, but
+  # there's not.
+  script_dir = os.path.dirname(os.path.realpath(__file__))
+  checkout_root = os.path.abspath(os.path.join(script_dir, os.pardir,
+                                               os.pardir))
+
+  sys.path.insert(
+      0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing'))
+  sys.path.insert(
+      0, os.path.join(checkout_root, 'third_party', 'protobuf', 'python'))
+
+  # The webrtc_dashboard_upload gn rule will build the protobuf stub for
+  # python, so put it in the path for this script before we attempt to import
+  # it.
+  histogram_proto_path = os.path.join(os.path.join('../../out/Default'),
+                                      'pyproto', 'tracing', 'tracing', 'proto')
+  sys.path.insert(0, histogram_proto_path)
+
+  # Fail early in case the proto hasn't been built.
+  from tracing.proto import histogram_proto
+  if not histogram_proto.HAS_PROTO:
+    raise ImportError('Could not find histogram_pb2. You need to build the '
+                      'webrtc_dashboard_upload target before invoking this '
+                      'script. Expected to find '
+                      'histogram_pb2.py in %s.' % histogram_proto_path)
+
+
+def _CreateHistogram(name='hist',
+                     master=None,
+                     bot=None,
+                     benchmark=None,
+                     benchmark_description=None,
+                     commit_position=None,
+                     samples=None):
+  hists = [catapult_uploader.histogram.Histogram(name, 'count')]
+  if samples:
+    for s in samples:
+      hists[0].AddSample(s)
+  histograms = catapult_uploader.histogram_set.HistogramSet(hists)
+  if master:
+    histograms.AddSharedDiagnosticToAllHistograms(
+        catapult_uploader.reserved_infos.MASTERS.name,
+        catapult_uploader.generic_set.GenericSet([master]))
+  if bot:
+    histograms.AddSharedDiagnosticToAllHistograms(
+        catapult_uploader.reserved_infos.BOTS.name,
+        catapult_uploader.generic_set.GenericSet([bot]))
+  if commit_position:
+    histograms.AddSharedDiagnosticToAllHistograms(
+        catapult_uploader.reserved_infos.CHROMIUM_COMMIT_POSITIONS.name,
+        catapult_uploader.generic_set.GenericSet([commit_position]))
+  if benchmark:
+    histograms.AddSharedDiagnosticToAllHistograms(
+        catapult_uploader.reserved_infos.BENCHMARKS.name,
+        catapult_uploader.generic_set.GenericSet([benchmark]))
+  if benchmark_description:
+    histograms.AddSharedDiagnosticToAllHistograms(
+        catapult_uploader.reserved_infos.BENCHMARK_DESCRIPTIONS.name,
+        catapult_uploader.generic_set.GenericSet([benchmark_description]))
+  return histograms
+
+
+class CatapultUploaderTest(unittest.TestCase):
+  def setUp(self):
+    mock = MagicMock(return_value=[200, None])
+    catapult_uploader.httplib2.Http.request = mock
+
+    self.histogram = _CreateHistogram(
+        master='master',
+        bot='bot',
+        benchmark='benchmark',
+        commit_position=123,
+        benchmark_description='Benchmark description.',
+        samples=[1, 2, 3])
+
+  def testSendHistogramsSet(self):
+    url = 'http://notlocalhost'
+    # pylint: disable=protected-access
+    response, content = catapult_uploader._SendHistogramSet(
+        url, self.histogram)
+    self.assertEqual(response, 200)
+    self.assertEqual(content, None)
+
+  def testSendHistogramsSetLocalhost(self):
+    url = 'http://localhost'
+    # pylint: disable=protected-access
+    response, content = catapult_uploader._SendHistogramSet(
+        url, self.histogram)
+    self.assertEqual(response, 200)
+    self.assertEqual(content, None)
+
+
+if __name__ == '__main__':
+  _ConfigurePythonPath()
+  import catapult_uploader
+
+  unittest.main()
diff --git a/third_party/libwebrtc/tools_webrtc/perf/process_perf_results.py b/third_party/libwebrtc/tools_webrtc/perf/process_perf_results.py
new file mode 100644
index 0000000000..e91b1f66e9
--- /dev/null
+++ b/third_party/libwebrtc/tools_webrtc/perf/process_perf_results.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env vpython3
+
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Adds build info to perf results and uploads them.
+
+The tests don't know which bot executed the tests or at what revision, so we
+need to take their output and enrich it with this information. We load the
+proto from the tests, add the build information as shared diagnostics and
+then upload it to the dashboard.
+
+This script can't be in recipes, because we can't access the catapult APIs
+from there. It needs to be here source-side.
+"""
+
+import argparse
+import json
+import os
+import sys
+
+from pathlib import Path
+
+# Even if protobuf is not used directly, this allows transitive imports
+# of the protobuf library to use the vpython wheel specified in the root
+# level .vpython (see bugs.webrtc.org/12211 for context).
+import google.protobuf  # pylint: disable=unused-import
+
+
+def _ConfigurePythonPath(outdir):
+  # We just yank the python scripts we require into the PYTHONPATH. You could
+  # also imagine a solution where we use for instance
+  # protobuf:py_proto_runtime to copy catapult and protobuf code to out/.
+  # This is the convention in Chromium and WebRTC python scripts. We do need
+  # to build histogram_pb2 however, so that's why we add out/ to sys.path
+  # below.
+  #
+  # It would be better if there was an equivalent to py_binary in GN, but
+  # there's not.
+  script_dir = os.path.dirname(os.path.realpath(__file__))
+  checkout_root = os.path.abspath(os.path.join(script_dir, os.pardir,
+                                               os.pardir))
+
+  sys.path.insert(
+      0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing'))
+  sys.path.insert(
+      0, os.path.join(checkout_root, 'third_party', 'protobuf', 'python'))
+
+  # The webrtc_dashboard_upload gn rule will build the protobuf stub for
+  # python, so put it in the path for this script before we attempt to import
+  # it.
+  histogram_proto_path = os.path.join(outdir, 'pyproto', 'tracing', 'tracing',
+                                      'proto')
+  sys.path.insert(0, histogram_proto_path)
+
+  # Fail early in case the proto hasn't been built.
+  from tracing.proto import histogram_proto
+  if not histogram_proto.HAS_PROTO:
+    print('Could not find histogram_pb2. You need to build the '
+          'webrtc_dashboard_upload target before invoking this '
+          'script. Expected to find '
+          'histogram_pb2.py in %s.' % histogram_proto_path)
+    return 1
+  return 0
+
+
+def _UploadToDasboard(args):
+  build_properties = json.loads(args.build_properties)
+  exit_code = _ConfigurePythonPath(build_properties['outdir'])
+  if exit_code != 0:
+    return exit_code
+
+  import catapult_uploader
+
+  perftest_outputs = [
+      f.absolute() for f in Path(args.task_output_dir).rglob(
+          'perftest-output*') if f.is_file()
+  ]
+  for perftest_output in perftest_outputs:
+    uploader_options = catapult_uploader.UploaderOptions(
+        perf_dashboard_machine_group=(
+            build_properties['perf_dashboard_machine_group']),
+        bot=build_properties['bot'],
+        webrtc_git_hash=build_properties['webrtc_git_hash'],
+        commit_position=build_properties['commit_position'],
+        build_page_url=build_properties['build_page_url'],
+        dashboard_url=build_properties['dashboard_url'],
+        test_suite=args.test_suite,
+        input_results_file=perftest_output,
+    )
+    exit_code = catapult_uploader.UploadToDashboard(uploader_options)
+    if exit_code != 0:
+      return exit_code
+  return 0
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--build-properties', help=argparse.SUPPRESS)
+  parser.add_argument('--summary-json', help=argparse.SUPPRESS)
+  parser.add_argument('--task-output-dir', help=argparse.SUPPRESS)
+  parser.add_argument('--test-suite', help=argparse.SUPPRESS)
+  parser.add_argument('-o', '--output-json', help=argparse.SUPPRESS)
+  parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
+  args = parser.parse_args()
+
+  exit_code = _UploadToDasboard(args)
+  if exit_code != 0:
+    with open(args.output_json, 'w') as f:
+      json.dump({
+          "global_tags": ["UNRELIABLE_RESULTS"],
+          "missing_shards": [0]
+      }, f)
+    return exit_code
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/third_party/libwebrtc/tools_webrtc/perf/process_perf_results_py2.py b/third_party/libwebrtc/tools_webrtc/perf/process_perf_results_py2.py
new file mode 100644
index 0000000000..14b6858093
--- /dev/null
+++ b/third_party/libwebrtc/tools_webrtc/perf/process_perf_results_py2.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env vpython3
+
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Calls process_perf_results.py with a python 3 interpreter."""
+
+import sys
+import subprocess
+
+
+# TODO(crbug.com/webrtc/13835): Delete this file and use
+# process_perf_results.py instead.
+def main():
+  cmd = sys.argv[0].replace('_py2', '')
+  print('Calling "%s" with py3 in case this script was called with py2.' % cmd)
+  return subprocess.call(['vpython3', cmd] + sys.argv[1:])
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/third_party/libwebrtc/tools_webrtc/perf/process_perf_results_test.py b/third_party/libwebrtc/tools_webrtc/perf/process_perf_results_test.py
new file mode 100644
index 0000000000..3aa5afd75c
--- /dev/null
+++ b/third_party/libwebrtc/tools_webrtc/perf/process_perf_results_test.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env vpython3
+
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import os
+import sys
+
+import unittest
+from unittest import mock
+
+_SCRIPT_DIR = os.path.dirname(__file__)
+_SRC_DIR = os.path.normpath(os.path.join(_SCRIPT_DIR, '..', '..'))
+
+sys.path.insert(0, os.path.join(_SRC_DIR, 'third_party', 'protobuf', 'python'))
+import process_perf_results
+
+
+class ProcessPerfResultsTest(unittest.TestCase):
+  def testConfigurePythonPath(self):
+    # pylint: disable=protected-access
+    self.assertEqual(
+        0,
+        process_perf_results._ConfigurePythonPath(
+            os.path.join(_SRC_DIR, 'out/Default')))
+
+  def testUploadToDasboard(self):
+    outdir = os.path.join(_SRC_DIR, 'out/Default')
+    args = mock.Mock(
+        build_properties='{'
+        '"outdir":"' + outdir + '", '
+        '"perf_dashboard_machine_group":"mock_machine_group", '
+        '"bot":"mock_bot", '
+        '"webrtc_git_hash":"mock_webrtc_git_hash", '
+        '"commit_position":"123456", '
+        '"build_page_url":"mock_build_page_url", '
+        '"dashboard_url":"mock_dashboard_url"'
+        '}',
+        summary_json='mock_sumary_json',
+        task_output_dir='mock_task_output_dir',
+        test_suite='mock_test_suite',
+    )
+    perftest_output = mock.Mock(
+        absolute=lambda: 'dummy_path/perftest-output.pb',
+        is_file=lambda: True,
+    )
+    with mock.patch('pathlib.Path.rglob') as mocked_rglob:
+      with mock.patch('catapult_uploader.UploadToDashboard') as mocked_upload:
+        mocked_rglob.return_value = [perftest_output]
+        mocked_upload.return_value = 0
+        # pylint: disable=protected-access
+        self.assertEqual(0, process_perf_results._UploadToDasboard(args))
+
+        import catapult_uploader
+        mocked_upload.assert_called_once_with(
+            catapult_uploader.UploaderOptions(
+                perf_dashboard_machine_group='mock_machine_group',
+                bot='mock_bot',
+                test_suite='mock_test_suite',
+                webrtc_git_hash='mock_webrtc_git_hash',
+                commit_position='123456',
+                build_page_url='mock_build_page_url',
+                dashboard_url='mock_dashboard_url',
+                input_results_file=perftest_output.absolute()))
+
+
+if __name__ == '__main__':
+  unittest.main()
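
For context, the sketch below shows one way the uploader added by this patch could be driven directly, mirroring what process_perf_results._UploadToDasboard() does for each perftest-output*.pb file it finds. It is only an illustration: the machine group, bot, test suite, git hash, URLs and file paths are placeholders, and it assumes catapult's tracing/ directory and the generated histogram_pb2 stub are already importable, as _ConfigurePythonPath() arranges in the checked-in scripts.

#!/usr/bin/env vpython3
# Hypothetical driver for catapult_uploader.py from this patch. Every value
# below (machine group, bot, suite, hash, URLs, file paths) is a placeholder.
import sys

# Assumes third_party/catapult/tracing and the out/<dir>/pyproto histogram
# proto stubs are already on sys.path, as _ConfigurePythonPath() sets up, and
# that luci-auth is available for _GenerateOauthToken().
import catapult_uploader


def main():
  options = catapult_uploader.UploaderOptions(
      perf_dashboard_machine_group='example.machine.group',  # placeholder
      bot='example-perf-bot',  # placeholder
      test_suite='example_test_suite',  # placeholder
      webrtc_git_hash='0' * 40,  # placeholder commit hash
      commit_position=123456,  # placeholder commit position
      build_page_url='https://example.invalid/build/1',  # placeholder
      dashboard_url='http://localhost:8080',  # local dev server skips gzip
      input_results_file='perftest-output.pb',  # HistogramSet proto from a test
      output_json_file='upload_debug.json')  # optional debug dump
  # UploadToDashboard() returns 0 on success and 1 on any failure.
  return catapult_uploader.UploadToDashboard(options)


if __name__ == '__main__':
  sys.exit(main())

In the real flow these values come from the recipe's --build-properties JSON and the swarming task output directory, as process_perf_results.py above shows.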