author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/libwebrtc/build/android/pylib/results
parent     Initial commit.
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/build/android/pylib/results')
14 files changed, 2854 insertions, 0 deletions
diff --git a/third_party/libwebrtc/build/android/pylib/results/__init__.py b/third_party/libwebrtc/build/android/pylib/results/__init__.py new file mode 100644 index 0000000000..4d6aabb953 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/__init__.py @@ -0,0 +1,3 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. diff --git a/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/__init__.py b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/__init__.py new file mode 100644 index 0000000000..4d6aabb953 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/__init__.py @@ -0,0 +1,3 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. diff --git a/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator.py b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator.py new file mode 100644 index 0000000000..ff035ec1c7 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator.py @@ -0,0 +1,702 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# +# Most of this file was ported over from Blink's +# tools/blinkpy/web_tests/layout_package/json_results_generator.py +# tools/blinkpy/common/net/file_uploader.py +# + +import json +import logging +import mimetypes +import os +import time +try: + from urllib.request import urlopen, Request + from urllib.error import HTTPError, URLError + from urllib.parse import quote +except ImportError: + from urllib import quote + from urllib2 import urlopen, HTTPError, URLError, Request + +_log = logging.getLogger(__name__) + +_JSON_PREFIX = 'ADD_RESULTS(' +_JSON_SUFFIX = ');' + + +def HasJSONWrapper(string): + return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX) + + +def StripJSONWrapper(json_content): + # FIXME: Kill this code once the server returns json instead of jsonp. + if HasJSONWrapper(json_content): + return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)] + return json_content + + +def WriteJSON(json_object, file_path, callback=None): + # Specify separators in order to get compact encoding. 
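# (Editorial illustration, not part of the upstream patch.)  The default
# json.dumps() separators insert spaces, so the compact form is noticeably
# smaller for a large results trie:
#   json.dumps({'a': 1, 'b': [2, 3]})                        -> '{"a": 1, "b": [2, 3]}'
#   json.dumps({'a': 1, 'b': [2, 3]}, separators=(',', ':')) -> '{"a":1,"b":[2,3]}'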
+ json_string = json.dumps(json_object, separators=(',', ':')) + if callback: + json_string = callback + '(' + json_string + ');' + with open(file_path, 'w') as fp: + fp.write(json_string) + + +def ConvertTrieToFlatPaths(trie, prefix=None): + """Flattens the trie of paths, prepending a prefix to each.""" + result = {} + for name, data in trie.items(): + if prefix: + name = prefix + '/' + name + + if len(data) and not 'results' in data: + result.update(ConvertTrieToFlatPaths(data, name)) + else: + result[name] = data + + return result + + +def AddPathToTrie(path, value, trie): + """Inserts a single path and value into a directory trie structure.""" + if not '/' in path: + trie[path] = value + return + + directory, _, rest = path.partition('/') + if not directory in trie: + trie[directory] = {} + AddPathToTrie(rest, value, trie[directory]) + + +def TestTimingsTrie(individual_test_timings): + """Breaks a test name into dicts by directory + + foo/bar/baz.html: 1ms + foo/bar/baz1.html: 3ms + + becomes + foo: { + bar: { + baz.html: 1, + baz1.html: 3 + } + } + """ + trie = {} + for test_result in individual_test_timings: + test = test_result.test_name + + AddPathToTrie(test, int(1000 * test_result.test_run_time), trie) + + return trie + + +class TestResult(object): + """A simple class that represents a single test result.""" + + # Test modifier constants. + (NONE, FAILS, FLAKY, DISABLED) = list(range(4)) + + def __init__(self, test, failed=False, elapsed_time=0): + self.test_name = test + self.failed = failed + self.test_run_time = elapsed_time + + test_name = test + try: + test_name = test.split('.')[1] + except IndexError: + _log.warn('Invalid test name: %s.', test) + + if test_name.startswith('FAILS_'): + self.modifier = self.FAILS + elif test_name.startswith('FLAKY_'): + self.modifier = self.FLAKY + elif test_name.startswith('DISABLED_'): + self.modifier = self.DISABLED + else: + self.modifier = self.NONE + + def Fixable(self): + return self.failed or self.modifier == self.DISABLED + + +class JSONResultsGeneratorBase(object): + """A JSON results generator for generic tests.""" + + MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750 + # Min time (seconds) that will be added to the JSON. + MIN_TIME = 1 + + # Note that in non-chromium tests those chars are used to indicate + # test modifiers (FAILS, FLAKY, etc) but not actual test results. + PASS_RESULT = 'P' + SKIP_RESULT = 'X' + FAIL_RESULT = 'F' + FLAKY_RESULT = 'L' + NO_DATA_RESULT = 'N' + + MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT, + TestResult.DISABLED: SKIP_RESULT, + TestResult.FAILS: FAIL_RESULT, + TestResult.FLAKY: FLAKY_RESULT} + + VERSION = 4 + VERSION_KEY = 'version' + RESULTS = 'results' + TIMES = 'times' + BUILD_NUMBERS = 'buildNumbers' + TIME = 'secondsSinceEpoch' + TESTS = 'tests' + + FIXABLE_COUNT = 'fixableCount' + FIXABLE = 'fixableCounts' + ALL_FIXABLE_COUNT = 'allFixableCount' + + RESULTS_FILENAME = 'results.json' + TIMES_MS_FILENAME = 'times_ms.json' + INCREMENTAL_RESULTS_FILENAME = 'incremental_results.json' + + # line too long pylint: disable=line-too-long + URL_FOR_TEST_LIST_JSON = ( + 'https://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&' + 'master=%s') + # pylint: enable=line-too-long + + def __init__(self, builder_name, build_name, build_number, + results_file_base_path, builder_base_url, + test_results_map, svn_repositories=None, + test_results_server=None, + test_type='', + master_name=''): + """Modifies the results.json file. Grabs it off the archive directory + if it is not found locally. 
+ + Args + builder_name: the builder name (e.g. Webkit). + build_name: the build name (e.g. webkit-rel). + build_number: the build number. + results_file_base_path: Absolute path to the directory containing the + results json file. + builder_base_url: the URL where we have the archived test results. + If this is None no archived results will be retrieved. + test_results_map: A dictionary that maps test_name to TestResult. + svn_repositories: A (json_field_name, svn_path) pair for SVN + repositories that tests rely on. The SVN revision will be + included in the JSON with the given json_field_name. + test_results_server: server that hosts test results json. + test_type: test type string (e.g. 'layout-tests'). + master_name: the name of the buildbot master. + """ + self._builder_name = builder_name + self._build_name = build_name + self._build_number = build_number + self._builder_base_url = builder_base_url + self._results_directory = results_file_base_path + + self._test_results_map = test_results_map + self._test_results = list(test_results_map.values()) + + self._svn_repositories = svn_repositories + if not self._svn_repositories: + self._svn_repositories = {} + + self._test_results_server = test_results_server + self._test_type = test_type + self._master_name = master_name + + self._archived_results = None + + def GenerateJSONOutput(self): + json_object = self.GetJSON() + if json_object: + file_path = ( + os.path.join( + self._results_directory, + self.INCREMENTAL_RESULTS_FILENAME)) + WriteJSON(json_object, file_path) + + def GenerateTimesMSFile(self): + times = TestTimingsTrie(list(self._test_results_map.values())) + file_path = os.path.join(self._results_directory, self.TIMES_MS_FILENAME) + WriteJSON(times, file_path) + + def GetJSON(self): + """Gets the results for the results.json file.""" + results_json = {} + + if not results_json: + results_json, error = self._GetArchivedJSONResults() + if error: + # If there was an error don't write a results.json + # file at all as it would lose all the information on the + # bot. + _log.error('Archive directory is inaccessible. Not ' + 'modifying or clobbering the results.json ' + 'file: ' + str(error)) + return None + + builder_name = self._builder_name + if results_json and builder_name not in results_json: + _log.debug('Builder name (%s) is not in the results.json file.', + builder_name) + + self._ConvertJSONToCurrentVersion(results_json) + + if builder_name not in results_json: + results_json[builder_name] = ( + self._CreateResultsForBuilderJSON()) + + results_for_builder = results_json[builder_name] + + if builder_name: + self._InsertGenericMetaData(results_for_builder) + + self._InsertFailureSummaries(results_for_builder) + + # Update the all failing tests with result type and time. + tests = results_for_builder[self.TESTS] + all_failing_tests = self._GetFailedTestNames() + all_failing_tests.update(ConvertTrieToFlatPaths(tests)) + + for test in all_failing_tests: + self._InsertTestTimeAndResult(test, tests) + + return results_json + + def SetArchivedResults(self, archived_results): + self._archived_results = archived_results + + def UploadJSONFiles(self, json_files): + """Uploads the given json_files to the test_results_server (if the + test_results_server is given).""" + if not self._test_results_server: + return + + if not self._master_name: + _log.error( + '--test-results-server was set, but --master-name was not. 
Not ' + 'uploading JSON files.') + return + + _log.info('Uploading JSON files for builder: %s', self._builder_name) + attrs = [('builder', self._builder_name), + ('testtype', self._test_type), + ('master', self._master_name)] + + files = [(json_file, os.path.join(self._results_directory, json_file)) + for json_file in json_files] + + url = 'https://%s/testfile/upload' % self._test_results_server + # Set uploading timeout in case appengine server is having problems. + # 120 seconds are more than enough to upload test results. + uploader = _FileUploader(url, 120) + try: + response = uploader.UploadAsMultipartFormData(files, attrs) + if response: + if response.code == 200: + _log.info('JSON uploaded.') + else: + _log.debug( + "JSON upload failed, %d: '%s'", response.code, response.read()) + else: + _log.error('JSON upload failed; no response returned') + except Exception as err: # pylint: disable=broad-except + _log.error('Upload failed: %s', err) + return + + def _GetTestTiming(self, test_name): + """Returns test timing data (elapsed time) in second + for the given test_name.""" + if test_name in self._test_results_map: + # Floor for now to get time in seconds. + return int(self._test_results_map[test_name].test_run_time) + return 0 + + def _GetFailedTestNames(self): + """Returns a set of failed test names.""" + return set([r.test_name for r in self._test_results if r.failed]) + + def _GetModifierChar(self, test_name): + """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT, + PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier + for the given test_name. + """ + if test_name not in self._test_results_map: + return self.__class__.NO_DATA_RESULT + + test_result = self._test_results_map[test_name] + if test_result.modifier in list(self.MODIFIER_TO_CHAR.keys()): + return self.MODIFIER_TO_CHAR[test_result.modifier] + + return self.__class__.PASS_RESULT + + def _get_result_char(self, test_name): + """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT, + PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result + for the given test_name. + """ + if test_name not in self._test_results_map: + return self.__class__.NO_DATA_RESULT + + test_result = self._test_results_map[test_name] + if test_result.modifier == TestResult.DISABLED: + return self.__class__.SKIP_RESULT + + if test_result.failed: + return self.__class__.FAIL_RESULT + + return self.__class__.PASS_RESULT + + def _GetSVNRevision(self, in_directory): + """Returns the svn revision for the given directory. + + Args: + in_directory: The directory where svn is to be run. + """ + # This is overridden in flakiness_dashboard_results_uploader.py. + raise NotImplementedError() + + def _GetArchivedJSONResults(self): + """Download JSON file that only contains test + name list from test-results server. This is for generating incremental + JSON so the file generated has info for tests that failed before but + pass or are skipped from current run. + + Returns (archived_results, error) tuple where error is None if results + were successfully read. + """ + results_json = {} + old_results = None + error = None + + if not self._test_results_server: + return {}, None + + results_file_url = (self.URL_FOR_TEST_LIST_JSON % + (quote(self._test_results_server), + quote(self._builder_name), self.RESULTS_FILENAME, + quote(self._test_type), quote(self._master_name))) + + # pylint: disable=redefined-variable-type + try: + # FIXME: We should talk to the network via a Host object. 
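      # The archived results may come back JSONP-wrapped and are unwrapped
      # by StripJSONWrapper() below.  A URLError is recorded in |error| so
      # GetJSON() will not clobber results.json; the HTTPError guard as
      # written (code < 400 and code >= 500) can never be true, so HTTP
      # failures simply fall through and a fresh results dict is used.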
+ results_file = urlopen(results_file_url) + old_results = results_file.read() + except HTTPError as http_error: + # A non-4xx status code means the bot is hosed for some reason + # and we can't grab the results.json file off of it. + if http_error.code < 400 and http_error.code >= 500: + error = http_error + except URLError as url_error: + error = url_error + # pylint: enable=redefined-variable-type + + if old_results: + # Strip the prefix and suffix so we can get the actual JSON object. + old_results = StripJSONWrapper(old_results) + + try: + results_json = json.loads(old_results) + except Exception: # pylint: disable=broad-except + _log.debug('results.json was not valid JSON. Clobbering.') + # The JSON file is not valid JSON. Just clobber the results. + results_json = {} + else: + _log.debug('Old JSON results do not exist. Starting fresh.') + results_json = {} + + return results_json, error + + def _InsertFailureSummaries(self, results_for_builder): + """Inserts aggregate pass/failure statistics into the JSON. + This method reads self._test_results and generates + FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries. + + Args: + results_for_builder: Dictionary containing the test results for a + single builder. + """ + # Insert the number of tests that failed or skipped. + fixable_count = len([r for r in self._test_results if r.Fixable()]) + self._InsertItemIntoRawList(results_for_builder, + fixable_count, self.FIXABLE_COUNT) + + # Create a test modifiers (FAILS, FLAKY etc) summary dictionary. + entry = {} + for test_name in self._test_results_map.keys(): + result_char = self._GetModifierChar(test_name) + entry[result_char] = entry.get(result_char, 0) + 1 + + # Insert the pass/skip/failure summary dictionary. + self._InsertItemIntoRawList(results_for_builder, entry, + self.FIXABLE) + + # Insert the number of all the tests that are supposed to pass. + all_test_count = len(self._test_results) + self._InsertItemIntoRawList(results_for_builder, + all_test_count, self.ALL_FIXABLE_COUNT) + + def _InsertItemIntoRawList(self, results_for_builder, item, key): + """Inserts the item into the list with the given key in the results for + this builder. Creates the list if no such list exists. + + Args: + results_for_builder: Dictionary containing the test results for a + single builder. + item: Number or string to insert into the list. + key: Key in results_for_builder for the list to insert into. + """ + if key in results_for_builder: + raw_list = results_for_builder[key] + else: + raw_list = [] + + raw_list.insert(0, item) + raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG] + results_for_builder[key] = raw_list + + def _InsertItemRunLengthEncoded(self, item, encoded_results): + """Inserts the item into the run-length encoded results. + + Args: + item: String or number to insert. + encoded_results: run-length encoded results. An array of arrays, e.g. + [[3,'A'],[1,'Q']] encodes AAAQ. + """ + if len(encoded_results) and item == encoded_results[0][1]: + num_results = encoded_results[0][0] + if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: + encoded_results[0][0] = num_results + 1 + else: + # Use a list instead of a class for the run-length encoding since + # we want the serialized form to be concise. + encoded_results.insert(0, [1, item]) + + def _InsertGenericMetaData(self, results_for_builder): + """ Inserts generic metadata (such as version number, current time etc) + into the JSON. + + Args: + results_for_builder: Dictionary containing the test results for + a single builder. 
+ """ + self._InsertItemIntoRawList(results_for_builder, + self._build_number, self.BUILD_NUMBERS) + + # Include SVN revisions for the given repositories. + for (name, path) in self._svn_repositories: + # Note: for JSON file's backward-compatibility we use 'chrome' rather + # than 'chromium' here. + lowercase_name = name.lower() + if lowercase_name == 'chromium': + lowercase_name = 'chrome' + self._InsertItemIntoRawList(results_for_builder, + self._GetSVNRevision(path), + lowercase_name + 'Revision') + + self._InsertItemIntoRawList(results_for_builder, + int(time.time()), + self.TIME) + + def _InsertTestTimeAndResult(self, test_name, tests): + """ Insert a test item with its results to the given tests dictionary. + + Args: + tests: Dictionary containing test result entries. + """ + + result = self._get_result_char(test_name) + test_time = self._GetTestTiming(test_name) + + this_test = tests + for segment in test_name.split('/'): + if segment not in this_test: + this_test[segment] = {} + this_test = this_test[segment] + + if not len(this_test): + self._PopulateResultsAndTimesJSON(this_test) + + if self.RESULTS in this_test: + self._InsertItemRunLengthEncoded(result, this_test[self.RESULTS]) + else: + this_test[self.RESULTS] = [[1, result]] + + if self.TIMES in this_test: + self._InsertItemRunLengthEncoded(test_time, this_test[self.TIMES]) + else: + this_test[self.TIMES] = [[1, test_time]] + + def _ConvertJSONToCurrentVersion(self, results_json): + """If the JSON does not match the current version, converts it to the + current version and adds in the new version number. + """ + if self.VERSION_KEY in results_json: + archive_version = results_json[self.VERSION_KEY] + if archive_version == self.VERSION: + return + else: + archive_version = 3 + + # version 3->4 + if archive_version == 3: + for results in list(results_json.values()): + self._ConvertTestsToTrie(results) + + results_json[self.VERSION_KEY] = self.VERSION + + def _ConvertTestsToTrie(self, results): + if not self.TESTS in results: + return + + test_results = results[self.TESTS] + test_results_trie = {} + for test in test_results.keys(): + single_test_result = test_results[test] + AddPathToTrie(test, single_test_result, test_results_trie) + + results[self.TESTS] = test_results_trie + + def _PopulateResultsAndTimesJSON(self, results_and_times): + results_and_times[self.RESULTS] = [] + results_and_times[self.TIMES] = [] + return results_and_times + + def _CreateResultsForBuilderJSON(self): + results_for_builder = {} + results_for_builder[self.TESTS] = {} + return results_for_builder + + def _RemoveItemsOverMaxNumberOfBuilds(self, encoded_list): + """Removes items from the run-length encoded list after the final + item that exceeds the max number of builds to track. + + Args: + encoded_results: run-length encoded results. An array of arrays, e.g. + [[3,'A'],[1,'Q']] encodes AAAQ. + """ + num_builds = 0 + index = 0 + for result in encoded_list: + num_builds = num_builds + result[0] + index = index + 1 + if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: + return encoded_list[:index] + return encoded_list + + def _NormalizeResultsJSON(self, test, test_name, tests): + """ Prune tests where all runs pass or tests that no longer exist and + truncate all results to maxNumberOfBuilds. + + Args: + test: ResultsAndTimes object for this test. + test_name: Name of the test. + tests: The JSON object with all the test results for this builder. 
+ """ + test[self.RESULTS] = self._RemoveItemsOverMaxNumberOfBuilds( + test[self.RESULTS]) + test[self.TIMES] = self._RemoveItemsOverMaxNumberOfBuilds( + test[self.TIMES]) + + is_all_pass = self._IsResultsAllOfType(test[self.RESULTS], + self.PASS_RESULT) + is_all_no_data = self._IsResultsAllOfType(test[self.RESULTS], + self.NO_DATA_RESULT) + max_time = max([test_time[1] for test_time in test[self.TIMES]]) + + # Remove all passes/no-data from the results to reduce noise and + # filesize. If a test passes every run, but takes > MIN_TIME to run, + # don't throw away the data. + if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME): + del tests[test_name] + + # method could be a function pylint: disable=R0201 + def _IsResultsAllOfType(self, results, result_type): + """Returns whether all the results are of the given type + (e.g. all passes).""" + return len(results) == 1 and results[0][1] == result_type + + +class _FileUploader(object): + + def __init__(self, url, timeout_seconds): + self._url = url + self._timeout_seconds = timeout_seconds + + def UploadAsMultipartFormData(self, files, attrs): + file_objs = [] + for filename, path in files: + with file(path, 'rb') as fp: + file_objs.append(('file', filename, fp.read())) + + # FIXME: We should use the same variable names for the formal and actual + # parameters. + content_type, data = _EncodeMultipartFormData(attrs, file_objs) + return self._UploadData(content_type, data) + + def _UploadData(self, content_type, data): + start = time.time() + end = start + self._timeout_seconds + while time.time() < end: + try: + request = Request(self._url, data, {'Content-Type': content_type}) + return urlopen(request) + except HTTPError as e: + _log.warn("Received HTTP status %s loading \"%s\". " + 'Retrying in 10 seconds...', e.code, e.filename) + time.sleep(10) + + +def _GetMIMEType(filename): + return mimetypes.guess_type(filename)[0] or 'application/octet-stream' + + +# FIXME: Rather than taking tuples, this function should take more +# structured data. +def _EncodeMultipartFormData(fields, files): + """Encode form fields for multipart/form-data. + + Args: + fields: A sequence of (name, value) elements for regular form fields. + files: A sequence of (name, filename, value) elements for data to be + uploaded as files. + Returns: + (content_type, body) ready for httplib.HTTP instance. 
+ + Source: + http://code.google.com/p/rietveld/source/browse/trunk/upload.py + """ + BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-' + CRLF = '\r\n' + lines = [] + + for key, value in fields: + lines.append('--' + BOUNDARY) + lines.append('Content-Disposition: form-data; name="%s"' % key) + lines.append('') + if isinstance(value, str): + value = value.encode('utf-8') + lines.append(value) + + for key, filename, value in files: + lines.append('--' + BOUNDARY) + lines.append('Content-Disposition: form-data; name="%s"; ' + 'filename="%s"' % (key, filename)) + lines.append('Content-Type: %s' % _GetMIMEType(filename)) + lines.append('') + if isinstance(value, str): + value = value.encode('utf-8') + lines.append(value) + + lines.append('--' + BOUNDARY + '--') + lines.append('') + body = CRLF.join(lines) + content_type = 'multipart/form-data; boundary=%s' % BOUNDARY + return content_type, body diff --git a/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py new file mode 100644 index 0000000000..70c808c71f --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py @@ -0,0 +1,213 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# +# Most of this file was ported over from Blink's +# webkitpy/layout_tests/layout_package/json_results_generator_unittest.py +# + +import unittest +import json + +from pylib.results.flakiness_dashboard import json_results_generator + + +class JSONGeneratorTest(unittest.TestCase): + + def setUp(self): + self.builder_name = 'DUMMY_BUILDER_NAME' + self.build_name = 'DUMMY_BUILD_NAME' + self.build_number = 'DUMMY_BUILDER_NUMBER' + + # For archived results. + self._json = None + self._num_runs = 0 + self._tests_set = set([]) + self._test_timings = {} + self._failed_count_map = {} + + self._PASS_count = 0 + self._DISABLED_count = 0 + self._FLAKY_count = 0 + self._FAILS_count = 0 + self._fixable_count = 0 + + self._orig_write_json = json_results_generator.WriteJSON + + # unused arguments ... 
pylint: disable=W0613 + def _WriteJSONStub(json_object, file_path, callback=None): + pass + + json_results_generator.WriteJSON = _WriteJSONStub + + def tearDown(self): + json_results_generator.WriteJSON = self._orig_write_json + + def _TestJSONGeneration(self, passed_tests_list, failed_tests_list): + tests_set = set(passed_tests_list) | set(failed_tests_list) + + DISABLED_tests = set([t for t in tests_set + if t.startswith('DISABLED_')]) + FLAKY_tests = set([t for t in tests_set + if t.startswith('FLAKY_')]) + FAILS_tests = set([t for t in tests_set + if t.startswith('FAILS_')]) + PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests) + + failed_tests = set(failed_tests_list) - DISABLED_tests + failed_count_map = dict([(t, 1) for t in failed_tests]) + + test_timings = {} + i = 0 + for test in tests_set: + test_timings[test] = float(self._num_runs * 100 + i) + i += 1 + + test_results_map = dict() + for test in tests_set: + test_results_map[test] = json_results_generator.TestResult( + test, failed=(test in failed_tests), + elapsed_time=test_timings[test]) + + generator = json_results_generator.JSONResultsGeneratorBase( + self.builder_name, self.build_name, self.build_number, + '', + None, # don't fetch past json results archive + test_results_map) + + failed_count_map = dict([(t, 1) for t in failed_tests]) + + # Test incremental json results + incremental_json = generator.GetJSON() + self._VerifyJSONResults( + tests_set, + test_timings, + failed_count_map, + len(PASS_tests), + len(DISABLED_tests), + len(FLAKY_tests), + len(DISABLED_tests | failed_tests), + incremental_json, + 1) + + # We don't verify the results here, but at least we make sure the code + # runs without errors. + generator.GenerateJSONOutput() + generator.GenerateTimesMSFile() + + def _VerifyJSONResults(self, tests_set, test_timings, failed_count_map, + PASS_count, DISABLED_count, FLAKY_count, + fixable_count, json_obj, num_runs): + # Aliasing to a short name for better access to its constants. 
+ JRG = json_results_generator.JSONResultsGeneratorBase + + self.assertIn(JRG.VERSION_KEY, json_obj) + self.assertIn(self.builder_name, json_obj) + + buildinfo = json_obj[self.builder_name] + self.assertIn(JRG.FIXABLE, buildinfo) + self.assertIn(JRG.TESTS, buildinfo) + self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs) + self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number) + + if tests_set or DISABLED_count: + fixable = {} + for fixable_items in buildinfo[JRG.FIXABLE]: + for (result_type, count) in fixable_items.items(): + if result_type in fixable: + fixable[result_type] = fixable[result_type] + count + else: + fixable[result_type] = count + + if PASS_count: + self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count) + else: + self.assertTrue(JRG.PASS_RESULT not in fixable or + fixable[JRG.PASS_RESULT] == 0) + if DISABLED_count: + self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count) + else: + self.assertTrue(JRG.SKIP_RESULT not in fixable or + fixable[JRG.SKIP_RESULT] == 0) + if FLAKY_count: + self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count) + else: + self.assertTrue(JRG.FLAKY_RESULT not in fixable or + fixable[JRG.FLAKY_RESULT] == 0) + + if failed_count_map: + tests = buildinfo[JRG.TESTS] + for test_name in failed_count_map.keys(): + test = self._FindTestInTrie(test_name, tests) + + failed = 0 + for result in test[JRG.RESULTS]: + if result[1] == JRG.FAIL_RESULT: + failed += result[0] + self.assertEqual(failed_count_map[test_name], failed) + + timing_count = 0 + for timings in test[JRG.TIMES]: + if timings[1] == test_timings[test_name]: + timing_count = timings[0] + self.assertEqual(1, timing_count) + + if fixable_count: + self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count) + + def _FindTestInTrie(self, path, trie): + nodes = path.split('/') + sub_trie = trie + for node in nodes: + self.assertIn(node, sub_trie) + sub_trie = sub_trie[node] + return sub_trie + + def testJSONGeneration(self): + self._TestJSONGeneration([], []) + self._TestJSONGeneration(['A1', 'B1'], []) + self._TestJSONGeneration([], ['FAILS_A2', 'FAILS_B2']) + self._TestJSONGeneration(['DISABLED_A3', 'DISABLED_B3'], []) + self._TestJSONGeneration(['A4'], ['B4', 'FAILS_C4']) + self._TestJSONGeneration(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5']) + self._TestJSONGeneration( + ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'], + ['FAILS_D6']) + + # Generate JSON with the same test sets. (Both incremental results and + # archived results must be updated appropriately.) + self._TestJSONGeneration( + ['A', 'FLAKY_B', 'DISABLED_C'], + ['FAILS_D', 'FLAKY_E']) + self._TestJSONGeneration( + ['A', 'DISABLED_C', 'FLAKY_E'], + ['FLAKY_B', 'FAILS_D']) + self._TestJSONGeneration( + ['FLAKY_B', 'DISABLED_C', 'FAILS_D'], + ['A', 'FLAKY_E']) + + def testHierarchicalJSNGeneration(self): + # FIXME: Re-work tests to be more comprehensible and comprehensive. 
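    # The '/'-separated names below exercise the trie layout:
    # _InsertTestTimeAndResult() splits test names on '/' when storing
    # results, and _FindTestInTrie() walks the nested dicts back out.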
+ self._TestJSONGeneration(['foo/A'], ['foo/B', 'bar/C']) + + def testTestTimingsTrie(self): + individual_test_timings = [] + individual_test_timings.append( + json_results_generator.TestResult( + 'foo/bar/baz.html', + elapsed_time=1.2)) + individual_test_timings.append( + json_results_generator.TestResult('bar.html', elapsed_time=0.0001)) + trie = json_results_generator.TestTimingsTrie(individual_test_timings) + + expected_trie = { + 'bar.html': 0, + 'foo': { + 'bar': { + 'baz.html': 1200, + } + } + } + + self.assertEqual(json.dumps(trie), json.dumps(expected_trie)) diff --git a/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/results_uploader.py b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/results_uploader.py new file mode 100644 index 0000000000..b68a898b7d --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/flakiness_dashboard/results_uploader.py @@ -0,0 +1,176 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Uploads the results to the flakiness dashboard server.""" +# pylint: disable=E1002,R0201 + +import logging +import os +import shutil +import tempfile +import xml + + +from devil.utils import cmd_helper +from pylib.constants import host_paths +from pylib.results.flakiness_dashboard import json_results_generator +from pylib.utils import repo_utils + + + +class JSONResultsGenerator(json_results_generator.JSONResultsGeneratorBase): + """Writes test results to a JSON file and handles uploading that file to + the test results server. + """ + def __init__(self, builder_name, build_name, build_number, tmp_folder, + test_results_map, test_results_server, test_type, master_name): + super(JSONResultsGenerator, self).__init__( + builder_name=builder_name, + build_name=build_name, + build_number=build_number, + results_file_base_path=tmp_folder, + builder_base_url=None, + test_results_map=test_results_map, + svn_repositories=(('webkit', 'third_party/WebKit'), + ('chrome', '.')), + test_results_server=test_results_server, + test_type=test_type, + master_name=master_name) + + #override + def _GetModifierChar(self, test_name): + if test_name not in self._test_results_map: + return self.__class__.NO_DATA_RESULT + + return self._test_results_map[test_name].modifier + + #override + def _GetSVNRevision(self, in_directory): + """Returns the git/svn revision for the given directory. + + Args: + in_directory: The directory relative to src. + """ + def _is_git_directory(in_directory): + """Returns true if the given directory is in a git repository. + + Args: + in_directory: The directory path to be tested. 
+ """ + if os.path.exists(os.path.join(in_directory, '.git')): + return True + parent = os.path.dirname(in_directory) + if parent == host_paths.DIR_SOURCE_ROOT or parent == in_directory: + return False + return _is_git_directory(parent) + + in_directory = os.path.join(host_paths.DIR_SOURCE_ROOT, in_directory) + + if not os.path.exists(os.path.join(in_directory, '.svn')): + if _is_git_directory(in_directory): + return repo_utils.GetGitHeadSHA1(in_directory) + else: + return '' + + output = cmd_helper.GetCmdOutput(['svn', 'info', '--xml'], cwd=in_directory) + try: + dom = xml.dom.minidom.parseString(output) + return dom.getElementsByTagName('entry')[0].getAttribute('revision') + except xml.parsers.expat.ExpatError: + return '' + return '' + + +class ResultsUploader(object): + """Handles uploading buildbot tests results to the flakiness dashboard.""" + def __init__(self, tests_type): + self._build_number = os.environ.get('BUILDBOT_BUILDNUMBER') + self._master_name = os.environ.get('BUILDBOT_MASTERNAME') + self._builder_name = os.environ.get('BUILDBOT_BUILDERNAME') + self._tests_type = tests_type + self._build_name = None + + if not self._build_number or not self._builder_name: + raise Exception('You should not be uploading tests results to the server' + 'from your local machine.') + + upstream = (tests_type != 'Chromium_Android_Instrumentation') + if not upstream: + self._build_name = 'chromium-android' + buildbot_branch = os.environ.get('BUILDBOT_BRANCH') + if not buildbot_branch: + buildbot_branch = 'master' + else: + # Ensure there's no leading "origin/" + buildbot_branch = buildbot_branch[buildbot_branch.find('/') + 1:] + self._master_name = '%s-%s' % (self._build_name, buildbot_branch) + + self._test_results_map = {} + + def AddResults(self, test_results): + # TODO(frankf): Differentiate between fail/crash/timeouts. + conversion_map = [ + (test_results.GetPass(), False, + json_results_generator.JSONResultsGeneratorBase.PASS_RESULT), + (test_results.GetFail(), True, + json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT), + (test_results.GetCrash(), True, + json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT), + (test_results.GetTimeout(), True, + json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT), + (test_results.GetUnknown(), True, + json_results_generator.JSONResultsGeneratorBase.NO_DATA_RESULT), + ] + + for results_list, failed, modifier in conversion_map: + for single_test_result in results_list: + test_result = json_results_generator.TestResult( + test=single_test_result.GetName(), + failed=failed, + elapsed_time=single_test_result.GetDuration() / 1000) + # The WebKit TestResult object sets the modifier it based on test name. + # Since we don't use the same test naming convention as WebKit the + # modifier will be wrong, so we need to overwrite it. 
+ test_result.modifier = modifier + + self._test_results_map[single_test_result.GetName()] = test_result + + def Upload(self, test_results_server): + if not self._test_results_map: + return + + tmp_folder = tempfile.mkdtemp() + + try: + results_generator = JSONResultsGenerator( + builder_name=self._builder_name, + build_name=self._build_name, + build_number=self._build_number, + tmp_folder=tmp_folder, + test_results_map=self._test_results_map, + test_results_server=test_results_server, + test_type=self._tests_type, + master_name=self._master_name) + + json_files = ["incremental_results.json", "times_ms.json"] + results_generator.GenerateJSONOutput() + results_generator.GenerateTimesMSFile() + results_generator.UploadJSONFiles(json_files) + except Exception as e: # pylint: disable=broad-except + logging.error("Uploading results to test server failed: %s.", e) + finally: + shutil.rmtree(tmp_folder) + + +def Upload(results, flakiness_dashboard_server, test_type): + """Reports test results to the flakiness dashboard for Chrome for Android. + + Args: + results: test results. + flakiness_dashboard_server: the server to upload the results to. + test_type: the type of the tests (as displayed by the flakiness dashboard). + """ + uploader = ResultsUploader(test_type) + uploader.AddResults(results) + uploader.Upload(flakiness_dashboard_server) diff --git a/third_party/libwebrtc/build/android/pylib/results/json_results.py b/third_party/libwebrtc/build/android/pylib/results/json_results.py new file mode 100644 index 0000000000..ed63c1540c --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/json_results.py @@ -0,0 +1,239 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +import collections +import itertools +import json +import logging +import time + +import six + +from pylib.base import base_test_result + +def GenerateResultsDict(test_run_results, global_tags=None): + """Create a results dict from |test_run_results| suitable for writing to JSON. + Args: + test_run_results: a list of base_test_result.TestRunResults objects. + Returns: + A results dict that mirrors the one generated by + base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON. + """ + # Example json output. + # { + # "global_tags": [], + # "all_tests": [ + # "test1", + # "test2", + # ], + # "disabled_tests": [], + # "per_iteration_data": [ + # { + # "test1": [ + # { + # "status": "SUCCESS", + # "elapsed_time_ms": 1, + # "output_snippet": "", + # "output_snippet_base64": "", + # "losless_snippet": "", + # }, + # ... + # ], + # "test2": [ + # { + # "status": "FAILURE", + # "elapsed_time_ms": 12, + # "output_snippet": "", + # "output_snippet_base64": "", + # "losless_snippet": "", + # }, + # ... + # ], + # }, + # { + # "test1": [ + # { + # "status": "SUCCESS", + # "elapsed_time_ms": 1, + # "output_snippet": "", + # "output_snippet_base64": "", + # "losless_snippet": "", + # }, + # ], + # "test2": [ + # { + # "status": "FAILURE", + # "elapsed_time_ms": 12, + # "output_snippet": "", + # "output_snippet_base64": "", + # "losless_snippet": "", + # }, + # ], + # }, + # ... 
+ # ], + # } + + all_tests = set() + per_iteration_data = [] + test_run_links = {} + + for test_run_result in test_run_results: + iteration_data = collections.defaultdict(list) + if isinstance(test_run_result, list): + results_iterable = itertools.chain(*(t.GetAll() for t in test_run_result)) + for tr in test_run_result: + test_run_links.update(tr.GetLinks()) + + else: + results_iterable = test_run_result.GetAll() + test_run_links.update(test_run_result.GetLinks()) + + for r in results_iterable: + result_dict = { + 'status': r.GetType(), + 'elapsed_time_ms': r.GetDuration(), + 'output_snippet': six.ensure_text(r.GetLog(), errors='replace'), + 'losless_snippet': True, + 'output_snippet_base64': '', + 'links': r.GetLinks(), + } + iteration_data[r.GetName()].append(result_dict) + + all_tests = all_tests.union(set(six.iterkeys(iteration_data))) + per_iteration_data.append(iteration_data) + + return { + 'global_tags': global_tags or [], + 'all_tests': sorted(list(all_tests)), + # TODO(jbudorick): Add support for disabled tests within base_test_result. + 'disabled_tests': [], + 'per_iteration_data': per_iteration_data, + 'links': test_run_links, + } + + +def GenerateJsonTestResultFormatDict(test_run_results, interrupted): + """Create a results dict from |test_run_results| suitable for writing to JSON. + + Args: + test_run_results: a list of base_test_result.TestRunResults objects. + interrupted: True if tests were interrupted, e.g. timeout listing tests + Returns: + A results dict that mirrors the standard JSON Test Results Format. + """ + + tests = {} + counts = {'PASS': 0, 'FAIL': 0, 'SKIP': 0, 'CRASH': 0, 'TIMEOUT': 0} + + for test_run_result in test_run_results: + if isinstance(test_run_result, list): + results_iterable = itertools.chain(*(t.GetAll() for t in test_run_result)) + else: + results_iterable = test_run_result.GetAll() + + for r in results_iterable: + element = tests + for key in r.GetName().split('.'): + if key not in element: + element[key] = {} + element = element[key] + + element['expected'] = 'PASS' + + if r.GetType() == base_test_result.ResultType.PASS: + result = 'PASS' + elif r.GetType() == base_test_result.ResultType.SKIP: + result = 'SKIP' + elif r.GetType() == base_test_result.ResultType.CRASH: + result = 'CRASH' + elif r.GetType() == base_test_result.ResultType.TIMEOUT: + result = 'TIMEOUT' + else: + result = 'FAIL' + + if 'actual' in element: + element['actual'] += ' ' + result + else: + counts[result] += 1 + element['actual'] = result + if result == 'FAIL': + element['is_unexpected'] = True + + if r.GetDuration() != 0: + element['time'] = r.GetDuration() + + # Fill in required fields. + return { + 'interrupted': interrupted, + 'num_failures_by_type': counts, + 'path_delimiter': '.', + 'seconds_since_epoch': time.time(), + 'tests': tests, + 'version': 3, + } + + +def GenerateJsonResultsFile(test_run_result, file_path, global_tags=None, + **kwargs): + """Write |test_run_result| to JSON. + + This emulates the format of the JSON emitted by + base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON. + + Args: + test_run_result: a base_test_result.TestRunResults object. + file_path: The path to the JSON file to write. 
+ """ + with open(file_path, 'w') as json_result_file: + json_result_file.write(json.dumps( + GenerateResultsDict(test_run_result, global_tags=global_tags), + **kwargs)) + logging.info('Generated json results file at %s', file_path) + + +def GenerateJsonTestResultFormatFile(test_run_result, interrupted, file_path, + **kwargs): + """Write |test_run_result| to JSON. + + This uses the official Chromium Test Results Format. + + Args: + test_run_result: a base_test_result.TestRunResults object. + interrupted: True if tests were interrupted, e.g. timeout listing tests + file_path: The path to the JSON file to write. + """ + with open(file_path, 'w') as json_result_file: + json_result_file.write( + json.dumps( + GenerateJsonTestResultFormatDict(test_run_result, interrupted), + **kwargs)) + logging.info('Generated json results file at %s', file_path) + + +def ParseResultsFromJson(json_results): + """Creates a list of BaseTestResult objects from JSON. + + Args: + json_results: A JSON dict in the format created by + GenerateJsonResultsFile. + """ + + def string_as_status(s): + if s in base_test_result.ResultType.GetTypes(): + return s + return base_test_result.ResultType.UNKNOWN + + results_list = [] + testsuite_runs = json_results['per_iteration_data'] + for testsuite_run in testsuite_runs: + for test, test_runs in six.iteritems(testsuite_run): + results_list.extend( + [base_test_result.BaseTestResult(test, + string_as_status(tr['status']), + duration=tr['elapsed_time_ms'], + log=tr.get('output_snippet')) + for tr in test_runs]) + return results_list diff --git a/third_party/libwebrtc/build/android/pylib/results/json_results_test.py b/third_party/libwebrtc/build/android/pylib/results/json_results_test.py new file mode 100755 index 0000000000..cb942e2898 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/json_results_test.py @@ -0,0 +1,311 @@ +#!/usr/bin/env vpython3 +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ + +import unittest + +import six +from pylib.base import base_test_result +from pylib.results import json_results + + +class JsonResultsTest(unittest.TestCase): + + def testGenerateResultsDict_passedResult(self): + result = base_test_result.BaseTestResult( + 'test.package.TestName', base_test_result.ResultType.PASS) + + all_results = base_test_result.TestRunResults() + all_results.AddResult(result) + + results_dict = json_results.GenerateResultsDict([all_results]) + self.assertEqual(['test.package.TestName'], results_dict['all_tests']) + self.assertEqual(1, len(results_dict['per_iteration_data'])) + + iteration_result = results_dict['per_iteration_data'][0] + self.assertTrue('test.package.TestName' in iteration_result) + self.assertEqual(1, len(iteration_result['test.package.TestName'])) + + test_iteration_result = iteration_result['test.package.TestName'][0] + self.assertTrue('status' in test_iteration_result) + self.assertEqual('SUCCESS', test_iteration_result['status']) + + def testGenerateResultsDict_skippedResult(self): + result = base_test_result.BaseTestResult( + 'test.package.TestName', base_test_result.ResultType.SKIP) + + all_results = base_test_result.TestRunResults() + all_results.AddResult(result) + + results_dict = json_results.GenerateResultsDict([all_results]) + self.assertEqual(['test.package.TestName'], results_dict['all_tests']) + self.assertEqual(1, len(results_dict['per_iteration_data'])) + + iteration_result = results_dict['per_iteration_data'][0] + self.assertTrue('test.package.TestName' in iteration_result) + self.assertEqual(1, len(iteration_result['test.package.TestName'])) + + test_iteration_result = iteration_result['test.package.TestName'][0] + self.assertTrue('status' in test_iteration_result) + self.assertEqual('SKIPPED', test_iteration_result['status']) + + def testGenerateResultsDict_failedResult(self): + result = base_test_result.BaseTestResult( + 'test.package.TestName', base_test_result.ResultType.FAIL) + + all_results = base_test_result.TestRunResults() + all_results.AddResult(result) + + results_dict = json_results.GenerateResultsDict([all_results]) + self.assertEqual(['test.package.TestName'], results_dict['all_tests']) + self.assertEqual(1, len(results_dict['per_iteration_data'])) + + iteration_result = results_dict['per_iteration_data'][0] + self.assertTrue('test.package.TestName' in iteration_result) + self.assertEqual(1, len(iteration_result['test.package.TestName'])) + + test_iteration_result = iteration_result['test.package.TestName'][0] + self.assertTrue('status' in test_iteration_result) + self.assertEqual('FAILURE', test_iteration_result['status']) + + def testGenerateResultsDict_duration(self): + result = base_test_result.BaseTestResult( + 'test.package.TestName', base_test_result.ResultType.PASS, duration=123) + + all_results = base_test_result.TestRunResults() + all_results.AddResult(result) + + results_dict = json_results.GenerateResultsDict([all_results]) + self.assertEqual(['test.package.TestName'], results_dict['all_tests']) + self.assertEqual(1, len(results_dict['per_iteration_data'])) + + iteration_result = results_dict['per_iteration_data'][0] + self.assertTrue('test.package.TestName' in iteration_result) + self.assertEqual(1, len(iteration_result['test.package.TestName'])) + + test_iteration_result = iteration_result['test.package.TestName'][0] + self.assertTrue('elapsed_time_ms' in test_iteration_result) + self.assertEqual(123, test_iteration_result['elapsed_time_ms']) + + def testGenerateResultsDict_multipleResults(self): + 
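    # Two passing results added to one TestRunResults object should both be
    # listed in all_tests and each get a single SUCCESS entry in the one
    # per_iteration_data dict.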
result1 = base_test_result.BaseTestResult( + 'test.package.TestName1', base_test_result.ResultType.PASS) + result2 = base_test_result.BaseTestResult( + 'test.package.TestName2', base_test_result.ResultType.PASS) + + all_results = base_test_result.TestRunResults() + all_results.AddResult(result1) + all_results.AddResult(result2) + + results_dict = json_results.GenerateResultsDict([all_results]) + self.assertEqual(['test.package.TestName1', 'test.package.TestName2'], + results_dict['all_tests']) + + self.assertTrue('per_iteration_data' in results_dict) + iterations = results_dict['per_iteration_data'] + self.assertEqual(1, len(iterations)) + + expected_tests = set([ + 'test.package.TestName1', + 'test.package.TestName2', + ]) + + for test_name, iteration_result in six.iteritems(iterations[0]): + self.assertTrue(test_name in expected_tests) + expected_tests.remove(test_name) + self.assertEqual(1, len(iteration_result)) + + test_iteration_result = iteration_result[0] + self.assertTrue('status' in test_iteration_result) + self.assertEqual('SUCCESS', test_iteration_result['status']) + + def testGenerateResultsDict_passOnRetry(self): + raw_results = [] + + result1 = base_test_result.BaseTestResult( + 'test.package.TestName1', base_test_result.ResultType.FAIL) + run_results1 = base_test_result.TestRunResults() + run_results1.AddResult(result1) + raw_results.append(run_results1) + + result2 = base_test_result.BaseTestResult( + 'test.package.TestName1', base_test_result.ResultType.PASS) + run_results2 = base_test_result.TestRunResults() + run_results2.AddResult(result2) + raw_results.append(run_results2) + + results_dict = json_results.GenerateResultsDict([raw_results]) + self.assertEqual(['test.package.TestName1'], results_dict['all_tests']) + + # Check that there's only one iteration. + self.assertIn('per_iteration_data', results_dict) + iterations = results_dict['per_iteration_data'] + self.assertEqual(1, len(iterations)) + + # Check that test.package.TestName1 is the only test in the iteration. + self.assertEqual(1, len(iterations[0])) + self.assertIn('test.package.TestName1', iterations[0]) + + # Check that there are two results for test.package.TestName1. + actual_test_results = iterations[0]['test.package.TestName1'] + self.assertEqual(2, len(actual_test_results)) + + # Check that the first result is a failure. + self.assertIn('status', actual_test_results[0]) + self.assertEqual('FAILURE', actual_test_results[0]['status']) + + # Check that the second result is a success. 
+ self.assertIn('status', actual_test_results[1]) + self.assertEqual('SUCCESS', actual_test_results[1]['status']) + + def testGenerateResultsDict_globalTags(self): + raw_results = [] + global_tags = ['UNRELIABLE_RESULTS'] + + results_dict = json_results.GenerateResultsDict( + [raw_results], global_tags=global_tags) + self.assertEqual(['UNRELIABLE_RESULTS'], results_dict['global_tags']) + + def testGenerateResultsDict_loslessSnippet(self): + result = base_test_result.BaseTestResult( + 'test.package.TestName', base_test_result.ResultType.FAIL) + log = 'blah-blah' + result.SetLog(log) + + all_results = base_test_result.TestRunResults() + all_results.AddResult(result) + + results_dict = json_results.GenerateResultsDict([all_results]) + self.assertEqual(['test.package.TestName'], results_dict['all_tests']) + self.assertEqual(1, len(results_dict['per_iteration_data'])) + + iteration_result = results_dict['per_iteration_data'][0] + self.assertTrue('test.package.TestName' in iteration_result) + self.assertEqual(1, len(iteration_result['test.package.TestName'])) + + test_iteration_result = iteration_result['test.package.TestName'][0] + self.assertTrue('losless_snippet' in test_iteration_result) + self.assertTrue(test_iteration_result['losless_snippet']) + self.assertTrue('output_snippet' in test_iteration_result) + self.assertEqual(log, test_iteration_result['output_snippet']) + self.assertTrue('output_snippet_base64' in test_iteration_result) + self.assertEqual('', test_iteration_result['output_snippet_base64']) + + def testGenerateJsonTestResultFormatDict_passedResult(self): + result = base_test_result.BaseTestResult('test.package.TestName', + base_test_result.ResultType.PASS) + + all_results = base_test_result.TestRunResults() + all_results.AddResult(result) + + results_dict = json_results.GenerateJsonTestResultFormatDict([all_results], + False) + self.assertEqual(1, len(results_dict['tests'])) + self.assertEqual(1, len(results_dict['tests']['test'])) + self.assertEqual(1, len(results_dict['tests']['test']['package'])) + self.assertEqual( + 'PASS', + results_dict['tests']['test']['package']['TestName']['expected']) + self.assertEqual( + 'PASS', results_dict['tests']['test']['package']['TestName']['actual']) + + self.assertTrue('FAIL' not in results_dict['num_failures_by_type'] + or results_dict['num_failures_by_type']['FAIL'] == 0) + self.assertIn('PASS', results_dict['num_failures_by_type']) + self.assertEqual(1, results_dict['num_failures_by_type']['PASS']) + + def testGenerateJsonTestResultFormatDict_failedResult(self): + result = base_test_result.BaseTestResult('test.package.TestName', + base_test_result.ResultType.FAIL) + + all_results = base_test_result.TestRunResults() + all_results.AddResult(result) + + results_dict = json_results.GenerateJsonTestResultFormatDict([all_results], + False) + self.assertEqual(1, len(results_dict['tests'])) + self.assertEqual(1, len(results_dict['tests']['test'])) + self.assertEqual(1, len(results_dict['tests']['test']['package'])) + self.assertEqual( + 'PASS', + results_dict['tests']['test']['package']['TestName']['expected']) + self.assertEqual( + 'FAIL', results_dict['tests']['test']['package']['TestName']['actual']) + self.assertEqual( + True, + results_dict['tests']['test']['package']['TestName']['is_unexpected']) + + self.assertTrue('PASS' not in results_dict['num_failures_by_type'] + or results_dict['num_failures_by_type']['PASS'] == 0) + self.assertIn('FAIL', results_dict['num_failures_by_type']) + self.assertEqual(1, 
results_dict['num_failures_by_type']['FAIL']) + + def testGenerateJsonTestResultFormatDict_skippedResult(self): + result = base_test_result.BaseTestResult('test.package.TestName', + base_test_result.ResultType.SKIP) + + all_results = base_test_result.TestRunResults() + all_results.AddResult(result) + + results_dict = json_results.GenerateJsonTestResultFormatDict([all_results], + False) + self.assertEqual(1, len(results_dict['tests'])) + self.assertEqual(1, len(results_dict['tests']['test'])) + self.assertEqual(1, len(results_dict['tests']['test']['package'])) + self.assertEqual( + 'PASS', + results_dict['tests']['test']['package']['TestName']['expected']) + self.assertEqual( + 'SKIP', results_dict['tests']['test']['package']['TestName']['actual']) + # Should only be set if the test fails. + self.assertNotIn('is_unexpected', + results_dict['tests']['test']['package']['TestName']) + + self.assertTrue('FAIL' not in results_dict['num_failures_by_type'] + or results_dict['num_failures_by_type']['FAIL'] == 0) + self.assertTrue('PASS' not in results_dict['num_failures_by_type'] + or results_dict['num_failures_by_type']['PASS'] == 0) + self.assertIn('SKIP', results_dict['num_failures_by_type']) + self.assertEqual(1, results_dict['num_failures_by_type']['SKIP']) + + def testGenerateJsonTestResultFormatDict_failedResultWithRetry(self): + result_1 = base_test_result.BaseTestResult('test.package.TestName', + base_test_result.ResultType.FAIL) + run_results_1 = base_test_result.TestRunResults() + run_results_1.AddResult(result_1) + + # Simulate a second retry with failure. + result_2 = base_test_result.BaseTestResult('test.package.TestName', + base_test_result.ResultType.FAIL) + run_results_2 = base_test_result.TestRunResults() + run_results_2.AddResult(result_2) + + all_results = [run_results_1, run_results_2] + + results_dict = json_results.GenerateJsonTestResultFormatDict( + all_results, False) + self.assertEqual(1, len(results_dict['tests'])) + self.assertEqual(1, len(results_dict['tests']['test'])) + self.assertEqual(1, len(results_dict['tests']['test']['package'])) + self.assertEqual( + 'PASS', + results_dict['tests']['test']['package']['TestName']['expected']) + self.assertEqual( + 'FAIL FAIL', + results_dict['tests']['test']['package']['TestName']['actual']) + self.assertEqual( + True, + results_dict['tests']['test']['package']['TestName']['is_unexpected']) + + self.assertTrue('PASS' not in results_dict['num_failures_by_type'] + or results_dict['num_failures_by_type']['PASS'] == 0) + # According to the spec: If a test was run more than once, only the first + # invocation's result is included in the totals. + self.assertIn('FAIL', results_dict['num_failures_by_type']) + self.assertEqual(1, results_dict['num_failures_by_type']['FAIL']) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/__init__.py b/third_party/libwebrtc/build/android/pylib/results/presentation/__init__.py new file mode 100644 index 0000000000..a22a6ee39a --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/presentation/__init__.py @@ -0,0 +1,3 @@ +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
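For reference, GenerateJsonTestResultFormatDict() nests test names on the '.' path delimiter, so the retried-failure case exercised above serializes roughly as follows (illustrative values only, not output captured from the patch):

    {
        'version': 3,
        'interrupted': False,
        'path_delimiter': '.',
        'seconds_since_epoch': 1712516000.0,  # time.time() at generation
        # Only the first attempt counts toward the totals.
        'num_failures_by_type': {'PASS': 0, 'FAIL': 1, 'SKIP': 0,
                                 'CRASH': 0, 'TIMEOUT': 0},
        'tests': {
            'test': {
                'package': {
                    'TestName': {
                        'expected': 'PASS',
                        'actual': 'FAIL FAIL',  # both attempts recorded
                        'is_unexpected': True,
                    },
                },
            },
        },
    }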
diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/javascript/main_html.js b/third_party/libwebrtc/build/android/pylib/results/presentation/javascript/main_html.js new file mode 100644 index 0000000000..3d94663e33 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/presentation/javascript/main_html.js @@ -0,0 +1,193 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +function getArguments() { + // Returns the URL arguments as a dictionary. + args = {} + var s = location.search; + if (s) { + var vals = s.substring(1).split('&'); + for (var i = 0; i < vals.length; i++) { + var pair = vals[i].split('='); + args[pair[0]] = pair[1]; + } + } + return args; +} + +function showSuiteTable(show_the_table) { + document.getElementById('suite-table').style.display = ( + show_the_table ? 'table' : 'none'); +} + +function showTestTable(show_the_table) { + document.getElementById('test-table').style.display = ( + show_the_table ? 'table' : 'none'); +} + +function showTestsOfOneSuiteOnly(suite_name) { + setTitle('Test Results of Suite: ' + suite_name) + show_all = (suite_name == 'TOTAL') + var testTableBlocks = document.getElementById('test-table') + .getElementsByClassName('row_block'); + Array.prototype.slice.call(testTableBlocks) + .forEach(function(testTableBlock) { + if (!show_all) { + var table_block_in_suite = (testTableBlock.firstElementChild + .firstElementChild.firstElementChild.innerHTML) + .startsWith(suite_name); + if (!table_block_in_suite) { + testTableBlock.style.display = 'none'; + return; + } + } + testTableBlock.style.display = 'table-row-group'; + }); + showTestTable(true); + showSuiteTable(false); + window.scrollTo(0, 0); +} + +function showTestsOfOneSuiteOnlyWithNewState(suite_name) { + showTestsOfOneSuiteOnly(suite_name); + history.pushState({suite: suite_name}, suite_name, ''); +} + +function showSuiteTableOnly() { + setTitle('Suites Summary') + showTestTable(false); + showSuiteTable(true); + window.scrollTo(0, 0); +} + +function showSuiteTableOnlyWithReplaceState() { + showSuiteTableOnly(); + history.replaceState({}, 'suite_table', ''); +} + +function setBrowserBackButtonLogic() { + window.onpopstate = function(event) { + if (!event.state || !event.state.suite) { + showSuiteTableOnly(); + } else { + showTestsOfOneSuiteOnly(event.state.suite); + } + }; +} + +function setTitle(title) { + document.getElementById('summary-header').textContent = title; +} + +function sortByColumn(head) { + var table = head.parentNode.parentNode.parentNode; + var rowBlocks = Array.prototype.slice.call( + table.getElementsByTagName('tbody')); + + // Determine whether to asc or desc and set arrows. 
+ var headers = head.parentNode.getElementsByTagName('th'); + var headIndex = Array.prototype.slice.call(headers).indexOf(head); + var asc = -1; + for (var i = 0; i < headers.length; i++) { + if (headers[i].dataset.ascSorted != 0) { + if (headers[i].dataset.ascSorted == 1) { + headers[i].getElementsByClassName('up')[0] + .style.display = 'none'; + } else { + headers[i].getElementsByClassName('down')[0] + .style.display = 'none'; + } + if (headers[i] == head) { + asc = headers[i].dataset.ascSorted * -1; + } else { + headers[i].dataset.ascSorted = 0; + } + break; + } + } + headers[headIndex].dataset.ascSorted = asc; + if (asc == 1) { + headers[headIndex].getElementsByClassName('up')[0] + .style.display = 'inline'; + } else { + headers[headIndex].getElementsByClassName('down')[0] + .style.display = 'inline'; + } + + // Sort the array by the specified column number (col) and order (asc). + rowBlocks.sort(function (a, b) { + if (a.style.display == 'none') { + return -1; + } else if (b.style.display == 'none') { + return 1; + } + var a_rows = Array.prototype.slice.call(a.children); + var b_rows = Array.prototype.slice.call(b.children); + if (head.className == "text") { + // If sorting by text, we only compare the entry on the first row. + var aInnerHTML = a_rows[0].children[headIndex].innerHTML; + var bInnerHTML = b_rows[0].children[headIndex].innerHTML; + return (aInnerHTML == bInnerHTML) ? 0 : ( + (aInnerHTML > bInnerHTML) ? asc : -1 * asc); + } else if (head.className == "number") { + // If sorting by number, for example, duration, + // we will sum up the durations of different test runs + // for one specific test case and sort by the sum. + var avalue = 0; + var bvalue = 0; + a_rows.forEach(function (row, i) { + var index = (i > 0) ? headIndex - 1 : headIndex; + avalue += Number(row.children[index].innerHTML); + }); + b_rows.forEach(function (row, i) { + var index = (i > 0) ? headIndex - 1 : headIndex; + bvalue += Number(row.children[index].innerHTML); + }); + } else if (head.className == "flaky") { + // Flakiness = (#total - #success - #skipped) / (#total - #skipped) + var a_success_or_skipped = 0; + var a_skipped = 0; + var b_success_or_skipped = 0; + var b_skipped = 0; + a_rows.forEach(function (row, i) { + var index = (i > 0) ? headIndex - 1 : headIndex; + var status = row.children[index].innerHTML.trim(); + if (status == 'SUCCESS') { + a_success_or_skipped += 1; + } + if (status == 'SKIPPED') { + a_success_or_skipped += 1; + a_skipped += 1; + } + }); + b_rows.forEach(function (row, i) { + var index = (i > 0) ? headIndex - 1 : headIndex; + var status = row.children[index].innerHTML.trim(); + if (status == 'SUCCESS') { + b_success_or_skipped += 1; + } + if (status == 'SKIPPED') { + b_success_or_skipped += 1; + b_skipped += 1; + } + }); + var atotal_minus_skipped = a_rows.length - a_skipped; + var btotal_minus_skipped = b_rows.length - b_skipped; + + var avalue = ((atotal_minus_skipped == 0) ? -1 : + (a_rows.length - a_success_or_skipped) / atotal_minus_skipped); + var bvalue = ((btotal_minus_skipped == 0) ? 
-1 : + (b_rows.length - b_success_or_skipped) / btotal_minus_skipped); + } + return asc * (avalue - bvalue); + }); + + for (var i = 0; i < rowBlocks.length; i++) { + table.appendChild(rowBlocks[i]); + } +} + +function sortSuiteTableByFailedTestCases() { + sortByColumn(document.getElementById('number_fail_tests')); +} diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/standard_gtest_merge.py b/third_party/libwebrtc/build/android/pylib/results/presentation/standard_gtest_merge.py new file mode 100755 index 0000000000..d458223abb --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/presentation/standard_gtest_merge.py @@ -0,0 +1,173 @@ +#! /usr/bin/env python3 +# +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +from __future__ import print_function + +import argparse +import json +import os +import sys + + +def merge_shard_results(summary_json, jsons_to_merge): + """Reads JSON test output from all shards and combines them into one. + + Returns dict with merged test output on success or None on failure. Emits + annotations. + """ + try: + with open(summary_json) as f: + summary = json.load(f) + except (IOError, ValueError): + raise Exception('Summary json cannot be loaded.') + + # Merge all JSON files together. Keep track of missing shards. + merged = { + 'all_tests': set(), + 'disabled_tests': set(), + 'global_tags': set(), + 'missing_shards': [], + 'per_iteration_data': [], + 'swarming_summary': summary, + 'links': set() + } + for index, result in enumerate(summary['shards']): + if result is None: + merged['missing_shards'].append(index) + continue + + # Author note: this code path doesn't trigger convert_to_old_format() in + # client/swarming.py, which means the state enum is saved in its string + # name form, not in the number form. + state = result.get('state') + if state == 'BOT_DIED': + print( + 'Shard #%d had a Swarming internal failure' % index, file=sys.stderr) + elif state == 'EXPIRED': + print('There wasn\'t enough capacity to run your test', file=sys.stderr) + elif state == 'TIMED_OUT': + print('Test runtime exceeded allocated time' + 'Either it ran for too long (hard timeout) or it didn\'t produce ' + 'I/O for an extended period of time (I/O timeout)', + file=sys.stderr) + elif state != 'COMPLETED': + print('Invalid Swarming task state: %s' % state, file=sys.stderr) + + json_data, err_msg = load_shard_json(index, result.get('task_id'), + jsons_to_merge) + if json_data: + # Set-like fields. + for key in ('all_tests', 'disabled_tests', 'global_tags', 'links'): + merged[key].update(json_data.get(key), []) + + # 'per_iteration_data' is a list of dicts. Dicts should be merged + # together, not the 'per_iteration_data' list itself. + merged['per_iteration_data'] = merge_list_of_dicts( + merged['per_iteration_data'], json_data.get('per_iteration_data', [])) + else: + merged['missing_shards'].append(index) + print('No result was found: %s' % err_msg, file=sys.stderr) + + # If some shards are missing, make it known. Continue parsing anyway. Step + # should be red anyway, since swarming.py return non-zero exit code in that + # case. + if merged['missing_shards']: + as_str = ', '.join([str(shard) for shard in merged['missing_shards']]) + print('some shards did not complete: %s' % as_str, file=sys.stderr) + # Not all tests run, combined JSON summary can not be trusted. 
+ merged['global_tags'].add('UNRELIABLE_RESULTS') + + # Convert to jsonish dict. + for key in ('all_tests', 'disabled_tests', 'global_tags', 'links'): + merged[key] = sorted(merged[key]) + return merged + + +OUTPUT_JSON_SIZE_LIMIT = 100 * 1024 * 1024 # 100 MB + + +def load_shard_json(index, task_id, jsons_to_merge): + """Reads JSON output of the specified shard. + + Args: + output_dir: The directory in which to look for the JSON output to load. + index: The index of the shard to load data for, this is for old api. + task_id: The directory of the shard to load data for, this is for new api. + + Returns: A tuple containing: + * The contents of path, deserialized into a python object. + * An error string. + (exactly one of the tuple elements will be non-None). + """ + matching_json_files = [ + j for j in jsons_to_merge + if (os.path.basename(j) == 'output.json' and + (os.path.basename(os.path.dirname(j)) == str(index) or + os.path.basename(os.path.dirname(j)) == task_id))] + + if not matching_json_files: + print('shard %s test output missing' % index, file=sys.stderr) + return (None, 'shard %s test output was missing' % index) + elif len(matching_json_files) > 1: + print('duplicate test output for shard %s' % index, file=sys.stderr) + return (None, 'shard %s test output was duplicated' % index) + + path = matching_json_files[0] + + try: + filesize = os.stat(path).st_size + if filesize > OUTPUT_JSON_SIZE_LIMIT: + print( + 'output.json is %d bytes. Max size is %d' % (filesize, + OUTPUT_JSON_SIZE_LIMIT), + file=sys.stderr) + return (None, 'shard %s test output exceeded the size limit' % index) + + with open(path) as f: + return (json.load(f), None) + except (IOError, ValueError, OSError) as e: + print('Missing or invalid gtest JSON file: %s' % path, file=sys.stderr) + print('%s: %s' % (type(e).__name__, e), file=sys.stderr) + + return (None, 'shard %s test output was missing or invalid' % index) + + +def merge_list_of_dicts(left, right): + """Merges dicts left[0] with right[0], left[1] with right[1], etc.""" + output = [] + for i in range(max(len(left), len(right))): + left_dict = left[i] if i < len(left) else {} + right_dict = right[i] if i < len(right) else {} + merged_dict = left_dict.copy() + merged_dict.update(right_dict) + output.append(merged_dict) + return output + + +def standard_gtest_merge( + output_json, summary_json, jsons_to_merge): + + output = merge_shard_results(summary_json, jsons_to_merge) + with open(output_json, 'wb') as f: + json.dump(output, f) + + return 0 + + +def main(raw_args): + parser = argparse.ArgumentParser() + parser.add_argument('--summary-json') + parser.add_argument('-o', '--output-json', required=True) + parser.add_argument('jsons_to_merge', nargs='*') + + args = parser.parse_args(raw_args) + + return standard_gtest_merge( + args.output_json, args.summary_json, args.jsons_to_merge) + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/template/main.html b/third_party/libwebrtc/build/android/pylib/results/presentation/template/main.html new file mode 100644 index 0000000000..e30d7d3f23 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/presentation/template/main.html @@ -0,0 +1,93 @@ +<!DOCTYPE html> +<html> + <head> + <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> + <style> + body { + background-color: #fff; + color: #333; + font-family: Verdana, sans-serif; + font-size: 10px; + margin-left: 30px; + margin-right: 30px; + margin-top: 
20px; + margin-bottom: 50px; + padding: 0; + } + table, th, td { + border: 1px solid black; + border-collapse: collapse; + text-align: center; + } + table, td { + padding: 0.1em 1em 0.1em 1em; + } + th { + cursor: pointer; + padding: 0.2em 1.5em 0.2em 1.5em; + } + table { + width: 100%; + } + .center { + text-align: center; + } + .left { + text-align: left; + } + a { + cursor: pointer; + text-decoration: underline; + } + a:link,a:visited,a:active { + color: #444; + } + .row_block:hover { + background-color: #F6F6F6; + } + .skipped, .success, .failure { + border-color: #000000; + } + .success { + color: #000; + background-color: #8d4; + } + .failure { + color: #000; + background-color: #e88; + } + .skipped { + color: #000; + background: #AADDEE; + } + </style> + <script type="text/javascript"> + {% include "javascript/main_html.js" %} + </script> + </head> + <body> + <div> + <h2 id="summary-header"></h2> + {% for tb_value in tb_values %} + {% include 'template/table.html' %} + {% endfor %} + </div> + {% if feedback_url %} + </br> + <a href="{{feedback_url}}" target="_blank"><b>Feedback</b></a> + </body> + {%- endif %} + <script> + sortSuiteTableByFailedTestCases(); + showSuiteTableOnlyWithReplaceState(); + // Enable sorting for each column of tables. + Array.prototype.slice.call(document.getElementsByTagName('th')) + .forEach(function(head) { + head.addEventListener( + "click", + function() { sortByColumn(head); }); + } + ); + setBrowserBackButtonLogic(); + </script> +</html> diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/template/table.html b/third_party/libwebrtc/build/android/pylib/results/presentation/template/table.html new file mode 100644 index 0000000000..4240043490 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/presentation/template/table.html @@ -0,0 +1,60 @@ +<table id="{{tb_value.table_id}}" style="display:none;"> + <thead class="heads"> + <tr> + {% for cell in tb_value.table_headers -%} + <th class="{{cell.class}}" id="{{cell.data}}" data-asc-sorted=0> + {{cell.data}} + <span class="up" style="display:none;"> ↑</span> + <span class="down" style="display:none;"> ↓</span> + </th> + {%- endfor %} + </tr> + </thead> + {% for block in tb_value.table_row_blocks -%} + <tbody class="row_block"> + {% for row in block -%} + <tr class="{{tb_value.table_id}}-body-row"> + {% for cell in row -%} + {% if cell.rowspan -%} + <td rowspan="{{cell.rowspan}}" class="{{tb_value.table_id}}-body-column-{{loop.index0}} {{cell.class}}"> + {%- else -%} + <td rowspan="1" class="{{tb_value.table_id}}-body-column-{{loop.index0}} {{cell.class}}"> + {%- endif %} + {% if cell.cell_type == 'pre' -%} + <pre>{{cell.data}}</pre> + {%- elif cell.cell_type == 'links' -%} + {% for link in cell.links -%} + <a href="{{link.href}}" target="{{link.target}}">{{link.data}}</a> + {% if not loop.last -%} + <br /> + {%- endif %} + {%- endfor %} + {%- elif cell.cell_type == 'action' -%} + <a onclick="{{cell.action}}">{{cell.data}}</a> + {%- else -%} + {{cell.data}} + {%- endif %} + </td> + {%- endfor %} + </tr> + {%- endfor %} + </tbody> + {%- endfor %} + <tfoot> + <tr> + {% for cell in tb_value.table_footer -%} + <td class="{{tb_value.table_id}}-summary-column-{{loop.index0}} {{cell.class}}"> + {% if cell.cell_type == 'links' -%} + {% for link in cell.links -%} + <a href="{{link.href}}" target="{{link.target}}"><b>{{link.data}}</b></a> + {%- endfor %} + {%- elif cell.cell_type == 'action' -%} + <a onclick="{{cell.action}}">{{cell.data}}</a> + {%- else -%} + 
<b>{{cell.data}}</b> + {%- endif %} + </td> + {%- endfor %} + </tr> + </tfoot> +</table> diff --git a/third_party/libwebrtc/build/android/pylib/results/presentation/test_results_presentation.py b/third_party/libwebrtc/build/android/pylib/results/presentation/test_results_presentation.py new file mode 100755 index 0000000000..fc14b8bf03 --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/presentation/test_results_presentation.py @@ -0,0 +1,549 @@ +#!/usr/bin/env python3 +# +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + + +import argparse +import collections +import contextlib +import json +import logging +import tempfile +import os +import sys +try: + from urllib.parse import urlencode + from urllib.request import urlopen +except ImportError: + from urllib import urlencode + from urllib2 import urlopen + + +CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) +BASE_DIR = os.path.abspath(os.path.join( + CURRENT_DIR, '..', '..', '..', '..', '..')) + +sys.path.append(os.path.join(BASE_DIR, 'build', 'android')) +from pylib.results.presentation import standard_gtest_merge +from pylib.utils import google_storage_helper # pylint: disable=import-error + +sys.path.append(os.path.join(BASE_DIR, 'third_party')) +import jinja2 # pylint: disable=import-error +JINJA_ENVIRONMENT = jinja2.Environment( + loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), + autoescape=True) + + +def cell(data, html_class='center'): + """Formats table cell data for processing in jinja template.""" + return { + 'data': data, + 'class': html_class, + } + + +def pre_cell(data, html_class='center'): + """Formats table <pre> cell data for processing in jinja template.""" + return { + 'cell_type': 'pre', + 'data': data, + 'class': html_class, + } + + +class LinkTarget(object): + # Opens the linked document in a new window or tab. + NEW_TAB = '_blank' + # Opens the linked document in the same frame as it was clicked. + CURRENT_TAB = '_self' + + +def link(data, href, target=LinkTarget.CURRENT_TAB): + """Formats <a> tag data for processing in jinja template. + + Args: + data: String link appears as on HTML page. + href: URL where link goes. + target: Where link should be opened (e.g. current tab or new tab). + """ + return { + 'data': data, + 'href': href, + 'target': target, + } + + +def links_cell(links, html_class='center', rowspan=None): + """Formats table cell with links for processing in jinja template. + + Args: + links: List of link dictionaries. Use |link| function to generate them. + html_class: Class for table cell. + rowspan: Rowspan HTML attribute. + """ + return { + 'cell_type': 'links', + 'class': html_class, + 'links': links, + 'rowspan': rowspan, + } + + +def action_cell(action, data, html_class): + """Formats table cell with javascript actions. + + Args: + action: Javscript action. + data: Data in cell. + class: Class for table cell. 
+ """ + return { + 'cell_type': 'action', + 'action': action, + 'data': data, + 'class': html_class, + } + + +def flakiness_dashbord_link(test_name, suite_name): + url_args = urlencode([('testType', suite_name), ('tests', test_name)]) + return ('https://test-results.appspot.com/' + 'dashboards/flakiness_dashboard.html#%s' % url_args) + + +def logs_cell(result, test_name, suite_name): + """Formats result logs data for processing in jinja template.""" + link_list = [] + result_link_dict = result.get('links', {}) + result_link_dict['flakiness'] = flakiness_dashbord_link( + test_name, suite_name) + for name, href in sorted(result_link_dict.items()): + link_list.append(link( + data=name, + href=href, + target=LinkTarget.NEW_TAB)) + if link_list: + return links_cell(link_list) + else: + return cell('(no logs)') + + +def code_search(test, cs_base_url): + """Returns URL for test on codesearch.""" + search = test.replace('#', '.') + return '%s/search/?q=%s&type=cs' % (cs_base_url, search) + + +def status_class(status): + """Returns HTML class for test status.""" + if not status: + return 'failure unknown' + status = status.lower() + if status not in ('success', 'skipped'): + return 'failure %s' % status + return status + + +def create_test_table(results_dict, cs_base_url, suite_name): + """Format test data for injecting into HTML table.""" + + header_row = [ + cell(data='test_name', html_class='text'), + cell(data='status', html_class='flaky'), + cell(data='elapsed_time_ms', html_class='number'), + cell(data='logs', html_class='text'), + cell(data='output_snippet', html_class='text'), + ] + + test_row_blocks = [] + for test_name, test_results in results_dict.items(): + test_runs = [] + for index, result in enumerate(test_results): + if index == 0: + test_run = [links_cell( + links=[ + link(href=code_search(test_name, cs_base_url), + target=LinkTarget.NEW_TAB, + data=test_name)], + rowspan=len(test_results), + html_class='left %s' % test_name + )] # test_name + else: + test_run = [] + + test_run.extend([ + cell(data=result['status'] or 'UNKNOWN', + # status + html_class=('center %s' % + status_class(result['status']))), + cell(data=result['elapsed_time_ms']), # elapsed_time_ms + logs_cell(result, test_name, suite_name), # logs + pre_cell(data=result['output_snippet'], # output_snippet + html_class='left'), + ]) + test_runs.append(test_run) + test_row_blocks.append(test_runs) + return header_row, test_row_blocks + + +def create_suite_table(results_dict): + """Format test suite data for injecting into HTML table.""" + + SUCCESS_COUNT_INDEX = 1 + FAIL_COUNT_INDEX = 2 + ALL_COUNT_INDEX = 3 + TIME_INDEX = 4 + + header_row = [ + cell(data='suite_name', html_class='text'), + cell(data='number_success_tests', html_class='number'), + cell(data='number_fail_tests', html_class='number'), + cell(data='all_tests', html_class='number'), + cell(data='elapsed_time_ms', html_class='number'), + ] + + footer_row = [ + action_cell( + 'showTestsOfOneSuiteOnlyWithNewState("TOTAL")', + 'TOTAL', + 'center' + ), # TOTAL + cell(data=0), # number_success_tests + cell(data=0), # number_fail_tests + cell(data=0), # all_tests + cell(data=0), # elapsed_time_ms + ] + + suite_row_dict = {} + for test_name, test_results in results_dict.items(): + # TODO(mikecase): This logic doesn't work if there are multiple test runs. + # That is, if 'per_iteration_data' has multiple entries. + # Since we only care about the result of the last test run. 
+ result = test_results[-1] + + suite_name = (test_name.split('#')[0] if '#' in test_name + else test_name.split('.')[0]) + if suite_name in suite_row_dict: + suite_row = suite_row_dict[suite_name] + else: + suite_row = [ + action_cell( + 'showTestsOfOneSuiteOnlyWithNewState("%s")' % suite_name, + suite_name, + 'left' + ), # suite_name + cell(data=0), # number_success_tests + cell(data=0), # number_fail_tests + cell(data=0), # all_tests + cell(data=0), # elapsed_time_ms + ] + + suite_row_dict[suite_name] = suite_row + + suite_row[ALL_COUNT_INDEX]['data'] += 1 + footer_row[ALL_COUNT_INDEX]['data'] += 1 + + if result['status'] == 'SUCCESS': + suite_row[SUCCESS_COUNT_INDEX]['data'] += 1 + footer_row[SUCCESS_COUNT_INDEX]['data'] += 1 + elif result['status'] != 'SKIPPED': + suite_row[FAIL_COUNT_INDEX]['data'] += 1 + footer_row[FAIL_COUNT_INDEX]['data'] += 1 + + # Some types of crashes can have 'null' values for elapsed_time_ms. + if result['elapsed_time_ms'] is not None: + suite_row[TIME_INDEX]['data'] += result['elapsed_time_ms'] + footer_row[TIME_INDEX]['data'] += result['elapsed_time_ms'] + + for suite in list(suite_row_dict.values()): + if suite[FAIL_COUNT_INDEX]['data'] > 0: + suite[FAIL_COUNT_INDEX]['class'] += ' failure' + else: + suite[FAIL_COUNT_INDEX]['class'] += ' success' + + if footer_row[FAIL_COUNT_INDEX]['data'] > 0: + footer_row[FAIL_COUNT_INDEX]['class'] += ' failure' + else: + footer_row[FAIL_COUNT_INDEX]['class'] += ' success' + + return (header_row, [[suite_row] + for suite_row in list(suite_row_dict.values())], + footer_row) + + +def feedback_url(result_details_link): + # pylint: disable=redefined-variable-type + url_args = [ + ('labels', 'Pri-2,Type-Bug,Restrict-View-Google'), + ('summary', 'Result Details Feedback:'), + ('components', 'Test>Android'), + ] + if result_details_link: + url_args.append(('comment', 'Please check out: %s' % result_details_link)) + url_args = urlencode(url_args) + # pylint: enable=redefined-variable-type + return 'https://bugs.chromium.org/p/chromium/issues/entry?%s' % url_args + + +def results_to_html(results_dict, cs_base_url, bucket, test_name, + builder_name, build_number, local_output): + """Convert list of test results into html format. + + Args: + local_output: Whether this results file is uploaded to Google Storage or + just a local file. 
+ """ + test_rows_header, test_rows = create_test_table( + results_dict, cs_base_url, test_name) + suite_rows_header, suite_rows, suite_row_footer = create_suite_table( + results_dict) + + suite_table_values = { + 'table_id': 'suite-table', + 'table_headers': suite_rows_header, + 'table_row_blocks': suite_rows, + 'table_footer': suite_row_footer, + } + + test_table_values = { + 'table_id': 'test-table', + 'table_headers': test_rows_header, + 'table_row_blocks': test_rows, + } + + main_template = JINJA_ENVIRONMENT.get_template( + os.path.join('template', 'main.html')) + + if local_output: + html_render = main_template.render( # pylint: disable=no-member + { + 'tb_values': [suite_table_values, test_table_values], + 'feedback_url': feedback_url(None), + }) + return (html_render, None, None) + else: + dest = google_storage_helper.unique_name( + '%s_%s_%s' % (test_name, builder_name, build_number)) + result_details_link = google_storage_helper.get_url_link( + dest, '%s/html' % bucket) + html_render = main_template.render( # pylint: disable=no-member + { + 'tb_values': [suite_table_values, test_table_values], + 'feedback_url': feedback_url(result_details_link), + }) + return (html_render, dest, result_details_link) + + +def result_details(json_path, test_name, cs_base_url, bucket=None, + builder_name=None, build_number=None, local_output=False): + """Get result details from json path and then convert results to html. + + Args: + local_output: Whether this results file is uploaded to Google Storage or + just a local file. + """ + + with open(json_path) as json_file: + json_object = json.loads(json_file.read()) + + if not 'per_iteration_data' in json_object: + return 'Error: json file missing per_iteration_data.' + + results_dict = collections.defaultdict(list) + for testsuite_run in json_object['per_iteration_data']: + for test, test_runs in testsuite_run.items(): + results_dict[test].extend(test_runs) + return results_to_html(results_dict, cs_base_url, bucket, test_name, + builder_name, build_number, local_output) + + +def upload_to_google_bucket(html, bucket, dest): + with tempfile.NamedTemporaryFile(suffix='.html') as temp_file: + temp_file.write(html) + temp_file.flush() + return google_storage_helper.upload( + name=dest, + filepath=temp_file.name, + bucket='%s/html' % bucket, + content_type='text/html', + authenticated_link=True) + + +def ui_screenshot_set(json_path): + with open(json_path) as json_file: + json_object = json.loads(json_file.read()) + if not 'per_iteration_data' in json_object: + # This will be reported as an error by result_details, no need to duplicate. 
+ return None + ui_screenshots = [] + # pylint: disable=too-many-nested-blocks + for testsuite_run in json_object['per_iteration_data']: + for _, test_runs in testsuite_run.items(): + for test_run in test_runs: + if 'ui screenshot' in test_run['links']: + screenshot_link = test_run['links']['ui screenshot'] + if screenshot_link.startswith('file:'): + with contextlib.closing(urlopen(screenshot_link)) as f: + test_screenshots = json.load(f) + else: + # Assume anything that isn't a file link is a google storage link + screenshot_string = google_storage_helper.read_from_link( + screenshot_link) + if not screenshot_string: + logging.error('Bad screenshot link %s', screenshot_link) + continue + test_screenshots = json.loads( + screenshot_string) + ui_screenshots.extend(test_screenshots) + # pylint: enable=too-many-nested-blocks + + if ui_screenshots: + return json.dumps(ui_screenshots) + return None + + +def upload_screenshot_set(json_path, test_name, bucket, builder_name, + build_number): + screenshot_set = ui_screenshot_set(json_path) + if not screenshot_set: + return None + dest = google_storage_helper.unique_name( + 'screenshots_%s_%s_%s' % (test_name, builder_name, build_number), + suffix='.json') + with tempfile.NamedTemporaryFile(suffix='.json') as temp_file: + temp_file.write(screenshot_set) + temp_file.flush() + return google_storage_helper.upload( + name=dest, + filepath=temp_file.name, + bucket='%s/json' % bucket, + content_type='application/json', + authenticated_link=True) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--json-file', help='Path of json file.') + parser.add_argument('--cs-base-url', help='Base url for code search.', + default='http://cs.chromium.org') + parser.add_argument('--bucket', help='Google storage bucket.', required=True) + parser.add_argument('--builder-name', help='Builder name.') + parser.add_argument('--build-number', help='Build number.') + parser.add_argument('--test-name', help='The name of the test.', + required=True) + parser.add_argument( + '-o', '--output-json', + help='(Swarming Merge Script API) ' + 'Output JSON file to create.') + parser.add_argument( + '--build-properties', + help='(Swarming Merge Script API) ' + 'Build property JSON file provided by recipes.') + parser.add_argument( + '--summary-json', + help='(Swarming Merge Script API) ' + 'Summary of shard state running on swarming. 
' + '(Output of the swarming.py collect ' + '--task-summary-json=XXX command.)') + parser.add_argument( + '--task-output-dir', + help='(Swarming Merge Script API) ' + 'Directory containing all swarming task results.') + parser.add_argument( + 'positional', nargs='*', + help='output.json from shards.') + + args = parser.parse_args() + + if ((args.build_properties is None) == + (args.build_number is None or args.builder_name is None)): + raise parser.error('Exactly one of build_perperties or ' + '(build_number or builder_name) should be given.') + + if (args.build_number is None) != (args.builder_name is None): + raise parser.error('args.build_number and args.builder_name ' + 'has to be be given together' + 'or not given at all.') + + if len(args.positional) == 0 and args.json_file is None: + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump({}, f) + return + elif len(args.positional) != 0 and args.json_file: + raise parser.error('Exactly one of args.positional and ' + 'args.json_file should be given.') + + if args.build_properties: + build_properties = json.loads(args.build_properties) + if ((not 'buildnumber' in build_properties) or + (not 'buildername' in build_properties)): + raise parser.error('Build number/builder name not specified.') + build_number = build_properties['buildnumber'] + builder_name = build_properties['buildername'] + elif args.build_number and args.builder_name: + build_number = args.build_number + builder_name = args.builder_name + + if args.positional: + if len(args.positional) == 1: + json_file = args.positional[0] + else: + if args.output_json and args.summary_json: + standard_gtest_merge.standard_gtest_merge( + args.output_json, args.summary_json, args.positional) + json_file = args.output_json + elif not args.output_json: + raise Exception('output_json required by merge API is missing.') + else: + raise Exception('summary_json required by merge API is missing.') + elif args.json_file: + json_file = args.json_file + + if not os.path.exists(json_file): + raise IOError('--json-file %s not found.' % json_file) + + # Link to result details presentation page is a part of the page. + result_html_string, dest, result_details_link = result_details( + json_file, args.test_name, args.cs_base_url, args.bucket, + builder_name, build_number) + + result_details_link_2 = upload_to_google_bucket( + result_html_string.encode('UTF-8'), + args.bucket, dest) + assert result_details_link == result_details_link_2, ( + 'Result details link do not match. 
The link returned by get_url_link' + ' should be the same as that returned by upload.') + + ui_screenshot_set_link = upload_screenshot_set(json_file, args.test_name, + args.bucket, builder_name, build_number) + + if ui_screenshot_set_link: + ui_catalog_url = 'https://chrome-ui-catalog.appspot.com/' + ui_catalog_query = urlencode({'screenshot_source': ui_screenshot_set_link}) + ui_screenshot_link = '%s?%s' % (ui_catalog_url, ui_catalog_query) + + if args.output_json: + with open(json_file) as original_json_file: + json_object = json.load(original_json_file) + json_object['links'] = { + 'result_details (logcats, flakiness links)': result_details_link + } + + if ui_screenshot_set_link: + json_object['links']['ui screenshots'] = ui_screenshot_link + + with open(args.output_json, 'w') as f: + json.dump(json_object, f) + else: + print('Result Details: %s' % result_details_link) + + if ui_screenshot_set_link: + print('UI Screenshots %s' % ui_screenshot_link) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/third_party/libwebrtc/build/android/pylib/results/report_results.py b/third_party/libwebrtc/build/android/pylib/results/report_results.py new file mode 100644 index 0000000000..56eefac46c --- /dev/null +++ b/third_party/libwebrtc/build/android/pylib/results/report_results.py @@ -0,0 +1,136 @@ +# Copyright (c) 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Module containing utility functions for reporting results.""" + +from __future__ import print_function + +import logging +import os +import re + +from pylib import constants +from pylib.results.flakiness_dashboard import results_uploader +from pylib.utils import logging_utils + + +def _LogToFile(results, test_type, suite_name): + """Log results to local files which can be used for aggregation later.""" + log_file_path = os.path.join(constants.GetOutDirectory(), 'test_logs') + if not os.path.exists(log_file_path): + os.mkdir(log_file_path) + full_file_name = os.path.join( + log_file_path, re.sub(r'\W', '_', test_type).lower() + '.log') + if not os.path.exists(full_file_name): + with open(full_file_name, 'w') as log_file: + print( + '\n%s results for %s build %s:' % + (test_type, os.environ.get('BUILDBOT_BUILDERNAME'), + os.environ.get('BUILDBOT_BUILDNUMBER')), + file=log_file) + logging.info('Writing results to %s.', full_file_name) + + logging.info('Writing results to %s.', full_file_name) + with open(full_file_name, 'a') as log_file: + shortened_suite_name = suite_name[:25] + (suite_name[25:] and '...') + print( + '%s%s' % (shortened_suite_name.ljust(30), results.GetShortForm()), + file=log_file) + + +def _LogToFlakinessDashboard(results, test_type, test_package, + flakiness_server): + """Upload results to the flakiness dashboard""" + logging.info('Upload results for test type "%s", test package "%s" to %s', + test_type, test_package, flakiness_server) + + try: + # TODO(jbudorick): remove Instrumentation once instrumentation tests + # switch to platform mode. + if test_type in ('instrumentation', 'Instrumentation'): + if flakiness_server == constants.UPSTREAM_FLAKINESS_SERVER: + assert test_package in ['ContentShellTest', + 'ChromePublicTest', + 'ChromeSyncShellTest', + 'SystemWebViewShellLayoutTest', + 'WebViewInstrumentationTest'] + dashboard_test_type = ('%s_instrumentation_tests' % + test_package.lower().rstrip('test')) + # Downstream server. 
+ else: + dashboard_test_type = 'Chromium_Android_Instrumentation' + + elif test_type == 'gtest': + dashboard_test_type = test_package + + else: + logging.warning('Invalid test type') + return + + results_uploader.Upload( + results, flakiness_server, dashboard_test_type) + + except Exception: # pylint: disable=broad-except + logging.exception('Failure while logging to %s', flakiness_server) + + +def LogFull(results, test_type, test_package, annotation=None, + flakiness_server=None): + """Log the tests results for the test suite. + + The results will be logged three different ways: + 1. Log to stdout. + 2. Log to local files for aggregating multiple test steps + (on buildbots only). + 3. Log to flakiness dashboard (on buildbots only). + + Args: + results: An instance of TestRunResults object. + test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.). + test_package: Test package name (e.g. 'ipc_tests' for gtests, + 'ContentShellTest' for instrumentation tests) + annotation: If instrumenation test type, this is a list of annotations + (e.g. ['Feature', 'SmallTest']). + flakiness_server: If provider, upload the results to flakiness dashboard + with this URL. + """ + # pylint doesn't like how colorama set up its color enums. + # pylint: disable=no-member + black_on_white = (logging_utils.BACK.WHITE, logging_utils.FORE.BLACK) + with logging_utils.OverrideColor(logging.CRITICAL, black_on_white): + if not results.DidRunPass(): + logging.critical('*' * 80) + logging.critical('Detailed Logs') + logging.critical('*' * 80) + for line in results.GetLogs().splitlines(): + logging.critical(line) + logging.critical('*' * 80) + logging.critical('Summary') + logging.critical('*' * 80) + for line in results.GetGtestForm().splitlines(): + color = black_on_white + if 'FAILED' in line: + # Red on white, dim. + color = (logging_utils.BACK.WHITE, logging_utils.FORE.RED, + logging_utils.STYLE.DIM) + elif 'PASSED' in line: + # Green on white, dim. + color = (logging_utils.BACK.WHITE, logging_utils.FORE.GREEN, + logging_utils.STYLE.DIM) + with logging_utils.OverrideColor(logging.CRITICAL, color): + logging.critical(line) + logging.critical('*' * 80) + + if os.environ.get('BUILDBOT_BUILDERNAME'): + # It is possible to have multiple buildbot steps for the same + # instrumenation test package using different annotations. + if annotation and len(annotation) == 1: + suite_name = annotation[0] + else: + suite_name = test_package + _LogToFile(results, test_type, suite_name) + + if flakiness_server: + _LogToFlakinessDashboard(results, test_type, test_package, + flakiness_server) |
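To close out the section, a minimal usage sketch for report_results.LogFull, assuming the pylib tree is importable (the presentation script above appends build/android to sys.path the same way) and that it runs outside a buildbot environment, so only the stdout summary path is exercised. The path, test names and package below are placeholders, not values from the patch.

    import sys
    # Hypothetical checkout path; the presentation script derives the
    # equivalent from BASE_DIR + 'build/android'.
    sys.path.append('/path/to/src/build/android')

    from pylib.base import base_test_result
    from pylib.results import report_results

    results = base_test_result.TestRunResults()
    results.AddResult(
        base_test_result.BaseTestResult('org.chromium.SampleTest#testFoo',
                                        base_test_result.ResultType.PASS))
    results.AddResult(
        base_test_result.BaseTestResult('org.chromium.SampleTest#testBar',
                                        base_test_result.ResultType.FAIL))

    report_results.LogFull(
        results,
        test_type='Instrumentation',
        test_package='SampleTest',
        # Optional; on bots a single annotation is used as the log suite name.
        annotation=['SmallTest'],
        # If provided, results are uploaded to the flakiness dashboard URL.
        flakiness_server=None)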