author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
commit    f215e02bf85f68d3a6106c2a1f4f7f063f819064 (patch)
tree      6bb5b92c046312c4e95ac2620b10ddf482d3fa8b /src/VBox/ValidationKit/analysis
parent    Initial commit. (diff)
Adding upstream version 7.0.14-dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/ValidationKit/analysis')
-rw-r--r--  src/VBox/ValidationKit/analysis/Makefile.kmk  |  45
-rw-r--r--  src/VBox/ValidationKit/analysis/__init__.py   |  40
-rwxr-xr-x  src/VBox/ValidationKit/analysis/analyze.py    | 447
-rwxr-xr-x  src/VBox/ValidationKit/analysis/reader.py     | 762
-rwxr-xr-x  src/VBox/ValidationKit/analysis/reporting.py  | 746
5 files changed, 2040 insertions, 0 deletions
diff --git a/src/VBox/ValidationKit/analysis/Makefile.kmk b/src/VBox/ValidationKit/analysis/Makefile.kmk
new file mode 100644
index 00000000..901250a7
--- /dev/null
+++ b/src/VBox/ValidationKit/analysis/Makefile.kmk
@@ -0,0 +1,45 @@
+# $Id: Makefile.kmk $
+## @file
+# VirtualBox Validation Kit - Test Result Analysis.
+#
+
+#
+# Copyright (C) 2010-2023 Oracle and/or its affiliates.
+#
+# This file is part of VirtualBox base platform packages, as
+# available from https://www.virtualbox.org.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation, in version 3 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses>.
+#
+# The contents of this file may alternatively be used under the terms
+# of the Common Development and Distribution License Version 1.0
+# (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+# in the VirtualBox distribution, in which case the provisions of the
+# CDDL are applicable instead of those of the GPL.
+#
+# You may elect to license modified versions of this file under the
+# terms and conditions of either the GPL or the CDDL or both.
+#
+# SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+#
+
+SUB_DEPTH = ../../../..
+include $(KBUILD_PATH)/subheader.kmk
+
+
+VBOX_VALIDATIONKIT_PYTHON_SOURCES += $(wildcard $(PATH_SUB_CURRENT)/*.py)
+
+$(evalcall def_vbox_validationkit_process_python_sources)
+include $(FILE_KBUILD_SUB_FOOTER)
+
diff --git a/src/VBox/ValidationKit/analysis/__init__.py b/src/VBox/ValidationKit/analysis/__init__.py
new file mode 100644
index 00000000..3f6a51e3
--- /dev/null
+++ b/src/VBox/ValidationKit/analysis/__init__.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# $Id: __init__.py $
+
+"""
+Test analysis package
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2010-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+__version__ = "$Revision: 155244 $"
+
diff --git a/src/VBox/ValidationKit/analysis/analyze.py b/src/VBox/ValidationKit/analysis/analyze.py
new file mode 100755
index 00000000..ec2b3cc1
--- /dev/null
+++ b/src/VBox/ValidationKit/analysis/analyze.py
@@ -0,0 +1,447 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# $Id: analyze.py $
+
+"""
+Analyzer CLI.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2010-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+__version__ = "$Revision: 155244 $"
+
+# Standard python imports.
+import re;
+import os;
+import textwrap;
+import sys;
+
+# Only the main script needs to modify the path.
+try: __file__
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)));
+sys.path.append(g_ksValidationKitDir);
+
+# Validation Kit imports.
+from analysis import reader
+from analysis import reporting
+
+
+def usage():
+ """
+ Display usage.
+ """
+ # Set up the output wrapper.
+ try: cCols = os.get_terminal_size()[0] # since 3.3
+ except: cCols = 79;
+ oWrapper = textwrap.TextWrapper(width = cCols);
+
+ # Do the outputting.
+ print('Tool for comparing test results.');
+ print('');
+ oWrapper.subsequent_indent = ' ' * (len('usage: ') + 4);
+    print(oWrapper.fill('usage: analyze.py [options] [collection-1] -- [collection-2] [-- [collection-3] [...]]'));
+ oWrapper.subsequent_indent = '';
+ print('');
+    print(oWrapper.fill('This tool compares two or more result collections, using one as a baseline (first by default) '
+                        'and showing how the results in the others differ from it.'));
+ print('');
+    print(oWrapper.fill('The results (XML files) from one or more test runs make up a collection. A collection can be '
+                        'named using the --name <name> option, or will get a sequential name automatically. The baseline '
+                        'collection will have "(baseline)" appended to its name.'));
+ print('');
+    print(oWrapper.fill('A test run produces one XML file, either via the testdriver/reporter.py machinery or via the IPRT '
+                        'test.cpp code. In the latter case it can be enabled and controlled via IPRT_TEST_FILE. A collection '
+                        'consists of one or more test runs (i.e. XML result files). These are combined (aka distilled) '
+                        'into a single set of results before comparing them with the others. The --best and --avg options '
+                        'control how this combining is done. This is mainly needed to counteract some of the '
+                        'instability typically found in the results. Just because one test run produces a better result '
+                        'after a change does not necessarily mean this will always be the case and that the change was for '
+                        'the better; it might just have been regular fluctuations in the test results.'));
+
+ oWrapper.initial_indent = ' ';
+ oWrapper.subsequent_indent = ' ';
+ print('');
+ print('Options governing combining (distillation):');
+ print(' --avg, --average');
+    print(oWrapper.fill('Combines the results by calculating the average value across all the runs.'));
+ print('');
+ print(' --best');
+    print(oWrapper.fill('Picks the best result from all the runs. For values, this means guessing which result is '
+                        'better based on the unit. This may not always lead to the right choices.'));
+ print(oWrapper.initial_indent + 'Default: --best');
+
+ print('');
+ print('Options relating to collections:');
+ print(' --name <name>');
+ print(oWrapper.fill('Sets the name of the current collection. By default a collection gets a sequential number.'));
+ print('');
+ print(' --baseline <num>');
+    print(oWrapper.fill('Sets the collection given by <num> (0-based) as the baseline collection.'));
+ print(oWrapper.initial_indent + 'Default: --baseline 0')
+
+ print('');
+ print('Filtering options:');
+ print(' --filter-test <substring>');
+    print(oWrapper.fill('Exclude tests not containing any of the substrings given via --filter-test options. The '
+                        'matching is done against the full test name, i.e. all parent names prepended using ", " as '
+                        'separator (for example "tstIOInstr, CPUID EAX=1").'));
+ print('');
+ print(' --filter-test-out <substring>');
+ print(oWrapper.fill('Exclude tests containing the given substring. As with --filter-test, the matching is done against '
+ 'the full test name.'));
+ print('');
+ print(' --filter-value <substring>');
+ print(oWrapper.fill('Exclude values not containing any of the substrings given via the --filter-value option. The '
+ 'matching is done against the value name prefixed by the full test name and ": " '
+ '(for example "tstIOInstr, CPUID EAX=1: real mode, CPUID").'));
+ print('');
+ print(' --filter-value-out <substring>');
+    print(oWrapper.fill('Exclude values containing the given substring. As with --filter-value, the matching is done against '
+ 'the value name prefixed by the full test name.'));
+
+ print('');
+ print(' --regex-test <expr>');
+ print(oWrapper.fill('Same as --filter-test except the substring matching is done via a regular expression.'));
+ print('');
+ print(' --regex-test-out <expr>');
+ print(oWrapper.fill('Same as --filter-test-out except the substring matching is done via a regular expression.'));
+ print('');
+ print(' --regex-value <expr>');
+ print(oWrapper.fill('Same as --filter-value except the substring matching is done via a regular expression.'));
+ print('');
+ print(' --regex-value-out <expr>');
+ print(oWrapper.fill('Same as --filter-value-out except the substring matching is done via a regular expression.'));
+ print('');
+ print(' --filter-out-empty-leaf-tests');
+    print(oWrapper.fill('Removes any leaf tests that have neither values nor sub-tests. This is useful when '
+                        'only considering values, especially when doing additional value filtering.'));
+
+ print('');
+ print('Analysis options:');
+ print(' --pct-same-value <float>');
+    print(oWrapper.fill('The threshold at which the percent difference between two values is considered the same '
+ 'during analysis.'));
+ print(oWrapper.initial_indent + 'Default: --pct-same-value 0.10');
+
+ print('');
+ print('Output options:');
+ print(' --brief, --verbose');
+    print(oWrapper.fill('Whether to omit (--brief) the values for non-baseline runs and only show the differences.'));
+ print(oWrapper.initial_indent + 'Default: --brief');
+ print('');
+ print(' --pct <num>, --pct-precision <num>');
+    print(oWrapper.fill('Specifies the number of decimal places to use when formatting the difference as a percentage.'));
+ print(oWrapper.initial_indent + 'Default: --pct 2');
+ return 1;
+
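+# Hypothetical example invocation (the XML file names are made up): compare a
+# baseline collection of two runs against a patched collection of two runs:
+#
+#   analyze.py --name before base-1.xml base-2.xml \
+#       -- --name after patched-1.xml patched-2.xml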
+
+class ResultCollection(object):
+ """
+ One or more test runs that should be merged before comparison.
+ """
+
+ def __init__(self, sName):
+ self.sName = sName;
+ self.aoTestTrees = [] # type: [Test]
+ self.asTestFiles = [] # type: [str] - runs parallel to aoTestTrees
+ self.oDistilled = None # type: Test
+
+ def append(self, sFilename):
+ """
+ Loads sFilename and appends the result.
+ Returns True on success, False on failure.
+ """
+ oTestTree = reader.parseTestResult(sFilename);
+ if oTestTree:
+ self.aoTestTrees.append(oTestTree);
+ self.asTestFiles.append(sFilename);
+ return True;
+ return False;
+
+ def isEmpty(self):
+ """ Checks if the result is empty. """
+ return len(self.aoTestTrees) == 0;
+
+ def filterTests(self, asFilters):
+ """
+ Keeps all the tests in the test trees sub-string matching asFilters (str or re).
+ """
+ for oTestTree in self.aoTestTrees:
+ oTestTree.filterTests(asFilters);
+ return self;
+
+ def filterOutTests(self, asFilters):
+ """
+ Removes all the tests in the test trees sub-string matching asFilters (str or re).
+ """
+ for oTestTree in self.aoTestTrees:
+ oTestTree.filterOutTests(asFilters);
+ return self;
+
+ def filterValues(self, asFilters):
+ """
+        Keeps all the values in the test trees sub-string matching asFilters (str or re).
+ """
+ for oTestTree in self.aoTestTrees:
+ oTestTree.filterValues(asFilters);
+ return self;
+
+ def filterOutValues(self, asFilters):
+ """
+        Removes all the values in the test trees sub-string matching asFilters (str or re).
+ """
+ for oTestTree in self.aoTestTrees:
+ oTestTree.filterOutValues(asFilters);
+ return self;
+
+ def filterOutEmptyLeafTests(self):
+ """
+ Removes all the tests in the test trees that have neither child tests nor values.
+ """
+ for oTestTree in self.aoTestTrees:
+ oTestTree.filterOutEmptyLeafTests();
+ return self;
+
+ def distill(self, sMethod, fDropLoners = False):
+ """
+ Distills the set of test results into a single one by the given method.
+
+ Valid sMethod values:
+ - 'best': Pick the best result for each test and value among all the test runs.
+ - 'avg': Calculate the average value among all the test runs.
+
+ When fDropLoners is True, tests and values that only appear in a single test run
+ will be discarded. When False (the default), the lone result will be used.
+ """
+ assert sMethod in ['best', 'avg'];
+ assert not self.oDistilled;
+
+ # If empty, nothing to do.
+ if self.isEmpty():
+ return None;
+
+ # If there is only a single tree, make a deep copy of it.
+ if len(self.aoTestTrees) == 1:
+ oDistilled = self.aoTestTrees[0].clone();
+ else:
+
+ # Since we don't know if the test runs are all from the same test, we create
+            # dummy root tests for each run and use these as the start for the distillation.
+ aoDummyInputTests = [];
+ for oRun in self.aoTestTrees:
+ oDummy = reader.Test();
+ oDummy.aoChildren = [oRun,];
+ aoDummyInputTests.append(oDummy);
+
+ # Similarly, we end up with a "dummy" root test for the result.
+ oDistilled = reader.Test();
+ oDistilled.distill(aoDummyInputTests, sMethod, fDropLoners);
+
+ # We can drop this if there is only a single child, i.e. if all runs are for
+ # the same test.
+ if len(oDistilled.aoChildren) == 1:
+ oDistilled = oDistilled.aoChildren[0];
+
+ self.oDistilled = oDistilled;
+ return oDistilled;
+
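+# Minimal usage sketch for ResultCollection (hypothetical file names): load two
+# XML result files and distill them into a single result tree.
+#
+#   oCollection = ResultCollection('nightly');
+#   for sFile in ('run-1.xml', 'run-2.xml'):
+#       oCollection.append(sFile);
+#   oTree = oCollection.distill('avg');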
+
+
+# matchWithValue hacks.
+g_asOptions = [];
+g_iOptInd = 1;
+g_sOptArg = '';
+
+def matchWithValue(sOption):
+ """ Matches an option with a value, placing the value in g_sOptArg if it matches. """
+ global g_asOptions, g_iOptInd, g_sOptArg;
+ sArg = g_asOptions[g_iOptInd];
+ if sArg.startswith(sOption):
+ if len(sArg) == len(sOption):
+ if g_iOptInd + 1 < len(g_asOptions):
+ g_iOptInd += 1;
+ g_sOptArg = g_asOptions[g_iOptInd];
+ return True;
+
+ print('syntax error: Option %s takes a value!' % (sOption,));
+ raise Exception('syntax error: Option %s takes a value!' % (sOption,));
+
+ if sArg[len(sOption)] in ('=', ':'):
+ g_sOptArg = sArg[len(sOption) + 1:];
+ return True;
+ return False;
+
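+# matchWithValue() accepts both the separated and the joined option forms.
+# E.g. with g_asOptions set to ['analyze.py', '--name', 'foo'] or to
+# ['analyze.py', '--name=foo'], matchWithValue('--name') returns True and
+# leaves 'foo' in g_sOptArg.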
+
+def main(asArgs):
+ """ C style main(). """
+ #
+ # Parse arguments
+ #
+ oCurCollection = ResultCollection('#0');
+ aoCollections = [ oCurCollection, ];
+ iBaseline = 0;
+ sDistillationMethod = 'best';
+ fBrief = True;
+ cPctPrecision = 2;
+ rdPctSameValue = 0.1;
+ asTestFilters = [];
+ asTestOutFilters = [];
+ asValueFilters = [];
+ asValueOutFilters = [];
+ fFilterOutEmptyLeafTest = True;
+
+ global g_asOptions, g_iOptInd, g_sOptArg;
+ g_asOptions = asArgs;
+ g_iOptInd = 1;
+ while g_iOptInd < len(asArgs):
+ sArg = asArgs[g_iOptInd];
+ g_sOptArg = '';
+ #print("dbg: g_iOptInd=%s '%s'" % (g_iOptInd, sArg,));
+
+ if sArg.startswith('--help'):
+ return usage();
+
+ if matchWithValue('--filter-test'):
+ asTestFilters.append(g_sOptArg);
+ elif matchWithValue('--filter-test-out'):
+ asTestOutFilters.append(g_sOptArg);
+ elif matchWithValue('--filter-value'):
+ asValueFilters.append(g_sOptArg);
+ elif matchWithValue('--filter-value-out'):
+ asValueOutFilters.append(g_sOptArg);
+
+ elif matchWithValue('--regex-test'):
+ asTestFilters.append(re.compile(g_sOptArg));
+ elif matchWithValue('--regex-test-out'):
+ asTestOutFilters.append(re.compile(g_sOptArg));
+ elif matchWithValue('--regex-value'):
+ asValueFilters.append(re.compile(g_sOptArg));
+ elif matchWithValue('--regex-value-out'):
+ asValueOutFilters.append(re.compile(g_sOptArg));
+
+ elif sArg == '--filter-out-empty-leaf-tests':
+ fFilterOutEmptyLeafTest = True;
+ elif sArg == '--no-filter-out-empty-leaf-tests':
+ fFilterOutEmptyLeafTest = False;
+
+ elif sArg == '--best':
+ sDistillationMethod = 'best';
+ elif sArg in ('--avg', '--average'):
+ sDistillationMethod = 'avg';
+
+ elif sArg == '--brief':
+ fBrief = True;
+ elif sArg == '--verbose':
+ fBrief = False;
+
+ elif matchWithValue('--pct') or matchWithValue('--pct-precision'):
+ cPctPrecision = int(g_sOptArg);
+ elif matchWithValue('--base') or matchWithValue('--baseline'):
+ iBaseline = int(g_sOptArg);
+
+ elif matchWithValue('--pct-same-value'):
+ rdPctSameValue = float(g_sOptArg);
+
+        # '--' starts a new collection.
+        elif sArg == '--':
+ #if oCurCollection.isEmpty():
+ # del aoCollections[-1];
+ oCurCollection = ResultCollection("#%s" % (len(aoCollections),));
+ aoCollections.append(oCurCollection);
+
+ # Name the current result collection.
+ elif matchWithValue('--name'):
+ oCurCollection.sName = g_sOptArg;
+
+ # Read in a file and add it to the current data set.
+ else:
+ if not oCurCollection.append(sArg):
+ return 1;
+ g_iOptInd += 1;
+
+ #
+ # Post argument parsing processing.
+ #
+
+ # Drop the last collection if empty.
+ if oCurCollection.isEmpty():
+ del aoCollections[-1];
+ if not aoCollections:
+ print("error: No input files given!");
+ return 1;
+
+ # Check the baseline value and mark the column as such.
+    if iBaseline < 0 or iBaseline >= len(aoCollections):
+ print("error: specified baseline is out of range: %s, valid range 0 <= baseline < %s"
+ % (iBaseline, len(aoCollections),));
+ return 1;
+ aoCollections[iBaseline].sName += ' (baseline)';
+
+ #
+ # Apply filtering before distilling each collection into a single result tree.
+ #
+ if asTestFilters:
+ for oCollection in aoCollections:
+ oCollection.filterTests(asTestFilters);
+ if asTestOutFilters:
+ for oCollection in aoCollections:
+ oCollection.filterOutTests(asTestOutFilters);
+
+ if asValueFilters:
+ for oCollection in aoCollections:
+ oCollection.filterValues(asValueFilters);
+ if asValueOutFilters:
+ for oCollection in aoCollections:
+ oCollection.filterOutValues(asValueOutFilters);
+
+ if fFilterOutEmptyLeafTest:
+ for oCollection in aoCollections:
+ oCollection.filterOutEmptyLeafTests();
+
+ # Distillation.
+ for oCollection in aoCollections:
+ oCollection.distill(sDistillationMethod);
+
+ #
+ # Produce the report.
+ #
+ oTable = reporting.RunTable(iBaseline, fBrief, cPctPrecision, rdPctSameValue);
+ oTable.populateFromRuns([oCollection.oDistilled for oCollection in aoCollections],
+ [oCollection.sName for oCollection in aoCollections]);
+ print('\n'.join(oTable.formatAsText()));
+ return 0;
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv));
+
diff --git a/src/VBox/ValidationKit/analysis/reader.py b/src/VBox/ValidationKit/analysis/reader.py
new file mode 100755
index 00000000..9e98e270
--- /dev/null
+++ b/src/VBox/ValidationKit/analysis/reader.py
@@ -0,0 +1,762 @@
+# -*- coding: utf-8 -*-
+# $Id: reader.py $
+
+"""
+XML reader module.
+
+This produces a test result tree that can be processed and passed to
+reporting.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2010-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+__version__ = "$Revision: 155244 $"
+__all__ = [ 'parseTestResult', ]
+
+# Standard python imports.
+import datetime;
+import os;
+import re;
+import sys;
+import traceback;
+
+# Only the main script needs to modify the path.
+try: __file__;
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)));
+sys.path.append(g_ksValidationKitDir);
+
+# ValidationKit imports.
+from common import utils;
+
+# Python 3 hacks:
+if sys.version_info[0] >= 3:
+ long = int; # pylint: disable=redefined-builtin,invalid-name
+
+# pylint: disable=missing-docstring
+
+
+class Value(object):
+ """
+ Represents a value. Usually this is benchmark result or parameter.
+ """
+
+ kdBestByUnit = {
+ "%": +1, # Difficult to say what's best really.
+ "bytes": +1, # Difficult to say what's best really.
+ "bytes/s": +2,
+ "KB": +1,
+ "KB/s": +2,
+ "MB": +1,
+ "MB/s": +2,
+ "packets": +2,
+ "packets/s": +2,
+ "frames": +2,
+ "frames/s": +2,
+ "occurrences": +1, # Difficult to say what's best really.
+ "occurrences/s": +2,
+ "roundtrips": +2,
+ "calls": +1, # Difficult to say what's best really.
+ "calls/s": +2,
+ "s": -2,
+ "ms": -2,
+ "ns": -2,
+ "ns/call": -2,
+ "ns/frame": -2,
+ "ns/occurrence": -2,
+ "ns/packet": -2,
+ "ns/roundtrip": -2,
+ "ins": +2,
+ "ins/sec": +2,
+ "": +1, # Difficult to say what's best really.
+ "pp1k": -2,
+ "pp10k": -2,
+ "ppm": -2,
+ "ppb": -2,
+ "ticks": -1, # Difficult to say what's best really.
+ "ticks/call": -2,
+ "ticks/occ": -2,
+ "pages": +1, # Difficult to say what's best really.
+ "pages/s": +2,
+ "ticks/page": -2,
+ "ns/page": -2,
+ "ps": -1, # Difficult to say what's best really.
+ "ps/call": -2,
+ "ps/frame": -2,
+ "ps/occurrence": -2,
+ "ps/packet": -2,
+ "ps/roundtrip": -2,
+ "ps/page": -2,
+ };
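+    # Encoding used by kdBestByUnit: +2/-2 means larger/smaller values are
+    # definitely better; +1/-1 means the direction is only a guess because
+    # the unit alone is ambiguous.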
+
+ def __init__(self, oTest, sName = None, sUnit = None, sTimestamp = None, lValue = None):
+ self.oTest = oTest;
+ self.sName = sName;
+ self.sUnit = sUnit;
+ self.sTimestamp = sTimestamp;
+ self.lValue = self.valueToInteger(lValue);
+ assert self.lValue is None or isinstance(self.lValue, (int, long)), "lValue=%s %s" % (self.lValue, type(self.lValue),);
+
+ def clone(self, oParentTest):
+ """
+ Clones the value.
+ """
+ return Value(oParentTest, self.sName, self.sUnit, self.sTimestamp, self.lValue);
+
+ def matchFilters(self, sPrefix, aoFilters):
+ """
+ Checks for any substring match between aoFilters (str or re.Pattern)
+ and the value name prefixed by sPrefix.
+
+ Returns True if any of the filters matches.
+ Returns False if none of the filters matches.
+ """
+ sFullName = sPrefix + self.sName;
+ for oFilter in aoFilters:
+ if oFilter.search(sFullName) is not None if isinstance(oFilter, re.Pattern) else sFullName.find(oFilter) >= 0:
+ return True;
+ return False;
+
+ def canDoBetterCompare(self):
+ """
+        Checks whether we can do a confident better-than comparison of the value.
+ """
+        return self.sUnit is not None and self.kdBestByUnit.get(self.sUnit, 0) not in (-1, 0, 1);
+
+ def getBetterRelation(self):
+ """
+        Returns +2 if larger values are definitely better.
+        Returns +1 if larger values are likely to be better.
+        Returns 0 if we have no clue.
+        Returns -1 if smaller values are likely to be better.
+ Returns -2 if smaller values are definitely better.
+ """
+ if self.sUnit is None:
+ return 0;
+        return self.kdBestByUnit.get(self.sUnit, 0);
+
+ @staticmethod
+ def valueToInteger(sValue):
+ """
+        Returns the integer (long) representation of sValue.
+ Returns None if it cannot be converted to integer.
+
+ Raises an exception if sValue isn't an integer.
+ """
+ if sValue is None or isinstance(sValue, (int, long)):
+ return sValue;
+ sValue = sValue.strip();
+ if not sValue:
+ return None;
+ return long(sValue);
+
+    # Manipulation
+
+ def distill(self, aoValues, sMethod):
+ """
+ Distills the value of the object from values from multiple test runs.
+ """
+ if not aoValues:
+ return self;
+
+ # Everything except the value comes from the first run.
+ self.sName = aoValues[0].sName;
+ self.sTimestamp = aoValues[0].sTimestamp;
+ self.sUnit = aoValues[0].sUnit;
+
+ # Find the value to use according to sMethod.
+ if len(aoValues) == 1:
+ self.lValue = aoValues[0].lValue;
+ else:
+ alValuesXcptInvalid = [oValue.lValue for oValue in aoValues if oValue.lValue is not None];
+ if not alValuesXcptInvalid:
+ # No integer result, so just pick the first value whatever it is.
+ self.lValue = aoValues[0].lValue;
+
+ elif sMethod == 'best':
+ # Pick the best result out of the whole bunch.
+                if self.kdBestByUnit.get(self.sUnit, +1) >= 0:
+ self.lValue = max(alValuesXcptInvalid);
+ else:
+ self.lValue = min(alValuesXcptInvalid);
+
+ elif sMethod == 'avg':
+ # Calculate the average.
+ self.lValue = (sum(alValuesXcptInvalid) + len(alValuesXcptInvalid) // 2) // len(alValuesXcptInvalid);
+
+ else:
+ assert False;
+ self.lValue = aoValues[0].lValue;
+
+ return self;
+
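+    # Example of the 'avg' rounding above: distilling values 3 and 4 gives
+    # (3 + 4 + 2 // 2) // 2 = 4, i.e. the integer average rounded to nearest.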
+
+ # debug
+
+ def printValue(self, cIndent):
+ print('%sValue: name=%s timestamp=%s unit=%s value=%s'
+ % (''.ljust(cIndent*2), self.sName, self.sTimestamp, self.sUnit, self.lValue));
+
+
+class Test(object):
+ """
+ Nested test result.
+ """
+ def __init__(self, oParent = None, hsAttrs = None):
+ self.aoChildren = [] # type: list(Test)
+ self.aoValues = [];
+ self.oParent = oParent;
+ self.sName = hsAttrs['name'] if hsAttrs else None;
+ self.sStartTS = hsAttrs['timestamp'] if hsAttrs else None;
+ self.sEndTS = None;
+ self.sStatus = None;
+ self.cErrors = -1;
+
+ def clone(self, oParent = None):
+ """
+ Returns a deep copy.
+ """
+ oClone = Test(oParent, {'name': self.sName, 'timestamp': self.sStartTS});
+
+ for oChild in self.aoChildren:
+ oClone.aoChildren.append(oChild.clone(oClone));
+
+ for oValue in self.aoValues:
+ oClone.aoValues.append(oValue.clone(oClone));
+
+ oClone.sEndTS = self.sEndTS;
+ oClone.sStatus = self.sStatus;
+ oClone.cErrors = self.cErrors;
+ return oClone;
+
+ # parsing
+
+ def addChild(self, oChild):
+ self.aoChildren.append(oChild);
+ return oChild;
+
+ def addValue(self, oValue):
+ self.aoValues.append(oValue);
+ return oValue;
+
+ def __markCompleted(self, sTimestamp):
+ """ Sets sEndTS if not already done. """
+ if not self.sEndTS:
+ self.sEndTS = sTimestamp;
+
+ def markPassed(self, sTimestamp):
+ self.__markCompleted(sTimestamp);
+ self.sStatus = 'passed';
+ self.cErrors = 0;
+
+ def markSkipped(self, sTimestamp):
+ self.__markCompleted(sTimestamp);
+ self.sStatus = 'skipped';
+ self.cErrors = 0;
+
+ def markFailed(self, sTimestamp, cErrors):
+ self.__markCompleted(sTimestamp);
+ self.sStatus = 'failed';
+ self.cErrors = cErrors;
+
+ def markEnd(self, sTimestamp, cErrors):
+ self.__markCompleted(sTimestamp);
+ if self.sStatus is None:
+ self.sStatus = 'failed' if cErrors != 0 else 'end';
+        self.cErrors = cErrors;
+
+ def mergeInIncludedTest(self, oTest):
+ """ oTest will be robbed. """
+ if oTest is not None:
+ for oChild in oTest.aoChildren:
+ oChild.oParent = self;
+ self.aoChildren.append(oChild);
+ for oValue in oTest.aoValues:
+ oValue.oTest = self;
+ self.aoValues.append(oValue);
+ oTest.aoChildren = [];
+ oTest.aoValues = [];
+
+ # debug
+
+ def printTree(self, iLevel = 0):
+ print('%sTest: name=%s start=%s end=%s' % (''.ljust(iLevel*2), self.sName, self.sStartTS, self.sEndTS));
+ for oChild in self.aoChildren:
+ oChild.printTree(iLevel + 1);
+ for oValue in self.aoValues:
+ oValue.printValue(iLevel + 1);
+
+ # getters / queries
+
+ def getFullNameWorker(self, cSkipUpper):
+ if self.oParent is None:
+ return (self.sName, 0);
+ sName, iLevel = self.oParent.getFullNameWorker(cSkipUpper);
+ if iLevel < cSkipUpper:
+ sName = self.sName;
+ else:
+ sName += ', ' + self.sName;
+ return (sName, iLevel + 1);
+
+ def getFullName(self, cSkipUpper = 2):
+ return self.getFullNameWorker(cSkipUpper)[0];
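+    # Example: with the default cSkipUpper=2 the two outermost levels (the
+    # dummy root and the test-run level) are omitted, so the chain
+    # root -> run -> 'tstIOInstr' -> 'CPUID EAX=1' yields the full name
+    # 'tstIOInstr, CPUID EAX=1'.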
+
+ def matchFilters(self, aoFilters):
+ """
+ Checks for any substring match between aoFilters (str or re.Pattern)
+ and the full test name.
+
+ Returns True if any of the filters matches.
+ Returns False if none of the filters matches.
+ """
+ sFullName = self.getFullName();
+ for oFilter in aoFilters:
+ if oFilter.search(sFullName) is not None if isinstance(oFilter, re.Pattern) else sFullName.find(oFilter) >= 0:
+ return True;
+ return False;
+
+ # manipulation
+
+ def filterTestsWorker(self, asFilters, fReturnOnMatch):
+ # depth first
+ i = 0;
+ while i < len(self.aoChildren):
+ if self.aoChildren[i].filterTestsWorker(asFilters, fReturnOnMatch):
+ i += 1;
+ else:
+ self.aoChildren[i].oParent = None;
+ del self.aoChildren[i];
+
+ # If we have children, they must've matched up.
+ if self.aoChildren:
+ return True;
+ if self.matchFilters(asFilters):
+ return fReturnOnMatch;
+ return not fReturnOnMatch;
+
+ def filterTests(self, asFilters):
+ """ Keep tests matching asFilters. """
+ if asFilters:
+ self.filterTestsWorker(asFilters, True);
+ return self;
+
+ def filterOutTests(self, asFilters):
+ """ Removes tests matching asFilters. """
+ if asFilters:
+ self.filterTestsWorker(asFilters, False);
+ return self;
+
+ def filterValuesWorker(self, asFilters, fKeepWhen):
+ # Process children recursively.
+ for oChild in self.aoChildren:
+ oChild.filterValuesWorker(asFilters, fKeepWhen);
+
+ # Filter our values.
+ iValue = len(self.aoValues);
+ if iValue > 0:
+ sFullname = self.getFullName() + ': ';
+ while iValue > 0:
+ iValue -= 1;
+ if self.aoValues[iValue].matchFilters(sFullname, asFilters) != fKeepWhen:
+ del self.aoValues[iValue];
+ return None;
+
+ def filterValues(self, asFilters):
+ """ Keep values matching asFilters. """
+ if asFilters:
+ self.filterValuesWorker(asFilters, True);
+ return self;
+
+ def filterOutValues(self, asFilters):
+ """ Removes values matching asFilters. """
+ if asFilters:
+ self.filterValuesWorker(asFilters, False);
+ return self;
+
+ def filterOutEmptyLeafTests(self):
+ """
+        Removes any child tests that have neither values nor sub-tests.
+ Returns True if leaf, False if not.
+ """
+ iChild = len(self.aoChildren);
+ while iChild > 0:
+ iChild -= 1;
+ if self.aoChildren[iChild].filterOutEmptyLeafTests():
+ del self.aoChildren[iChild];
+ return not self.aoChildren and not self.aoValues;
+
+ @staticmethod
+ def calcDurationStatic(sStartTS, sEndTS):
+ """
+        Returns None if the start timestamp is absent or invalid.
+ Returns datetime.timedelta otherwise.
+ """
+ if not sStartTS:
+ return None;
+ try:
+ oStart = utils.parseIsoTimestamp(sStartTS);
+ except:
+ return None;
+
+ if not sEndTS:
+ return datetime.timedelta.max;
+ try:
+ oEnd = utils.parseIsoTimestamp(sEndTS);
+ except:
+ return datetime.timedelta.max;
+
+ return oEnd - oStart;
+
+ def calcDuration(self):
+ """
+ Returns the duration as a datetime.timedelta object or None if not available.
+ """
+ return self.calcDurationStatic(self.sStartTS, self.sEndTS);
+
+ def calcDurationAsMicroseconds(self):
+ """
+ Returns the duration as microseconds or None if not available.
+ """
+ oDuration = self.calcDuration();
+ if not oDuration:
+ return None;
+ return (oDuration.days * 86400 + oDuration.seconds) * 1000000 + oDuration.microseconds;
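+    # Example: a 1.5 second duration, datetime.timedelta(seconds=1, microseconds=500000),
+    # converts to (0 * 86400 + 1) * 1000000 + 500000 = 1500000 microseconds.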
+
+ @staticmethod
+ def distillTimes(aoTestRuns, sMethod, sStatus):
+ """
+        Distills the start and end times of the tests.
+ Returns a (sStartTS, sEndTS) pair.
+ """
+
+ #
+        # Start by assembling two lists of start and end times for all runs that have a start timestamp.
+ # Then sort out the special cases where no run has a start timestamp and only a single one has.
+ #
+ asStartTS = [oRun.sStartTS for oRun in aoTestRuns if oRun.sStartTS];
+ if not asStartTS:
+ return (None, None);
+ asEndTS = [oRun.sEndTS for oRun in aoTestRuns if oRun.sStartTS]; # parallel to asStartTS, so we don't check sEndTS.
+ if len(asStartTS) == 1:
+ return (asStartTS[0], asEndTS[0]);
+
+ #
+ # Calculate durations for all runs.
+ #
+ if sMethod == 'best':
+ aoDurations = [Test.calcDurationStatic(oRun.sStartTS, oRun.sEndTS) for oRun in aoTestRuns if oRun.sStatus == sStatus];
+ if not aoDurations or aoDurations.count(None) == len(aoDurations):
+ aoDurations = [Test.calcDurationStatic(oRun.sStartTS, oRun.sEndTS) for oRun in aoTestRuns];
+ if aoDurations.count(None) == len(aoDurations):
+ return (asStartTS[0], None);
+ oDuration = min([oDuration for oDuration in aoDurations if oDuration is not None]);
+
+        elif sMethod == 'avg':
+            aoDurations = [Test.calcDurationStatic(oRun.sStartTS, oRun.sEndTS) for oRun in aoTestRuns if oRun.sStatus == sStatus];
+            aoDurations = [oDuration for oDuration in aoDurations if oDuration];
+            if not aoDurations:
+                return (asStartTS[0], None);
+            aoDurations = [oDuration for oDuration in aoDurations if oDuration < datetime.timedelta.max];
+            if not aoDurations:
+                return (asStartTS[0], None);
+            # sum() doesn't work on timedelta, so do it manually.
+            oDuration = aoDurations[0];
+            for i in range(1, len(aoDurations)):
+                oDuration += aoDurations[i];
+            oDuration = oDuration / len(aoDurations);
+
+ else:
+ assert False;
+ return (asStartTS[0], asEndTS[0]);
+
+ # Check unfinished test.
+ if oDuration >= datetime.timedelta.max:
+ return (asStartTS[0], None);
+
+ # Calculate and format the end timestamp string.
+ oStartTS = utils.parseIsoTimestamp(asStartTS[0]);
+ oEndTS = oStartTS + oDuration;
+ return (asStartTS[0], utils.formatIsoTimestamp(oEndTS));
+
+ @staticmethod
+ def distillStatus(aoTestRuns, sMethod):
+ """
+        Distills the status of the tests.
+ Returns the status.
+ """
+ asStatuses = [oRun.sStatus for oRun in aoTestRuns];
+
+ if sMethod == 'best':
+ for sStatus in ('passed', 'failed', 'skipped'):
+ if sStatus in asStatuses:
+ return sStatus;
+ return asStatuses[0];
+
+ if sMethod == 'avg':
+ cPassed = asStatuses.count('passed');
+ cFailed = asStatuses.count('failed');
+ cSkipped = asStatuses.count('skipped');
+ cEnd = asStatuses.count('end');
+ cNone = asStatuses.count(None);
+ if cPassed >= cFailed and cPassed >= cSkipped and cPassed >= cNone and cPassed >= cEnd:
+ return 'passed';
+ if cFailed >= cPassed and cFailed >= cSkipped and cFailed >= cNone and cFailed >= cEnd:
+ return 'failed';
+ if cSkipped >= cPassed and cSkipped >= cFailed and cSkipped >= cNone and cSkipped >= cEnd:
+ return 'skipped';
+ if cEnd >= cPassed and cEnd >= cFailed and cEnd >= cNone and cEnd >= cSkipped:
+ return 'end';
+ return None;
+
+ assert False;
+ return asStatuses[0];
+
+ @staticmethod
+ def distillErrors(aoTestRuns, sMethod):
+ """
+        Distills the error counts of the tests.
+        Returns the distilled error count.
+ """
+ acErrorsXcptNeg = [oRun.cErrors for oRun in aoTestRuns if oRun.cErrors >= 0];
+
+ if sMethod == 'best':
+ if acErrorsXcptNeg:
+ return min(acErrorsXcptNeg);
+ elif sMethod == 'avg':
+ if acErrorsXcptNeg:
+ return sum(acErrorsXcptNeg) // len(acErrorsXcptNeg);
+ else:
+ assert False;
+ return -1;
+
+ def distill(self, aoTestRuns, sMethod, fDropLoners):
+ """
+ Distills the test runs into this test.
+ """
+ #
+ # Recurse first (before we create too much state in the stack
+ # frame) and do child tests.
+ #
+        # We copy the child lists of each test run so we can remove tests we've
+        # processed from each run and thus make sure we also include tests that
+        # do not appear in every run.
+        #
+ aaoChildren = [list(oRun.aoChildren) for oRun in aoTestRuns];
+
+ # Process the tests for each run.
+ for i, _ in enumerate(aaoChildren):
+ # Process all tests for the current run.
+ while len(aaoChildren[i]) > 0:
+ oFirst = aaoChildren[i].pop(0);
+
+ # Build a list of sub-test runs by searching remaining runs by test name.
+ aoSameSubTests = [oFirst,];
+ for j in range(i + 1, len(aaoChildren)):
+ aoThis = aaoChildren[j];
+ for iThis, oThis in enumerate(aoThis):
+ if oThis.sName == oFirst.sName:
+ del aoThis[iThis];
+ aoSameSubTests.append(oThis);
+ break;
+
+ # Apply fDropLoners.
+ if not fDropLoners or len(aoSameSubTests) > 1 or len(aaoChildren) == 1:
+ # Create an empty test and call distill on it with the subtest array, unless
+ # of course that the array only has one member and we can simply clone it.
+ if len(aoSameSubTests) == 1:
+ self.addChild(oFirst.clone(self));
+ else:
+ oSubTest = Test(self);
+ oSubTest.sName = oFirst.sName;
+ oSubTest.distill(aoSameSubTests, sMethod, fDropLoners);
+ self.addChild(oSubTest);
+ del aaoChildren;
+
+ #
+        # Do values. Similar approach as for the sub-tests.
+ #
+ aaoValues = [list(oRun.aoValues) for oRun in aoTestRuns];
+
+ # Process the values for each run.
+ for i,_ in enumerate(aaoValues):
+ # Process all values for the current run.
+ while len(aaoValues[i]) > 0:
+ oFirst = aaoValues[i].pop(0);
+
+ # Build a list of values runs by searching remaining runs by value name and unit.
+ aoSameValues = [oFirst,];
+ for j in range(i + 1, len(aaoValues)):
+ aoThis = aaoValues[j];
+ for iThis, oThis in enumerate(aoThis):
+ if oThis.sName == oFirst.sName and oThis.sUnit == oFirst.sUnit:
+ del aoThis[iThis];
+ aoSameValues.append(oThis);
+ break;
+
+ # Apply fDropLoners.
+ if not fDropLoners or len(aoSameValues) > 1 or len(aaoValues) == 1:
+ # Create an empty test and call distill on it with the subtest array, unless
+ # of course that the array only has one member and we can simply clone it.
+ if len(aoSameValues) == 1:
+ self.aoValues.append(oFirst.clone(self));
+ else:
+ oValue = Value(self);
+ oValue.distill(aoSameValues, sMethod);
+ self.aoValues.append(oValue);
+ del aaoValues;
+
+ #
+ # Distill test properties.
+ #
+ self.sStatus = self.distillStatus(aoTestRuns, sMethod);
+ self.cErrors = self.distillErrors(aoTestRuns, sMethod);
+ (self.sStartTS, self.sEndTS) = self.distillTimes(aoTestRuns, sMethod, self.sStatus);
+ print("dbg: %s: sStartTS=%s, sEndTS=%s" % (self.sName, self.sStartTS, self.sEndTS));
+
+ return self;
+
+
+class XmlLogReader(object):
+ """
+ XML log reader class.
+ """
+
+ def __init__(self, sXmlFile):
+ self.sXmlFile = sXmlFile;
+ self.oRoot = Test(None, {'name': 'root', 'timestamp': ''});
+ self.oTest = self.oRoot;
+ self.iLevel = 0;
+ self.oValue = None;
+
+ def parse(self):
+ try:
+ oFile = open(self.sXmlFile, 'rb'); # pylint: disable=consider-using-with
+ except:
+ traceback.print_exc();
+ return False;
+
+ from xml.parsers.expat import ParserCreate
+ oParser = ParserCreate();
+ oParser.StartElementHandler = self.handleElementStart;
+ oParser.CharacterDataHandler = self.handleElementData;
+ oParser.EndElementHandler = self.handleElementEnd;
+ try:
+ oParser.ParseFile(oFile);
+ except:
+ traceback.print_exc();
+ oFile.close();
+ return False;
+ oFile.close();
+ return True;
+
+ def handleElementStart(self, sName, hsAttrs):
+ #print('%s%s: %s' % (''.ljust(self.iLevel * 2), sName, str(hsAttrs)));
+ if sName in ('Test', 'SubTest',):
+ self.iLevel += 1;
+ self.oTest = self.oTest.addChild(Test(self.oTest, hsAttrs));
+ elif sName == 'Value':
+ self.oValue = self.oTest.addValue(Value(self.oTest, hsAttrs.get('name'), hsAttrs.get('unit'),
+ hsAttrs.get('timestamp'), hsAttrs.get('value')));
+ elif sName == 'End':
+ self.oTest.markEnd(hsAttrs.get('timestamp'), int(hsAttrs.get('errors', '0')));
+ elif sName == 'Passed':
+ self.oTest.markPassed(hsAttrs.get('timestamp'));
+ elif sName == 'Skipped':
+ self.oTest.markSkipped(hsAttrs.get('timestamp'));
+ elif sName == 'Failed':
+ self.oTest.markFailed(hsAttrs.get('timestamp'), int(hsAttrs['errors']));
+ elif sName == 'Include':
+ self.handleInclude(hsAttrs);
+ else:
+ print('Unknown element "%s"' % (sName,));
+
+    def handleElementData(self, sData):
+        if self.oValue is not None:
+            # Values carry their result in the 'value' attribute; Value has no
+            # addData method, so just warn about unexpected character data.
+            if sData.strip() != '':
+                print('Unexpected value data "%s"' % (sData,));
+        elif sData.strip() != '':
+            print('Unexpected data "%s"' % (sData,));
+        return True;
+
+ def handleElementEnd(self, sName):
+        if sName in ('Test', 'SubTest',):
+ self.iLevel -= 1;
+ self.oTest = self.oTest.oParent;
+ elif sName == 'Value':
+ self.oValue = None;
+ return True;
+
+ def handleInclude(self, hsAttrs):
+ # relative or absolute path.
+ sXmlFile = hsAttrs['filename'];
+ if not os.path.isabs(sXmlFile):
+ sXmlFile = os.path.join(os.path.dirname(self.sXmlFile), sXmlFile);
+
+ # Try parse it.
+ oSub = parseTestResult(sXmlFile);
+ if oSub is None:
+ print('error: failed to parse include "%s"' % (sXmlFile,));
+ else:
+            # Skip the root and the next level before merging the subtests and
+            # values into the current test. The reason for this is that the
+            # include is the output of some sub-program we've run and we don't
+            # need the extra test level it automatically adds.
+ #
+ # More benchmark heuristics: Walk down until we find more than one
+ # test or values.
+ oSub2 = oSub;
+ while len(oSub2.aoChildren) == 1 and not oSub2.aoValues:
+ oSub2 = oSub2.aoChildren[0];
+ if not oSub2.aoValues:
+ oSub2 = oSub;
+ self.oTest.mergeInIncludedTest(oSub2);
+ return True;
+
+def parseTestResult(sXmlFile):
+ """
+ Parses the test results in the XML.
+ Returns result tree.
+ Returns None on failure.
+ """
+ oXlr = XmlLogReader(sXmlFile);
+ if oXlr.parse():
+ if len(oXlr.oRoot.aoChildren) == 1 and not oXlr.oRoot.aoValues:
+ return oXlr.oRoot.aoChildren[0];
+ return oXlr.oRoot;
+ return None;
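+# Usage sketch (hypothetical path):
+#   oTree = parseTestResult('results/run-1.xml');
+#   if oTree:
+#       oTree.printTree();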
+
diff --git a/src/VBox/ValidationKit/analysis/reporting.py b/src/VBox/ValidationKit/analysis/reporting.py
new file mode 100755
index 00000000..ba8db6a0
--- /dev/null
+++ b/src/VBox/ValidationKit/analysis/reporting.py
@@ -0,0 +1,746 @@
+# -*- coding: utf-8 -*-
+# $Id: reporting.py $
+
+"""
+Test Result Report Writer.
+
+This takes a processed test result tree and creates an HTML, reStructuredText,
+or plain text report from it.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2010-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+
+__version__ = "$Revision: 155244 $"
+
+# Standard python imports.
+import os;
+import sys;
+
+# Only the main script needs to modify the path.
+try: __file__;
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)));
+sys.path.append(g_ksValidationKitDir);
+
+# ValidationKit imports.
+from common import utils;
+
+# Python 3 hacks:
+if sys.version_info[0] >= 3:
+ long = int; # pylint: disable=redefined-builtin,invalid-name
+
+
+##################################################################################################################################
+# Run Table #
+##################################################################################################################################
+
+def alignTextLeft(sText, cchWidth):
+ """ Left aligns text and pads it to cchWidth characters length. """
+ return sText + ' ' * (cchWidth - min(len(sText), cchWidth));
+
+
+def alignTextRight(sText, cchWidth):
+ """ Right aligns text and pads it to cchWidth characters length. """
+ return ' ' * (cchWidth - min(len(sText), cchWidth)) + sText;
+
+
+def alignTextCenter(sText, cchWidth):
+ """ Pads the text equally on both sides to cchWidth characters length. """
+ return alignTextLeft(' ' * ((cchWidth - min(len(sText), cchWidth)) // 2) + sText, cchWidth);
+
+
+g_kiAlignLeft = -1;
+g_kiAlignRight = 1;
+g_kiAlignCenter = 0;
+def alignText(sText, cchWidth, iAlignType):
+ """
+ General alignment method.
+
+    Negative iAlignType for left aligning, zero for centering, and positive for
+ right aligning the text.
+ """
+ if iAlignType < 0:
+ return alignTextLeft(sText, cchWidth);
+ if iAlignType > 0:
+ return alignTextRight(sText, cchWidth);
+ return alignTextCenter(sText, cchWidth);
+
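+# Example: alignText('ab', 5, g_kiAlignCenter) returns ' ab  ' -- one pad
+# character on the left ((5 - 2) // 2 = 1) and the remainder on the right.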
+
+class TextColumnWidth(object):
+ """
+ Tracking the width of a column, dealing with sub-columns and such.
+ """
+
+ def __init__(self):
+ self.cch = 0;
+ self.dacchSub = {};
+
+ def update(self, oWidth, cchSubColSpacing = 1):
+ """
+ Updates the column width tracking with oWidth, which is either
+ an int or an array of ints (sub columns).
+ """
+ if isinstance(oWidth, int):
+ self.cch = max(self.cch, oWidth);
+ else:
+ cSubCols = len(oWidth);
+ if cSubCols not in self.dacchSub:
+ self.dacchSub[cSubCols] = list(oWidth);
+ self.cch = max(self.cch, sum(oWidth) + cchSubColSpacing * (cSubCols - 1));
+ else:
+ acchSubCols = self.dacchSub[cSubCols];
+ for iSub in range(cSubCols):
+ acchSubCols[iSub] = max(acchSubCols[iSub], oWidth[iSub]);
+ self.cch = max(self.cch, sum(acchSubCols) + cchSubColSpacing * (cSubCols - 1));
+
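+    # Example: update([3, 5]) with the default one-character sub-column spacing
+    # records sub-widths [3, 5] and widens the column to at least
+    # 3 + 5 + 1 = 9 characters.
+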
+ def finalize(self):
+ """ Finalizes sub-column sizes. """
+ ## @todo maybe do something here, maybe not...
+ return self;
+
+ def hasSubColumns(self):
+ """ Checks if there are sub-columns for this column. """
+        return len(self.dacchSub) > 0;
+
+class TextWidths(object):
+ """
+    Tracks the column widths for text rendering of the table.
+ """
+ def __init__(self, cchSubColSpacing = 1, ):
+ self.cchName = 1;
+ self.aoColumns = [] # type: TextColumnWidth
+ self.cchSubColSpacing = cchSubColSpacing;
+ self.fFinalized = False;
+
+ def update(self, aoWidths):
+ """ Updates the tracker with the returns of calcColumnWidthsForText. """
+ if not aoWidths[0]:
+ self.cchName = max(self.cchName, aoWidths[1]);
+
+ for iCol, oWidth in enumerate(aoWidths[2]):
+ if iCol >= len(self.aoColumns):
+ self.aoColumns.append(TextColumnWidth());
+ self.aoColumns[iCol].update(oWidth, self.cchSubColSpacing);
+
+ return self;
+
+ def finalize(self):
+ """ Finalizes sub-column sizes. """
+ for oColumnWidth in self.aoColumns:
+ oColumnWidth.finalize();
+ self.fFinalized = True;
+ return self;
+
+ def getColumnWidth(self, iColumn, cSubs = None, iSub = None):
+ """ Returns the width of the specified column. """
+ if not self.fFinalized:
+ return 0;
+ assert iColumn < len(self.aoColumns), "iColumn=%s vs %s" % (iColumn, len(self.aoColumns),);
+ oColumn = self.aoColumns[iColumn];
+ if cSubs is not None:
+ assert iSub < cSubs;
+ if cSubs != 1:
+ assert cSubs in oColumn.dacchSub, \
+ "iColumn=%s cSubs=%s iSub=%s; dacchSub=%s" % (iColumn, cSubs, iSub, oColumn.dacchSub);
+ return oColumn.dacchSub[cSubs][iSub];
+ return oColumn.cch;
+
+
+class TextElement(object):
+ """
+ A text element (cell/sub-cell in a table).
+ """
+
+ def __init__(self, sText = '', iAlign = g_kiAlignRight): # type: (str, int) -> None
+ self.sText = sText;
+ self.iAlign = iAlign;
+
+ def asText(self, cchWidth): # type: (int) -> str
+ """ Pads the text to width of cchWidth characters. """
+ return alignText(self.sText, cchWidth, self.iAlign);
+
+
+class RunRow(object):
+ """
+ Run table row.
+ """
+
+ def __init__(self, iLevel, sName, iRun = 0): # type: (int, str, int) -> None
+ self.iLevel = iLevel;
+ self.sName = sName;
+ self.iFirstRun = iRun;
+
+ # Fields used while formatting (set during construction or calcColumnWidthsForText/Html).
+ self.cColumns = 0; ##< Number of columns.
+        self.fSkip = False; ##< Whether or not to skip this row in the output.
+
+ # Format as Text:
+
+ def formatNameAsText(self, cchWidth): # (int) -> TextElement
+ """ Format the row as text. """
+ _ = cchWidth;
+ return TextElement(' ' * (self.iLevel * 2) + self.sName, g_kiAlignLeft);
+
+ def getColumnCountAsText(self, oTable):
+ """
+ Called by calcColumnWidthsForText for getting an up-to-date self.cColumns value.
+ Override this to update cColumns after construction.
+ """
+ _ = oTable;
+ return self.cColumns;
+
+ def formatColumnAsText(self, iColumn, oTable): # type: (int, RunTable) -> [TextElement]
+ """ Returns an array of TextElements for the given column in this row. """
+ _ = iColumn; _ = oTable;
+ return [ TextElement(),];
+
+ def calcColumnWidthsForText(self, oTable): # type: (RunTable) -> (bool, int, [])
+ """
+ Calculates the column widths for text rendering.
+
+ Returns a tuple consisting of the fSkip, the formatted name width, and an
+ array of column widths. The entries in the latter are either integer
+ widths or arrays of subcolumn integer widths.
+ """
+ aoRetCols = [];
+ cColumns = self.getColumnCountAsText(oTable);
+ for iColumn in range(cColumns):
+ aoSubColumns = self.formatColumnAsText(iColumn, oTable);
+ if len(aoSubColumns) == 1:
+ aoRetCols.append(len(aoSubColumns[0].sText));
+ else:
+ aoRetCols.append([len(oSubColumn.sText) for oSubColumn in aoSubColumns]);
+ return (False, len(self.formatNameAsText(0).sText), aoRetCols);
+
+ def renderAsText(self, oWidths, oTable): # type: (TextWidths, RunTable) -> str
+ """
+ Renders the row as text.
+
+ Returns string.
+ """
+ sRow = self.formatNameAsText(oWidths.cchName).asText(oWidths.cchName);
+ sRow = sRow + ' ' * (oWidths.cchName - min(len(sRow), oWidths.cchName)) + ' : ';
+
+ for iColumn in range(self.cColumns):
+ aoSubCols = self.formatColumnAsText(iColumn, oTable);
+ sCell = '';
+ for iSub, oText in enumerate(aoSubCols):
+ cchWidth = oWidths.getColumnWidth(iColumn, len(aoSubCols), iSub);
+ if iSub > 0:
+ sCell += ' ' * oWidths.cchSubColSpacing;
+ sCell += oText.asText(cchWidth);
+ cchWidth = oWidths.getColumnWidth(iColumn);
+ sRow += (' | ' if iColumn > 0 else '') + ' ' * (cchWidth - min(cchWidth, len(sCell))) + sCell;
+
+ return sRow;
+
+ @staticmethod
+ def formatDiffAsText(lNumber, lBaseline):
+ """ Formats the difference between lNumber and lBaseline as text. """
+ if lNumber is not None:
+ if lBaseline is not None:
+ if lNumber < lBaseline:
+ return '-' + utils.formatNumber(lBaseline - lNumber); ## @todo formatter is busted for negative nums.
+ if lNumber > lBaseline:
+ return '+' + utils.formatNumber(lNumber - lBaseline);
+ return '0';
+ return '';
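+    # Examples: formatDiffAsText(120, 100) returns '+20', formatDiffAsText(80, 100)
+    # returns '-20', and missing inputs yield the empty string.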
+
+ @staticmethod
+ def formatPctAsText(chSign, rdPct, cPctPrecision):
+ """ Formats percentage value as text. """
+ if rdPct >= 100:
+ return '%s%s%%' % (chSign, utils.formatNumber(int(rdPct + 0.5)),);
+ if round(rdPct, cPctPrecision) != 0:
+ return '%s%.*f%%' % (chSign, cPctPrecision, rdPct,); # %.*f rounds.
+ return '~' + chSign + '0.' + '0' * cPctPrecision + '%';
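+    # Examples: formatPctAsText('+', 0.004, 2) returns '~+0.00%' (rounds to
+    # zero), while formatPctAsText('-', 150.0, 2) returns '-150%' (values at
+    # or above 100 are shown without decimals).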
+
+ @staticmethod
+ def formatDiffInPctAsText(lNumber, lBaseline, cPctPrecision):
+ """ Formats the difference between lNumber and lBaseline in precent as text. """
+ if lNumber is not None:
+ if lBaseline is not None:
+ ## @todo implement cPctPrecision
+ if lNumber == lBaseline:
+ return '0.' + '0'*cPctPrecision + '%';
+
+ lDiff = lNumber - lBaseline;
+ chSign = '+';
+ if lDiff < 0:
+ lDiff = -lDiff;
+ chSign = '-';
+ return RunRow.formatPctAsText(chSign, lDiff / float(lBaseline) * 100, cPctPrecision);
+ return '';
+
+
+class RunHeaderRow(RunRow):
+ """
+ Run table header row.
+ """
+ def __init__(self, sName, asColumns): # type: (str, [str]) -> None
+ RunRow.__init__(self, 0, sName);
+        self.asColumns = asColumns;
+ self.cColumns = len(asColumns);
+
+ def formatColumnAsText(self, iColumn, oTable): # type: (int, RunTable) -> [TextElement]
+ return [TextElement(self.asColumns[iColumn], g_kiAlignCenter),];
+
+
+class RunFooterRow(RunHeaderRow):
+ """
+ Run table footer row.
+ """
+ def __init__(self, sName, asColumns):
+ RunHeaderRow.__init__(self, sName, asColumns);
+
+
+class RunSeparatorRow(RunRow):
+ """
+ Base class for separator rows.
+ """
+ def __init__(self):
+ RunRow.__init__(self, 0, '');
+
+ def calcTableWidthAsText(self, oWidths):
+ """ Returns the table width for when rendered as text. """
+ cchWidth = oWidths.cchName;
+ for oCol in oWidths.aoColumns:
+ cchWidth += 3 + oCol.cch;
+ return cchWidth;
+
+
+class RunHeaderSeparatorRow(RunSeparatorRow):
+ """
+ Run table header separator row.
+ """
+ def __init__(self):
+ RunSeparatorRow.__init__(self);
+
+ def renderAsText(self, oWidths, oTable):
+ _ = oTable;
+ return '=' * self.calcTableWidthAsText(oWidths);
+
+
+class RunFooterSeparatorRow(RunHeaderSeparatorRow):
+ """
+ Run table footer separator row.
+ """
+ def __init__(self):
+ RunHeaderSeparatorRow.__init__(self);
+
+
+class RunTestRow(RunRow):
+ """
+ Run table test row.
+ """
+
+ def __init__(self, iLevel, oTest, iRun, aoTests = None): # type: (int, reader.Test, int, [reader.Test]) -> None
+ RunRow.__init__(self, iLevel, oTest.sName, iRun);
+ assert oTest;
+ self.oTest = oTest;
+ if aoTests is None:
+ aoTests = [None for i in range(iRun)];
+ aoTests.append(oTest);
+ else:
+            aoTests = list(aoTests);
+        self.aoTests = aoTests;
+
+ def isSameTest(self, oTest):
+ """ Checks if oTest belongs to this row or not. """
+ return oTest.sName == self.oTest.sName;
+
+ def getBaseTest(self, oTable):
+ """ Returns the baseline test. """
+ oBaseTest = self.aoTests[oTable.iBaseline];
+ if not oBaseTest:
+ oBaseTest = self.aoTests[self.iFirstRun];
+ return oBaseTest;
+
+
+class RunTestStartRow(RunTestRow):
+ """
+ Run table start of test row.
+ """
+
+ def __init__(self, iLevel, oTest, iRun): # type: (int, reader.Test, int) -> None
+ RunTestRow.__init__(self, iLevel, oTest, iRun);
+
+ def renderAsText(self, oWidths, oTable):
+ _ = oTable;
+ sRet = self.formatNameAsText(oWidths.cchName).asText(oWidths.cchName);
+ sRet += ' : ';
+ sRet += ' | '.join(['-' * oCol.cch for oCol in oWidths.aoColumns]);
+ return sRet;
+
+
+class RunTestEndRow(RunTestRow):
+ """
+ Run table end of test row.
+ """
+
+ def __init__(self, oStartRow): # type: (RunTestStartRow) -> None
+ RunTestRow.__init__(self, oStartRow.iLevel, oStartRow.oTest, oStartRow.iFirstRun, oStartRow.aoTests);
+ self.oStartRow = oStartRow # type: RunTestStartRow
+
+ def getColumnCountAsText(self, oTable):
+ self.cColumns = len(self.aoTests);
+ return self.cColumns;
+
+ def formatColumnAsText(self, iColumn, oTable):
+ oTest = self.aoTests[iColumn];
+ if oTest and oTest.sStatus:
+ if oTest.cErrors > 0:
+ return [ TextElement(oTest.sStatus, g_kiAlignCenter),
+                         TextElement(utils.formatNumber(oTest.cErrors) + ' errors') ];
+ return [ TextElement(oTest.sStatus, g_kiAlignCenter) ];
+ return [ TextElement(), ];
+
+
+class RunTestEndRow2(RunTestRow):
+ """
+ Run table 2nd end of test row, this shows the times.
+ """
+
+ def __init__(self, oStartRow): # type: (RunTestStartRow) -> None
+ RunTestRow.__init__(self, oStartRow.iLevel, oStartRow.oTest, oStartRow.iFirstRun, oStartRow.aoTests);
+ self.oStartRow = oStartRow # type: RunTestStartRow
+
+ def formatNameAsText(self, cchWidth):
+ _ = cchWidth;
+ return TextElement('runtime', g_kiAlignRight);
+
+ def getColumnCountAsText(self, oTable):
+ self.cColumns = len(self.aoTests);
+ return self.cColumns;
+
+ def formatColumnAsText(self, iColumn, oTable):
+ oTest = self.aoTests[iColumn];
+ if oTest:
+ cUsElapsed = oTest.calcDurationAsMicroseconds();
+ if cUsElapsed:
+ oBaseTest = self.getBaseTest(oTable);
+ if oTest is oBaseTest:
+ return [ TextElement(utils.formatNumber(cUsElapsed)), TextElement('us', g_kiAlignLeft), ];
+ cUsElapsedBase = oBaseTest.calcDurationAsMicroseconds();
+ aoRet = [
+ TextElement(utils.formatNumber(cUsElapsed)),
+ TextElement(self.formatDiffAsText(cUsElapsed, cUsElapsedBase)),
+ TextElement(self.formatDiffInPctAsText(cUsElapsed, cUsElapsedBase, oTable.cPctPrecision)),
+ ];
+ return aoRet[1:] if oTable.fBrief else aoRet;
+ return [ TextElement(), ];
+
+
+class RunTestValueAnalysisRow(RunTestRow):
+ """
+    Run table row with value analysis for a test, indicating whether we have an improvement or not.
+ """
+ def __init__(self, oStartRow): # type: (RunTestStartRow) -> None
+ RunTestRow.__init__(self, oStartRow.iLevel, oStartRow.oTest, oStartRow.iFirstRun, oStartRow.aoTests);
+ self.oStartRow = oStartRow # type: RunTestStartRow
+ self.cColumns = len(self.aoTests);
+
+ def formatNameAsText(self, cchWidth):
+ _ = cchWidth;
+ return TextElement('value analysis', g_kiAlignRight);
+
+ def formatColumnAsText(self, iColumn, oTable):
+ oBaseline = self.getBaseTest(oTable);
+ oTest = self.aoTests[iColumn];
+ if not oTest or oTest is oBaseline:
+ return [TextElement(),];
+
+ #
+ # This is a bit ugly, but it means we don't have to re-merge the values.
+ #
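+        # Note: the scan below walks every row between the start-test row and
+        # this row, so value rows of nested subtests are swept up as well.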
+ cTotal = 0;
+ cBetter = 0;
+ cWorse = 0;
+ cSame = 0;
+ cUncertain = 0;
+ rdPctTotal = 0.0;
+
+ iRow = oTable.aoRows.index(self.oStartRow); # ugly
+ while iRow < len(oTable.aoRows):
+ oRow = oTable.aoRows[iRow];
+ if oRow is self:
+ break;
+ if isinstance(oRow, RunValueRow):
+ oValue = oRow.aoValues[iColumn];
+ oBaseValue = oRow.getBaseValue(oTable);
+ if oValue is not None and oValue is not oBaseValue:
+ iBetter = oValue.getBetterRelation();
+ if iBetter != 0:
+ lDiff = oValue.lValue - oBaseValue.lValue;
+                            rdPct = abs(lDiff / float(oBaseValue.lValue) * 100) if oBaseValue.lValue else 100.0; # Avoid division by zero.
+ if rdPct < oTable.rdPctSameValue:
+ cSame += 1;
+ else:
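+                                # iBetter > 0 means higher values are better; iBetter < 0 means lower
+                                # is better (sign convention assumed for Value.getBetterRelation()).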
+                                if (lDiff > 0) if iBetter > 0 else (lDiff < 0):
+ cBetter += 1;
+ rdPctTotal += rdPct;
+ else:
+ cWorse += 1;
+ rdPctTotal += -rdPct;
+ cUncertain += 1 if iBetter in (1, -1) else 0;
+ cTotal += 1;
+ iRow += 1;
+
+ #
+ # Format the result.
+ #
+ aoRet = [];
+ if not oTable.fBrief:
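+            # Builds a right-aligned count summary like u'\u21913  \u22482  \u21931'
+            # (up-arrow = better, approx = same, down-arrow = worse).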
+ sText = u' \u2193%u' % (cWorse,);
+ sText = u' \u2248%u' % (cSame,) + alignTextRight(sText, 4);
+ sText = u'\u2191%u' % (cBetter,) + alignTextRight(sText, 8);
+ aoRet = [TextElement(sText),];
+
+ if cSame >= cWorse and cSame >= cBetter:
+ sVerdict = 'same';
+ elif cWorse >= cSame and cWorse >= cBetter:
+ sVerdict = 'worse';
+ else:
+ sVerdict = 'better';
+ if cUncertain > 0:
+ sVerdict = 'probably ' + sVerdict;
+ aoRet.append(TextElement(sVerdict));
+
+        rdPctAvg = abs(rdPctTotal / cTotal) if cTotal > 0 else 0.0; # Yes, average of the percentages!
+ aoRet.append(TextElement(self.formatPctAsText('+' if rdPctTotal >= 0 else '-', rdPctAvg, oTable.cPctPrecision)));
+
+ return aoRet;
+
+
+class RunValueRow(RunRow):
+ """
+ Run table value row.
+ """
+
+ def __init__(self, iLevel, oValue, iRun): # type: (int, reader.Value, int) -> None
+ RunRow.__init__(self, iLevel, oValue.sName, iRun);
+ self.oValue = oValue;
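+        # Pad with None for earlier runs lacking this value; matches from later
+        # runs are appended by RunTable.__populateFromValues().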
+ self.aoValues = [None for i in range(iRun)];
+ self.aoValues.append(oValue);
+
+ def isSameValue(self, oValue):
+ """ Checks if oValue belongs to this row or not. """
+ return oValue.sName == self.oValue.sName and oValue.sUnit == self.oValue.sUnit;
+
+ # Formatting as Text.
+
+ @staticmethod
+ def formatOneValueAsText(oValue): # type: (reader.Value) -> str
+ """ Formats a value. """
+ if not oValue:
+ return "N/A";
+ return utils.formatNumber(oValue.lValue);
+
+ def getBaseValue(self, oTable):
+ """ Returns the base value instance. """
+ oBaseValue = self.aoValues[oTable.iBaseline];
+ if not oBaseValue:
+ oBaseValue = self.aoValues[self.iFirstRun];
+ return oBaseValue;
+
+ def getColumnCountAsText(self, oTable):
+ self.cColumns = len(self.aoValues);
+ return self.cColumns;
+
+ def formatColumnAsText(self, iColumn, oTable):
+ oValue = self.aoValues[iColumn];
+ oBaseValue = self.getBaseValue(oTable);
+ if oValue is oBaseValue:
+ return [ TextElement(self.formatOneValueAsText(oValue)),
+ TextElement(oValue.sUnit, g_kiAlignLeft), ];
+ aoRet = [
+ TextElement(self.formatOneValueAsText(oValue)),
+ TextElement(self.formatDiffAsText(oValue.lValue if oValue else None, oBaseValue.lValue)),
+ TextElement(self.formatDiffInPctAsText(oValue.lValue if oValue else None, oBaseValue.lValue, oTable.cPctPrecision))
+ ];
+ return aoRet[1:] if oTable.fBrief else aoRet;
+
+
+class RunTable(object):
+ """
+ Result table.
+
+ This contains one or more test runs as columns.
+ """
+
+    def __init__(self, iBaseline = 0, fBrief = True, cPctPrecision = 2, rdPctSameValue = 0.10): # type: (int, bool, int, float) -> None
+ self.asColumns = [] # type: [str] ##< Column names.
+ self.aoRows = [] # type: [RunRow] ##< The table rows.
+ self.iBaseline = iBaseline # type: int ##< Which column is the baseline when diffing things.
+ self.fBrief = fBrief # type: bool ##< Whether to exclude the numerical values of non-baseline runs.
+ self.cPctPrecision = cPctPrecision # type: int ##< Number of decimal points in diff percentage value.
+ self.rdPctSameValue = rdPctSameValue # type: float ##< The percent value at which a value difference is considered
+ ## to be the same during value analysis.
+
+    def __populateFromValues(self, aaoValueRuns, iLevel): # type: ([[reader.Value]], int) -> bool
+ """
+ Internal worker for __populateFromRuns()
+
+        This will modify the sub-lists inside aaoValueRuns; they will all be empty on return.
+
+        Returns True if a value analysis row should be added, False if not.
+ """
+ # Same as for __populateFromRuns, only no recursion.
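+        # A sketch of the merge: given two runs [[A, B], [B, C]], this yields the
+        # rows A:[A, None], B:[B, B] and C:[None, C]; values pair up by name and
+        # unit, and runs missing a value get a None placeholder.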
+ fAnalysisRow = False;
+ for iValueRun, aoValuesForRun in enumerate(aaoValueRuns):
+ while aoValuesForRun:
+ oRow = RunValueRow(iLevel, aoValuesForRun.pop(0), iValueRun);
+ self.aoRows.append(oRow);
+
+ # Pop matching values from the other runs of this test.
+ for iOtherRun in range(iValueRun + 1, len(aaoValueRuns)):
+ aoValuesForOtherRun = aaoValueRuns[iOtherRun];
+ for iValueToPop, oOtherValue in enumerate(aoValuesForOtherRun):
+ if oRow.isSameValue(oOtherValue):
+ oRow.aoValues.append(aoValuesForOtherRun.pop(iValueToPop));
+ break;
+ if len(oRow.aoValues) <= iOtherRun:
+ oRow.aoValues.append(None);
+
+ fAnalysisRow = fAnalysisRow or oRow.oValue.canDoBetterCompare();
+ return fAnalysisRow;
+
+    def __populateFromRuns(self, aaoTestRuns, iLevel): # type: ([[reader.Test]], int) -> RunTable
+ """
+ Internal worker for populateFromRuns()
+
+        This will modify the sub-lists inside aaoTestRuns; they will all be empty on return.
+ """
+
+ #
+ # Currently doing depth first, so values are always at the end.
+ # Nominally, we should inject values according to the timestamp.
+ # However, that's too much work right now and can be done later if needed.
+ #
+ for iRun, aoTestForRun in enumerate(aaoTestRuns):
+ while aoTestForRun:
+ # Pop the next test and create a start-test row for it.
+ oStartRow = RunTestStartRow(iLevel, aoTestForRun.pop(0), iRun);
+ self.aoRows.append(oStartRow);
+
+ # Pop matching tests from the other runs.
+ for iOtherRun in range(iRun + 1, len(aaoTestRuns)):
+ aoOtherTestRun = aaoTestRuns[iOtherRun];
+ for iTestToPop, oOtherTest in enumerate(aoOtherTestRun):
+ if oStartRow.isSameTest(oOtherTest):
+ oStartRow.aoTests.append(aoOtherTestRun.pop(iTestToPop));
+ break;
+ if len(oStartRow.aoTests) <= iOtherRun:
+ oStartRow.aoTests.append(None);
+
+ # Now recursively do the subtests for it and then do the values.
+                self.__populateFromRuns([list(oTest.aoChildren) if oTest else list() for oTest in oStartRow.aoTests],
+                                        iLevel + 1);
+                fValueAnalysisRow = self.__populateFromValues([list(oTest.aoValues) if oTest else list()
+                                                               for oTest in oStartRow.aoTests], iLevel + 1);
+
+ # Add the end-test row for it.
+ self.aoRows.append(RunTestEndRow(oStartRow));
+ self.aoRows.append(RunTestEndRow2(oStartRow));
+ if fValueAnalysisRow:
+ self.aoRows.append(RunTestValueAnalysisRow(oStartRow));
+
+ return self;
+
+ def populateFromRuns(self, aoTestRuns, asRunNames = None): # type: ([reader.Test], [str]) -> RunTable
+ """
+ Populates the table from the series of runs.
+
+        The aoTestRuns and asRunNames lists run in parallel. If the latter
+        is not given, the column names will simply be ordinals, starting
+        with #0 for the first column.
+
+ Returns self.
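+
+        Example (a sketch; oRunA and oRunB are root reader.Test instances,
+        however they were loaded):
+            oTable = RunTable(iBaseline = 0, fBrief = False);
+            oTable.populateFromRuns([oRunA, oRunB], ['baseline', 'candidate']);
+            print('\n'.join(oTable.formatAsText()));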
+ """
+ #
+ # Deal with the column names first.
+ #
+ if asRunNames:
+ self.asColumns = list(asRunNames);
+ else:
+ self.asColumns = [];
+        iCol = len(self.asColumns);
+        while iCol < len(aoTestRuns):
+            self.asColumns.append('#%u%s' % (iCol, ' (baseline)' if iCol == self.iBaseline else '',));
+            iCol += 1;
+
+ self.aoRows = [
+ RunHeaderSeparatorRow(),
+ RunHeaderRow('Test / Value', self.asColumns),
+ RunHeaderSeparatorRow(),
+ ];
+
+ #
+ # Now flatten the test trees into a table.
+ #
+ self.__populateFromRuns([[oTestRun,] for oTestRun in aoTestRuns], 0);
+
+ #
+        # Add a footer (repeating the column names) if there are a lot of rows.
+ #
+ if len(self.aoRows) - 2 > 40:
+ self.aoRows.extend([RunFooterSeparatorRow(), RunFooterRow('', self.asColumns),]);
+
+ return self;
+
+ #
+ # Text formatting.
+ #
+
+ def formatAsText(self):
+ """
+ Formats the table as text.
+
+ Returns a string array of the output lines.
+ """
+
+ #
+ # Pass 1: Calculate column widths.
+ #
+ oWidths = TextWidths(1);
+ for oRow in self.aoRows:
+ oWidths.update(oRow.calcColumnWidthsForText(self));
+ oWidths.finalize();
+
+ #
+ # Pass 2: Generate the output strings.
+ #
+ asRet = [];
+ for oRow in self.aoRows:
+ if not oRow.fSkip:
+ asRet.append(oRow.renderAsText(oWidths, self));
+
+ return asRet;
+