summary refs log tree commit diff stats
path: root/testing/mozbase/mozlog
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
commit26a029d407be480d791972afb5975cf62c9360a6 (patch)
treef435a8308119effd964b339f76abb83a57c29483 /testing/mozbase/mozlog
parentInitial commit. (diff)
downloadfirefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1.upstream/124.0.1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/mozbase/mozlog')
-rw-r--r--testing/mozbase/mozlog/mozlog/__init__.py34
-rw-r--r--testing/mozbase/mozlog/mozlog/capture.py96
-rw-r--r--testing/mozbase/mozlog/mozlog/commandline.py344
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/__init__.py32
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/base.py25
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/errorsummary.py208
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/grouping.py391
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/html/__init__.py7
-rwxr-xr-xtesting/mozbase/mozlog/mozlog/formatters/html/html.py343
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/html/main.js166
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/html/style.css155
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/html/xmlgen.py310
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/machformatter.py657
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/process.py59
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py473
-rwxr-xr-xtesting/mozbase/mozlog/mozlog/formatters/unittest.py83
-rw-r--r--testing/mozbase/mozlog/mozlog/formatters/xunit.py115
-rw-r--r--testing/mozbase/mozlog/mozlog/handlers/__init__.py19
-rw-r--r--testing/mozbase/mozlog/mozlog/handlers/base.py124
-rw-r--r--testing/mozbase/mozlog/mozlog/handlers/bufferhandler.py86
-rw-r--r--testing/mozbase/mozlog/mozlog/handlers/messagehandler.py39
-rw-r--r--testing/mozbase/mozlog/mozlog/handlers/statushandler.py87
-rw-r--r--testing/mozbase/mozlog/mozlog/handlers/summaryhandler.py193
-rw-r--r--testing/mozbase/mozlog/mozlog/handlers/valgrindhandler.py138
-rw-r--r--testing/mozbase/mozlog/mozlog/logtypes.py302
-rw-r--r--testing/mozbase/mozlog/mozlog/proxy.py81
-rw-r--r--testing/mozbase/mozlog/mozlog/pytest_mozlog/__init__.py0
-rw-r--r--testing/mozbase/mozlog/mozlog/pytest_mozlog/plugin.py127
-rw-r--r--testing/mozbase/mozlog/mozlog/reader.py78
-rw-r--r--testing/mozbase/mozlog/mozlog/scripts/__init__.py41
-rw-r--r--testing/mozbase/mozlog/mozlog/scripts/format.py55
-rw-r--r--testing/mozbase/mozlog/mozlog/scripts/logmerge.py90
-rw-r--r--testing/mozbase/mozlog/mozlog/scripts/unstable.py148
-rw-r--r--testing/mozbase/mozlog/mozlog/stdadapter.py50
-rw-r--r--testing/mozbase/mozlog/mozlog/structuredlog.py820
-rw-r--r--testing/mozbase/mozlog/mozlog/unstructured/__init__.py8
-rw-r--r--testing/mozbase/mozlog/mozlog/unstructured/logger.py191
-rw-r--r--testing/mozbase/mozlog/mozlog/unstructured/loggingmixin.py42
-rw-r--r--testing/mozbase/mozlog/mozlog/unstructured/loglistener.py50
-rw-r--r--testing/mozbase/mozlog/setup.cfg2
-rw-r--r--testing/mozbase/mozlog/setup.py43
-rw-r--r--testing/mozbase/mozlog/tests/conftest.py24
-rw-r--r--testing/mozbase/mozlog/tests/manifest.toml16
-rw-r--r--testing/mozbase/mozlog/tests/test_capture.py37
-rw-r--r--testing/mozbase/mozlog/tests/test_errorsummary.py262
-rw-r--r--testing/mozbase/mozlog/tests/test_formatters.py767
-rw-r--r--testing/mozbase/mozlog/tests/test_logger.py303
-rw-r--r--testing/mozbase/mozlog/tests/test_logtypes.py106
-rw-r--r--testing/mozbase/mozlog/tests/test_structured.py1162
-rw-r--r--testing/mozbase/mozlog/tests/test_terminal_colors.py62
50 files changed, 9051 insertions, 0 deletions
diff --git a/testing/mozbase/mozlog/mozlog/__init__.py b/testing/mozbase/mozlog/mozlog/__init__.py
new file mode 100644
index 0000000000..82d40b5c55
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/__init__.py
@@ -0,0 +1,34 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
"""
Mozlog aims to standardize log handling and formatting within Mozilla.

It implements a JSON-based structured logging protocol with convenience
facilities for recording test results.

The old unstructured module is deprecated. It simply wraps Python's
``logging`` module and adds a few convenience methods for logging test
results and events.
"""

import sys

from . import commandline, structuredlog, unstructured
from .proxy import get_proxy_logger
from .structuredlog import get_default_logger, set_default_logger

# Backwards compatibility shim for consumers that use mozlog.structured:
# alias this package itself under the old "mozlog.structured" name and
# register it in sys.modules so `import mozlog.structured` keeps working.
structured = sys.modules[__name__]
sys.modules["{}.structured".format(__name__)] = structured

__all__ = [
    "commandline",
    "structuredlog",
    "unstructured",
    "get_default_logger",
    "set_default_logger",
    "get_proxy_logger",
    "structured",
]
diff --git a/testing/mozbase/mozlog/mozlog/capture.py b/testing/mozbase/mozlog/mozlog/capture.py
new file mode 100644
index 0000000000..75717d62c8
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/capture.py
@@ -0,0 +1,96 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+import threading
+from io import BytesIO
+
+
class LogThread(threading.Thread):
    """Daemon thread that drains a queue and forwards every message to a
    single logger method (selected by *level*, e.g. ``"info"``)."""

    def __init__(self, queue, logger, level):
        threading.Thread.__init__(self, name="Thread-Log")
        self.queue = queue
        # Bind the logging method once up front (e.g. logger.info).
        self.log_func = getattr(logger, level)
        self.daemon = True

    def run(self):
        # Pump messages until the queue breaks (EOFError/IOError from a
        # closed multiprocessing queue) or a None sentinel arrives.
        while True:
            try:
                item = self.queue.get()
            except (EOFError, IOError):
                return
            if item is None:
                return
            self.log_func(item)
+
+
class LoggingWrapper(BytesIO):
    """File-like object that redirects written output onto a queue so it
    can be picked up by a logger instead of reaching the real stream."""

    def __init__(self, queue, prefix=None):
        BytesIO.__init__(self)
        self.queue = queue
        self.prefix = prefix
        # Some callers write to `stream.buffer`; make that point back here.
        self.buffer = self

    def write(self, data):
        if isinstance(data, bytes):
            try:
                text = data.decode("utf8")
            except UnicodeDecodeError:
                text = data.decode("unicode_escape")
        else:
            text = data

        # Strip at most one trailing "\n", then at most one trailing "\r",
        # so one write becomes one clean log line.
        if text.endswith("\n"):
            text = text[:-1]
        if text.endswith("\r"):
            text = text[:-1]
        if not text:
            return
        if self.prefix is not None:
            text = "%s: %s" % (self.prefix, text)
        self.queue.put(text)

    def flush(self):
        """No-op: each write is forwarded to the queue immediately."""
+
+
class CaptureIO(object):
    """Context manager that, when *do_capture* is true, replaces
    sys.stdout/sys.stderr with LoggingWrapper objects feeding a queue,
    and starts a LogThread that logs each captured line at info level.

    :param logger: logger whose ``info`` method receives captured lines.
    :param do_capture: when falsy, __enter__/__exit__ do nothing at all.
    :param mp_context: multiprocessing-like module providing Queue();
                       defaults to the stdlib multiprocessing module.
    """

    def __init__(self, logger, do_capture, mp_context=None):
        if mp_context is None:
            import multiprocessing as mp_context
        self.logger = logger
        self.do_capture = do_capture
        self.logging_queue = None
        self.logging_thread = None
        self.original_stdio = None
        self.mp_context = mp_context

    def __enter__(self):
        if self.do_capture:
            # Remember the real streams so __exit__ can restore them.
            self.original_stdio = (sys.stdout, sys.stderr)
            self.logging_queue = self.mp_context.Queue()
            self.logging_thread = LogThread(self.logging_queue, self.logger, "info")
            sys.stdout = LoggingWrapper(self.logging_queue, prefix="STDOUT")
            sys.stderr = LoggingWrapper(self.logging_queue, prefix="STDERR")
            self.logging_thread.start()

    def __exit__(self, *args, **kwargs):
        if self.do_capture:
            sys.stdout, sys.stderr = self.original_stdio
            if self.logging_queue is not None:
                self.logger.info("Closing logging queue")
                # None is the LogThread shutdown sentinel.
                self.logging_queue.put(None)
                if self.logging_thread is not None:
                    # Bounded join: don't hang forever on a stuck thread.
                    self.logging_thread.join(10)
                # Drain anything the thread didn't consume before close().
                while not self.logging_queue.empty():
                    try:
                        self.logger.warning(
                            "Dropping log message: %r", self.logging_queue.get()
                        )
                    except Exception:
                        # Best-effort drain; never fail during teardown.
                        pass
                self.logging_queue.close()
                self.logger.info("queue closed")
diff --git a/testing/mozbase/mozlog/mozlog/commandline.py b/testing/mozbase/mozlog/mozlog/commandline.py
new file mode 100644
index 0000000000..51e9ea6929
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/commandline.py
@@ -0,0 +1,344 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import optparse
+import os
+import sys
+from collections import defaultdict
+
+import six
+
+from . import formatters, handlers
+from .structuredlog import StructuredLogger, set_default_logger
+
# Registry of named log formatters: name -> (formatter class, help text).
# The help text becomes the description of the matching --log-<name>
# command line option; argparse.SUPPRESS hides an entry from --help.
log_formatters = {
    "raw": (
        formatters.JSONFormatter,
        "Raw structured log messages (provided by mozlog)",
    ),
    "unittest": (
        formatters.UnittestFormatter,
        "Unittest style output (provided by mozlog)",
    ),
    "xunit": (
        formatters.XUnitFormatter,
        "xUnit compatible XML (provided by mozlog)",
    ),
    "html": (formatters.HTMLFormatter, "HTML report (provided by mozlog)"),
    "mach": (formatters.MachFormatter, "Human-readable output (provided by mozlog)"),
    "tbpl": (formatters.TbplFormatter, "TBPL style log format (provided by mozlog)"),
    "grouped": (
        formatters.GroupingFormatter,
        "Grouped summary of test results (provided by mozlog)",
    ),
    "errorsummary": (formatters.ErrorSummaryFormatter, argparse.SUPPRESS),
}

# A subset of formatters for applications not related to test harnesses.
TEXT_FORMATTERS = ("raw", "mach")


DOCS_URL = "https://firefox-source-docs.mozilla.org/mozbase/mozlog.html"
+
+
def level_filter_wrapper(formatter, level):
    """Wrap *formatter* so only messages at or above *level* pass through."""
    filtered = handlers.LogLevelFilter(formatter, level)
    return filtered
+
+
def verbose_wrapper(formatter, verbose):
    """Set verbose mode on *formatter* and hand the same object back."""
    setattr(formatter, "verbose", verbose)
    return formatter
+
+
def compact_wrapper(formatter, compact):
    """Set compact mode on *formatter* and hand the same object back."""
    setattr(formatter, "compact", compact)
    return formatter
+
+
def buffer_handler_wrapper(handler, buffer_limit):
    """Wrap *handler* in a BufferHandler.

    The string "UNLIMITED" means no cap; anything else is parsed as an
    integer message limit.
    """
    limit = None if buffer_limit == "UNLIMITED" else int(buffer_limit)
    return handlers.BufferHandler(handler, limit)
+
+
def screenshot_wrapper(formatter, enable_screenshot):
    """Record on *formatter* whether screenshot data should be logged."""
    setattr(formatter, "enable_screenshot", enable_screenshot)
    return formatter
+
+
def valgrind_handler_wrapper(handler):
    """Wrap *handler* in a ValgrindHandler."""
    wrapped = handlers.ValgrindHandler(handler)
    return wrapped
+
+
def default_formatter_options(log_type, overrides):
    """Return the default option dict for the *log_type* formatter.

    Starts from the global defaults, layers on any per-formatter
    defaults (currently only "raw" logs at debug level), then applies
    caller *overrides* if given.
    """
    per_formatter_defaults = {"raw": {"level": "debug"}}
    options = dict(verbose=False, level="info")
    options.update(per_formatter_defaults.get(log_type, {}))

    if overrides is not None:
        options.update(overrides)

    return options
+
+
fmt_options = {
    # <option name>: (<wrapper function>, description, <applicable formatters>, action)
    # "action" is used by the commandline parser in use.
    # The <applicable formatters> entries are sets, used only for
    # membership tests ("fmt in formatters_" / "fmt not in formatters_").
    "verbose": (
        verbose_wrapper,
        "Enables verbose mode for the given formatter.",
        {"mach"},
        "store_true",
    ),
    "compact": (
        compact_wrapper,
        "Enables compact mode for the given formatter.",
        {"tbpl"},
        "store_true",
    ),
    "level": (
        level_filter_wrapper,
        "A least log level to subscribe to for the given formatter "
        "(debug, info, error, etc.)",
        {"mach", "raw", "tbpl"},
        "store",
    ),
    "buffer": (
        buffer_handler_wrapper,
        "If specified, enables message buffering at the given buffer size limit.",
        # Fixed: this entry was a list while every other entry uses a set.
        {"mach", "tbpl"},
        "store",
    ),
    "screenshot": (
        screenshot_wrapper,
        "Enable logging reftest-analyzer compatible screenshot data.",
        {"mach"},
        "store_true",
    ),
    "no-screenshot": (
        screenshot_wrapper,
        "Disable logging reftest-analyzer compatible screenshot data.",
        {"mach"},
        "store_false",
    ),
}
+
+
def log_file(name):
    """Return a writable stream for *name*, creating parent directories.

    :param name: Output path, or "-" to select sys.stdout.
    :returns: sys.stdout, or a text-mode file object opened for writing.
    """
    if name == "-":
        return sys.stdout
    # ensure we have a correct dirpath by using realpath
    dirpath = os.path.dirname(os.path.realpath(name))
    # exist_ok avoids the check-then-create race of the previous
    # `if not os.path.exists(dirpath): os.makedirs(dirpath)` form when
    # several processes set up logging concurrently.
    os.makedirs(dirpath, exist_ok=True)
    return open(name, "w")
+
+
def add_logging_group(parser, include_formatters=None):
    """
    Add logging options to an argparse ArgumentParser or
    optparse OptionParser.

    Each formatter has a corresponding option of the form --log-{name}
    where {name} is the name of the formatter. The option takes a value
    which is either a filename or "-" to indicate stdout.

    :param parser: The ArgumentParser or OptionParser object that should have
                   logging options added.
    :param include_formatters: List of formatter names that should be included
                               in the option group. Default to None, meaning
                               all the formatters are included. A common use
                               of this option is to specify
                               :data:`TEXT_FORMATTERS` to include only the
                               most useful formatters for a command line tool
                               that is not related to test harnesses.
    """
    group_name = "Output Logging"
    group_description = (
        "Each option represents a possible logging format "
        "and takes a filename to write that format to, "
        "or '-' to write to stdout. Some options are "
        "provided by the mozlog utility; see %s "
        "for extended documentation." % DOCS_URL
    )

    if include_formatters is None:
        include_formatters = list(log_formatters.keys())

    # optparse and argparse have different option-group and type APIs:
    # optparse takes a type *name* string, argparse takes a converter
    # callable (log_file turns the value into a stream).
    if isinstance(parser, optparse.OptionParser):
        group = optparse.OptionGroup(parser, group_name, group_description)
        parser.add_option_group(group)
        opt_log_type = "str"
        group_add = group.add_option
    else:
        group = parser.add_argument_group(group_name, group_description)
        opt_log_type = log_file
        group_add = group.add_argument

    # One --log-<formatter> option per included formatter; "append" lets a
    # formatter be routed to several destinations.
    for name, (cls, help_str) in six.iteritems(log_formatters):
        if name in include_formatters:
            group_add(
                "--log-" + name, action="append", type=opt_log_type, help=help_str
            )

    # One --log-<formatter>-<option> flag per applicable formatter option.
    for fmt in include_formatters:
        for optname, (cls, help_str, formatters_, action) in six.iteritems(fmt_options):
            if fmt not in formatters_:
                continue
            # "no-<opt>" store_false flags share the same dest as their
            # positive counterpart (e.g. --log-mach-no-screenshot writes
            # to log_mach_screenshot).
            if optname.startswith("no-") and action == "store_false":
                dest = optname.split("-", 1)[1]
            else:
                dest = optname
            dest = dest.replace("-", "_")
            group_add(
                "--log-%s-%s" % (fmt, optname),
                action=action,
                help=help_str,
                default=None,
                dest="log_%s_%s" % (fmt, dest),
            )
+
+
def setup_handlers(logger, formatters, formatter_options, allow_unused_options=False):
    """
    Add handlers to the given logger according to the formatters and
    options provided.

    :param logger: The logger configured by this function.
    :param formatters: A dict of {formatter, [streams]} to use in handlers.
    :param formatter_options: a dict of {formatter: {option: value}} to
                              use when configuring formatters.
    :param allow_unused_options: when True, silently ignore options given
                                 for formatters that have no streams.
    :raises ValueError: if options target an unused formatter and
                        allow_unused_options is False.
    """
    unused_options = set(formatter_options.keys()) - set(formatters.keys())
    if unused_options and not allow_unused_options:
        msg = "Options specified for unused formatter(s) (%s) have no effect" % list(
            unused_options
        )
        raise ValueError(msg)

    for fmt, streams in six.iteritems(formatters):
        formatter_cls = log_formatters[fmt][0]
        formatter = formatter_cls()
        handler_wrappers_and_options = []

        # Formatter options either wrap the *handler* (valgrind, buffer)
        # or rewrap/configure the *formatter* (everything else).
        for option, value in six.iteritems(formatter_options[fmt]):
            wrapper, wrapper_args = None, ()
            if option == "valgrind":
                wrapper = valgrind_handler_wrapper
            elif option == "buffer":
                wrapper, wrapper_args = fmt_options[option][0], (value,)
            else:
                formatter = fmt_options[option][0](formatter, value)

            if wrapper is not None:
                handler_wrappers_and_options.append((wrapper, wrapper_args))

        # One StreamHandler per destination stream, each wrapped by any
        # handler-level wrappers collected above.
        for value in streams:
            handler = handlers.StreamHandler(stream=value, formatter=formatter)
            for wrapper, wrapper_args in handler_wrappers_and_options:
                handler = wrapper(handler, *wrapper_args)
            logger.add_handler(handler)
+
+
def setup_logging(
    logger, args, defaults=None, formatter_defaults=None, allow_unused_options=False
):
    """
    Configure a structuredlogger based on command line arguments.

    The created structuredlogger will also be set as the default logger, and
    can be retrieved with :py:func:`~mozlog.get_default_logger`.

    :param logger: A StructuredLogger instance or string name. If a string, a
                   new StructuredLogger instance will be created using
                   `logger` as the name.
    :param args: A dictionary of {argument_name:value} produced from
                 parsing the command line arguments for the application
    :param defaults: A dictionary of {formatter name: output stream} to apply
                     when there is no logging supplied on the command line. If
                     this isn't supplied, reasonable defaults are chosen
                     (coloured mach formatting if stdout is a terminal, or raw
                     logs otherwise).
    :param formatter_defaults: A dictionary of {option_name: default_value} to provide
                               to the formatters in the absence of command line overrides.
    :param allow_unused_options: passed through to :func:`setup_handlers`.
    :rtype: StructuredLogger
    """

    if not isinstance(logger, StructuredLogger):
        logger = StructuredLogger(logger)
        # The likely intent when using this function is to get a brand new
        # logger, so reset state in case it was previously initialized.
        logger.reset_state()

    # Keep track of any options passed for formatters.
    formatter_options = {}
    # Keep track of formatters and list of streams specified.
    formatters = defaultdict(list)
    found = False
    found_stdout_logger = False
    if args is None:
        args = {}
    if not isinstance(args, dict):
        # Accept an argparse Namespace (or similar) as well as a dict.
        args = vars(args)

    if defaults is None:
        # NOTE(review): the tty check uses sys.__stdout__ (the original
        # process stdout) rather than sys.stdout, presumably so a replaced
        # sys.stdout doesn't change the decision — confirm.
        if sys.__stdout__.isatty():
            defaults = {"mach": sys.stdout}
        else:
            defaults = {"raw": sys.stdout}

    for name, values in six.iteritems(args):
        parts = name.split("_")
        if len(parts) > 3:
            continue
        # Our args will be ['log', <formatter>]
        # or ['log', <formatter>, <option>]
        # or ['valgrind']
        if parts[0] == "log" and values is not None:
            if len(parts) == 1 or parts[1] not in log_formatters:
                continue
            if len(parts) == 2:
                # --log-<formatter> destination(s): strings become streams.
                _, formatter = parts
                for value in values:
                    found = True
                    if isinstance(value, six.string_types):
                        value = log_file(value)
                    if value == sys.stdout:
                        found_stdout_logger = True
                    formatters[formatter].append(value)
            if len(parts) == 3:
                # --log-<formatter>-<option> value.
                _, formatter, opt = parts
                if formatter not in formatter_options:
                    formatter_options[formatter] = default_formatter_options(
                        formatter, formatter_defaults
                    )
                formatter_options[formatter][opt] = values

    # If there is no user-specified logging, go with the default options
    if not found:
        for name, value in six.iteritems(defaults):
            formatters[name].append(value)

    # User logging was given, but nothing writes to stdout: still attach
    # any default formatter that targets stdout.
    elif not found_stdout_logger and sys.stdout in list(defaults.values()):
        for name, value in six.iteritems(defaults):
            if value == sys.stdout:
                formatters[name].append(value)

    # Every active formatter gets an options dict, defaulted if the
    # command line supplied none.
    for name in formatters:
        if name not in formatter_options:
            formatter_options[name] = default_formatter_options(
                name, formatter_defaults
            )

    # If the user specified --valgrind, add it as an option for all formatters
    if args.get("valgrind", None) is not None:
        for name in formatters:
            formatter_options[name]["valgrind"] = True
    setup_handlers(logger, formatters, formatter_options, allow_unused_options)
    set_default_logger(logger)

    return logger
diff --git a/testing/mozbase/mozlog/mozlog/formatters/__init__.py b/testing/mozbase/mozlog/mozlog/formatters/__init__.py
new file mode 100644
index 0000000000..962fb92ca4
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/__init__.py
@@ -0,0 +1,32 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from .errorsummary import ErrorSummaryFormatter
+from .grouping import GroupingFormatter
+from .html import HTMLFormatter
+from .machformatter import MachFormatter
+from .tbplformatter import TbplFormatter
+from .unittest import UnittestFormatter
+from .xunit import XUnitFormatter
+
+try:
+ import ujson as json
+except ImportError:
+ import json
+
+
def JSONFormatter():
    """Return a formatter callable that emits one JSON object per line."""

    def format(data):
        return json.dumps(data) + "\n"

    return format
+
+
# Public API of mozlog.formatters; mirrors the imports above plus the
# JSONFormatter factory defined in this module.
__all__ = [
    "UnittestFormatter",
    "XUnitFormatter",
    "HTMLFormatter",
    "MachFormatter",
    "TbplFormatter",
    "ErrorSummaryFormatter",
    "JSONFormatter",
    "GroupingFormatter",
]
diff --git a/testing/mozbase/mozlog/mozlog/formatters/base.py b/testing/mozbase/mozlog/mozlog/formatters/base.py
new file mode 100644
index 0000000000..407514b741
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/base.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from mozlog.handlers.messagehandler import MessageHandler
+
+from ..reader import LogHandler
+
+
class BaseFormatter(LogHandler):
    """Base class for implementing non-trivial formatters.

    Subclasses are expected to provide a method for each action type they
    wish to handle, each taking a single argument for the test data.
    For example a trivial subclass that just produces the id of each test as
    it starts might be::

        class StartIdFormatter(BaseFormatter):
            def test_start(self, data):
                # For simplicity in the example pretend the id is always a string
                return data["test"]
    """

    def __init__(self):
        # Handles control messages directed at this formatter (see
        # mozlog.handlers.messagehandler.MessageHandler).
        self.message_handler = MessageHandler()
diff --git a/testing/mozbase/mozlog/mozlog/formatters/errorsummary.py b/testing/mozbase/mozlog/mozlog/formatters/errorsummary.py
new file mode 100644
index 0000000000..fcedb7ebd4
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/errorsummary.py
@@ -0,0 +1,208 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import os
+from collections import defaultdict
+
+from .base import BaseFormatter
+
+
class ErrorSummaryFormatter(BaseFormatter):
    """Formatter emitting one JSON object per line, summarizing unexpected
    test results, per-group outcomes, crashes, shutdown failures and lint
    issues.

    Passing results are only emitted when the MOZLOG_DUMP_ALL_TESTS
    environment variable is set.
    """

    def __init__(self):
        # Maps test id -> group name; populated from suite_start.
        self.test_to_group = {}
        # Per-group bookkeeping. "start" holds the start time of the test
        # currently running in that group (None when none is running).
        self.groups = defaultdict(
            lambda: {
                "status": None,
                "test_times": [],
                "start": None,
                "end": None,
            }
        )
        # Number of log messages processed; recorded on every record.
        self.line_count = 0
        self.dump_passing_tests = False

        if os.environ.get("MOZLOG_DUMP_ALL_TESTS", False):
            self.dump_passing_tests = True

    def __call__(self, data):
        rv = BaseFormatter.__call__(self, data)
        self.line_count += 1
        return rv

    def _output(self, data_type, data):
        """Tag *data* with its action type and line number and serialize it
        as a single JSON line."""
        data["action"] = data_type
        data["line"] = self.line_count
        return "%s\n" % json.dumps(data)

    def _output_test(self, test, subtest, item):
        """Build and emit a test_result record for *test*/*subtest*."""
        data = {
            "test": test,
            "subtest": subtest,
            "group": self.test_to_group.get(test, ""),
            "status": item["status"],
            "expected": item["expected"],
            "message": item.get("message"),
            "stack": item.get("stack"),
            "known_intermittent": item.get("known_intermittent", []),
        }
        return self._output("test_result", data)

    def _get_group_result(self, group, item):
        """Fold *item*'s status into the running result for *group*.

        ERROR is sticky; otherwise any expected (or known-intermittent)
        result marks the group OK, and SKIP only applies while the group
        has no other result.
        """
        group_info = self.groups[group]
        result = group_info["status"]

        if result == "ERROR":
            return result

        # If status == expected, we delete item[expected]
        test_status = item["status"]
        test_expected = item.get("expected", test_status)
        known_intermittent = item.get("known_intermittent", [])

        if test_status == "SKIP":
            # A skipped test should not contribute to the group runtime.
            self.groups[group]["start"] = None
            if result is None:
                result = "SKIP"
        elif test_status == test_expected or test_status in known_intermittent:
            # If the test status is expected, or it's a known intermittent
            # the group has at least one passing test
            result = "OK"
        else:
            result = "ERROR"

        return result

    def _clean_test_name(self, test):
        """Strip decorations such as "(finished)" from a test name."""
        retVal = test
        # remove extra stuff like "(finished)"
        if "(finished)" in test:
            retVal = test.split(" ")[0]
        return retVal

    def suite_start(self, item):
        """Record the test -> group mapping and emit the group list."""
        self.test_to_group = {v: k for k in item["tests"] for v in item["tests"][k]}
        return self._output("test_groups", {"groups": list(item["tests"].keys())})

    def suite_end(self, data):
        """Emit one group_result record per group with its summed duration."""
        output = []
        for group, info in self.groups.items():
            duration = sum(info["test_times"])

            output.append(
                self._output(
                    "group_result",
                    {
                        "group": group,
                        "status": info["status"],
                        "duration": duration,
                    },
                )
            )

        return "".join(output)

    def test_start(self, item):
        """Note the start time of the first running test in a group."""
        group = item.get(
            "group", self.test_to_group.get(self._clean_test_name(item["test"]), None)
        )
        if group and self.groups[group]["start"] is None:
            self.groups[group]["start"] = item["time"]

    def test_status(self, item):
        """Update the group result and emit unexpected subtest results."""
        group = item.get(
            "group", self.test_to_group.get(self._clean_test_name(item["test"]), None)
        )
        if group:
            self.groups[group]["status"] = self._get_group_result(group, item)

        # Expected results carry no "expected" key; skip them unless dumping.
        if not self.dump_passing_tests and "expected" not in item:
            return

        if item.get("expected", "") == "":
            item["expected"] = item["status"]

        return self._output_test(
            self._clean_test_name(item["test"]), item["subtest"], item
        )

    def test_end(self, item):
        """Close out a test: update group status/duration, emit if unexpected."""
        group = item.get(
            "group", self.test_to_group.get(self._clean_test_name(item["test"]), None)
        )
        if group:
            self.groups[group]["status"] = self._get_group_result(group, item)
            if self.groups[group]["start"]:
                self.groups[group]["test_times"].append(
                    item["time"] - self.groups[group]["start"]
                )
                self.groups[group]["start"] = None

        if not self.dump_passing_tests and "expected" not in item:
            return

        if item.get("expected", "") == "":
            item["expected"] = item["status"]

        return self._output_test(self._clean_test_name(item["test"]), None, item)

    def log(self, item):
        """Emit only ERROR/CRITICAL log messages."""
        if item["level"] not in ("ERROR", "CRITICAL"):
            return

        data = {"level": item["level"], "message": item["message"]}
        return self._output("log", data)

    def shutdown_failure(self, item):
        """Record a shutdown failure and mark its group as FAIL."""
        data = {"status": "FAIL", "test": item["group"], "message": item["message"]}
        # NOTE(review): assumes item["group"] always ends with a known group
        # name; an unmatched group raises IndexError here — confirm callers
        # guarantee this.
        data["group"] = [g for g in self.groups if item["group"].endswith(g)][0]
        self.groups[data["group"]]["status"] = "FAIL"
        return self._output("log", data)

    def crash(self, item):
        """Emit a crash record and update the owning group's status."""
        data = {
            "test": item.get("test"),
            "signature": item["signature"],
            "stackwalk_stdout": item.get("stackwalk_stdout"),
            "stackwalk_stderr": item.get("stackwalk_stderr"),
        }

        if item.get("test"):
            data["group"] = self.test_to_group.get(
                self._clean_test_name(item["test"]), ""
            )
            if data["group"] == "":
                # item['test'] could be the group name, not a test name
                if self._clean_test_name(item["test"]) in self.groups:
                    data["group"] = self._clean_test_name(item["test"])

            # unlike test group summary, if we crash expect error unless expected
            if (
                (
                    "expected" in item
                    and "status" in item
                    and item["status"] in item["expected"]
                )
                or ("expected" in item and "CRASH" == item["expected"])
                or "status" in item
                and item["status"] in item.get("known_intermittent", [])
            ):
                self.groups[data["group"]]["status"] = "PASS"
            else:
                self.groups[data["group"]]["status"] = "ERROR"

        return self._output("crash", data)

    def lint(self, item):
        """Emit a lint record.

        Fixed: the built record was previously passed to _output but the
        result was discarded (no return), so lint messages never reached
        the output stream, unlike every other handler in this class.
        """
        data = {
            "level": item["level"],
            "path": item["path"],
            "message": item["message"],
            "lineno": item["lineno"],
            "column": item.get("column"),
            "rule": item.get("rule"),
            "linter": item.get("linter"),
        }
        return self._output("lint", data)
diff --git a/testing/mozbase/mozlog/mozlog/formatters/grouping.py b/testing/mozbase/mozlog/mozlog/formatters/grouping.py
new file mode 100644
index 0000000000..89f5d24783
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/grouping.py
@@ -0,0 +1,391 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import collections
+import os
+import platform
+import subprocess
+import sys
+
+import six
+
+from mozlog.formatters import base
+
+DEFAULT_MOVE_UP_CODE = "\x1b[A"
+DEFAULT_CLEAR_EOL_CODE = "\x1b[K"
+
+
+class GroupingFormatter(base.BaseFormatter):
+ """Formatter designed to produce unexpected test results grouped
+ together in a readable format."""
+
+ def __init__(self):
+ super(GroupingFormatter, self).__init__()
+ self.number_of_tests = 0
+ self.completed_tests = 0
+ self.need_to_erase_last_line = False
+ self.current_display = ""
+ self.running_tests = {}
+ self.test_output = collections.defaultdict(str)
+ self.subtest_failures = collections.defaultdict(list)
+ self.test_failure_text = ""
+ self.tests_with_failing_subtests = []
+ self.interactive = os.isatty(sys.stdout.fileno())
+ self.show_logs = False
+
+ self.message_handler.register_message_handlers(
+ "show_logs",
+ {
+ "on": self._enable_show_logs,
+ "off": self._disable_show_logs,
+ },
+ )
+
+ # TODO(mrobinson, 8313): We need to add support for Windows terminals here.
+ if self.interactive:
+ self.move_up, self.clear_eol = self.get_move_up_and_clear_eol_codes()
+ if platform.system() != "Windows":
+ self.line_width = int(
+ subprocess.check_output(["stty", "size"]).split()[1]
+ )
+ else:
+ # Until we figure out proper Windows support,
+ # this makes things work well enough to run.
+ self.line_width = 80
+
+ self.expected = {
+ "OK": 0,
+ "PASS": 0,
+ "FAIL": 0,
+ "PRECONDITION_FAILED": 0,
+ "ERROR": 0,
+ "TIMEOUT": 0,
+ "SKIP": 0,
+ "CRASH": 0,
+ }
+
+ self.unexpected_tests = {
+ "OK": [],
+ "PASS": [],
+ "FAIL": [],
+ "PRECONDITION_FAILED": [],
+ "ERROR": [],
+ "TIMEOUT": [],
+ "CRASH": [],
+ }
+
+ # Follows the format of {(<test>, <subtest>): <data>}, where
+ # (<test>, None) represents a top level test.
+ self.known_intermittent_results = {}
+
+ def _enable_show_logs(self):
+ self.show_logs = True
+
+ def _disable_show_logs(self):
+ self.show_logs = False
+
+ def get_move_up_and_clear_eol_codes(self):
+ try:
+ import blessed
+ except ImportError:
+ return DEFAULT_MOVE_UP_CODE, DEFAULT_CLEAR_EOL_CODE
+
+ try:
+ self.terminal = blessed.Terminal()
+ return self.terminal.move_up, self.terminal.clear_eol
+ except Exception as exception:
+ sys.stderr.write(
+ "GroupingFormatter: Could not get terminal "
+ "control characters: %s\n" % exception
+ )
+ return DEFAULT_MOVE_UP_CODE, DEFAULT_CLEAR_EOL_CODE
+
+ def text_to_erase_display(self):
+ if not self.interactive or not self.current_display:
+ return ""
+ return (self.move_up + self.clear_eol) * self.current_display.count("\n")
+
+ def generate_output(self, text=None, new_display=None):
+ if not self.interactive:
+ return text
+
+ output = self.text_to_erase_display()
+ if text:
+ output += text
+ if new_display is not None:
+ self.current_display = new_display
+ return output + self.current_display
+
+ def build_status_line(self):
+ if self.number_of_tests == 0:
+ new_display = " [%i] " % self.completed_tests
+ else:
+ new_display = " [%i/%i] " % (self.completed_tests, self.number_of_tests)
+
+ if self.running_tests:
+ indent = " " * len(new_display)
+ if self.interactive:
+ max_width = self.line_width - len(new_display)
+ else:
+ max_width = sys.maxsize
+ return (
+ new_display
+ + ("\n%s" % indent).join(
+ val[:max_width] for val in self.running_tests.values()
+ )
+ + "\n"
+ )
+ else:
+ return new_display + "No tests running.\n"
+
+ def suite_start(self, data):
+ self.number_of_tests = sum(
+ len(tests) for tests in six.itervalues(data["tests"])
+ )
+ self.start_time = data["time"]
+
+ if self.number_of_tests == 0:
+ return "Running tests in %s\n\n" % data["source"]
+ else:
+ return "Running %i tests in %s\n\n" % (
+ self.number_of_tests,
+ data["source"],
+ )
+
+ def test_start(self, data):
+ self.running_tests[data["thread"]] = data["test"]
+ return self.generate_output(text=None, new_display=self.build_status_line())
+
+ def wrap_and_indent_lines(self, lines, indent):
+ assert len(lines) > 0
+
+ output = indent + "\u25B6 %s\n" % lines[0]
+ for line in lines[1:-1]:
+ output += indent + "\u2502 %s\n" % line
+ if len(lines) > 1:
+ output += indent + "\u2514 %s\n" % lines[-1]
+ return output
+
+ def get_lines_for_unexpected_result(
+ self, test_name, status, expected, message, stack
+ ):
+ # Test names sometimes contain control characters, which we want
+ # to be printed in their raw form, and not their interpreted form.
+ test_name = test_name.encode("unicode-escape").decode("utf-8")
+
+ if expected:
+ expected_text = " [expected %s]" % expected
+ else:
+ expected_text = ""
+
+ lines = ["%s%s %s" % (status, expected_text, test_name)]
+ if message:
+ lines.append(" \u2192 %s" % message)
+ if stack:
+ lines.append("")
+ lines += [stackline for stackline in stack.splitlines()]
+ return lines
+
+ def get_lines_for_known_intermittents(self, known_intermittent_results):
+ lines = []
+
+ for (test, subtest), data in six.iteritems(self.known_intermittent_results):
+ status = data["status"]
+ known_intermittent = ", ".join(data["known_intermittent"])
+ expected = " [expected %s, known intermittent [%s]" % (
+ data["expected"],
+ known_intermittent,
+ )
+ lines += [
+ "%s%s %s%s"
+ % (
+ status,
+ expected,
+ test,
+ (", %s" % subtest) if subtest is not None else "",
+ )
+ ]
+ output = self.wrap_and_indent_lines(lines, " ") + "\n"
+ return output
+
+ def get_output_for_unexpected_subtests(self, test_name, unexpected_subtests):
+ if not unexpected_subtests:
+ return ""
+
+ def add_subtest_failure(lines, subtest, stack=None):
+ lines += self.get_lines_for_unexpected_result(
+ subtest.get("subtest", None),
+ subtest.get("status", None),
+ subtest.get("expected", None),
+ subtest.get("message", None),
+ stack,
+ )
+
+ def make_subtests_failure(test_name, subtests, stack=None):
+ lines = ["Unexpected subtest result in %s:" % test_name]
+ for subtest in subtests[:-1]:
+ add_subtest_failure(lines, subtest, None)
+ add_subtest_failure(lines, subtests[-1], stack)
+ return self.wrap_and_indent_lines(lines, " ") + "\n"
+
+ # Organize the failures by stack trace so we don't print the same stack trace
+ # more than once. They are really tall and we don't want to flood the screen
+ # with duplicate information.
+ output = ""
+ failures_by_stack = collections.defaultdict(list)
+ for failure in unexpected_subtests:
+ # Print stackless results first. They are all separate.
+ if "stack" not in failure:
+ output += make_subtests_failure(test_name, [failure], None)
+ else:
+ failures_by_stack[failure["stack"]].append(failure)
+
+ for stack, failures in six.iteritems(failures_by_stack):
+ output += make_subtests_failure(test_name, failures, stack)
+ return output
+
+ def test_end(self, data):
+ self.completed_tests += 1
+ test_status = data["status"]
+ test_name = data["test"]
+ known_intermittent_statuses = data.get("known_intermittent", [])
+ subtest_failures = self.subtest_failures.pop(test_name, [])
+ if "expected" in data and test_status not in known_intermittent_statuses:
+ had_unexpected_test_result = True
+ else:
+ had_unexpected_test_result = False
+
+ del self.running_tests[data["thread"]]
+ new_display = self.build_status_line()
+
+ if not had_unexpected_test_result and not subtest_failures:
+ self.expected[test_status] += 1
+ if self.interactive:
+ return self.generate_output(text=None, new_display=new_display)
+ else:
+ return self.generate_output(
+ text=" %s\n" % test_name, new_display=new_display
+ )
+
+ if test_status in known_intermittent_statuses:
+ self.known_intermittent_results[(test_name, None)] = data
+
+ # If the test crashed or timed out, we also include any process output,
+ # because there is a good chance that the test produced a stack trace
+ # or other error messages.
+ if test_status in ("CRASH", "TIMEOUT"):
+ stack = self.test_output[test_name] + data.get("stack", "")
+ else:
+ stack = data.get("stack", None)
+
+ output = ""
+ if had_unexpected_test_result:
+ self.unexpected_tests[test_status].append(data)
+ lines = self.get_lines_for_unexpected_result(
+ test_name,
+ test_status,
+ data.get("expected", None),
+ data.get("message", None),
+ stack,
+ )
+ output += self.wrap_and_indent_lines(lines, " ") + "\n"
+
+ if subtest_failures:
+ self.tests_with_failing_subtests.append(test_name)
+ output += self.get_output_for_unexpected_subtests(
+ test_name, subtest_failures
+ )
+ self.test_failure_text += output
+
+ return self.generate_output(text=output, new_display=new_display)
+
+ def test_status(self, data):
+ if "expected" in data and data["status"] not in data.get(
+ "known_intermittent", []
+ ):
+ self.subtest_failures[data["test"]].append(data)
+ elif data["status"] in data.get("known_intermittent", []):
+ self.known_intermittent_results[(data["test"], data["subtest"])] = data
+
+ def suite_end(self, data):
+ self.end_time = data["time"]
+
+ if not self.interactive:
+ output = "\n"
+ else:
+ output = ""
+
+ output += "Ran %i tests finished in %.1f seconds.\n" % (
+ self.completed_tests,
+ (self.end_time - self.start_time) / 1000.0,
+ )
+ output += " \u2022 %i ran as expected. %i tests skipped.\n" % (
+ sum(self.expected.values()),
+ self.expected["SKIP"],
+ )
+ if self.known_intermittent_results:
+ output += " \u2022 %i known intermittent results.\n" % (
+ len(self.known_intermittent_results)
+ )
+
+ def text_for_unexpected_list(text, section):
+ tests = self.unexpected_tests[section]
+ if not tests:
+ return ""
+ return " \u2022 %i tests %s\n" % (len(tests), text)
+
+ output += text_for_unexpected_list("crashed unexpectedly", "CRASH")
+ output += text_for_unexpected_list("had errors unexpectedly", "ERROR")
+ output += text_for_unexpected_list("failed unexpectedly", "FAIL")
+ output += text_for_unexpected_list(
+ "precondition failed unexpectedly", "PRECONDITION_FAILED"
+ )
+ output += text_for_unexpected_list("timed out unexpectedly", "TIMEOUT")
+ output += text_for_unexpected_list("passed unexpectedly", "PASS")
+ output += text_for_unexpected_list("unexpectedly okay", "OK")
+
+ num_with_failing_subtests = len(self.tests_with_failing_subtests)
+ if num_with_failing_subtests:
+ output += (
+ " \u2022 %i tests had unexpected subtest results\n"
+ % num_with_failing_subtests
+ )
+ output += "\n"
+
+ # Repeat failing test output, so that it is easier to find, since the
+ # non-interactive version prints all the test names.
+ if not self.interactive and self.test_failure_text:
+ output += "Tests with unexpected results:\n" + self.test_failure_text
+
+ if self.known_intermittent_results:
+ results = self.get_lines_for_known_intermittents(
+ self.known_intermittent_results
+ )
+ output += "Tests with known intermittent results:\n" + results
+
+ return self.generate_output(text=output, new_display="")
+
+ def process_output(self, data):
+ if data["thread"] not in self.running_tests:
+ return
+ test_name = self.running_tests[data["thread"]]
+ self.test_output[test_name] += data["data"] + "\n"
+
+ def log(self, data):
+ if data.get("component"):
+ message = "%s %s %s" % (data["component"], data["level"], data["message"])
+ else:
+ message = "%s %s" % (data["level"], data["message"])
+ if "stack" in data:
+ message += "\n%s" % data["stack"]
+
+ # We are logging messages that begin with STDERR, because that is how exceptions
+ # in this formatter are indicated.
+ if data["message"].startswith("STDERR"):
+ return self.generate_output(text=message + "\n")
+
+ if data["level"] in ("CRITICAL", "ERROR"):
+ return self.generate_output(text=message + "\n")
+ # Show all messages if show_logs switched on.
+ if self.show_logs:
+ return self.generate_output(text=message + "\n")
diff --git a/testing/mozbase/mozlog/mozlog/formatters/html/__init__.py b/testing/mozbase/mozlog/mozlog/formatters/html/__init__.py
new file mode 100644
index 0000000000..01348f8dfc
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/html/__init__.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from .html import HTMLFormatter
+
+__all__ = ["HTMLFormatter"]
diff --git a/testing/mozbase/mozlog/mozlog/formatters/html/html.py b/testing/mozbase/mozlog/mozlog/formatters/html/html.py
new file mode 100755
index 0000000000..d1b053e6a4
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/html/html.py
@@ -0,0 +1,343 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import base64
+import json
+import os
+from collections import defaultdict
+from datetime import datetime
+
+import six
+
+from .. import base
+
+html = None
+raw = None
+
+if six.PY2:
+ from cgi import escape
+else:
+ from html import escape
+
+base_path = os.path.split(__file__)[0]
+
+
+def do_defered_imports():
+ global html
+ global raw
+
+ from .xmlgen import html, raw
+
+
+class HTMLFormatter(base.BaseFormatter):
+ """Formatter that produces a simple HTML-formatted report."""
+
+ def __init__(self):
+ do_defered_imports()
+ self.suite_name = None
+ self.result_rows = []
+ self.test_count = defaultdict(int)
+ self.start_times = {}
+ self.suite_times = {"start": None, "end": None}
+ self.head = None
+ self.env = {}
+
+ def suite_start(self, data):
+ self.suite_times["start"] = data["time"]
+ self.suite_name = data["source"]
+ with open(os.path.join(base_path, "style.css")) as f:
+ self.head = html.head(
+ html.meta(charset="utf-8"),
+ html.title(data["source"]),
+ html.style(raw(f.read())),
+ )
+
+ date_format = "%d %b %Y %H:%M:%S"
+ version_info = data.get("version_info")
+ if version_info:
+ self.env["Device identifier"] = version_info.get("device_id")
+ self.env["Device firmware (base)"] = version_info.get(
+ "device_firmware_version_base"
+ )
+ self.env["Device firmware (date)"] = (
+ datetime.utcfromtimestamp(
+ int(version_info.get("device_firmware_date"))
+ ).strftime(date_format)
+ if "device_firmware_date" in version_info
+ else None
+ )
+ self.env["Device firmware (incremental)"] = version_info.get(
+ "device_firmware_version_incremental"
+ )
+ self.env["Device firmware (release)"] = version_info.get(
+ "device_firmware_version_release"
+ )
+ self.env["Gaia date"] = (
+ datetime.utcfromtimestamp(int(version_info.get("gaia_date"))).strftime(
+ date_format
+ )
+ if "gaia_date" in version_info
+ else None
+ )
+ self.env["Gecko version"] = version_info.get("application_version")
+ self.env["Gecko build"] = version_info.get("application_buildid")
+
+ if version_info.get("application_changeset"):
+ self.env["Gecko revision"] = version_info.get("application_changeset")
+ if version_info.get("application_repository"):
+ self.env["Gecko revision"] = html.a(
+ version_info.get("application_changeset"),
+ href="/rev/".join(
+ [
+ version_info.get("application_repository"),
+ version_info.get("application_changeset"),
+ ]
+ ),
+ target="_blank",
+ )
+
+ if version_info.get("gaia_changeset"):
+ self.env["Gaia revision"] = html.a(
+ version_info.get("gaia_changeset")[:12],
+ href="https://github.com/mozilla-b2g/gaia/commit/%s"
+ % version_info.get("gaia_changeset"),
+ target="_blank",
+ )
+
+ device_info = data.get("device_info")
+ if device_info:
+ self.env["Device uptime"] = device_info.get("uptime")
+ self.env["Device memory"] = device_info.get("memtotal")
+ self.env["Device serial"] = device_info.get("id")
+
+ def suite_end(self, data):
+ self.suite_times["end"] = data["time"]
+ return self.generate_html()
+
+ def test_start(self, data):
+ self.start_times[data["test"]] = data["time"]
+
+ def test_end(self, data):
+ self.make_result_html(data)
+
+ def make_result_html(self, data):
+ tc_time = (data["time"] - self.start_times.pop(data["test"])) / 1000.0
+ additional_html = []
+ debug = data.get("extra", {})
+ # Add support for log exported from wptrunner. The structure of
+ # reftest_screenshots is listed in wptrunner/executors/base.py.
+ if debug.get("reftest_screenshots"):
+ log_data = debug.get("reftest_screenshots", {})
+ debug = {
+ "image1": "data:image/png;base64," + log_data[0].get("screenshot", {}),
+ "image2": "data:image/png;base64," + log_data[2].get("screenshot", {}),
+ "differences": "Not Implemented",
+ }
+
+ links_html = []
+
+ status = status_name = data["status"]
+ expected = data.get("expected", status)
+ known_intermittent = data.get("known_intermittent", [])
+
+ if status != expected and status not in known_intermittent:
+ status_name = "UNEXPECTED_" + status
+ elif status in known_intermittent:
+ status_name = "KNOWN_INTERMITTENT"
+ elif status not in ("PASS", "SKIP"):
+ status_name = "EXPECTED_" + status
+
+ self.test_count[status_name] += 1
+
+ if status in ["SKIP", "FAIL", "PRECONDITION_FAILED", "ERROR"]:
+ if debug.get("differences"):
+ images = [
+ ("image1", "Image 1 (test)"),
+ ("image2", "Image 2 (reference)"),
+ ]
+ for title, description in images:
+ screenshot = "%s" % debug[title]
+ additional_html.append(
+ html.div(
+ html.a(html.img(src=screenshot), href="#"),
+ html.br(),
+ html.a(description),
+ class_="screenshot",
+ )
+ )
+
+ if debug.get("screenshot"):
+ screenshot = "%s" % debug["screenshot"]
+ screenshot = "data:image/png;base64," + screenshot
+
+ additional_html.append(
+ html.div(
+ html.a(html.img(src=screenshot), href="#"), class_="screenshot"
+ )
+ )
+
+ for name, content in debug.items():
+ if name in ["screenshot", "image1", "image2"]:
+ if not content.startswith("data:image/png;base64,"):
+ href = "data:image/png;base64,%s" % content
+ else:
+ href = content
+ else:
+ if not isinstance(content, (six.text_type, six.binary_type)):
+ # All types must be json serializable
+ content = json.dumps(content)
+ # Decode to text type if JSON output is byte string
+ if not isinstance(content, six.text_type):
+ content = content.decode("utf-8")
+ # Encode base64 to avoid that some browsers (such as Firefox, Opera)
+ # treats '#' as the start of another link if it is contained in the data URL.
+ if isinstance(content, six.text_type):
+ is_known_utf8 = True
+ content_bytes = six.text_type(content).encode(
+ "utf-8", "xmlcharrefreplace"
+ )
+ else:
+ is_known_utf8 = False
+ content_bytes = content
+
+ meta = ["text/html"]
+ if is_known_utf8:
+ meta.append("charset=utf-8")
+
+ # base64 is ascii only, which means we don't have to care about encoding
+ # in the case where we don't know the encoding of the input
+ b64_bytes = base64.b64encode(content_bytes)
+ b64_text = b64_bytes.decode()
+ href = "data:%s;base64,%s" % (";".join(meta), b64_text)
+ links_html.append(
+ html.a(name.title(), class_=name, href=href, target="_blank")
+ )
+ links_html.append(" ")
+
+ log = html.div(class_="log")
+ output = data.get("stack", "").splitlines()
+ output.extend(data.get("message", "").splitlines())
+ for line in output:
+ separator = line.startswith(" " * 10)
+ if separator:
+ log.append(line[:80])
+ else:
+ if (
+ line.lower().find("error") != -1
+ or line.lower().find("exception") != -1
+ ):
+ log.append(html.span(raw(escape(line)), class_="error"))
+ else:
+ log.append(raw(escape(line)))
+ log.append(html.br())
+ additional_html.append(log)
+
+ self.result_rows.append(
+ html.tr(
+ [
+ html.td(status_name, class_="col-result"),
+ html.td(data["test"], class_="col-name"),
+ html.td("%.2f" % tc_time, class_="col-duration"),
+ html.td(links_html, class_="col-links"),
+ html.td(additional_html, class_="debug"),
+ ],
+ class_=status_name.lower() + " results-table-row",
+ )
+ )
+
+ def generate_html(self):
+ generated = datetime.utcnow()
+ with open(os.path.join(base_path, "main.js")) as main_f:
+ doc = html.html(
+ self.head,
+ html.body(
+ html.script(raw(main_f.read())),
+ html.p(
+ "Report generated on %s at %s"
+ % (
+ generated.strftime("%d-%b-%Y"),
+ generated.strftime("%H:%M:%S"),
+ )
+ ),
+ html.h2("Environment"),
+ html.table(
+ [
+ html.tr(html.td(k), html.td(v))
+ for k, v in sorted(self.env.items())
+ if v
+ ],
+ id="environment",
+ ),
+ html.h2("Summary"),
+ html.p(
+ "%i tests ran in %.1f seconds."
+ % (
+ sum(six.itervalues(self.test_count)),
+ (self.suite_times["end"] - self.suite_times["start"])
+ / 1000.0,
+ ),
+ html.br(),
+ html.span("%i passed" % self.test_count["PASS"], class_="pass"),
+ ", ",
+ html.span(
+ "%i skipped" % self.test_count["SKIP"], class_="skip"
+ ),
+ ", ",
+ html.span(
+ "%i failed" % self.test_count["UNEXPECTED_FAIL"],
+ class_="fail",
+ ),
+ ", ",
+ html.span(
+ "%i errors" % self.test_count["UNEXPECTED_ERROR"],
+ class_="error",
+ ),
+ ".",
+ html.br(),
+ html.span(
+ "%i expected failures" % self.test_count["EXPECTED_FAIL"],
+ class_="expected_fail",
+ ),
+ ", ",
+ html.span(
+ "%i unexpected passes" % self.test_count["UNEXPECTED_PASS"],
+ class_="unexpected_pass",
+ ),
+ ", ",
+ html.span(
+ "%i known intermittent results"
+ % self.test_count["KNOWN_INTERMITTENT"],
+ class_="known_intermittent",
+ ),
+ ".",
+ ),
+ html.h2("Results"),
+ html.table(
+ [
+ html.thead(
+ html.tr(
+ [
+ html.th(
+ "Result", class_="sortable", col="result"
+ ),
+ html.th("Test", class_="sortable", col="name"),
+ html.th(
+ "Duration",
+ class_="sortable numeric",
+ col="duration",
+ ),
+ html.th("Links"),
+ ]
+ ),
+ id="results-table-head",
+ ),
+ html.tbody(self.result_rows, id="results-table-body"),
+ ],
+ id="results-table",
+ ),
+ ),
+ )
+
+ return "<!DOCTYPE html>\n" + doc.unicode(indent=2)
diff --git a/testing/mozbase/mozlog/mozlog/formatters/html/main.js b/testing/mozbase/mozlog/mozlog/formatters/html/main.js
new file mode 100644
index 0000000000..61403f611b
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/html/main.js
@@ -0,0 +1,166 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function toArray(iter) {
+ if (iter === null) {
+ return null;
+ }
+ return Array.prototype.slice.call(iter);
+}
+
+function find(selector, elem) {
+ if (!elem) {
+ elem = document;
+ }
+ return elem.querySelector(selector);
+}
+
+function find_all(selector, elem) {
+ if (!elem) {
+ elem = document;
+ }
+ return toArray(elem.querySelectorAll(selector));
+}
+
+addEventListener("DOMContentLoaded", function () {
+ reset_sort_headers();
+
+ split_debug_onto_two_rows();
+
+ find_all(".col-links a.screenshot").forEach(function (elem) {
+ elem.addEventListener("click", function (event) {
+ var node = elem;
+ while (node && !node.classList.contains("results-table-row")) {
+ node = node.parentNode;
+ }
+ if (node != null) {
+ if (node.nextSibling && node.nextSibling.classList.contains("debug")) {
+ var href = find(".screenshot img", node.nextSibling).src;
+ window.open(href);
+ }
+ }
+ event.preventDefault();
+ });
+ });
+
+ find_all(".screenshot a").forEach(function (elem) {
+ elem.addEventListener("click", function (event) {
+ window.open(find("img", elem).getAttribute("src"));
+ event.preventDefault();
+ });
+ });
+
+ find_all(".sortable").forEach(function (elem) {
+ elem.addEventListener("click", function (event) {
+ toggle_sort_states(elem);
+ var colIndex = toArray(elem.parentNode.childNodes).indexOf(elem);
+ var key = elem.classList.contains("numeric") ? key_num : key_alpha;
+ sort_table(elem, key(colIndex));
+ });
+ });
+});
+
+function sort_table(clicked, key_func) {
+ one_row_for_data();
+ var rows = find_all(".results-table-row");
+ var reversed = !clicked.classList.contains("asc");
+
+ var sorted_rows = sort(rows, key_func, reversed);
+
+ var parent = document.getElementById("results-table-body");
+ sorted_rows.forEach(function (elem) {
+ parent.appendChild(elem);
+ });
+
+ split_debug_onto_two_rows();
+}
+
+function sort(items, key_func, reversed) {
+ var sort_array = items.map(function (item, i) {
+ return [key_func(item), i];
+ });
+ var multiplier = reversed ? -1 : 1;
+
+ sort_array.sort(function (a, b) {
+ var key_a = a[0];
+ var key_b = b[0];
+ return multiplier * (key_a >= key_b ? 1 : -1);
+ });
+
+ return sort_array.map(function (item) {
+ var index = item[1];
+ return items[index];
+ });
+}
+
+function key_alpha(col_index) {
+ return function (elem) {
+ return elem.childNodes[col_index].firstChild.data.toLowerCase();
+ };
+}
+
+function key_num(col_index) {
+ return function (elem) {
+ return parseFloat(elem.childNodes[col_index].firstChild.data);
+ };
+}
+
+function reset_sort_headers() {
+ find_all(".sort-icon").forEach(function (elem) {
+ elem.remove();
+ });
+ find_all(".sortable").forEach(function (elem) {
+ var icon = document.createElement("div");
+ icon.className = "sort-icon";
+ icon.textContent = "vvv";
+ elem.insertBefore(icon, elem.firstChild);
+ elem.classList.remove("desc", "active");
+ elem.classList.add("asc", "inactive");
+ });
+}
+
+function toggle_sort_states(elem) {
+ // if active, toggle between asc and desc
+ if (elem.classList.contains("active")) {
+ elem.classList.toggle("asc");
+ elem.classList.toggle("desc");
+ }
+
+ // if inactive, reset all other functions and add ascending active
+ if (elem.classList.contains("inactive")) {
+ reset_sort_headers();
+ elem.classList.remove("inactive");
+ elem.classList.add("active");
+ }
+}
+
+function split_debug_onto_two_rows() {
+ find_all("tr.results-table-row").forEach(function (elem) {
+ var new_row = document.createElement("tr");
+ new_row.className = "debug";
+ elem.parentNode.insertBefore(new_row, elem.nextSibling);
+ find_all(".debug", elem).forEach(function (td_elem) {
+ if (find(".log", td_elem)) {
+ new_row.appendChild(td_elem);
+ td_elem.colSpan = 5;
+ } else {
+ td_elem.remove();
+ }
+ });
+ });
+}
+
+function one_row_for_data() {
+ find_all("tr.results-table-row").forEach(function (elem) {
+ if (elem.nextSibling.classList.contains("debug")) {
+ toArray(elem.nextSibling.childNodes).forEach(function (td_elem) {
+ elem.appendChild(td_elem);
+ });
+ } else {
+ var new_td = document.createElement("td");
+ new_td.className = "debug";
+ elem.appendChild(new_td);
+ }
+ });
+}
diff --git a/testing/mozbase/mozlog/mozlog/formatters/html/style.css b/testing/mozbase/mozlog/mozlog/formatters/html/style.css
new file mode 100644
index 0000000000..427742dd1f
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/html/style.css
@@ -0,0 +1,155 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+body {
+ font-family: Helvetica, Arial, sans-serif;
+ font-size: 12px;
+ min-width: 1200px;
+ color: #999;
+}
+h2 {
+ font-size: 16px;
+ color: black;
+}
+
+p {
+ color: black;
+}
+
+a {
+ color: #999;
+}
+
+table {
+ border-collapse: collapse;
+}
+
+/******************************
+ * SUMMARY INFORMATION
+ ******************************/
+
+#environment td {
+ padding: 5px;
+ border: 1px solid #E6E6E6;
+}
+
+#environment tr:nth-child(odd) {
+ background-color: #f6f6f6;
+}
+
+/******************************
+ * TEST RESULT COLORS
+ ******************************/
+span.pass, .pass .col-result {
+ color: green;
+}
+span.expected_fail, .expected_fail .col-result,
+span.expected_skip, .expected_skip .col-result,
+span.skip, .skip .col-result,
+span.known_intermittent, .known_intermittent .col-result {
+ color: orange;
+}
+span.error, .error .col-result,
+span.fail, .fail .col-result,
+span.unexpected_error, .unexpected_error .col-result,
+span.unexpected_fail, .unexpected_fail .col-result,
+span.unexpected_pass, .unexpected_pass .col-result {
+ color: red;
+}
+
+/******************************
+ * RESULTS TABLE
+ *
+ * 1. Table Layout
+ * 2. Debug
+ * 3. Sorting items
+ *
+ ******************************/
+
+/*------------------
+ * 1. Table Layout
+ *------------------*/
+
+#results-table {
+ border: 1px solid #e6e6e6;
+ color: #999;
+ font-size: 12px;
+ width: 100%
+}
+
+#results-table th, #results-table td {
+ padding: 5px;
+ border: 1px solid #E6E6E6;
+ text-align: left
+}
+#results-table th {
+ font-weight: bold
+}
+
+/*------------------
+ * 2. Debug
+ *------------------*/
+
+.log:only-child {
+ height: inherit
+}
+.log {
+ background-color: #e6e6e6;
+ border: 1px solid #e6e6e6;
+ color: black;
+ display: block;
+ font-family: "Courier New", Courier, monospace;
+ height: 230px;
+ overflow-y: scroll;
+ padding: 5px;
+ white-space: pre-wrap
+}
+div.screenshot {
+ border: 1px solid #e6e6e6;
+ float: left;
+ margin-left: 5px;
+ height: 220px
+}
+div.screenshot img {
+ height: 220px
+}
+
+/*if the result is passed or xpassed don't show debug row*/
+.passed + .debug, .unexpected.pass + .debug {
+ display: none;
+}
+
+/*------------------
+ * 3. Sorting items
+ *------------------*/
+.sortable {
+ cursor: pointer;
+}
+
+.sort-icon {
+ font-size: 0px;
+ float: left;
+ margin-right: 5px;
+ margin-top: 5px;
+ /*triangle*/
+ width: 0;
+ height: 0;
+ border-left: 8px solid transparent;
+ border-right: 8px solid transparent;
+}
+
+.inactive .sort-icon {
+ /*finish triangle*/
+ border-top: 8px solid #E6E6E6;
+}
+
+.asc.active .sort-icon {
+ /*finish triangle*/
+ border-bottom: 8px solid #999;
+}
+
+.desc.active .sort-icon {
+ /*finish triangle*/
+ border-top: 8px solid #999;
+}
diff --git a/testing/mozbase/mozlog/mozlog/formatters/html/xmlgen.py b/testing/mozbase/mozlog/mozlog/formatters/html/xmlgen.py
new file mode 100644
index 0000000000..eda2d8d36a
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/html/xmlgen.py
@@ -0,0 +1,310 @@
+"""
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+This file is originally from: https://bitbucket.org/hpk42/py, specifically:
+https://bitbucket.org/hpk42/py/src/980c8d526463958ee7cae678a7e4e9b054f36b94/py/_xmlgen.py?at=default
+by holger krekel, holger at merlinux eu. 2009
+"""
+
+import re
+import sys
+
# Text-handling shims so the rest of the module can treat Python 2 and
# Python 3 uniformly: u() coerces a literal to text, unicode() converts
# arbitrary objects to text.
if sys.version_info >= (3, 0):

    def u(s):
        # Python 3 strings are already text.
        return s

    def unicode(x):
        # Emulate the py2 builtin: prefer a legacy __unicode__ hook.
        converter = getattr(x, "__unicode__", None)
        if converter is not None:
            return converter()
        return str(x)

else:

    def u(s):
        return unicode(s)

    # pylint: disable=W1612
    unicode = unicode
+
+
class NamespaceMetaclass(type):
    """Metaclass that synthesizes tag classes on attribute access.

    Looking up e.g. ``html.div`` creates a new subclass of the
    namespace's ``__tagclass__`` named after the attribute and caches it
    on the namespace, so subsequent lookups return the same class.
    """

    def __getattr__(self, name):
        # Underscore-prefixed lookups are never tag names.
        if name[:1] == "_":
            raise AttributeError(name)
        if self == Namespace:
            raise ValueError("Namespace class is abstract")
        tagspec = self.__tagspec__
        # If the namespace declares a tag whitelist, reject unknown tags.
        if tagspec is not None and name not in tagspec:
            raise AttributeError(name)
        classattr = {}
        if self.__stickyname__:
            # Pin the serialized tag name so subclassing the generated
            # class does not change the emitted element name.
            classattr["xmlname"] = name
        cls = type(name, (self.__tagclass__,), classattr)
        # Cache the generated class on the namespace; __getattr__ will
        # not fire again for this name.
        setattr(self, name, cls)
        return cls
+
+
class Tag(list):
    """An XML element: a list of child nodes plus an ``attr`` bag.

    Positional constructor arguments become children; keyword arguments
    become XML attributes.
    """

    class Attr(object):
        # Plain attribute namespace for the tag's XML attributes.
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    def __init__(self, *args, **kwargs):
        super(Tag, self).__init__(args)
        self.attr = self.Attr(**kwargs)

    def __unicode__(self):
        return self.unicode(indent=0)

    __str__ = __unicode__

    def unicode(self, indent=2):
        """Serialize this tag and its children to a text string."""
        l = []
        SimpleUnicodeVisitor(l.append, indent).visit(self)
        return u("").join(l)

    def __repr__(self):
        name = self.__class__.__name__
        # NOTE(review): %r on the name string yields quotes, e.g.
        # <'div' tag object 123...> — longstanding upstream behavior.
        return "<%r tag object %d>" % (name, id(self))
+
+
# The abstract root namespace: no tag whitelist, plain Tag instances,
# and generated tag classes are not pinned to their original name.
# Attribute access on concrete subclasses is what creates tag classes
# (see NamespaceMetaclass.__getattr__).
Namespace = NamespaceMetaclass(
    "Namespace",
    (object,),
    {
        "__tagspec__": None,
        "__tagclass__": Tag,
        "__stickyname__": False,
    },
)
+
+
class HtmlTag(Tag):
    # Like Tag.unicode, but serializes with HTML rules: HtmlVisitor
    # decides which tags are singletons/inline, and shortempty=False
    # disables the generic <tag/> short form for empty elements.
    def unicode(self, indent=2):
        l = []
        HtmlVisitor(l.append, indent, shortempty=False).visit(self)
        return u("").join(l)
+
+
+# exported plain html namespace
+
+
class html(Namespace):
    """Namespace exposing the standard HTML tags (``html.div``, ...)."""

    __tagclass__ = HtmlTag
    __stickyname__ = True
    # Whitelist of recognised HTML tag names, each mapped to 1 so
    # membership tests stay cheap.
    __tagspec__ = {
        tag: 1
        for tag in (
            "a,abbr,acronym,address,applet,area,b,bdo,big,blink,"
            "blockquote,body,br,button,caption,center,cite,code,col,"
            "colgroup,comment,dd,del,dfn,dir,div,dl,dt,em,embed,"
            "fieldset,font,form,frameset,h1,h2,h3,h4,h5,h6,head,html,"
            "i,iframe,img,input,ins,kbd,label,legend,li,link,listing,"
            "map,marquee,menu,meta,multicol,nobr,noembed,noframes,"
            "noscript,object,ol,optgroup,option,p,pre,q,s,script,"
            "select,small,span,strike,strong,style,sub,sup,table,"
            "tbody,td,textarea,tfoot,th,thead,title,tr,tt,u,ul,xmp,"
            "base,basefont,frame,hr,isindex,param,samp,var"
        ).split(",")
        if tag
    }

    class Style(object):
        """Keyword-style CSS holder; underscores map to hyphens."""

        def __init__(self, **kw):
            for prop, val in kw.items():
                setattr(self, prop.replace("_", "-"), val)
+
+
class raw(object):
    """Marker wrapper for text that must be written to the output
    verbatim, bypassing XML escaping (see SimpleUnicodeVisitor.raw)."""

    def __init__(self, uniobj):
        self.uniobj = uniobj
+
+
class SimpleUnicodeVisitor(object):
    """recursive visitor to write unicode.

    Walks a Tag tree and emits serialized XML fragments through the
    ``write`` callable, dispatching on each node's class name.
    """

    def __init__(self, write, indent=0, curindent=0, shortempty=True):
        # write: callable receiving each emitted text fragment.
        self.write = write
        self.cache = {}  # node class -> resolved visit method
        self.visited = {}  # for detection of recursion
        self.indent = indent  # spaces added per nesting level
        self.curindent = curindent
        self.parents = []
        self.shortempty = shortempty  # short empty tags or not

    def visit(self, node):
        """dispatcher on node's class/bases name."""
        cls = node.__class__
        try:
            visitmethod = self.cache[cls]
        except KeyError:
            # Walk the MRO looking for a handler method named after the
            # class (e.g. Tag, list, raw); fall back to __object.
            for subclass in cls.__mro__:
                visitmethod = getattr(self, subclass.__name__, None)
                if visitmethod is not None:
                    break
            else:
                visitmethod = self.__object
            self.cache[cls] = visitmethod
        visitmethod(node)

    # the default fallback handler is marked private
    # to avoid clashes with the tag name object
    def __object(self, obj):
        # self.write(obj)
        self.write(escape(unicode(obj)))

    def raw(self, obj):
        # raw() contents are emitted verbatim, with no escaping.
        self.write(obj.uniobj)

    def list(self, obj):
        assert id(obj) not in self.visited
        self.visited[id(obj)] = 1
        for elem in obj:
            self.visit(elem)

    def Tag(self, tag):
        assert id(tag) not in self.visited
        # Record the enclosing tag (side effect: mutates the node).
        try:
            tag.parent = self.parents[-1]
        except IndexError:
            tag.parent = None
        self.visited[id(tag)] = 1
        tagname = getattr(tag, "xmlname", tag.__class__.__name__)
        # Block-level tags start on a fresh, indented line.
        if self.curindent and not self._isinline(tagname):
            self.write("\n" + u(" ") * self.curindent)
        if tag:
            # Non-empty tag: open, recurse into children, close.
            self.curindent += self.indent
            self.write(u("<%s%s>") % (tagname, self.attributes(tag)))
            self.parents.append(tag)
            for x in tag:
                self.visit(x)
            self.parents.pop()
            self.write(u("</%s>") % tagname)
            self.curindent -= self.indent
        else:
            # Empty tag: <x/> for singletons, <x></x> otherwise.
            nameattr = tagname + self.attributes(tag)
            if self._issingleton(tagname):
                self.write(u("<%s/>") % (nameattr,))
            else:
                self.write(u("<%s></%s>") % (nameattr, tagname))

    def attributes(self, tag):
        # serialize attributes
        attrlist = dir(tag.attr)
        attrlist.sort()
        l = []
        for name in attrlist:
            res = self.repr_attribute(tag.attr, name)
            if res is not None:
                l.append(res)
        l.extend(self.getstyle(tag))
        return u("").join(l)

    def repr_attribute(self, attrs, name):
        """Return ' name="value"' for one attribute, or None to skip it."""
        # Skip dunder names (__dict__, __class__, ...).
        if name[:2] != "__":
            value = getattr(attrs, name)
            # Trailing underscore lets callers spell reserved words,
            # e.g. class_ -> class.
            if name.endswith("_"):
                name = name[:-1]
            if isinstance(value, raw):
                insert = value.uniobj
            else:
                insert = escape(unicode(value))
            return ' %s="%s"' % (name, insert)

    def getstyle(self, tag):
        """return attribute list suitable for styling."""
        try:
            styledict = tag.style.__dict__
        except AttributeError:
            return []
        else:
            stylelist = [x + ": " + y for x, y in styledict.items()]
            return [u(' style="%s"') % u("; ").join(stylelist)]

    def _issingleton(self, tagname):
        """can (and will) be overridden in subclasses"""
        return self.shortempty

    def _isinline(self, tagname):
        """can (and will) be overridden in subclasses"""
        return False
+
+
class HtmlVisitor(SimpleUnicodeVisitor):
    """Visitor specialising serialization for HTML output."""

    # Tags serialized as <tag/> when they have no children.
    single = dict.fromkeys(
        "br,img,area,param,col,hr,meta,link,base,input,frame".split(","), 1
    )
    # Tags emitted inline, i.e. without a newline/indent before them.
    inline = dict.fromkeys(
        (
            "a abbr acronym b basefont bdo big br cite code dfn em font "
            "i img input kbd label q s samp select small span strike "
            "strong sub sup textarea tt u var"
        ).split(" "),
        1,
    )

    def repr_attribute(self, attrs, name):
        # A class attribute of None is omitted from the output entirely.
        if name == "class_":
            if getattr(attrs, name) is None:
                return
        return super(HtmlVisitor, self).repr_attribute(attrs, name)

    def _issingleton(self, tagname):
        return tagname in self.single

    def _isinline(self, tagname):
        return tagname in self.inline
+
+
class _escape:
    """Callable performing XML escaping in a single regex pass."""

    def __init__(self):
        # Characters requiring escaping, mapped to their XML entities.
        self.escape = {
            u('"'): u("&quot;"),
            u("<"): u("&lt;"),
            u(">"): u("&gt;"),
            u("&"): u("&amp;"),
            u("'"): u("&apos;"),
        }
        # Alternation matching any single escapable character.
        self.charef_rex = re.compile(u("|").join(self.escape.keys()))

    def _replacer(self, match):
        return self.escape[match.group(0)]

    def __call__(self, ustring):
        """xml-escape the given unicode string."""
        ustring = unicode(ustring)
        return self.charef_rex.sub(self._replacer, ustring)


# Module-level singleton used by the serializer visitors above.
escape = _escape()
diff --git a/testing/mozbase/mozlog/mozlog/formatters/machformatter.py b/testing/mozbase/mozlog/mozlog/formatters/machformatter.py
new file mode 100644
index 0000000000..2658df10a9
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/machformatter.py
@@ -0,0 +1,657 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import time
+from functools import reduce
+
+import six
+from mozterm import Terminal
+
+from ..handlers import SummaryHandler
+from . import base
+from .process import strstatus
+from .tbplformatter import TbplFormatter
+
# Mapping of mozlog message categories to terminal capability names;
# each name is looked up on a mozterm Terminal by TerminalColors below.
color_dict = {
    "log_test_status_fail": "red",
    "log_process_output": "blue",
    "log_test_status_pass": "green",
    "log_test_status_unexpected_fail": "red",
    "log_test_status_known_intermittent": "yellow",
    "time": "cyan",
    "action": "yellow",
    "pid": "cyan",
    "heading": "bold_yellow",
    "sub_heading": "yellow",
    "error": "red",
    "warning": "yellow",
    "bold": "bold",
    "grey": "grey",
    "normal": "normal",
    "bright_black": "bright_black",
}

# ANSI sequence appended after formatted text: ESC ( B selects the
# default G0 character set and ESC [ m resets all SGR attributes
# (equivalent to terminfo's sgr0), so later text is unformatted.
DEFAULT = "\x1b(B\x1b[m"
+
+
def format_seconds(total):
    """Format number of seconds to MM:SS.DD form."""
    minutes, seconds = divmod(total, 60)
    return f"{int(minutes):2d}:{seconds:05.2f}"
+
+
class TerminalColors(object):
    """Lookup table mapping mozlog style names to formatting callables.

    For each (name -> terminal capability) pair in ``color_dict`` an
    attribute is installed on the instance; calling it wraps a string in
    the corresponding terminal formatting.
    """

    def __init__(self, term, color_dict):
        for key, value in color_dict.items():
            attribute = getattr(term, value)
            # In Blessed, these attributes aren't always callable. We can assume
            # that if they're not, they're just the raw ANSI Escape Sequences.
            # This TerminalColors class is basically just a lookup table for
            # what function to call to format/color an input string a certain way.
            # So if the attribute above is a callable, we can just proceed, but
            # if it's not, we need to create our own function that prepends the
            # raw ANSI Escape Sequences to the input string, so that everything
            # has the same behavior. We append DEFAULT to reset to no formatting
            # at the end of our string, to prevent text that comes afterwards
            # from inheriting the prepended formatting.
            if not callable(attribute):
                # BUGFIX: bind the current escape sequence as a default
                # argument. A plain closure over `attribute` is
                # late-binding: every wrapper would read whatever
                # `attribute` holds after the loop finishes (including
                # the wrapper function itself, once reassigned below),
                # applying the wrong formatting to every non-callable
                # capability.
                def apply_formatting(text, _prefix=attribute):
                    return _prefix + text + DEFAULT

                attribute = apply_formatting
            setattr(self, key, attribute)
+
+
class MachFormatter(base.BaseFormatter):
    """Human-readable formatter used for `mach` command output.

    Every emitted line is prefixed with a cyan timestamp, actions and
    statuses are colorized via a TerminalColors lookup table, and
    per-test subtest counts are buffered so test_end can report
    "Subtests passed i/n" summaries. Suite summaries are printed either
    at suite_end or, when summary_on_shutdown is set, at shutdown.
    """

    def __init__(
        self,
        start_time=None,
        write_interval=False,
        write_times=True,
        terminal=None,  # NOTE(review): accepted but unused here — kept for API compatibility?
        disable_colors=False,
        summary_on_shutdown=False,
        verbose=False,
        enable_screenshot=False,
        **kwargs
    ):
        super(MachFormatter, self).__init__(**kwargs)

        if start_time is None:
            start_time = time.time()
        # All timestamps are tracked in integer milliseconds.
        start_time = int(start_time * 1000)
        self.start_time = start_time
        # When True, _time() reports the delta since the previous message
        # instead of the elapsed time since start_time.
        self.write_interval = write_interval
        self.write_times = write_times
        # Per-test subtest counters: test id -> {count, unexpected, pass}.
        self.status_buffer = {}
        self.has_unexpected = {}
        self.last_time = None
        self.color_formatter = TerminalColors(
            Terminal(disable_styling=disable_colors), color_dict
        )
        self.verbose = verbose
        self._known_pids = set()
        # Created lazily in test_end, only when screenshots are formatted.
        self.tbpl_formatter = None
        self.enable_screenshot = enable_screenshot
        self.summary = SummaryHandler()
        self.summary_on_shutdown = summary_on_shutdown

        # Allow external control messages to toggle colors and the
        # shutdown summary at runtime.
        message_handlers = {
            "colors": {
                "on": self._enable_colors,
                "off": self._disable_colors,
            },
            "summary_on_shutdown": {
                "on": self._enable_summary_on_shutdown,
                "off": self._disable_summary_on_shutdown,
            },
        }

        for topic, handlers in message_handlers.items():
            self.message_handler.register_message_handlers(topic, handlers)

    def __call__(self, data):
        """Format one log message, prefixed with a timestamp.

        Returns None when the underlying formatter suppresses the message.
        """
        # Feed every message to the summary handler regardless of output.
        self.summary(data)

        s = super(MachFormatter, self).__call__(data)
        if s is None:
            return

        # NOTE: the local `time` deliberately shadows the module import
        # within this method.
        time = self.color_formatter.time(format_seconds(self._time(data)))
        return "%s %s\n" % (time, s)

    def _enable_colors(self):
        self.disable_colors = False

    def _disable_colors(self):
        self.disable_colors = True

    def _enable_summary_on_shutdown(self):
        self.summary_on_shutdown = True

    def _disable_summary_on_shutdown(self):
        self.summary_on_shutdown = False

    def _get_test_id(self, data):
        """Return the message's test id, normalised to a hashable type."""
        test_id = data.get("test")
        if isinstance(test_id, list):
            # Lists arrive for multi-part ids (e.g. reftest pairs).
            test_id = tuple(test_id)
        return test_id

    def _get_file_name(self, test_id):
        """Render a test id (string or tuple) as a single display string."""
        if isinstance(test_id, (str, six.text_type)):
            return test_id

        if isinstance(test_id, tuple):
            return "".join(test_id)

        assert False, "unexpected test_id"

    def suite_start(self, data):
        """Announce the suite and the total number of tests in it."""
        num_tests = reduce(lambda x, y: x + len(y), six.itervalues(data["tests"]), 0)
        action = self.color_formatter.action(data["action"].upper())
        name = ""
        if "name" in data:
            name = " %s -" % (data["name"],)
        return "%s:%s running %i tests" % (action, name, num_tests)

    def suite_end(self, data):
        """Emit the per-suite summary unless it is deferred to shutdown."""
        action = self.color_formatter.action(data["action"].upper())
        rv = [action]
        if not self.summary_on_shutdown:
            rv.append(
                self._format_suite_summary(
                    self.summary.current_suite, self.summary.current
                )
            )
        return "\n".join(rv)

    def _format_expected(self, status, expected, known_intermittent=[]):
        """Colorize `status` according to whether it matched `expected`
        (or is listed as a known intermittent)."""
        # NOTE(review): mutable default argument — safe here because it is
        # only read, but fragile if this method is ever changed to mutate it.
        if status == expected:
            color = self.color_formatter.log_test_status_pass
            if expected not in ("PASS", "OK"):
                # An expected failure: matched, but still failure-colored.
                color = self.color_formatter.log_test_status_fail
                status = "EXPECTED-%s" % status
        else:
            if status in known_intermittent:
                color = self.color_formatter.log_test_status_known_intermittent
                status = "KNOWN-INTERMITTENT-%s" % status
            else:
                color = self.color_formatter.log_test_status_fail
                if status in ("PASS", "OK"):
                    status = "UNEXPECTED-%s" % status
        return color(status)

    def _format_status(self, test, data):
        """One-line rendering of a (sub)test status, message and stack."""
        name = data.get("subtest", test)
        rv = "%s %s" % (
            self._format_expected(
                data["status"],
                data.get("expected", data["status"]),
                data.get("known_intermittent", []),
            ),
            name,
        )
        if "message" in data:
            rv += " - %s" % data["message"]
        if "stack" in data:
            rv += self._format_stack(data["stack"])
        return rv

    def _format_stack(self, stack):
        # Stacks are dimmed so they don't drown out the status line.
        return "\n%s\n" % self.color_formatter.bright_black(stack.strip("\n"))

    def _format_suite_summary(self, suite, summary):
        """Render the multi-line summary block for one suite: counts,
        skips, unexpected results, known intermittents and harness errors."""
        count = summary["counts"]
        logs = summary["unexpected_logs"]
        intermittent_logs = summary["intermittent_logs"]
        harness_errors = summary["harness_errors"]

        rv = [
            "",
            self.color_formatter.sub_heading(suite),
            self.color_formatter.sub_heading("~" * len(suite)),
        ]

        # Format check counts
        checks = self.summary.aggregate("count", count)
        rv.append(
            "Ran {} checks ({})".format(
                sum(checks.values()),
                ", ".join(
                    ["{} {}s".format(v, k) for k, v in sorted(checks.items()) if v]
                ),
            )
        )

        # Format expected counts
        checks = self.summary.aggregate("expected", count, include_skip=False)
        intermittent_checks = self.summary.aggregate(
            "known_intermittent", count, include_skip=False
        )
        intermittents = sum(intermittent_checks.values())
        known = (
            " ({} known intermittents)".format(intermittents) if intermittents else ""
        )
        rv.append("Expected results: {}{}".format(sum(checks.values()), known))

        # Format skip counts
        skip_tests = count["test"]["expected"]["skip"]
        skip_subtests = count["subtest"]["expected"]["skip"]
        if skip_tests:
            skipped = "Skipped: {} tests".format(skip_tests)
            if skip_subtests:
                skipped = "{}, {} subtests".format(skipped, skip_subtests)
            rv.append(skipped)

        # Format unexpected counts
        checks = self.summary.aggregate("unexpected", count)
        unexpected_count = sum(checks.values())
        rv.append("Unexpected results: {}".format(unexpected_count))
        if unexpected_count:
            for key in ("test", "subtest", "assert"):
                if not count[key]["unexpected"]:
                    continue
                status_str = ", ".join(
                    [
                        "{} {}".format(n, s)
                        for s, n in sorted(count[key]["unexpected"].items())
                    ]
                )
                rv.append(
                    "  {}: {} ({})".format(
                        key, sum(count[key]["unexpected"].values()), status_str
                    )
                )

        # Format intermittents
        if intermittents > 0:
            heading = "Known Intermittent Results"
            rv.extend(
                [
                    "",
                    self.color_formatter.heading(heading),
                    self.color_formatter.heading("-" * len(heading)),
                ]
            )
            # Suites with subtests group results under a bold test name;
            # suites without subtests list one line per test.
            if count["subtest"]["count"]:
                for test_id, results in intermittent_logs.items():
                    test = self._get_file_name(test_id)
                    rv.append(self.color_formatter.bold(test))
                    for data in results:
                        rv.append("  %s" % self._format_status(test, data).rstrip())
            else:
                for test_id, results in intermittent_logs.items():
                    test = self._get_file_name(test_id)
                    for data in results:
                        assert "subtest" not in data
                        rv.append(self._format_status(test, data).rstrip())

        # Format status
        testfailed = any(
            count[key]["unexpected"] for key in ("test", "subtest", "assert")
        )
        if not testfailed and not harness_errors:
            rv.append(self.color_formatter.log_test_status_pass("OK"))
        else:
            # Format test failures
            heading = "Unexpected Results"
            rv.extend(
                [
                    "",
                    self.color_formatter.heading(heading),
                    self.color_formatter.heading("-" * len(heading)),
                ]
            )
            if count["subtest"]["count"]:
                for test_id, results in logs.items():
                    test = self._get_file_name(test_id)
                    rv.append(self.color_formatter.bold(test))
                    for data in results:
                        rv.append("  %s" % self._format_status(test, data).rstrip())
            else:
                for test_id, results in logs.items():
                    test = self._get_file_name(test_id)
                    for data in results:
                        assert "subtest" not in data
                        rv.append(self._format_status(test, data).rstrip())

            # Format harness errors
            if harness_errors:
                for data in harness_errors:
                    rv.append(self.log(data))

        return "\n".join(rv)

    def test_start(self, data):
        action = self.color_formatter.action(data["action"].upper())
        return "%s: %s" % (action, self._get_test_id(data))

    def test_end(self, data):
        """Summarise a finished test: status, subtest pass ratio, any
        unexpected/intermittent details, and (optionally) screenshots."""
        subtests = self._get_subtest_data(data)

        # Unexpected only when a non-intermittent status differs from
        # the expectation (the "expected" key is present in that case).
        if "expected" in data and data["status"] not in data.get(
            "known_intermittent", []
        ):
            parent_unexpected = True
            expected_str = ", expected %s" % data["expected"]
        else:
            parent_unexpected = False
            expected_str = ""

        has_screenshots = "reftest_screenshots" in data.get("extra", {})

        test = self._get_test_id(data)

        # Reset the counts to 0
        self.status_buffer[test] = {"count": 0, "unexpected": 0, "pass": 0}
        self.has_unexpected[test] = bool(subtests["unexpected"])

        if subtests["count"] != 0:
            rv = "Test %s%s. Subtests passed %i/%i. Unexpected %s" % (
                data["status"],
                expected_str,
                subtests["pass"],
                subtests["count"],
                subtests["unexpected"],
            )
        else:
            rv = "%s%s" % (data["status"], expected_str)

        unexpected = self.summary.current["unexpected_logs"].get(data["test"])
        if unexpected:
            # A single unexpected result on the test itself is folded into
            # the status line; otherwise list each one (unless verbose mode
            # already printed them as they happened).
            if len(unexpected) == 1 and parent_unexpected:
                message = unexpected[0].get("message", "")
                if message:
                    rv += " - %s" % message
                if "stack" in data:
                    rv += self._format_stack(data["stack"])
            elif not self.verbose:
                rv += "\n"
                for d in unexpected:
                    rv += self._format_status(data["test"], d)

        intermittents = self.summary.current["intermittent_logs"].get(data["test"])
        if intermittents:
            rv += "\n"
            for d in intermittents:
                rv += self._format_status(data["test"], d)

        if "expected" not in data and not bool(subtests["unexpected"]):
            color = self.color_formatter.log_test_status_pass
        else:
            color = self.color_formatter.log_test_status_unexpected_fail

        action = color(data["action"].upper())
        rv = "%s: %s" % (action, rv)
        if has_screenshots and self.enable_screenshot:
            if self.tbpl_formatter is None:
                self.tbpl_formatter = TbplFormatter()
            # Create TBPL-like output that can be pasted into the reftest analyser
            rv = "\n".join((rv, self.tbpl_formatter.test_end(data)))
        return rv

    def valgrind_error(self, data):
        """Render a valgrind error: primary line plus secondary detail."""
        rv = " " + data["primary"] + "\n"
        for line in data["secondary"]:
            rv = rv + line + "\n"

        return rv

    def lsan_leak(self, data):
        # Leaks matching the allowed list are soft failures; others are
        # unexpected.
        allowed = data.get("allowed_match")
        if allowed:
            prefix = self.color_formatter.log_test_status_fail("FAIL")
        else:
            prefix = self.color_formatter.log_test_status_unexpected_fail(
                "UNEXPECTED-FAIL"
            )

        return "%s LeakSanitizer: leak at %s" % (prefix, ", ".join(data["frames"]))

    def lsan_summary(self, data):
        allowed = data.get("allowed", False)
        if allowed:
            prefix = self.color_formatter.warning("WARNING")
        else:
            prefix = self.color_formatter.error("ERROR")

        return (
            "%s | LeakSanitizer | "
            "SUMMARY: AddressSanitizer: %d byte(s) leaked in %d allocation(s)."
            % (prefix, data["bytes"], data["allocations"])
        )

    def mozleak_object(self, data):
        # Per-object leak reports are emitted as plain INFO log lines.
        data_log = data.copy()
        data_log["level"] = "INFO"
        data_log["message"] = "leakcheck: %s leaked %d %s" % (
            data["process"],
            data["bytes"],
            data["name"],
        )
        return self.log(data_log)

    def mozleak_total(self, data):
        """Render the per-process leak total, handling missing TOTAL lines,
        zero leaks, and totals above/below the failure threshold."""
        if data["bytes"] is None:
            # We didn't see a line with name 'TOTAL'
            if data.get("induced_crash", False):
                data_log = data.copy()
                data_log["level"] = "INFO"
                data_log["message"] = (
                    "leakcheck: %s deliberate crash and thus no leak log\n"
                    % data["process"]
                )
                return self.log(data_log)
            if data.get("ignore_missing", False):
                return (
                    "%s ignoring missing output line for total leaks\n"
                    % data["process"]
                )

            # NOTE(review): the "pass" color is used for a FAIL label here
            # — possibly intentional (soft failure); confirm upstream.
            status = self.color_formatter.log_test_status_pass("FAIL")
            return "%s leakcheck: " "%s missing output line for total leaks!\n" % (
                status,
                data["process"],
            )

        if data["bytes"] == 0:
            return "%s leakcheck: %s no leaks detected!\n" % (
                self.color_formatter.log_test_status_pass("PASS"),
                data["process"],
            )

        message = "leakcheck: %s %d bytes leaked\n" % (data["process"], data["bytes"])

        # data["bytes"] will include any expected leaks, so it can be off
        # by a few thousand bytes.
        failure = data["bytes"] > data["threshold"]
        status = (
            self.color_formatter.log_test_status_unexpected_fail("UNEXPECTED-FAIL")
            if failure
            else self.color_formatter.log_test_status_fail("FAIL")
        )
        return "%s %s\n" % (status, message)

    def test_status(self, data):
        """Track subtest counts; only verbose mode prints each status."""
        test = self._get_test_id(data)
        if test not in self.status_buffer:
            self.status_buffer[test] = {"count": 0, "unexpected": 0, "pass": 0}
        self.status_buffer[test]["count"] += 1

        if data["status"] == "PASS":
            self.status_buffer[test]["pass"] += 1

        if "expected" in data and data["status"] not in data.get(
            "known_intermittent", []
        ):
            self.status_buffer[test]["unexpected"] += 1

        if self.verbose:
            return self._format_status(test, data).rstrip("\n")

    def assertion_count(self, data):
        """Report an assertion count outside the expected range; silent
        when the count is within bounds."""
        if data["min_expected"] <= data["count"] <= data["max_expected"]:
            return

        if data["min_expected"] != data["max_expected"]:
            expected = "%i to %i" % (data["min_expected"], data["max_expected"])
        else:
            expected = "%i" % data["min_expected"]

        action = self.color_formatter.log_test_status_fail("ASSERT")
        return "%s: Assertion count %i, expected %s assertions\n" % (
            action,
            data["count"],
            expected,
        )

    def process_output(self, data):
        rv = []

        pid = data["process"]
        if pid.isdigit():
            pid = "pid:%s" % pid
        pid = self.color_formatter.pid(pid)

        # Print the full command line once per process, on first output.
        if "command" in data and data["process"] not in self._known_pids:
            self._known_pids.add(data["process"])
            rv.append("%s Full command: %s" % (pid, data["command"]))

        rv.append("%s %s" % (pid, data["data"]))
        return "\n".join(rv)

    def crash(self, data):
        """Render a crash report: signature line plus minidump details."""
        test = self._get_test_id(data)

        # NOTE(review): this marks the minidump as "analysed" precisely
        # when stackwalk returned non-zero AND produced no stderr; the
        # condition looks inverted — confirm against the upstream intent.
        if data.get("stackwalk_returncode", 0) != 0 and not data.get(
            "stackwalk_stderr"
        ):
            success = True
        else:
            success = False

        rv = [
            "pid:%s. Process type: %s. Test:%s. Minidump analysed:%s. Signature:[%s]"
            % (
                data.get("pid", "unknown"),
                data.get("process_type", None),
                test,
                success,
                data["signature"],
            )
        ]

        if data.get("java_stack"):
            rv.append("Java exception: %s" % data["java_stack"])
        else:
            if data.get("reason"):
                rv.append("Mozilla crash reason: %s" % data["reason"])

            if data.get("minidump_path"):
                rv.append("Crash dump filename: %s" % data["minidump_path"])

            if data.get("stackwalk_returncode", 0) != 0:
                rv.append(
                    "minidump-stackwalk exited with return code %d"
                    % data["stackwalk_returncode"]
                )

            if data.get("stackwalk_stderr"):
                rv.append("stderr from minidump-stackwalk:")
                rv.append(data["stackwalk_stderr"])
            elif data.get("stackwalk_stdout"):
                rv.append(data["stackwalk_stdout"])

            if data.get("stackwalk_errors"):
                rv.extend(data.get("stackwalk_errors"))

        rv = "\n".join(rv)
        if not rv[-1] == "\n":
            rv += "\n"

        action = self.color_formatter.action(data["action"].upper())
        return "%s: %s" % (action, rv)

    def process_start(self, data):
        rv = "Started process `%s`" % data["process"]
        desc = data.get("command")
        if desc:
            rv = "%s (%s)" % (rv, desc)
        return rv

    def process_exit(self, data):
        return "%s: %s" % (data["process"], strstatus(data["exitcode"]))

    def log(self, data):
        """Render a generic log message with a colorized level prefix."""
        level = data.get("level").upper()

        if level in ("CRITICAL", "ERROR"):
            level = self.color_formatter.error(level)
        elif level == "WARNING":
            level = self.color_formatter.warning(level)
        elif level == "INFO":
            level = self.color_formatter.log_process_output(level)

        if data.get("component"):
            rv = " ".join([data["component"], level, data["message"]])
        else:
            rv = "%s %s" % (level, data["message"])

        if "stack" in data:
            rv += "\n%s" % data["stack"]

        return rv

    def lint(self, data):
        """Render one lint finding: path, position, level, message, rule."""
        fmt = (
            "{path}  {c1}{lineno}{column}  {c2}{level}{normal}  {message}"
            "  {c1}{rule}({linter}){normal}"
        )
        message = fmt.format(
            path=data["path"],
            normal=self.color_formatter.normal,
            c1=self.color_formatter.grey,
            c2=self.color_formatter.error
            if data["level"] == "error"
            else (self.color_formatter.log_test_status_fail),
            lineno=str(data["lineno"]),
            column=(":" + str(data["column"])) if data.get("column") else "",
            level=data["level"],
            message=data["message"],
            rule="{} ".format(data["rule"]) if data.get("rule") else "",
            linter=data["linter"].lower() if data.get("linter") else "",
        )

        return message

    def shutdown(self, data):
        """Emit the overall summary across suites, if requested."""
        if not self.summary_on_shutdown:
            return

        heading = "Overall Summary"
        rv = [
            "",
            self.color_formatter.heading(heading),
            self.color_formatter.heading("=" * len(heading)),
        ]
        for suite, summary in self.summary:
            rv.append(self._format_suite_summary(suite, summary))
        return "\n".join(rv)

    def _get_subtest_data(self, data):
        test = self._get_test_id(data)
        return self.status_buffer.get(test, {"count": 0, "unexpected": 0, "pass": 0})

    def _time(self, data):
        """Return the message's display time in seconds (see __init__)."""
        entry_time = data["time"]
        if self.write_interval and self.last_time is not None:
            t = entry_time - self.last_time
            self.last_time = entry_time
        else:
            t = entry_time - self.start_time
            # NOTE(review): last_time is never seeded outside this branch's
            # sibling, so write_interval appears to never take effect unless
            # last_time is set elsewhere — confirm before relying on it.

        return t / 1000.0
diff --git a/testing/mozbase/mozlog/mozlog/formatters/process.py b/testing/mozbase/mozlog/mozlog/formatters/process.py
new file mode 100644
index 0000000000..012c7a0803
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/process.py
@@ -0,0 +1,59 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import signal
+
+from six.moves import range
+
# Lazily-built cache mapping signal number -> signal name.
_SIG_NAME = None


def strsig(n):
    """
    Translate a process signal identifier to a human readable string.
    """
    global _SIG_NAME

    if _SIG_NAME is None:
        # Collect every SIG* constant, skipping SIG_* handler values and
        # the SIGCLD/SIGPOLL aliases.
        _SIG_NAME = {
            getattr(signal, name): name
            for name in dir(signal)
            if name.startswith("SIG")
            and not name.startswith("SIG_")
            and name not in ("SIGCLD", "SIGPOLL")
        }

        # Realtime signals mostly have no names; synthesize SIGRTMIN+N.
        if hasattr(signal, "SIGRTMIN") and hasattr(signal, "SIGRTMAX"):
            for num in range(signal.SIGRTMIN + 1, signal.SIGRTMAX + 1):
                _SIG_NAME[num] = "SIGRTMIN+%d" % (num - signal.SIGRTMIN)

    if n < 0 or n >= signal.NSIG:
        return "out-of-range signal, number %s" % n
    try:
        return _SIG_NAME[n]
    except KeyError:
        return "unrecognized signal, number %s" % n
+
+
def strstatus(status):
    """
    Returns a human readable string of a process exit code, as returned
    by the subprocess module.
    """
    if os.name != "posix":
        # Windows error codes are easier to look up if printed in
        # hexadecimal; fold negatives into their unsigned 32-bit form.
        if status < 0:
            status += 2**32
        return "exit %x" % status
    if status < 0:
        # On POSIX a negative status means termination by signal -status.
        return "killed by %s" % strsig(-status)
    return "exit %d" % status
diff --git a/testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py b/testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
new file mode 100644
index 0000000000..103b9a2d54
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
@@ -0,0 +1,473 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import functools
+from collections import deque
+from functools import reduce
+
+import six
+
+from ..handlers import SummaryHandler
+from .base import BaseFormatter
+from .process import strstatus
+
+
def output_subtests(func):
    """Decorator for formatter methods: flush any pending (dot-rendered)
    subtest results before emitting the wrapped method's own output."""

    @functools.wraps(func)
    def inner(self, data):
        if not self.subtests_count:
            return func(self, data)
        pending = self._format_subtests(data.get("component"))
        return pending + func(self, data)

    return inner
+
+
class TbplFormatter(BaseFormatter):
    """Formatter that formats logs in the legacy formatting format used by TBPL
    This is intended to be used to preserve backward compatibility with existing tools
    hand-parsing this format.
    """

    def __init__(self, compact=False, summary_on_shutdown=False, **kwargs):
        super(TbplFormatter, self).__init__(**kwargs)
        self.suite_start_time = None
        # test id -> start timestamp (ms); used by test_end to report durations.
        self.test_start_times = {}
        self.buffer = None
        self.compact = compact
        # Subtest results seen since the last flush; rendered as dots.
        self.subtests_count = 0

        self.summary = SummaryHandler()
        self.summary_on_shutdown = summary_on_shutdown

    def __call__(self, data):
        if self.summary_on_shutdown:
            # Record every message so shutdown() can print a summary.
            self.summary(data)
        return super(TbplFormatter, self).__call__(data)

    @property
    def compact(self):
        return self._compact

    @compact.setter
    def compact(self, value):
        # Compact mode keeps the last 10 subtest results as context for any
        # unexpected result; otherwise no context buffering is done.
        self._compact = value
        if value:
            self.buffer = deque([], 10)
        else:
            self.buffer = None

    def _format_subtests(self, component, subtract_context=False):
        """Render buffered subtest results as a row of dots, resetting the count.

        :param subtract_context: Don't represent results still held in the
            context buffer as dots (they will be printed in full instead).
        """
        count = self.subtests_count
        if subtract_context:
            count -= len(self.buffer)
        self.subtests_count = 0
        return self._log(
            {"level": "INFO", "message": "." * count, "component": component}
        )

    @output_subtests
    def log(self, data):
        return self._log(data)

    def _log(self, data):
        """Format a plain log message, prefixing the component and
        appending any stack trace."""
        if data.get("component"):
            message = "%s %s" % (data["component"], data["message"])
        else:
            message = data["message"]

        if "stack" in data:
            message += "\n%s" % data["stack"]

        return "%s\n" % message

    @output_subtests
    def process_output(self, data):
        pid = data["process"]
        if pid.isdigit():
            pid = "PID %s" % pid
        return "%s | %s\n" % (pid, data["data"])

    @output_subtests
    def process_start(self, data):
        msg = "TEST-INFO | started process %s" % data["process"]
        if "command" in data:
            msg = "%s (%s)" % (msg, data["command"])
        return msg + "\n"

    @output_subtests
    def process_exit(self, data):
        return "TEST-INFO | %s: %s\n" % (data["process"], strstatus(data["exitcode"]))

    @output_subtests
    def shutdown_failure(self, data):
        return "TEST-UNEXPECTED-FAIL | %s | %s\n" % (data["group"], data["message"])

    @output_subtests
    def crash(self, data):
        """Format a crash report, including signature, pid and minidump
        details when present."""
        id = data["test"] if "test" in data else "pid: %s" % data["process"]

        if data.get("java_stack"):
            # use "<exception> at <top frame>" as a crash signature for java exception
            sig = data["java_stack"].split("\n")
            sig = " ".join(sig[0:2])
            rv = ["PROCESS-CRASH | %s | %s\n[%s]" % (id, sig, data["java_stack"])]

            if data.get("reason"):
                rv.append("Mozilla crash reason: %s" % data["reason"])

            if data.get("minidump_path"):
                rv.append("Crash dump filename: %s" % data["minidump_path"])

        else:
            signature = data["signature"] if data["signature"] else "unknown top frame"
            reason = data.get("reason", "application crashed")
            rv = ["PROCESS-CRASH | %s [%s] | %s " % (reason, signature, id)]

            if data.get("process_type"):
                rv.append("Process type: {}".format(data["process_type"]))

            rv.append("Process pid: {}".format(data.get("pid", "unknown")))

            if data.get("reason"):
                rv.append("Mozilla crash reason: %s" % data["reason"])

            if data.get("minidump_path"):
                rv.append("Crash dump filename: %s" % data["minidump_path"])

            if data.get("stackwalk_stderr"):
                rv.append("stderr from minidump-stackwalk:")
                rv.append(data["stackwalk_stderr"])
            elif data.get("stackwalk_stdout"):
                rv.append(data["stackwalk_stdout"])

            if data.get("stackwalk_returncode", 0) != 0:
                rv.append(
                    "minidump-stackwalk exited with return code %d"
                    % data["stackwalk_returncode"]
                )

            if data.get("stackwalk_errors"):
                rv.extend(data.get("stackwalk_errors"))

        rv = "\n".join(rv)
        if not rv[-1] == "\n":
            rv += "\n"

        return rv

    def suite_start(self, data):
        self.suite_start_time = data["time"]
        num_tests = reduce(lambda x, y: x + len(y), six.itervalues(data["tests"]), 0)
        return "SUITE-START | Running %i tests\n" % num_tests

    def test_start(self, data):
        self.test_start_times[self.test_id(data["test"])] = data["time"]

        return "TEST-START | %s\n" % data["test"]

    def test_status(self, data):
        """Format a subtest result; in compact mode unexpected results are
        printed together with the buffered context, expected ones are
        merely counted (and later rendered as dots)."""
        if self.compact:
            if "expected" in data:
                rv = []
                rv.append(
                    self._format_subtests(data.get("component"), subtract_context=True)
                )
                rv.extend(self._format_status(item) for item in self.buffer)
                rv.append(self._format_status(data))
                self.buffer.clear()
                return "".join(rv)
            else:
                self.subtests_count += 1
                self.buffer.append(data)
        else:
            return self._format_status(data)

    def assertion_count(self, data):
        """Format the outcome of an assertion-count check, or return None
        when there is nothing noteworthy (zero assertions expected and seen)."""
        if data["min_expected"] != data["max_expected"]:
            expected = "%i to %i" % (data["min_expected"], data["max_expected"])
        else:
            expected = "%i" % data["min_expected"]

        if data["count"] < data["min_expected"]:
            status, comparison = "TEST-UNEXPECTED-PASS", "is less than"
        elif data["count"] > data["max_expected"]:
            status, comparison = "TEST-UNEXPECTED-FAIL", "is more than"
        elif data["count"] > 0:
            status, comparison = "TEST-KNOWN-FAIL", "matches"
        else:
            return

        return "%s | %s | assertion count %i %s expected %s assertions\n" % (
            status,
            data["test"],
            data["count"],
            comparison,
            expected,
        )

    def _format_status(self, data):
        """Format a single (sub)test status line, marking unexpected and
        known-intermittent results."""
        message = "- " + data["message"] if "message" in data else ""
        if "stack" in data:
            message += "\n%s" % data["stack"]
        if message and message[-1] == "\n":
            message = message[:-1]

        status = data["status"]

        if "expected" in data:
            if status in data.get("known_intermittent", []):
                status = "KNOWN-INTERMITTENT-%s" % status
            else:
                if not message:
                    message = "- expected %s" % data["expected"]
                failure_line = "TEST-UNEXPECTED-%s | %s | %s %s\n" % (
                    status,
                    data["test"],
                    data["subtest"],
                    message,
                )
                if data["expected"] != "PASS":
                    info_line = "TEST-INFO | expected %s\n" % data["expected"]
                    return failure_line + info_line
                return failure_line

        return "TEST-%s | %s | %s %s\n" % (
            status,
            data["test"],
            data["subtest"],
            message,
        )

    def test_end(self, data):
        """Format the end of a test, flushing buffered subtest context and
        reporting duration and any reftest screenshots."""
        rv = []
        if self.compact and self.subtests_count:
            print_context = "expected" in data
            rv.append(
                self._format_subtests(
                    data.get("component"), subtract_context=print_context
                )
            )
            if print_context:
                rv.extend(self._format_status(item) for item in self.buffer)
            self.buffer.clear()

        test_id = self.test_id(data["test"])
        duration_msg = ""

        if test_id in self.test_start_times:
            start_time = self.test_start_times.pop(test_id)
            time = data["time"] - start_time
            duration_msg = "took %ims" % time

        screenshot_msg = ""
        extra = data.get("extra", {})
        if "reftest_screenshots" in extra:
            screenshots = extra["reftest_screenshots"]
            if len(screenshots) == 3:
                screenshot_msg = (
                    "\nREFTEST IMAGE 1 (TEST): data:image/png;base64,%s\n"
                    "REFTEST IMAGE 2 (REFERENCE): data:image/png;base64,%s"
                ) % (screenshots[0]["screenshot"], screenshots[2]["screenshot"])
            elif len(screenshots) == 1:
                screenshot_msg = (
                    "\nREFTEST IMAGE: data:image/png;base64,%s"
                    % screenshots[0]["screenshot"]
                )

        status = data["status"]

        if "expected" in data:
            if status in data.get("known_intermittent", []):
                status = "KNOWN-INTERMITTENT-%s" % status
            else:
                message = data.get("message", "")
                if not message:
                    message = "expected %s" % data["expected"]
                if "stack" in data:
                    message += "\n%s" % data["stack"]
                if message and message[-1] == "\n":
                    message = message[:-1]

                message += screenshot_msg

                failure_line = "TEST-UNEXPECTED-%s | %s | %s\n" % (
                    data["status"],
                    test_id,
                    message,
                )

                if data["expected"] not in ("PASS", "OK"):
                    expected_msg = "expected %s | " % data["expected"]
                else:
                    expected_msg = ""
                info_line = "TEST-INFO %s%s\n" % (expected_msg, duration_msg)

                return failure_line + info_line

        sections = ["TEST-%s" % status, test_id]
        if duration_msg:
            sections.append(duration_msg)
        rv.append(" | ".join(sections) + "\n")
        if screenshot_msg:
            rv.append(screenshot_msg[1:] + "\n")
        return "".join(rv)

    def suite_end(self, data):
        start_time = self.suite_start_time
        # pylint --py3k W1619
        # in wpt --repeat mode sometimes we miss suite_start()
        if start_time is None:
            start_time = data["time"]
        time = int((data["time"] - start_time) / 1000)

        return "SUITE-END | took %is\n" % time

    def test_id(self, test_id):
        """Return a hashable id: the string itself, or a tuple for
        multi-component (e.g. reftest) ids."""
        if isinstance(test_id, (str, six.text_type)):
            return test_id
        else:
            return tuple(test_id)

    @output_subtests
    def valgrind_error(self, data):
        rv = "TEST-UNEXPECTED-VALGRIND-ERROR | " + data["primary"] + "\n"
        for line in data["secondary"]:
            rv = rv + line + "\n"

        return rv

    def lint(self, data):
        """Format a lint error line.

        Fixed defect: the original returned ``fmt.append(fmt.format(**data))``,
        but ``fmt`` is a str, which has no ``append`` method, so every call
        raised AttributeError. Return the formatted string directly.
        """
        fmt = "TEST-UNEXPECTED-{level} | {path}:{lineno}{column} | {message} ({rule})"
        data["column"] = ":%s" % data["column"] if data["column"] else ""
        data["rule"] = data["rule"] or data["linter"] or ""
        return fmt.format(**data)

    def lsan_leak(self, data):
        frames = data.get("frames")
        allowed_match = data.get("allowed_match")
        frame_list = ", ".join(frames)
        prefix = "TEST-UNEXPECTED-FAIL" if not allowed_match else "TEST-FAIL"
        suffix = (
            ""
            if not allowed_match
            else "INFO | LeakSanitizer | Frame %s matched a expected leak\n"
            % allowed_match
        )
        return "%s | LeakSanitizer | leak at %s\n%s" % (prefix, frame_list, suffix)

    def lsan_summary(self, data):
        level = "INFO" if data.get("allowed", False) else "ERROR"
        return (
            "%s | LeakSanitizer | "
            "SUMMARY: AddressSanitizer: %d byte(s) leaked in %d allocation(s)."
            % (level, data["bytes"], data["allocations"])
        )

    def mozleak_object(self, data):
        return "TEST-INFO | leakcheck | %s leaked %d %s\n" % (
            data["process"],
            data["bytes"],
            data["name"],
        )

    def mozleak_total(self, data):
        """Format the total-leak line for a process, highlighting large
        well-known leakers and failing when over the threshold."""
        if data["bytes"] is None:
            # We didn't see a line with name 'TOTAL'
            if data.get("induced_crash", False):
                return (
                    "TEST-INFO | leakcheck | %s deliberate crash and thus no leak log\n"
                    % data["process"]
                )
            if data.get("ignore_missing", False):
                return (
                    "TEST-INFO | leakcheck | "
                    "%s ignoring missing output line for total leaks\n"
                    % data["process"]
                )

            return (
                "TEST-UNEXPECTED-FAIL | leakcheck | "
                "%s missing output line for total leaks!\n" % data["process"]
            )

        if data["bytes"] == 0:
            return "TEST-PASS | leakcheck | %s no leaks detected!\n" % data["process"]

        message = ""
        bigLeakers = [
            "nsGlobalWindowInner",
            "nsGlobalWindowOuter",
            "Document",
            "nsDocShell",
            "BrowsingContext",
            "BackstagePass",
        ]
        for bigLeakName in bigLeakers:
            if bigLeakName in data["objects"]:
                message = "leakcheck large %s | %s" % (bigLeakName, data["scope"])
                break

        # Create a comma delimited string of the first N leaked objects found,
        # to aid with bug summary matching in TBPL. Note: The order of the objects
        # had no significance (they're sorted alphabetically).
        if message == "":
            max_objects = 5
            object_summary = ", ".join(data["objects"][:max_objects])
            if len(data["objects"]) > max_objects:
                object_summary += ", ..."

            message = "leakcheck | %s %d bytes leaked (%s)\n" % (
                data["process"],
                data["bytes"],
                object_summary,
            )

        # data["bytes"] will include any expected leaks, so it can be off
        # by a few thousand bytes.
        if data["bytes"] > data["threshold"]:
            return "TEST-UNEXPECTED-FAIL | %s\n" % message
        else:
            return "WARNING | %s\n" % message

    def _format_suite_summary(self, suite, summary):
        """Format one suite's pass/total counts plus any unexpected and
        known-intermittent result lines."""
        counts = summary["counts"]
        logs = summary["unexpected_logs"]
        intermittent_logs = summary["intermittent_logs"]

        total = sum(self.summary.aggregate("count", counts).values())
        expected = sum(self.summary.aggregate("expected", counts).values())
        intermittents = sum(
            self.summary.aggregate("known_intermittent", counts).values()
        )
        known = (
            " ({} known intermittent tests)".format(intermittents)
            if intermittents
            else ""
        )
        status_str = "{}/{}{}".format(expected, total, known)
        rv = ["{}: {}".format(suite, status_str)]

        for results in logs.values():
            for data in results:
                rv.append("  {}".format(self._format_status(data)))

        if intermittent_logs:
            rv.append("Known Intermittent tests:")
            for results in intermittent_logs.values():
                for data in results:
                    data["subtest"] = data.get("subtest", "")
                    rv.append("  {}".format(self._format_status(data)))

        return "\n".join(rv)

    def shutdown(self, data):
        """Emit the overall summary, if summary_on_shutdown was requested."""
        if not self.summary_on_shutdown:
            return

        rv = ["", "Overall Summary"]
        for suite, summary in self.summary:
            rv.append(self._format_suite_summary(suite, summary))
        rv.append("")
        return "\n".join(rv)
diff --git a/testing/mozbase/mozlog/mozlog/formatters/unittest.py b/testing/mozbase/mozlog/mozlog/formatters/unittest.py
new file mode 100755
index 0000000000..3eba864d85
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/unittest.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from . import base
+
+
class UnittestFormatter(base.BaseFormatter):
    """Formatter designed to produce output in a format like that used by
    the ``unittest`` module in the standard library."""

    def __init__(self):
        # Unexpected failures/errors, reported in full at suite_end.
        self.fails = []
        self.errors = []
        self.tests_run = 0
        self.start_time = None
        self.end_time = None

    def suite_start(self, data):
        self.start_time = data["time"]

    def test_start(self, data):
        self.tests_run += 1

    def test_end(self, data):
        """Return the single unittest-style progress character for this
        result ("." pass, "F" fail, "E" error, "X" unexpected pass, "S" skip)."""
        char = "."
        if "expected" in data:
            # Unexpected result: record it for the end-of-suite report.
            status = data["status"]
            char = {"FAIL": "F", "PRECONDITION_FAILED": "F", "ERROR": "E", "PASS": "X"}[
                status
            ]

            if status == "FAIL" or status == "PRECONDITION_FAILED":
                self.fails.append(data)
            elif status == "ERROR":
                self.errors.append(data)

        elif data["status"] == "SKIP":
            char = "S"
        return char

    def assertion_count(self, data):
        """Return a progress character for an assertion-count check,
        recording an entry in ``fails`` when too many assertions fired."""
        if data["count"] < data["min_expected"]:
            # Fewer assertions than expected: an unexpected pass.
            char = "X"
        elif data["count"] > data["max_expected"]:
            char = "F"
            self.fails.append(
                {
                    "test": data["test"],
                    "message": (
                        # Fixed typo in the reported message: "greated" -> "greater".
                        "assertion count %i is greater than %i"
                        % (data["count"], data["max_expected"])
                    ),
                }
            )
        else:
            # Within the expected range (including known failures); the two
            # original branches both yielded "." so they are merged here.
            char = "."

        return char

    def suite_end(self, data):
        """Emit the failure/error details followed by the run summary."""
        self.end_time = data["time"]
        summary = "\n".join(
            [self.output_fails(), self.output_errors(), self.output_summary()]
        )
        return "\n%s\n" % summary

    def output_fails(self):
        return "\n".join("FAIL %(test)s\n%(message)s\n" % data for data in self.fails)

    def output_errors(self):
        return "\n".join("ERROR %(test)s\n%(message)s" % data for data in self.errors)

    def output_summary(self):
        # Timestamps are in milliseconds; report seconds.
        return "Ran %i tests in %.1fs" % (
            self.tests_run,
            (self.end_time - self.start_time) / 1000,
        )
diff --git a/testing/mozbase/mozlog/mozlog/formatters/xunit.py b/testing/mozbase/mozlog/mozlog/formatters/xunit.py
new file mode 100644
index 0000000000..02966d713a
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/formatters/xunit.py
@@ -0,0 +1,115 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from xml.etree import ElementTree
+
+import six
+
+from . import base
+
+
def format_test_id(test_id):
    """Take a test id and return something that looks a bit like
    a class path"""
    if not isinstance(test_id, six.string_types):
        # Tuple ids (e.g. reftests) have no obvious class-path analogue yet.
        raise NotImplementedError

    # Existing dots would read as package separators, so flatten them
    # first, then promote path separators to dots.
    flattened = test_id.replace(".", "_")
    return flattened.replace("/", ".")
+
+
class XUnitFormatter(base.BaseFormatter):
    """Formatter that produces XUnit-style XML output.

    The tree is created in-memory so this formatter may be problematic
    with very large log files.

    Note that the data model isn't a perfect match. In
    particular XUnit assumes that each test has a unittest-style
    class name and function name, which isn't the case for us. The
    implementation currently replaces path names with something that
    looks like class names, but this doesn't work for test types that
    actually produce class names, or for test types that have multiple
    components in their test id (e.g. reftests)."""

    def __init__(self):
        self.tree = ElementTree.ElementTree()
        # Root <testsuite> element; created in suite_start.
        self.root = None
        self.suite_start_time = None
        # Start time of the currently-running test (one test at a time).
        self.test_start_time = None

        # Aggregate counters written as <testsuite> attributes at suite_end.
        self.tests_run = 0
        self.errors = 0
        self.failures = 0
        self.skips = 0

    def suite_start(self, data):
        self.root = ElementTree.Element("testsuite")
        # NOTE(review): ElementTree exposes no public "root" attribute (the
        # supported API is _setroot), so this only sets an ad-hoc attribute.
        # Harmless in practice: serialization in suite_end uses self.root.
        self.tree.root = self.root
        self.suite_start_time = data["time"]

    def test_start(self, data):
        self.tests_run += 1
        self.test_start_time = data["time"]

    def _create_result(self, data):
        # Append one <testcase> element for this (sub)test result; an
        # unexpected result gains a nested <error>/<failure> child, and a
        # skipped test gains a <skipped> child.
        test = ElementTree.SubElement(self.root, "testcase")
        name = format_test_id(data["test"])
        extra = data.get("extra") or {}
        test.attrib["classname"] = extra.get("class_name") or name

        if "subtest" in data:
            test.attrib["name"] = data["subtest"]
            # We generally don't know how long subtests take
            test.attrib["time"] = "0"
        else:
            if "." in name:
                test_name = name.rsplit(".", 1)[1]
            else:
                test_name = name
            test.attrib["name"] = extra.get("method_name") or test_name
            # Timestamps arrive in milliseconds; xunit wants seconds.
            test.attrib["time"] = "%.2f" % (
                (data["time"] - self.test_start_time) / 1000.0
            )

        if "expected" in data and data["expected"] != data["status"]:
            # NOTRUN/ASSERT/ERROR map to xunit "error"; everything else
            # unexpected maps to "failure".
            if data["status"] in ("NOTRUN", "ASSERT", "ERROR"):
                result = ElementTree.SubElement(test, "error")
                self.errors += 1
            else:
                result = ElementTree.SubElement(test, "failure")
                self.failures += 1

            result.attrib["message"] = "Expected %s, got %s" % (
                data["expected"],
                data["status"],
            )
            result.text = "%s\n%s" % (data.get("stack", ""), data.get("message", ""))

        elif data["status"] == "SKIP":
            result = ElementTree.SubElement(test, "skipped")
            self.skips += 1

    def test_status(self, data):
        # Subtest results get their own <testcase> entries.
        self._create_result(data)

    def test_end(self, data):
        self._create_result(data)

    def suite_end(self, data):
        # Finalize the <testsuite> attributes and return pretty-printed XML.
        self.root.attrib.update(
            {
                "tests": str(self.tests_run),
                "errors": str(self.errors),
                "failures": str(self.failures),
                "skips": str(self.skips),
                "time": "%.2f" % ((data["time"] - self.suite_start_time) / 1000.0),
            }
        )
        xml_string = ElementTree.tostring(self.root, encoding="utf8")
        # pretty printing can not be done from xml.etree
        from xml.dom import minidom

        return minidom.parseString(xml_string).toprettyxml()
diff --git a/testing/mozbase/mozlog/mozlog/handlers/__init__.py b/testing/mozbase/mozlog/mozlog/handlers/__init__.py
new file mode 100644
index 0000000000..e14bdf675f
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/handlers/__init__.py
@@ -0,0 +1,19 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from .base import BaseHandler, LogLevelFilter, StreamHandler
+from .bufferhandler import BufferHandler
+from .statushandler import StatusHandler
+from .summaryhandler import SummaryHandler
+from .valgrindhandler import ValgrindHandler
+
+__all__ = [
+ "LogLevelFilter",
+ "StreamHandler",
+ "BaseHandler",
+ "StatusHandler",
+ "SummaryHandler",
+ "BufferHandler",
+ "ValgrindHandler",
+]
diff --git a/testing/mozbase/mozlog/mozlog/handlers/base.py b/testing/mozbase/mozlog/mozlog/handlers/base.py
new file mode 100644
index 0000000000..064b1dee0b
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/handlers/base.py
@@ -0,0 +1,124 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import codecs
+import locale
+from threading import Lock
+
+import six
+
+from mozlog.handlers.messagehandler import MessageHandler
+from mozlog.structuredlog import log_levels
+
+
class BaseHandler(object):
    """A base handler providing message handling facilities to
    derived classes.

    :param inner: Wrapped handler or formatter; if it participates in
        message handling, messages sent here are propagated to it.
    """

    def __init__(self, inner):
        self.message_handler = MessageHandler()
        # Chain the wrapped object so broadcast messages reach it too.
        # NOTE(review): the object itself (not its .message_handler) is
        # appended; MessageHandler.handle_message calls
        # inner.handle_message(...) on wrapped entries — confirm wrapped
        # objects actually expose that method.
        if hasattr(inner, "message_handler"):
            self.message_handler.wrapped.append(inner)
+
+
class LogLevelFilter(BaseHandler):
    """Handler that filters out messages with action of log and a level
    lower than some specified level.

    :param inner: Handler to use for messages that pass this filter
    :param level: Minimum log level to process
    """

    def __init__(self, inner, level):
        BaseHandler.__init__(self, inner)
        self.inner = inner
        self.level = log_levels[level.upper()]

    def __call__(self, item):
        # Non-"log" actions always pass through. "log" items are dropped
        # when less severe than the threshold (higher numeric value means
        # lower severity in mozlog's log_levels table).
        if item["action"] == "log" and log_levels[item["level"].upper()] > self.level:
            return None
        return self.inner(item)
+
+
class StreamHandler(BaseHandler):
    """Handler for writing to a file-like object

    :param stream: File-like object to write log messages to
    :param formatter: formatter to convert messages to string format
    """

    # Class-level lock shared by all instances, so concurrent writers
    # cannot interleave partial messages on the same stream.
    _lock = Lock()

    def __init__(self, stream, formatter):
        BaseHandler.__init__(self, formatter)
        assert stream is not None
        if six.PY2:
            # This is a hack to deal with the case where we are passed a
            # StreamWriter (e.g. by mach for stdout). A StreamWriter requires
            # the code to handle unicode in exactly the opposite way compared
            # to a normal stream i.e. you always have to pass in a Unicode
            # object rather than a string object. Cope with that by extracting
            # the underlying raw stream.
            if isinstance(stream, codecs.StreamWriter):
                stream = stream.stream

        self.formatter = formatter
        self.stream = stream

    def __call__(self, data):
        """Write a log message.

        :param data: Structured log message dictionary."""
        formatted = self.formatter(data)
        if not formatted:
            # Formatters may return None/"" to suppress a message entirely.
            return
        with self._lock:
            if six.PY3:
                import io

                import mozfile

                # Determine the encoding the target stream expects so the
                # formatted message can be converted to match it.
                source_enc = "utf-8"
                target_enc = "utf-8"
                if isinstance(self.stream, io.BytesIO):
                    target_enc = None
                if hasattr(self.stream, "encoding"):
                    target_enc = self.stream.encoding
                if target_enc is None:
                    target_enc = locale.getpreferredencoding()

                if isinstance(self.stream, io.StringIO) and isinstance(
                    formatted, bytes
                ):
                    # Text stream handed bytes: decode before writing.
                    formatted = formatted.decode(source_enc, "replace")
                elif (
                    isinstance(self.stream, io.BytesIO)
                    or isinstance(self.stream, mozfile.NamedTemporaryFile)
                ) and isinstance(formatted, str):
                    # Binary stream handed text: encode before writing.
                    formatted = formatted.encode(target_enc, "replace")
                elif isinstance(formatted, str):
                    # Suppress eventual surrogates, but keep as string.
                    # TODO: It is yet unclear how we can end up with
                    # surrogates here, see comment on patch on bug 1794401.
                    formatted_bin = formatted.encode(target_enc, "replace")
                    formatted = formatted_bin.decode(target_enc, "ignore")

                # It seems that under Windows we can have cp1252 encoding
                # for the output stream and that not all unicode chars map
                # well. We just ignore those errors here (they have no
                # consequences for the executed tests, anyways).
                try:
                    self.stream.write(formatted)
                except UnicodeEncodeError:
                    return
            else:
                # Python 2: always emit UTF-8-encoded byte strings.
                if isinstance(formatted, six.text_type):
                    self.stream.write(formatted.encode("utf-8", "replace"))
                elif isinstance(formatted, str):
                    self.stream.write(formatted)
                else:
                    assert False, "Got output from the formatter of an unexpected type"

            self.stream.flush()
diff --git a/testing/mozbase/mozlog/mozlog/handlers/bufferhandler.py b/testing/mozbase/mozlog/mozlog/handlers/bufferhandler.py
new file mode 100644
index 0000000000..5fcdcf967a
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/handlers/bufferhandler.py
@@ -0,0 +1,86 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from .base import BaseHandler
+
+
class BufferHandler(BaseHandler):
    """Handler that maintains a circular buffer of messages based on the
    size and actions specified by a user.

    :param inner: The underlying handler used to emit messages.
    :param message_limit: The maximum number of messages to retain for
        context. If None, the buffer will grow without limit.
    :param buffered_actions: The set of actions to include in the buffer
        rather than log directly.
    """

    def __init__(self, inner, message_limit=100, buffered_actions=None):
        BaseHandler.__init__(self, inner)
        self.inner = inner
        self.message_limit = message_limit
        if buffered_actions is None:
            buffered_actions = ["log", "test_status"]
        self.buffered_actions = set(buffered_actions)
        # Buffering starts enabled; toggled at runtime via the "buffer"
        # message topic registered below.
        self._buffering = True

        if self.message_limit is not None:
            # Fixed-size ring buffer: _buffer_pos is the next write index;
            # None marks slots that have never been written.
            self._buffer = [None] * self.message_limit
            self._buffer_pos = 0
        else:
            # Unbounded mode: a plain list that grows with each message.
            self._buffer = []

        self.message_handler.register_message_handlers(
            "buffer",
            {
                "on": self._enable_buffering,
                "off": self._disable_buffering,
                "flush": self._flush_buffered,
                "clear": self._clear_buffer,
            },
        )

    def __call__(self, data):
        # Buffer or forward a single structured log message.
        action = data["action"]
        if "bypass_mozlog_buffer" in data:
            # Explicit opt-out: deliver immediately, dropping the marker key.
            data.pop("bypass_mozlog_buffer")
            self.inner(data)
            return
        if not self._buffering or action not in self.buffered_actions:
            self.inner(data)
            return

        self._add_message(data)

    def _add_message(self, data):
        if self.message_limit is None:
            self._buffer.append(data)
        else:
            # Overwrite the oldest entry once the ring wraps around.
            self._buffer[self._buffer_pos] = data
            self._buffer_pos = (self._buffer_pos + 1) % self.message_limit

    def _enable_buffering(self):
        self._buffering = True

    def _disable_buffering(self):
        self._buffering = False

    def _clear_buffer(self):
        """Clear the buffer of unwanted messages."""
        # Count only slots actually holding a message (ring slots may be None).
        current_size = len([m for m in self._buffer if m is not None])
        if self.message_limit is not None:
            # NOTE(review): _buffer_pos is deliberately left as-is; new
            # messages start mid-ring after a clear, which is harmless
            # because _flush_buffered skips None entries.
            self._buffer = [None] * self.message_limit
        else:
            self._buffer = []
        return current_size

    def _flush_buffered(self):
        """Logs the contents of the current buffer"""
        # Emit in chronological order: the oldest entry (if the ring has
        # wrapped) sits at _buffer_pos, so flush from there to the end,
        # then the remainder from the start.
        for msg in self._buffer[self._buffer_pos :]:
            if msg is not None:
                self.inner(msg)
        for msg in self._buffer[: self._buffer_pos]:
            if msg is not None:
                self.inner(msg)
        return self._clear_buffer()
diff --git a/testing/mozbase/mozlog/mozlog/handlers/messagehandler.py b/testing/mozbase/mozlog/mozlog/handlers/messagehandler.py
new file mode 100644
index 0000000000..606dd83926
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/handlers/messagehandler.py
@@ -0,0 +1,39 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
class MessageHandler(object):
    """A message handler providing message handling facilities to
    classes derived from BaseHandler and BaseFormatter. This is a
    composition class, to ensure handlers and formatters remain separate.

    :param inner: A handler-like callable that may receive messages
        from a log user.
    """

    def __init__(self):
        # topic -> {command name: callback}
        self.message_handlers = {}
        # Other message-handling objects that should also see each message.
        self.wrapped = []

    def register_message_handlers(self, topic, handlers):
        """Register a mapping of command names to callbacks for ``topic``."""
        self.message_handlers[topic] = handlers

    def handle_message(self, topic, cmd, *args):
        """Handles a message for the given topic by calling a subclass-defined
        callback for the command.

        :param topic: The topic of the broadcasted message. Handlers opt-in to
                      receiving messages by identifying a topic when calling
                      register_message_handlers.
        :param command: The command to issue. This is a string that corresponds
                        to a callback provided by the target.
        :param arg: Arguments to pass to the identified message callback, if any.
        """
        results = []
        callbacks = self.message_handlers.get(topic)
        if callbacks is not None and cmd in callbacks:
            results.append(callbacks[cmd](*args))
        # Propagate the message to any wrapped message handlers as well.
        for inner in self.wrapped:
            results.extend(inner.handle_message(topic, cmd, *args))
        return results
diff --git a/testing/mozbase/mozlog/mozlog/handlers/statushandler.py b/testing/mozbase/mozlog/mozlog/handlers/statushandler.py
new file mode 100644
index 0000000000..500f2e1d89
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/handlers/statushandler.py
@@ -0,0 +1,87 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from collections import defaultdict, namedtuple
+
RunSummary = namedtuple(
    "RunSummary",
    (
        "unexpected_statuses",
        "expected_statuses",
        "known_intermittent_statuses",
        "log_level_counts",
        "action_counts",
    ),
)


class StatusHandler(object):
    """A handler used to determine an overall status for a test run according
    to a sequence of log messages."""

    def __init__(self):
        # Result-status tallies (tests and subtests combined).
        self.unexpected_statuses = defaultdict(int)
        self.expected_statuses = defaultdict(int)
        self.known_intermittent_statuses = defaultdict(int)
        # How many times each action was seen.
        self.action_counts = defaultdict(int)
        # How many messages were logged at each level.
        self.log_level_counts = defaultdict(int)
        # Occurrences of the "No tests ran" error message.
        self.no_tests_run_count = 0

    def __call__(self, data):
        action = data["action"]
        intermittent = data.get("known_intermittent", [])
        self.action_counts[action] += 1

        if action == "log":
            level = data["level"]
            if level == "ERROR" and data["message"] == "No tests ran":
                self.no_tests_run_count += 1
            self.log_level_counts[level] += 1
        elif action in ("test_status", "test_end"):
            status = data["status"]
            # A status listed as known-intermittent counts as expected,
            # and is additionally tallied as intermittent.
            if "expected" in data and status not in intermittent:
                self.unexpected_statuses[status] += 1
            else:
                self.expected_statuses[status] += 1
                if status in intermittent:
                    self.known_intermittent_statuses[status] += 1
        elif action == "assertion_count":
            count = data["count"]
            if count < data["min_expected"]:
                self.unexpected_statuses["PASS"] += 1
            elif count > data["max_expected"]:
                self.unexpected_statuses["FAIL"] += 1
            else:
                # Within range: a non-zero count is a known failure.
                self.expected_statuses["FAIL" if count else "PASS"] += 1
        elif action == "lsan_leak":
            if not data.get("allowed_match"):
                self.unexpected_statuses["FAIL"] += 1
        elif action == "lsan_summary":
            if not data.get("allowed", False):
                self.unexpected_statuses["FAIL"] += 1
        elif action == "mozleak_total":
            leaked = data["bytes"]
            if leaked is not None and leaked > data.get("threshold", 0):
                self.unexpected_statuses["FAIL"] += 1

    def summarize(self):
        """Return a RunSummary snapshot with plain-dict copies of the tallies."""
        return RunSummary(
            dict(self.unexpected_statuses),
            dict(self.expected_statuses),
            dict(self.known_intermittent_statuses),
            dict(self.log_level_counts),
            dict(self.action_counts),
        )
diff --git a/testing/mozbase/mozlog/mozlog/handlers/summaryhandler.py b/testing/mozbase/mozlog/mozlog/handlers/summaryhandler.py
new file mode 100644
index 0000000000..347e52abdb
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/handlers/summaryhandler.py
@@ -0,0 +1,193 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from collections import OrderedDict, defaultdict
+
+import six
+
+from ..reader import LogHandler
+
+
+class SummaryHandler(LogHandler):
+    """Handler class for storing suite summary information.
+
+    Can handle multiple suites in a single run. Summary
+    information is stored on the self.summary instance variable.
+
+    Per suite summary information can be obtained by calling 'get'
+    or iterating over this class.
+    """
+
+    def __init__(self, **kwargs):
+        super(SummaryHandler, self).__init__(**kwargs)
+
+        # Maps suite name -> summary dict; ordered so iteration follows
+        # the order in which suites were started.
+        self.summary = OrderedDict()
+        self.current_suite = None
+
+    @property
+    def current(self):
+        """The summary dict for the most recently started suite, or None."""
+        return self.summary.get(self.current_suite)
+
+    def __getitem__(self, suite):
+        """Return summary information for the given suite.
+
+        The summary is of the form:
+
+            {
+              'counts': {
+                '<check>': {
+                  'count': int,
+                  'expected': {
+                    '<status>': int,
+                  },
+                  'unexpected': {
+                    '<status>': int,
+                  },
+                  'known_intermittent': {
+                    '<status>': int,
+                  },
+                },
+              },
+              'unexpected_logs': {
+                '<test>': [<data>]
+              },
+              'intermittent_logs': {
+                '<test>': [<data>]
+              }
+            }
+
+        Valid values for <check> are `test`, `subtest` and `assert`. Valid
+        <status> keys are defined in the :py:mod:`mozlog.logtypes` module.  The
+        <test> key is the id as logged by `test_start`. Finally the <data>
+        field is the log data from any `test_end` or `test_status` log messages
+        that have an unexpected result.
+
+        Mozlog's structuredlog has a `known_intermittent` field indicating if a
+        `test` and `subtest` <status> are expected to arise intermittently.
+        Known intermittent results are logged as both as `expected` and
+        `known_intermittent`.
+        """
+        return self.summary[suite]
+
+    def __iter__(self):
+        """Iterate over summaries.
+
+        Yields a tuple of (suite, summary). The summary returned is
+        the same format as returned by 'get'.
+        """
+        for suite, data in six.iteritems(self.summary):
+            yield suite, data
+
+    @classmethod
+    def aggregate(cls, key, counts, include_skip=True):
+        """Helper method for aggregating count data by 'key' instead of by 'check'."""
+        assert key in ("count", "expected", "unexpected", "known_intermittent")
+
+        res = defaultdict(int)
+        for check, val in counts.items():
+            if key == "count":
+                res[check] += val[key]
+                continue
+
+            for status, num in val[key].items():
+                if not include_skip and status == "skip":
+                    continue
+                res[check] += num
+        return res
+
+    def suite_start(self, data):
+        """Start tracking a new suite, creating its empty summary skeleton.
+
+        Unnamed suites get a generated name based on their start order.
+        """
+        self.current_suite = data.get("name", "suite {}".format(len(self.summary) + 1))
+        if self.current_suite not in self.summary:
+            self.summary[self.current_suite] = {
+                "counts": {
+                    "test": {
+                        "count": 0,
+                        "expected": defaultdict(int),
+                        "unexpected": defaultdict(int),
+                        "known_intermittent": defaultdict(int),
+                    },
+                    "subtest": {
+                        "count": 0,
+                        "expected": defaultdict(int),
+                        "unexpected": defaultdict(int),
+                        "known_intermittent": defaultdict(int),
+                    },
+                    "assert": {
+                        "count": 0,
+                        "expected": defaultdict(int),
+                        "unexpected": defaultdict(int),
+                        "known_intermittent": defaultdict(int),
+                    },
+                },
+                "unexpected_logs": OrderedDict(),
+                "intermittent_logs": OrderedDict(),
+                "harness_errors": [],
+            }
+
+    def test_start(self, data):
+        """Count one started test towards the current suite's test total."""
+        self.current["counts"]["test"]["count"] += 1
+
+    def test_status(self, data):
+        """Record a subtest result as expected/unexpected/known-intermittent."""
+        logs = self.current["unexpected_logs"]
+        intermittent_logs = self.current["intermittent_logs"]
+        count = self.current["counts"]
+        count["subtest"]["count"] += 1
+
+        if "expected" in data:
+            # "expected" is only present when the status differed from the
+            # expectation; known intermittents still count as expected.
+            if data["status"] not in data.get("known_intermittent", []):
+                count["subtest"]["unexpected"][data["status"].lower()] += 1
+                if data["test"] not in logs:
+                    logs[data["test"]] = []
+                logs[data["test"]].append(data)
+            else:
+                count["subtest"]["expected"][data["status"].lower()] += 1
+                count["subtest"]["known_intermittent"][data["status"].lower()] += 1
+                if data["test"] not in intermittent_logs:
+                    intermittent_logs[data["test"]] = []
+                intermittent_logs[data["test"]].append(data)
+        else:
+            count["subtest"]["expected"][data["status"].lower()] += 1
+
+    def test_end(self, data):
+        """Record a test result; mirrors test_status but for the 'test' check."""
+        logs = self.current["unexpected_logs"]
+        intermittent_logs = self.current["intermittent_logs"]
+        count = self.current["counts"]
+        if "expected" in data:
+            if data["status"] not in data.get("known_intermittent", []):
+                count["test"]["unexpected"][data["status"].lower()] += 1
+                if data["test"] not in logs:
+                    logs[data["test"]] = []
+                logs[data["test"]].append(data)
+            else:
+                count["test"]["expected"][data["status"].lower()] += 1
+                count["test"]["known_intermittent"][data["status"].lower()] += 1
+                if data["test"] not in intermittent_logs:
+                    intermittent_logs[data["test"]] = []
+                intermittent_logs[data["test"]].append(data)
+        else:
+            count["test"]["expected"][data["status"].lower()] += 1
+
+    def assertion_count(self, data):
+        """Classify an assertion count as an expected/unexpected pass/fail."""
+        count = self.current["counts"]
+        count["assert"]["count"] += 1
+
+        if data["min_expected"] <= data["count"] <= data["max_expected"]:
+            # In range: non-zero counts are expected failures.
+            if data["count"] > 0:
+                count["assert"]["expected"]["fail"] += 1
+            else:
+                count["assert"]["expected"]["pass"] += 1
+        elif data["max_expected"] < data["count"]:
+            count["assert"]["unexpected"]["fail"] += 1
+        else:
+            count["assert"]["unexpected"]["pass"] += 1
+
+    def log(self, data):
+        """Collect CRITICAL/ERROR log messages as harness errors.
+
+        Messages logged before any suite_start are dropped.
+        """
+        if not self.current_suite:
+            return
+
+        logs = self.current["harness_errors"]
+        # NOTE(review): assumes every "log" message carries a "level";
+        # data.get("level") would be None otherwise and .upper() would raise.
+        level = data.get("level").upper()
+
+        if level in ("CRITICAL", "ERROR"):
+            logs.append(data)
diff --git a/testing/mozbase/mozlog/mozlog/handlers/valgrindhandler.py b/testing/mozbase/mozlog/mozlog/handlers/valgrindhandler.py
new file mode 100644
index 0000000000..0f9b06e8f7
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/handlers/valgrindhandler.py
@@ -0,0 +1,138 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+from .base import BaseHandler
+
+
+class ValgrindHandler(BaseHandler):
+    """Handler that pipes messages through a ValgrindFilter before the
+    wrapped inner handler, dropping messages the filter consumes."""
+
+    def __init__(self, inner):
+        BaseHandler.__init__(self, inner)
+        self.inner = inner
+        self.vFilter = ValgrindFilter()
+
+    def __call__(self, data):
+        # The filter returns None while it is buffering lines of a
+        # Valgrind error; only forward complete messages.
+        tmp = self.vFilter(data)
+        if tmp is not None:
+            self.inner(tmp)
+
+
+class ValgrindFilter(object):
+    """
+    A class for handling Valgrind output.
+
+    Valgrind errors look like this:
+
+    ==60741== 40 (24 direct, 16 indirect) bytes in 1 blocks are definitely lost in loss
+    record 2,746 of 5,235
+    ==60741==    at 0x4C26B43: calloc (vg_replace_malloc.c:593)
+    ==60741==    by 0x63AEF65: PR_Calloc (prmem.c:443)
+    ==60741==    by 0x69F236E: PORT_ZAlloc_Util (secport.c:117)
+    ==60741==    by 0x69F1336: SECITEM_AllocItem_Util (secitem.c:28)
+    ==60741==    by 0xA04280B: ffi_call_unix64 (in /builds/slave/m-in-l64-valgrind-000000000000/objdir/toolkit/library/libxul.so) # noqa
+    ==60741==    by 0xA042443: ffi_call (ffi64.c:485)
+
+    For each such error, this class extracts most or all of the first (error
+    kind) line, plus the function name in each of the first few stack entries.
+    With this data it constructs and prints a TEST-UNEXPECTED-FAIL message that
+    TBPL will highlight.
+
+    It buffers these lines from which text is extracted so that the
+    TEST-UNEXPECTED-FAIL message can be printed before the full error.
+
+    Parsing the Valgrind output isn't ideal, and it may break in the future if
+    Valgrind changes the format of the messages, or introduces new error kinds.
+    To protect against this, we also count how many lines containing
+    "<insert_a_suppression_name_here>" are seen. Thanks to the use of
+    --gen-suppressions=yes, exactly one of these lines is present per error. If
+    the count of these lines doesn't match the error count found during
+    parsing, then the parsing has missed one or more errors and we can fail
+    appropriately.
+    """
+
+    def __init__(self):
+        # The regexps in this list match all of Valgrind's errors. Note that
+        # Valgrind is English-only, so we don't have to worry about
+        # localization.
+        self.re_error = re.compile(
+            r"==\d+== ("
+            + r"(Use of uninitialised value of size \d+)|"
+            + r"(Conditional jump or move depends on uninitialised value\(s\))|"
+            + r"(Syscall param .* contains uninitialised byte\(s\))|"
+            + r"(Syscall param .* points to (unaddressable|uninitialised) byte\(s\))|"
+            + r"((Unaddressable|Uninitialised) byte\(s\) found during client check request)|"
+            + r"(Invalid free\(\) / delete / delete\[\] / realloc\(\))|"
+            + r"(Mismatched free\(\) / delete / delete \[\])|"
+            + r"(Invalid (read|write) of size \d+)|"
+            + r"(Jump to the invalid address stated on the next line)|"
+            + r"(Source and destination overlap in .*)|"
+            + r"(.* bytes in .* blocks are .* lost)"
+            + r")"
+        )
+        # Match identifier chars, plus ':' for namespaces, and '\?' in order to
+        # match "???" which Valgrind sometimes produces.
+        self.re_stack_entry = re.compile(r"^==\d+==.*0x[A-Z0-9]+: ([A-Za-z0-9_:\?]+)")
+        self.re_suppression = re.compile(r" *<insert_a_suppression_name_here>")
+        self.error_count = 0
+        self.suppression_count = 0
+        # How many more stack-entry lines to consume for the current error.
+        self.number_of_stack_entries_to_get = 0
+        self.curr_failure_msg = ""
+        self.buffered_lines = []
+
+    # Takes a message and returns a message
+    def __call__(self, msg):
+        # Pass through everything that isn't plain text
+        if msg["action"] != "log":
+            return msg
+
+        line = msg["message"]
+        output_message = None
+        if self.number_of_stack_entries_to_get == 0:
+            # Look for the start of a Valgrind error.
+            m = re.search(self.re_error, line)
+            if m:
+                self.error_count += 1
+                self.number_of_stack_entries_to_get = 4
+                self.curr_failure_msg = m.group(1) + " at "
+                self.buffered_lines = [line]
+            else:
+                output_message = msg
+
+        else:
+            # We've recently found a Valgrind error, and are now extracting
+            # details from the first few stack entries.
+            self.buffered_lines.append(line)
+            m = re.match(self.re_stack_entry, line)
+            if m:
+                self.curr_failure_msg += m.group(1)
+            else:
+                self.curr_failure_msg += "?!?"
+
+            self.number_of_stack_entries_to_get -= 1
+            if self.number_of_stack_entries_to_get != 0:
+                self.curr_failure_msg += " / "
+            else:
+                # We've finished getting the first few stack entries. Emit
+                # the failure action, comprising the primary message and the
+                # buffered lines, and then reset state. Copy the mandatory
+                # fields from the incoming message, since there's nowhere
+                # else to get them from.
+                output_message = {  # Mandatory fields
+                    "action": "valgrind_error",
+                    "time": msg["time"],
+                    "thread": msg["thread"],
+                    "pid": msg["pid"],
+                    "source": msg["source"],
+                    # valgrind_error specific fields
+                    "primary": self.curr_failure_msg,
+                    "secondary": self.buffered_lines,
+                }
+                self.curr_failure_msg = ""
+                self.buffered_lines = []
+
+        if re.match(self.re_suppression, line):
+            self.suppression_count += 1
+
+        return output_message
diff --git a/testing/mozbase/mozlog/mozlog/logtypes.py b/testing/mozbase/mozlog/mozlog/logtypes.py
new file mode 100644
index 0000000000..00ff414d1c
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/logtypes.py
@@ -0,0 +1,302 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import inspect
+
+import six
+from six.moves import range, zip
+
+convertor_registry = {}
+missing = object()
+no_default = object()
+
+
+class log_action(object):
+    """Decorator describing the typed fields of a structured log action.
+
+    The positional ``args`` are DataType instances defining the fields a
+    logging method accepts; the decorated method's name is registered in
+    ``convertor_registry`` and its arguments are converted/validated before
+    the wrapped method is called with the resulting data dict.
+    """
+
+    def __init__(self, *args):
+        self.args = {}
+
+        self.args_no_default = []
+        self.args_with_default = []
+        self.optional_args = set()
+
+        # These are the required fields in a log message that usually aren't
+        # supplied by the caller, but can be in the case of log_raw
+        self.default_args = [
+            Unicode("action"),
+            Int("time"),
+            Unicode("thread"),
+            Int("pid", default=None),
+            Unicode("source"),
+            Unicode("component"),
+        ]
+
+        for arg in args:
+            if arg.default is no_default:
+                self.args_no_default.append(arg.name)
+            else:
+                self.args_with_default.append(arg.name)
+
+            if arg.optional:
+                self.optional_args.add(arg.name)
+
+            if arg.name in self.args:
+                raise ValueError("Repeated argument name %s" % arg.name)
+
+            self.args[arg.name] = arg
+
+        for extra in self.default_args:
+            self.args[extra.name] = extra
+
+    def __call__(self, f):
+        """Register and wrap the decorated method, preserving its docstring."""
+        convertor_registry[f.__name__] = self
+        converter = self
+
+        def inner(self, *args, **kwargs):
+            data = converter.convert(*args, **kwargs)
+            return f(self, data)
+
+        if hasattr(f, "__doc__"):
+            setattr(inner, "__doc__", f.__doc__)
+
+        return inner
+
+    def convert(self, *args, **kwargs):
+        """Convert positional/keyword call arguments into a validated dict.
+
+        Positional arguments fill required fields first, then defaulted
+        fields in declaration order. Raises TypeError for missing, extra,
+        duplicated or unrecognised arguments.
+        """
+        data = {}
+        values = {}
+        values.update(kwargs)
+
+        positional_no_default = [
+            item for item in self.args_no_default if item not in values
+        ]
+
+        num_no_default = len(positional_no_default)
+
+        if len(args) < num_no_default:
+            raise TypeError("Too few arguments")
+
+        if len(args) > num_no_default + len(self.args_with_default):
+            raise TypeError("Too many arguments")
+
+        for i, name in enumerate(positional_no_default):
+            values[name] = args[i]
+
+        positional_with_default = [
+            self.args_with_default[i] for i in range(len(args) - num_no_default)
+        ]
+
+        for i, name in enumerate(positional_with_default):
+            if name in values:
+                raise TypeError("Argument %s specified twice" % name)
+            values[name] = args[i + num_no_default]
+
+        # Fill in missing arguments
+        for name in self.args_with_default:
+            if name not in values:
+                values[name] = self.args[name].default
+
+        for key, value in six.iteritems(values):
+            if key in self.args:
+                out_value = self.args[key](value)
+                if out_value is not missing:
+                    # Optional args left at their default are omitted entirely.
+                    if key in self.optional_args and value == self.args[key].default:
+                        pass
+                    else:
+                        data[key] = out_value
+            else:
+                raise TypeError("Unrecognised argument %s" % key)
+
+        return data
+
+    def convert_known(self, **kwargs):
+        """Like convert(), but silently drops keyword args not in the schema."""
+        known_kwargs = {
+            name: value for name, value in six.iteritems(kwargs) if name in self.args
+        }
+        return self.convert(**known_kwargs)
+
+
+class DataType(object):
+    """Base class for typed log fields.
+
+    Subclasses implement ``convert(value)``. Calling an instance returns
+    the converted value, the default when the input equals the default, or
+    the ``missing`` sentinel for optional fields left at their default.
+    """
+
+    def __init__(self, name, default=no_default, optional=False):
+        self.name = name
+        self.default = default
+
+        if default is no_default and optional is not False:
+            raise ValueError("optional arguments require a default value")
+
+        self.optional = optional
+
+    def __call__(self, value):
+        if value == self.default:
+            if self.optional:
+                return missing
+            return self.default
+
+        try:
+            return self.convert(value)
+        except Exception:
+            # Re-raise with field/type context rather than the raw error.
+            raise ValueError(
+                "Failed to convert value %s of type %s for field %s to type %s"
+                % (value, type(value).__name__, self.name, self.__class__.__name__)
+            )
+
+
+class ContainerType(DataType):
+    """A DataType that contains other DataTypes.
+
+    ContainerTypes must specify which other DataType they will contain. ContainerTypes
+    may contain other ContainerTypes.
+
+    Some examples:
+
+        List(Int, 'numbers')
+        Tuple((Unicode, Int, Any), 'things')
+        Dict(Unicode, 'config')
+        Dict({TestId: Status}, 'results')
+        Dict(List(Unicode), 'stuff')
+    """
+
+    def __init__(self, item_type, name=None, **kwargs):
+        DataType.__init__(self, name, **kwargs)
+        self.item_type = self._format_item_type(item_type)
+
+    def _format_item_type(self, item_type):
+        # Accept a bare DataType subclass and instantiate it (unnamed) so
+        # item_type is always an instance with a convert() method.
+        if inspect.isclass(item_type):
+            return item_type(None)
+        return item_type
+
+
+class Unicode(DataType):
+ def convert(self, data):
+ if isinstance(data, six.text_type):
+ return data
+ if isinstance(data, str):
+ return data.decode("utf8", "replace")
+ return six.text_type(data)
+
+
+class TestId(DataType):
+    """Field type for test identifiers: a text string or a tuple of them.
+
+    Byte strings are decoded as UTF-8; lists/tuples are converted
+    element-wise to a tuple of text. Anything else raises ValueError.
+    """
+
+    def convert(self, data):
+        if isinstance(data, six.text_type):
+            return data
+        elif isinstance(data, bytes):
+            return data.decode("utf-8", "replace")
+        elif isinstance(data, (tuple, list)):
+            # This is really a bit of a hack; should really split out convertors from the
+            # fields they operate on
+            func = Unicode(None).convert
+            return tuple(func(item) for item in data)
+        else:
+            raise ValueError
+
+
+class Status(DataType):
+    """Field type restricted to the allowed harness result strings.
+
+    Input is upper-cased, so matching is case-insensitive; values outside
+    ``allowed`` raise ValueError.
+    """
+
+    allowed = [
+        "PASS",
+        "FAIL",
+        "OK",
+        "ERROR",
+        "TIMEOUT",
+        "CRASH",
+        "ASSERT",
+        "PRECONDITION_FAILED",
+        "SKIP",
+    ]
+
+    def convert(self, data):
+        value = data.upper()
+        if value not in self.allowed:
+            raise ValueError
+        return value
+
+
+class SubStatus(Status):
+    # Like Status but for subtests: adds NOTRUN, drops OK and CRASH.
+    allowed = [
+        "PASS",
+        "FAIL",
+        "ERROR",
+        "TIMEOUT",
+        "ASSERT",
+        "PRECONDITION_FAILED",
+        "NOTRUN",
+        "SKIP",
+    ]
+
+
+class Dict(ContainerType):
+    """Container type for dicts; item_type is stored as (key_type, value_type)."""
+
+    def _format_item_type(self, item_type):
+        superfmt = super(Dict, self)._format_item_type
+
+        # A one-entry dict spells out both key and value types; a bare type
+        # constrains only the values (keys accept Any).
+        if isinstance(item_type, dict):
+            if len(item_type) != 1:
+                raise ValueError(
+                    "Dict item type specifier must contain a single entry."
+                )
+            key_type, value_type = list(item_type.items())[0]
+            return superfmt(key_type), superfmt(value_type)
+        return Any(None), superfmt(item_type)
+
+    def convert(self, data):
+        key_type, value_type = self.item_type
+        return {
+            key_type.convert(k): value_type.convert(v) for k, v in dict(data).items()
+        }
+
+
+class List(ContainerType):
+    """Container type converting an iterable to a list of item_type values."""
+
+    def convert(self, data):
+        # while dicts and strings _can_ be cast to lists,
+        # doing so is likely not intentional behaviour
+        if isinstance(data, (six.string_types, dict)):
+            raise ValueError("Expected list but got %s" % type(data))
+        return [self.item_type.convert(item) for item in data]
+
+
+class TestList(DataType):
+    """A TestList is a list of tests that can be either keyed by a group name,
+    or specified as a flat list.
+    """
+
+    def convert(self, data):
+        # A flat list is normalised into a single "default" group.
+        if isinstance(data, (list, tuple)):
+            data = {"default": data}
+        return Dict({Unicode: List(Unicode)}).convert(data)
+
+
+class Int(DataType):
+    """Field type coercing the value with int()."""
+
+    def convert(self, data):
+        return int(data)
+
+
+class Any(DataType):
+    """Field type accepting any value unchanged."""
+
+    def convert(self, data):
+        return data
+
+
+class Boolean(DataType):
+    """Field type coercing the value with bool()."""
+
+    def convert(self, data):
+        return bool(data)
+
+
+class Tuple(ContainerType):
+    """Container type for fixed-length tuples with per-position item types."""
+
+    def _format_item_type(self, item_type):
+        superfmt = super(Tuple, self)._format_item_type
+
+        if isinstance(item_type, (tuple, list)):
+            return [superfmt(t) for t in item_type]
+        return (superfmt(item_type),)
+
+    def convert(self, data):
+        # Length must match the declared item types exactly.
+        if len(data) != len(self.item_type):
+            raise ValueError(
+                "Expected %i items got %i" % (len(self.item_type), len(data))
+            )
+        return tuple(
+            item_type.convert(value) for item_type, value in zip(self.item_type, data)
+        )
+
+
+class Nullable(ContainerType):
+    """Container type that passes None through, converting anything else."""
+
+    def convert(self, data):
+        if data is None:
+            return data
+        return self.item_type.convert(data)
diff --git a/testing/mozbase/mozlog/mozlog/proxy.py b/testing/mozbase/mozlog/mozlog/proxy.py
new file mode 100644
index 0000000000..1d5994406b
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/proxy.py
@@ -0,0 +1,81 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from threading import Thread
+
+from .structuredlog import StructuredLogger, get_default_logger
+
+
+class ProxyLogger(object):
+    """
+    A ProxyLogger behaves like a
+    :class:`mozlog.structuredlog.StructuredLogger`.
+
+    Each method and attribute access will be forwarded to the underlying
+    StructuredLogger.
+
+    RuntimeError will be raised when the default logger is not yet initialized.
+    """
+
+    def __init__(self, component=None):
+        # Resolved lazily on first attribute access; see __getattr__.
+        self.logger = None
+        self._component = component
+
+    def __getattr__(self, name):
+        # Look up (and cache) the default logger on first use, so a
+        # ProxyLogger can be created before logging is set up.
+        if self.logger is None:
+            self.logger = get_default_logger(component=self._component)
+            if self.logger is None:
+                raise RuntimeError("Default logger is not initialized!")
+        return getattr(self.logger, name)
+
+
+def get_proxy_logger(component=None):
+    """
+    Returns a :class:`ProxyLogger` for the given component.
+
+    :param component: optional component name forwarded to the proxy.
+    """
+    return ProxyLogger(component)
+
+
+class QueuedProxyLogger(StructuredLogger):
+    """Logger that logs via a queue.
+
+    This is intended for multiprocessing use cases where there are
+    some subprocesses which want to share a log handler with the main thread,
+    without the overhead of having a multiprocessing lock for all logger
+    access."""
+
+    # Class-level registry: one consumer thread per logger name, shared by
+    # all QueuedProxyLogger instances in this process.
+    threads = {}
+
+    def __init__(self, logger, mp_context=None):
+        StructuredLogger.__init__(self, logger.name)
+
+        # The multiprocessing module itself doubles as the default context.
+        if mp_context is None:
+            import multiprocessing as mp_context
+
+        if logger.name not in self.threads:
+            self.threads[logger.name] = LogQueueThread(mp_context.Queue(), logger)
+            self.threads[logger.name].start()
+        self.queue = self.threads[logger.name].queue
+
+    def _handle_log(self, data):
+        # Enqueue instead of handling directly; the LogQueueThread drains it.
+        self.queue.put(data)
+
+
+class LogQueueThread(Thread):
+    """Daemon thread that drains a queue of log messages into a logger."""
+
+    def __init__(self, queue, logger):
+        self.queue = queue
+        self.logger = logger
+        Thread.__init__(self, name="Thread-Log")
+        # Daemon so a still-running log thread never blocks interpreter exit.
+        self.daemon = True
+
+    def run(self):
+        while True:
+            try:
+                msg = self.queue.get()
+            except (EOFError, IOError):
+                # Queue's underlying pipe was closed; stop consuming.
+                break
+            if msg is None:
+                # None is the shutdown sentinel.
+                break
+            else:
+                self.logger._handle_log(msg)
diff --git a/testing/mozbase/mozlog/mozlog/pytest_mozlog/__init__.py b/testing/mozbase/mozlog/mozlog/pytest_mozlog/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/pytest_mozlog/__init__.py
diff --git a/testing/mozbase/mozlog/mozlog/pytest_mozlog/plugin.py b/testing/mozbase/mozlog/mozlog/pytest_mozlog/plugin.py
new file mode 100644
index 0000000000..ce51dacdec
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/pytest_mozlog/plugin.py
@@ -0,0 +1,127 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import time
+
+import pytest
+import six
+
+import mozlog
+
+
+def pytest_addoption(parser):
+    """Add the mozlog --log-<formatter> and formatter-option flags to pytest."""
+    # We can't simply use mozlog.commandline.add_logging_group(parser) here because
+    # Pytest's parser doesn't have the add_argument_group method Mozlog expects.
+    group = parser.getgroup("mozlog")
+
+    for name, (_class, _help) in six.iteritems(mozlog.commandline.log_formatters):
+        group.addoption("--log-{0}".format(name), action="append", help=_help)
+
+    formatter_options = six.iteritems(mozlog.commandline.fmt_options)
+    for name, (_class, _help, formatters, action) in formatter_options:
+        for formatter in formatters:
+            if formatter in mozlog.commandline.log_formatters:
+                group.addoption(
+                    "--log-{0}-{1}".format(formatter, name), action=action, help=_help
+                )
+
+
+def pytest_configure(config):
+    """Register the MozLog plugin (once, on the controller process)."""
+    # If using pytest-xdist for parallelization, only register plugin on master process
+    if not hasattr(config, "slaveinput"):
+        config.pluginmanager.register(MozLog())
+
+
+class MozLog(object):
+    """Pytest plugin translating pytest hooks into mozlog structured logging."""
+
+    def __init__(self):
+        # Guards against emitting suite_start more than once under xdist.
+        self._started = False
+        # Maps test nodeid -> (status, expected, message, stack).
+        self.results = {}
+        self.start_time = int(time.time() * 1000)  # in ms for Mozlog compatibility
+
+    def _log_suite_start(self, tests):
+        if not self._started:
+            # As this is called for each node when using pytest-xdist, we want
+            # to avoid logging multiple suite_start messages.
+            self.logger.suite_start(
+                tests=tests, time=self.start_time, run_info=self.run_info
+            )
+            self._started = True
+
+    def pytest_configure(self, config):
+        """Set up mozlog from pytest's parsed command-line options."""
+        mozlog.commandline.setup_logging(
+            "pytest",
+            config.known_args_namespace,
+            defaults={},
+            allow_unused_options=True,
+        )
+        self.logger = mozlog.get_default_logger(component="pytest")
+
+    def pytest_sessionstart(self, session):
+        """Called before test collection; records suite start time to log later"""
+        self.start_time = int(time.time() * 1000)  # in ms for Mozlog compatibility
+        self.run_info = getattr(session.config, "_metadata", None)
+
+    def pytest_collection_finish(self, session):
+        """Called after test collection is completed, just before tests are run (suite start)"""
+        self._log_suite_start([item.nodeid for item in session.items])
+
+    @pytest.mark.optionalhook
+    def pytest_xdist_node_collection_finished(self, node, ids):
+        """Called after each pytest-xdist node collection is completed"""
+        self._log_suite_start(ids)
+
+    def pytest_sessionfinish(self, session, exitstatus):
+        """Emit suite_end when the whole pytest session finishes."""
+        self.logger.suite_end()
+
+    def pytest_runtest_logstart(self, nodeid, location):
+        """Emit test_start for each test as pytest begins running it."""
+        self.logger.test_start(test=nodeid)
+
+    def pytest_runtest_logreport(self, report):
+        """Called 3 times per test (setup, call, teardown), indicated by report.when"""
+        test = report.nodeid
+        status = expected = "PASS"
+        message = stack = None
+        if hasattr(report, "wasxfail"):
+            expected = "FAIL"
+        if report.failed or report.outcome == "rerun":
+            # Failures outside the test body itself are harness errors.
+            status = "FAIL" if report.when == "call" else "ERROR"
+        if report.skipped:
+            status = "SKIP" if not hasattr(report, "wasxfail") else "FAIL"
+        if report.longrepr is not None:
+            longrepr = report.longrepr
+            if isinstance(longrepr, six.string_types):
+                # When using pytest-xdist, longrepr is serialised as a str
+                message = stack = longrepr
+                if longrepr.startswith("[XPASS(strict)]"):
+                    # Strict expected failures have an outcome of failed when
+                    # they unexpectedly pass.
+                    expected, status = ("FAIL", "PASS")
+            elif hasattr(longrepr, "reprcrash"):
+                # For failures, longrepr is a ReprExceptionInfo
+                crash = longrepr.reprcrash
+                message = "{0} (line {1})".format(crash.message, crash.lineno)
+                stack = longrepr.reprtraceback
+            elif hasattr(longrepr, "errorstring"):
+                message = longrepr.errorstring
+                stack = longrepr.errorstring
+            elif hasattr(longrepr, "__getitem__") and len(longrepr) == 3:
+                # For skips, longrepr is a tuple of (file, lineno, reason)
+                message = report.longrepr[-1]
+            else:
+                raise ValueError(
+                    "Unable to convert longrepr to message:\ntype %s\nfields: %s"
+                    % (longrepr.__class__, dir(longrepr))
+                )
+        if status != expected or expected != "PASS":
+            self.results[test] = (status, expected, message, stack)
+        if report.outcome == "rerun" or report.when == "teardown":
+            # Only emit test_end once per test, at teardown (or on rerun),
+            # using the worst result recorded across the three phases.
+            defaults = ("PASS", "PASS", None, None)
+            status, expected, message, stack = self.results.get(test, defaults)
+            self.logger.test_end(
+                test=test,
+                status=status,
+                expected=expected,
+                message=message,
+                stack=stack,
+            )
diff --git a/testing/mozbase/mozlog/mozlog/reader.py b/testing/mozbase/mozlog/mozlog/reader.py
new file mode 100644
index 0000000000..af5470f351
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/reader.py
@@ -0,0 +1,78 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+
+
+def read(log_f, raise_on_error=False):
+    """Return a generator that will return the entries in a structured log file.
+    Note that the caller must not close the file whilst the generator is still
+    in use.
+
+    :param log_f: file-like object containing the raw log entries, one per line
+    :param raise_on_error: boolean indicating whether ValueError should be raised
+                           for lines that cannot be decoded."""
+    while True:
+        line = log_f.readline()
+        if not line:
+            # This allows log_f to be a stream like stdout
+            break
+        try:
+            yield json.loads(line)
+        except ValueError:
+            # Malformed lines are silently skipped unless the caller opted in.
+            if raise_on_error:
+                raise
+
+
+def imap_log(log_iter, action_map):
+    """Create an iterator that will invoke a callback per action for each item in a
+    iterable containing structured log entries
+
+    :param log_iter: Iterator returning structured log entries
+    :param action_map: Dictionary mapping action name to callback function. Log items
+                       with actions not in this dictionary will be skipped.
+    """
+    for item in log_iter:
+        if item["action"] in action_map:
+            # Yield the callback's return value, not the raw entry.
+            yield action_map[item["action"]](item)
+
+
+def each_log(log_iter, action_map):
+    """Call a callback for each item in an iterable containing structured
+    log entries
+
+    :param log_iter: Iterator returning structured log entries
+    :param action_map: Dictionary mapping action name to callback function. Log items
+                       with actions not in this dictionary will be skipped.
+    """
+    for item in log_iter:
+        if item["action"] in action_map:
+            action_map[item["action"]](item)
+
+
+class LogHandler(object):
+    """Base class for objects that act as log handlers. A handler is a callable
+    that takes a log entry as the only argument.
+
+    Subclasses are expected to provide a method for each action type they
+    wish to handle, each taking a single argument for the test data.
+    For example a trivial subclass that just produces the id of each test as
+    it starts might be::
+
+      class StartIdHandler(LogHandler):
+          def test_start(data):
+              #For simplicity in the example pretend the id is always a string
+              return data["test"]
+    """
+
+    def __call__(self, data):
+        # Dispatch on the entry's action; entries with no matching method
+        # are ignored (implicitly returning None).
+        if hasattr(self, data["action"]):
+            handler = getattr(self, data["action"])
+            return handler(data)
+
+
+def handle_log(log_iter, handler):
+    """Call a handler for each item in a log, discarding the return value"""
+    for item in log_iter:
+        handler(item)
diff --git a/testing/mozbase/mozlog/mozlog/scripts/__init__.py b/testing/mozbase/mozlog/mozlog/scripts/__init__.py
new file mode 100644
index 0000000000..f1392507f8
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/scripts/__init__.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import argparse
+
+import format as formatlog
+import logmerge
+import six
+import unstable
+
+
+def get_parser():
+    """Build the top-level 'structlog' argument parser with one subcommand
+    per tool (unstable, format, logmerge), each delegating to that tool's
+    own parser and main function."""
+    parser = argparse.ArgumentParser(
+        "structlog", description="Tools for dealing with structured logs"
+    )
+
+    commands = {
+        "unstable": (unstable.get_parser, unstable.main),
+        "format": (formatlog.get_parser, formatlog.main),
+        "logmerge": (logmerge.get_parser, logmerge.main),
+    }
+
+    sub_parser = parser.add_subparsers(title="Subcommands")
+
+    for command, (parser_func, main_func) in six.iteritems(commands):
+        # Each subcommand parser is built without its own --help (False) and
+        # reused as a parent so its options appear on the subcommand.
+        parent = parser_func(False)
+        command_parser = sub_parser.add_parser(
+            command, description=parent.description, parents=[parent]
+        )
+        command_parser.set_defaults(func=main_func)
+
+    return parser
+
+
+def main():
+    """Parse command-line arguments and dispatch to the chosen subcommand."""
+    parser = get_parser()
+    args = parser.parse_args()
+    args.func(**vars(args))
diff --git a/testing/mozbase/mozlog/mozlog/scripts/format.py b/testing/mozbase/mozlog/mozlog/scripts/format.py
new file mode 100644
index 0000000000..27a643068f
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/scripts/format.py
@@ -0,0 +1,55 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import sys
+
+from .. import commandline, handlers, reader
+
+
+def get_parser(add_help=True):
+    """Build the argument parser for the 'format' subcommand.
+
+    :param add_help: pass False when this parser is used as a parent of
+                     another parser (avoids duplicate --help).
+    """
+    parser = argparse.ArgumentParser(
+        "format", description="Format a structured log stream", add_help=add_help
+    )
+    parser.add_argument(
+        "--input",
+        action="store",
+        default=None,
+        help="Filename to read from, defaults to stdin",
+    )
+    parser.add_argument(
+        "--output",
+        action="store",
+        default=None,
+        help="Filename to write to, defaults to stdout",
+    )
+    parser.add_argument(
+        "format", choices=list(commandline.log_formatters.keys()), help="Format to use"
+    )
+    return parser
+
+
+def main(**kwargs):
+ if kwargs["input"] is None:
+ input_file = sys.stdin
+ else:
+ input_file = open(kwargs["input"])
+ if kwargs["output"] is None:
+ output_file = sys.stdout
+ else:
+ output_file = open(kwargs["output"], "w")
+
+ formatter = commandline.log_formatters[kwargs["format"]][0]()
+
+ handler = handlers.StreamHandler(stream=output_file, formatter=formatter)
+
+ for data in reader.read(input_file):
+ handler(data)
+
+
+# Allow running this module directly as a standalone script.
+if __name__ == "__main__":
+    parser = get_parser()
+    args = parser.parse_args()
+    kwargs = vars(args)
+    main(**kwargs)
diff --git a/testing/mozbase/mozlog/mozlog/scripts/logmerge.py b/testing/mozbase/mozlog/mozlog/scripts/logmerge.py
new file mode 100644
index 0000000000..1114795387
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/scripts/logmerge.py
@@ -0,0 +1,90 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import json
+import os
+import sys
+import time
+from threading import current_thread
+
+from mozlog.reader import read
+
+
+def dump_entry(entry, output):
+ json.dump(entry, output)
+ output.write("\n")
+
+
+def fill_process_info(event):
+ # pylint: disable=W1633
+ event["time"] = int(round(time.time() * 1000))
+ event["thread"] = current_thread().name
+ event["pid"] = os.getpid()
+ return event
+
+
+def process_until(reader, output, action):
+ for entry in reader:
+ if entry["action"] == action:
+ return entry
+ dump_entry(entry, output)
+
+
+def process_until_suite_start(reader, output):
+ return process_until(reader, output, "suite_start")
+
+
+def process_until_suite_end(reader, output):
+ return process_until(reader, output, "suite_end")
+
+
+def validate_start_events(events):
+ for start in events:
+ if not start["run_info"] == events[0]["run_info"]:
+ print("Error: different run_info entries", file=sys.stderr)
+ sys.exit(1)
+
+
+def merge_start_events(events):
+ for start in events[1:]:
+ events[0]["tests"].extend(start["tests"])
+ return events[0]
+
+
+def get_parser(add_help=True):
+ parser = argparse.ArgumentParser(
+ "logmerge", description="Merge multiple log files.", add_help=add_help
+ )
+ parser.add_argument("-o", dest="output", help="output file, defaults to stdout")
+ parser.add_argument(
+ "files", metavar="File", type=str, nargs="+", help="file to be merged"
+ )
+ return parser
+
+
+def main(**kwargs):
+ if kwargs["output"] is None:
+ output = sys.stdout
+ else:
+ output = open(kwargs["output"], "w")
+ readers = [read(open(filename, "r")) for filename in kwargs["files"]]
+ start_events = [process_until_suite_start(reader, output) for reader in readers]
+ validate_start_events(start_events)
+ merged_start_event = merge_start_events(start_events)
+ dump_entry(fill_process_info(merged_start_event), output)
+
+ end_events = [process_until_suite_end(reader, output) for reader in readers]
+ dump_entry(fill_process_info(end_events[0]), output)
+
+ for reader in readers:
+ for entry in reader:
+ dump_entry(entry, output)
+
+
+if __name__ == "__main__":
+ parser = get_parser()
+ args = parser.parse_args()
+ kwargs = vars(args)
+ main(**kwargs)
diff --git a/testing/mozbase/mozlog/mozlog/scripts/unstable.py b/testing/mozbase/mozlog/mozlog/scripts/unstable.py
new file mode 100644
index 0000000000..4292aced2a
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/scripts/unstable.py
@@ -0,0 +1,148 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import json
+from collections import defaultdict
+
+import six
+
+from mozlog import reader
+
+
+class StatusHandler(reader.LogHandler):
+ def __init__(self):
+ self.run_info = None
+ self.statuses = defaultdict(
+ lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
+ )
+
+ def test_id(self, test):
+ if type(test) in (str, six.text_type):
+ return test
+ else:
+ return tuple(test)
+
+ def suite_start(self, item):
+ self.run_info = tuple(sorted(item.get("run_info", {}).items()))
+
+ def test_status(self, item):
+ self.statuses[self.run_info][self.test_id(item["test"])][item["subtest"]][
+ item["status"]
+ ] += 1
+
+ def test_end(self, item):
+ self.statuses[self.run_info][self.test_id(item["test"])][None][
+ item["status"]
+ ] += 1
+
+ def suite_end(self, item):
+ self.run_info = None
+
+
+def get_statuses(filenames):
+ handler = StatusHandler()
+
+ for filename in filenames:
+ with open(filename) as f:
+ reader.handle_log(reader.read(f), handler)
+
+ return handler.statuses
+
+
+def _filter(results_cmp):
+ def inner(statuses):
+ rv = defaultdict(lambda: defaultdict(dict))
+
+ for run_info, tests in six.iteritems(statuses):
+ for test, subtests in six.iteritems(tests):
+ for name, results in six.iteritems(subtests):
+ if results_cmp(results):
+ rv[run_info][test][name] = results
+
+ return rv
+
+ return inner
+
+
+filter_unstable = _filter(lambda x: len(x) > 1)
+filter_stable = _filter(lambda x: len(x) == 1)
+
+
+def group_results(data):
+ rv = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
+
+ for run_info, tests in six.iteritems(data):
+ for test, subtests in six.iteritems(tests):
+ for name, results in six.iteritems(subtests):
+ for status, number in six.iteritems(results):
+ rv[test][name][status] += number
+ return rv
+
+
+def print_results(data):
+ for run_info, tests in six.iteritems(data):
+ run_str = (
+ " ".join("%s:%s" % (k, v) for k, v in run_info)
+ if run_info
+ else "No Run Info"
+ )
+ print(run_str)
+ print("=" * len(run_str))
+ print_run(tests)
+
+
+def print_run(tests):
+ for test, subtests in sorted(tests.items()):
+ print("\n" + str(test))
+ print("-" * len(test))
+ for name, results in six.iteritems(subtests):
+ print(
+ "[%s]: %s"
+ % (
+ name if name is not None else "",
+ " ".join("%s (%i)" % (k, v) for k, v in six.iteritems(results)),
+ )
+ )
+
+
+def get_parser(add_help=True):
+ parser = argparse.ArgumentParser(
+ "unstable",
+ description="List tests that don't give consistent "
+ "results from one or more runs.",
+ add_help=add_help,
+ )
+ parser.add_argument(
+ "--json", action="store_true", default=False, help="Output in JSON format"
+ )
+ parser.add_argument(
+ "--group",
+ action="store_true",
+ default=False,
+ help="Group results from different run types",
+ )
+ parser.add_argument("log_file", nargs="+", help="Log files to read")
+ return parser
+
+
+def main(**kwargs):
+ unstable = filter_unstable(get_statuses(kwargs["log_file"]))
+ if kwargs["group"]:
+ unstable = group_results(unstable)
+
+ if kwargs["json"]:
+ print(json.dumps(unstable))
+ else:
+ if not kwargs["group"]:
+ print_results(unstable)
+ else:
+ print_run(unstable)
+
+
+if __name__ == "__main__":
+ parser = get_parser()
+ args = parser.parse_args()
+ kwargs = vars(args)
+ main(**kwargs)
diff --git a/testing/mozbase/mozlog/mozlog/stdadapter.py b/testing/mozbase/mozlog/mozlog/stdadapter.py
new file mode 100644
index 0000000000..0c13d61e43
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/stdadapter.py
@@ -0,0 +1,50 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import logging
+
+from .structuredlog import StructuredLogger, log_levels
+
+
+class UnstructuredHandler(logging.Handler):
+ def __init__(self, name=None, level=logging.NOTSET):
+ self.structured = StructuredLogger(name)
+ logging.Handler.__init__(self, level=level)
+
+ def emit(self, record):
+ if record.levelname in log_levels:
+ log_func = getattr(self.structured, record.levelname.lower())
+ else:
+ log_func = self.logger.debug
+ log_func(record.msg)
+
+ def handle(self, record):
+ self.emit(record)
+
+
+class LoggingWrapper(object):
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+ self.wrapped.addHandler(
+ UnstructuredHandler(
+ self.wrapped.name, logging.getLevelName(self.wrapped.level)
+ )
+ )
+
+ def add_handler(self, handler):
+ self.addHandler(handler)
+
+ def remove_handler(self, handler):
+ self.removeHandler(handler)
+
+ def __getattr__(self, name):
+ return getattr(self.wrapped, name)
+
+
+def std_logging_adapter(logger):
+ """Adapter for stdlib logging so that it produces structured
+ messages rather than standard logging messages
+
+ :param logger: logging.Logger to wrap"""
+ return LoggingWrapper(logger)
diff --git a/testing/mozbase/mozlog/mozlog/structuredlog.py b/testing/mozbase/mozlog/mozlog/structuredlog.py
new file mode 100644
index 0000000000..4a56087996
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/structuredlog.py
@@ -0,0 +1,820 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import sys
+import time
+import traceback
+from multiprocessing import current_process
+from threading import Lock, current_thread
+
+import six
+
+from .logtypes import (
+ Any,
+ Boolean,
+ Dict,
+ Int,
+ List,
+ Nullable,
+ Status,
+ SubStatus,
+ TestId,
+ TestList,
+ Tuple,
+ Unicode,
+ convertor_registry,
+ log_action,
+)
+
+"""Structured Logging for recording test results.
+
+Allowed actions, and subfields:
+ suite_start
+ tests - List of test names
+ name - Name for the suite
+ run_info - Dictionary of run properties
+
+ add_subsuite
+ name - Name for the subsuite (must be unique)
+ run_info - Updates to the suite run_info (optional)
+
+ group_start
+ name - Name for the test group
+
+ group_end
+ name - Name for the test group
+
+ suite_end
+
+ test_start
+ test - ID for the test
+ path - Relative path to test (optional)
+ subsuite - Name of the subsuite to which test belongs (optional)
+ group - Name of the test group for the incoming test (optional)
+
+ test_end
+ test - ID for the test
+ status [PASS | FAIL | OK | ERROR | TIMEOUT | CRASH |
+ ASSERT PRECONDITION_FAILED | SKIP] - test status
+ expected [As for status] - Status that the test was expected to get,
+ or absent if the test got the expected status
+ extra - Dictionary of harness-specific extra information e.g. debug info
+ known_intermittent - List of known intermittent statuses that should
+ not fail a test. eg. ['FAIL', 'TIMEOUT']
+ subsuite - Name of the subsuite to which test belongs (optional)
+ group - Name of the test group for the incoming test (optional)
+
+ test_status
+ test - ID for the test
+ subtest - Name of the subtest
+ status [PASS | FAIL | TIMEOUT |
+ PRECONDITION_FAILED | NOTRUN | SKIP] - test status
+ expected [As for status] - Status that the subtest was expected to get,
+ or absent if the subtest got the expected status
+ known_intermittent - List of known intermittent statuses that should
+ not fail a test. eg. ['FAIL', 'TIMEOUT']
+ subsuite - Name of the subsuite to which test belongs (optional)
+ group - Name of the test group for the incoming test (optional)
+
+ process_output
+ process - PID of the process
+ command - Command line of the process
+ data - Output data from the process
+ test - ID of the test that the process was running (optional)
+ subsuite - Name of the subsuite that the process was running (optional)
+
+ assertion_count
+ count - Number of assertions produced
+ min_expected - Minimum expected number of assertions
+ max_expected - Maximum expected number of assertions
+ subsuite - Name of the subsuite for the tests that ran (optional)
+
+ lsan_leak
+ frames - List of stack frames from the leak report
+ scope - An identifier for the set of tests run during the browser session
+ (e.g. a directory name)
+ allowed_match - A stack frame in the list that matched a rule meaning the
+ leak is expected
+ subsuite - Name of the subsuite for the tests that ran (optional)
+
+ lsan_summary
+ bytes - Number of bytes leaked
+ allocations - Number of allocations
+ allowed - Boolean indicating whether all detected leaks matched allow rules
+ subsuite - Name of the subsuite for the tests that ran (optional)
+
+ mozleak_object
+ process - Process that leaked
+ bytes - Number of bytes that leaked
+ name - Name of the object that leaked
+ scope - An identifier for the set of tests run during the browser session
+ (e.g. a directory name)
+ allowed - Boolean indicating whether the leak was permitted
+ subsuite - Name of the subsuite for the tests that ran (optional)
+
+ log
+ level [CRITICAL | ERROR | WARNING |
+ INFO | DEBUG] - level of the logging message
+ message - Message to log
+
+Subfields for all messages:
+ action - the action type of the current message
+ time - the timestamp in ms since the epoch of the log message
+ thread - name for the thread emitting the message
+ pid - id of the python process in which the logger is running
+ source - name for the source emitting the message
+ component - name of the subcomponent emitting the message
+"""
+
+_default_logger_name = None
+
+
+def get_default_logger(component=None):
+ """Gets the default logger if available, optionally tagged with component
+ name. Will return None if not yet set
+
+ :param component: The component name to tag log messages with
+ """
+ global _default_logger_name
+
+ if not _default_logger_name:
+ return None
+
+ return StructuredLogger(_default_logger_name, component=component)
+
+
+def set_default_logger(default_logger):
+ """Sets the default logger to logger.
+
+ It can then be retrieved with :py:func:`get_default_logger`
+
+ Note that :py:func:`~mozlog.commandline.setup_logging` will
+ set a default logger for you, so there should be no need to call this
+ function if you're using setting up logging that way (recommended).
+
+ :param default_logger: The logger to set to default.
+ """
+ global _default_logger_name
+
+ _default_logger_name = default_logger.name
+
+
+log_levels = dict(
+ (k.upper(), v)
+ for v, k in enumerate(["critical", "error", "warning", "info", "debug"])
+)
+
+lint_levels = ["ERROR", "WARNING"]
+
+
+def log_actions():
+ """Returns the set of actions implemented by mozlog."""
+ return set(convertor_registry.keys())
+
+
+class LoggerShutdownError(Exception):
+ """Raised when attempting to log after logger.shutdown() has been called."""
+
+
+class LoggerState(object):
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.handlers = []
+ self.subsuites = set()
+ self.running_tests = set()
+ self.suite_started = False
+ self.component_states = {}
+ self.has_shutdown = False
+
+
+class ComponentState(object):
+ def __init__(self):
+ self.filter_ = None
+
+
+class StructuredLogger(object):
+ _lock = Lock()
+ _logger_states = {}
+ """Create a structured logger with the given name
+
+ :param name: The name of the logger.
+ :param component: A subcomponent that the logger belongs to (typically a library name)
+ """
+
+ def __init__(self, name, component=None):
+ self.name = name
+ self.component = component
+
+ with self._lock:
+ if name not in self._logger_states:
+ self._logger_states[name] = LoggerState()
+
+ if component not in self._logger_states[name].component_states:
+ self._logger_states[name].component_states[component] = ComponentState()
+
+ self._state = self._logger_states[name]
+ self._component_state = self._state.component_states[component]
+
+ def add_handler(self, handler):
+ """Add a handler to the current logger"""
+ self._state.handlers.append(handler)
+
+ def remove_handler(self, handler):
+ """Remove a handler from the current logger"""
+ self._state.handlers.remove(handler)
+
+ def reset_state(self):
+ """Resets the logger to a brand new state. This means all handlers
+ are removed, running tests are discarded and components are reset.
+ """
+ self._state.reset()
+ self._component_state = self._state.component_states[
+ self.component
+ ] = ComponentState()
+
+ def send_message(self, topic, command, *args):
+ """Send a message to each handler configured for this logger. This
+ part of the api is useful to those users requiring dynamic control
+ of a handler's behavior.
+
+ :param topic: The name used by handlers to subscribe to a message.
+ :param command: The name of the command to issue.
+ :param args: Any arguments known to the target for specialized
+ behavior.
+ """
+ rv = []
+ for handler in self._state.handlers:
+ if hasattr(handler, "message_handler"):
+ rv += handler.message_handler.handle_message(topic, command, *args)
+ return rv
+
+ @property
+ def has_shutdown(self):
+ """Property indicating whether the logger has been shutdown"""
+ return self._state.has_shutdown
+
+ @property
+ def handlers(self):
+ """A list of handlers that will be called when a
+ message is logged from this logger"""
+ return self._state.handlers
+
+ @property
+ def component_filter(self):
+ return self._component_state.filter_
+
+ @component_filter.setter
+ def component_filter(self, value):
+ self._component_state.filter_ = value
+
+ def log_raw(self, raw_data):
+ if "action" not in raw_data:
+ raise ValueError
+
+ action = raw_data["action"]
+ converted_data = convertor_registry[action].convert_known(**raw_data)
+ for k, v in six.iteritems(raw_data):
+ if (
+ k not in converted_data
+ and k not in convertor_registry[action].optional_args
+ ):
+ converted_data[k] = v
+
+ data = self._make_log_data(action, converted_data)
+
+ if action in ("test_status", "test_end"):
+ if (
+ data["expected"] == data["status"]
+ or data["status"] == "SKIP"
+ or "expected" not in raw_data
+ ):
+ del data["expected"]
+
+ if not self._ensure_suite_state(action, data):
+ return
+
+ self._handle_log(data)
+
+ def _log_data(self, action, data=None):
+ if data is None:
+ data = {}
+
+ if data.get("subsuite") and data["subsuite"] not in self._state.subsuites:
+ self.error(f"Unrecognised subsuite {data['subsuite']}")
+ return
+
+ log_data = self._make_log_data(action, data)
+ self._handle_log(log_data)
+
+ def _handle_log(self, data):
+ if self._state.has_shutdown:
+ raise LoggerShutdownError(
+ "{} action received after shutdown.".format(data["action"])
+ )
+
+ with self._lock:
+ if self.component_filter:
+ data = self.component_filter(data)
+ if data is None:
+ return
+
+ for handler in self.handlers:
+ try:
+ handler(data)
+ except Exception:
+ # Write the exception details directly to stderr because
+ # log() would call this method again which is currently locked.
+ print(
+ "%s: Failure calling log handler:" % __name__,
+ file=sys.__stderr__,
+ )
+ print(traceback.format_exc(), file=sys.__stderr__)
+
+ def _make_log_data(self, action, data):
+ all_data = {
+ "action": action,
+ "time": int(time.time() * 1000),
+ "thread": current_thread().name,
+ "pid": current_process().pid,
+ "source": self.name,
+ }
+ if self.component:
+ all_data["component"] = self.component
+ all_data.update(data)
+ return all_data
+
+ def _ensure_suite_state(self, action, data):
+ if action == "suite_start":
+ if self._state.suite_started:
+ # limit data to reduce unnecessary log bloat
+ self.error(
+ "Got second suite_start message before suite_end. "
+ + "Logged with data: {}".format(json.dumps(data)[:100])
+ )
+ return False
+ self._state.suite_started = True
+ elif action == "suite_end":
+ if not self._state.suite_started:
+ self.error(
+ "Got suite_end message before suite_start. "
+ + "Logged with data: {}".format(json.dumps(data))
+ )
+ return False
+ self._state.suite_started = False
+ return True
+
+ @log_action(
+ TestList("tests"),
+ Unicode("name", default=None, optional=True),
+ Dict(Any, "run_info", default=None, optional=True),
+ Dict(Any, "version_info", default=None, optional=True),
+ Dict(Any, "device_info", default=None, optional=True),
+ Dict(Any, "extra", default=None, optional=True),
+ )
+ def suite_start(self, data):
+ """Log a suite_start message
+
+ :param dict tests: Test identifiers that will be run in the suite, keyed by group name.
+ :param str name: Optional name to identify the suite.
+ :param dict run_info: Optional information typically provided by mozinfo.
+ :param dict version_info: Optional target application version information provided
+ by mozversion.
+ :param dict device_info: Optional target device information provided by mozdevice.
+ """
+ if not self._ensure_suite_state("suite_start", data):
+ return
+
+ self._log_data("suite_start", data)
+
+ @log_action(
+ Unicode("name"),
+ Dict(Any, "run_info", default=None, optional=True),
+ )
+ def add_subsuite(self, data):
+ """Log a add_subsuite message
+
+ :param str name: Name to identify the subsuite.
+ :param dict run_info: Optional information about the subsuite. This updates the suite run_info.
+ """
+ if data["name"] in self._state.subsuites:
+ return
+ run_info = data.get("run_info", {"subsuite": data["name"]})
+ if "subsuite" not in run_info:
+ run_info = run_info.copy()
+ run_info["subsuite"] = data["name"]
+ data["run_info"] = run_info
+ self._state.subsuites.add(data["name"])
+ self._log_data("add_subsuite", data)
+
+ @log_action(
+ Unicode("name"),
+ )
+ def group_start(self, data):
+ """Log a group_start message
+
+ :param str name: Name to identify the test group.
+ """
+ self._log_data("group_start", data)
+
+ @log_action(
+ Unicode("name"),
+ )
+ def group_end(self, data):
+ """Log a group_end message
+
+ :param str name: Name to identify the test group.
+ """
+ self._log_data("group_end", data)
+
+ @log_action(Dict(Any, "extra", default=None, optional=True))
+ def suite_end(self, data):
+ """Log a suite_end message"""
+ if not self._ensure_suite_state("suite_end", data):
+ return
+
+ self._state.subsuites.clear()
+
+ self._log_data("suite_end", data)
+
+ @log_action(
+ TestId("test"),
+ Unicode("path", default=None, optional=True),
+ Unicode("subsuite", default=None, optional=True),
+ Unicode("group", default=None, optional=True),
+ )
+ def test_start(self, data):
+ """Log a test_start message
+
+ :param test: Identifier of the test that will run.
+ :param path: Path to test relative to some base (typically the root of
+ the source tree).
+ :param subsuite: Optional name of the subsuite to which the test belongs.
+ :param group: Optional name of the test group or manifest name (useful
+ when running in paralle)
+ """
+ if not self._state.suite_started:
+ self.error(
+ "Got test_start message before suite_start for test %s" % data["test"]
+ )
+ return
+ test_key = (data.get("subsuite"), data["test"])
+ if test_key in self._state.running_tests:
+ self.error("test_start for %s logged while in progress." % data["test"])
+ return
+ self._state.running_tests.add(test_key)
+ self._log_data("test_start", data)
+
+ @log_action(
+ TestId("test"),
+ Unicode("subtest"),
+ SubStatus("status"),
+ SubStatus("expected", default="PASS"),
+ Unicode("message", default=None, optional=True),
+ Unicode("stack", default=None, optional=True),
+ Dict(Any, "extra", default=None, optional=True),
+ List(SubStatus, "known_intermittent", default=None, optional=True),
+ Unicode("subsuite", default=None, optional=True),
+ Unicode("group", default=None, optional=True),
+ )
+ def test_status(self, data):
+ """
+ Log a test_status message indicating a subtest result. Tests that
+ do not have subtests are not expected to produce test_status messages.
+
+ :param test: Identifier of the test that produced the result.
+ :param subtest: Name of the subtest.
+ :param status: Status string indicating the subtest result
+ :param expected: Status string indicating the expected subtest result.
+ :param message: Optional string containing a message associated with the result.
+ :param stack: Optional stack trace encountered during test execution.
+ :param extra: Optional suite-specific data associated with the test result.
+ :param known_intermittent: Optional list of string expected intermittent statuses
+ :param subsuite: Optional name of the subsuite to which the test belongs.
+ :param group: Optional name of the test group or manifest name (useful
+ when running in parallel)
+ """
+
+ if data["expected"] == data["status"] or data["status"] == "SKIP":
+ del data["expected"]
+
+ test_key = (data.get("subsuite"), data["test"])
+ if test_key not in self._state.running_tests:
+ self.error(
+ "test_status for %s logged while not in progress. "
+ "Logged with data: %s" % (data["test"], json.dumps(data))
+ )
+ return
+
+ self._log_data("test_status", data)
+
+ @log_action(
+ TestId("test"),
+ Status("status"),
+ Status("expected", default="OK"),
+ Unicode("message", default=None, optional=True),
+ Unicode("stack", default=None, optional=True),
+ Dict(Any, "extra", default=None, optional=True),
+ List(Status, "known_intermittent", default=None, optional=True),
+ Unicode("subsuite", default=None, optional=True),
+ Unicode("group", default=None, optional=True),
+ )
+ def test_end(self, data):
+ """
+ Log a test_end message indicating that a test completed. For tests
+ with subtests this indicates whether the overall test completed without
+ errors. For tests without subtests this indicates the test result
+ directly.
+
+ :param test: Identifier of the test that produced the result.
+ :param status: Status string indicating the test result
+ :param expected: Status string indicating the expected test result.
+ :param message: Optional string containing a message associated with the result.
+ :param stack: Optional stack trace encountered during test execution.
+ :param extra: Optional suite-specific data associated with the test result.
+ :param subsuite: Optional name of the subsuite to which the test belongs.
+ :param group: Optional name of the test group or manifest name (useful
+ when running in parallel)
+ """
+
+ if data["expected"] == data["status"] or data["status"] == "SKIP":
+ del data["expected"]
+
+ test_key = (data.get("subsuite"), data["test"])
+ if test_key not in self._state.running_tests:
+ self.error(
+ "test_end for %s logged while not in progress. "
+ "Logged with data: %s" % (data["test"], json.dumps(data))
+ )
+ else:
+ self._state.running_tests.remove(test_key)
+ self._log_data("test_end", data)
+
+ @log_action(
+ Unicode("process"),
+ Unicode("data"),
+ Unicode("command", default=None, optional=True),
+ TestId("test", default=None, optional=True),
+ Unicode("subsuite", default=None, optional=True),
+ )
+ def process_output(self, data):
+ """Log output from a managed process.
+
+ :param process: A unique identifier for the process producing the output
+ (typically the pid)
+ :param data: The output to log
+ :param command: Optional string representing the full command line used to start
+ the process.
+ :param test: Optional ID of the test which the process was running.
+ :param subsuite: Optional name of the subsuite which the process was running.
+ """
+ self._log_data("process_output", data)
+
+ @log_action(
+ Unicode("process", default=None),
+ Unicode("signature", default="[Unknown]"),
+ TestId("test", default=None, optional=True),
+ Unicode("minidump_path", default=None, optional=True),
+ Unicode("minidump_extra", default=None, optional=True),
+ Int("stackwalk_retcode", default=None, optional=True),
+ Unicode("stackwalk_stdout", default=None, optional=True),
+ Unicode("stackwalk_stderr", default=None, optional=True),
+ Unicode("reason", default=None, optional=True),
+ Unicode("java_stack", default=None, optional=True),
+ Unicode("process_type", default=None, optional=True),
+ List(Unicode, "stackwalk_errors", default=None),
+ Unicode("subsuite", default=None, optional=True),
+ )
+ def crash(self, data):
+ if data["stackwalk_errors"] is None:
+ data["stackwalk_errors"] = []
+
+ self._log_data("crash", data)
+
+ @log_action(Unicode("group", default=None), Unicode("message", default=None))
+ def shutdown_failure(self, data):
+ self._log_data("shutdown_failure", data)
+
+ @log_action(
+ Unicode("primary", default=None), List(Unicode, "secondary", default=None)
+ )
+ def valgrind_error(self, data):
+ self._log_data("valgrind_error", data)
+
+ @log_action(
+ Unicode("process"),
+ Unicode("command", default=None, optional=True),
+ Unicode("subsuite", default=None, optional=True),
+ )
+ def process_start(self, data):
+ """Log start event of a process.
+
+ :param process: A unique identifier for the process producing the
+ output (typically the pid)
+ :param command: Optional string representing the full command line used to
+ start the process.
+ :param subsuite: Optional name of the subsuite using the process.
+ """
+ self._log_data("process_start", data)
+
+ @log_action(
+ Unicode("process"),
+ Int("exitcode"),
+ Unicode("command", default=None, optional=True),
+ Unicode("subsuite", default=None, optional=True),
+ )
+ def process_exit(self, data):
+ """Log exit event of a process.
+
+ :param process: A unique identifier for the process producing the
+ output (typically the pid)
+ :param exitcode: the exit code
+ :param command: Optional string representing the full command line used to
+ start the process.
+ :param subsuite: Optional name of the subsuite using the process.
+ """
+ self._log_data("process_exit", data)
+
+ @log_action(
+ TestId("test"),
+ Int("count"),
+ Int("min_expected"),
+ Int("max_expected"),
+ Unicode("subsuite", default=None, optional=True),
+ )
+ def assertion_count(self, data):
+ """Log count of assertions produced when running a test.
+
+ :param count: Number of assertions produced
+ :param min_expected: Minimum expected number of assertions
+ :param max_expected: Maximum expected number of assertions
+ :param subsuite: Optional name of the subsuite for the tests that ran
+ """
+ self._log_data("assertion_count", data)
+
+ @log_action(
+ List(Unicode, "frames"),
+ Unicode("scope", optional=True, default=None),
+ Unicode("allowed_match", optional=True, default=None),
+ Unicode("subsuite", default=None, optional=True),
+ )
+ def lsan_leak(self, data):
+ self._log_data("lsan_leak", data)
+
+ @log_action(
+ Int("bytes"),
+ Int("allocations"),
+ Boolean("allowed", optional=True, default=False),
+ Unicode("subsuite", default=None, optional=True),
+ )
+ def lsan_summary(self, data):
+ self._log_data("lsan_summary", data)
+
+ @log_action(
+ Unicode("process"),
+ Int("bytes"),
+ Unicode("name"),
+ Unicode("scope", optional=True, default=None),
+ Boolean("allowed", optional=True, default=False),
+ Unicode("subsuite", default=None, optional=True),
+ )
+ def mozleak_object(self, data):
+ self._log_data("mozleak_object", data)
+
+ @log_action(
+ Unicode("process"),
+ Nullable(Int, "bytes"),
+ Int("threshold"),
+ List(Unicode, "objects"),
+ Unicode("scope", optional=True, default=None),
+ Boolean("induced_crash", optional=True, default=False),
+ Boolean("ignore_missing", optional=True, default=False),
+ Unicode("subsuite", default=None, optional=True),
+ )
+ def mozleak_total(self, data):
+ self._log_data("mozleak_total", data)
+
+ @log_action()
+ def shutdown(self, data):
+ """Shutdown the logger.
+
+ This logs a 'shutdown' action after which any further attempts to use
+ the logger will raise a :exc:`LoggerShutdownError`.
+
+ This is also called implicitly from the destructor or
+ when exiting the context manager.
+ """
+ self._log_data("shutdown", data)
+ self._state.has_shutdown = True
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc, val, tb):
+ self.shutdown()
+
+
+def _log_func(level_name):
+ @log_action(Unicode("message"), Any("exc_info", default=False))
+ def log(self, data):
+ exc_info = data.pop("exc_info", None)
+ if exc_info:
+ if not isinstance(exc_info, tuple):
+ exc_info = sys.exc_info()
+ if exc_info != (None, None, None):
+ bt = traceback.format_exception(*exc_info)
+ data["stack"] = "\n".join(bt)
+
+ data["level"] = level_name
+ self._log_data("log", data)
+
+ log.__doc__ = (
+ """Log a message with level %s
+
+:param message: The string message to log
+:param exc_info: Either a boolean indicating whether to include a traceback
+ derived from sys.exc_info() or a three-item tuple in the
+ same format as sys.exc_info() containing exception information
+ to log.
+"""
+ % level_name
+ )
+ log.__name__ = str(level_name).lower()
+ return log
+
+
+def _lint_func(level_name):
+ @log_action(
+ Unicode("path"),
+ Unicode("message", default=""),
+ Int("lineno", default=0),
+ Int("column", default=None, optional=True),
+ Unicode("hint", default=None, optional=True),
+ Unicode("source", default=None, optional=True),
+ Unicode("rule", default=None, optional=True),
+ Tuple((Int, Int), "lineoffset", default=None, optional=True),
+ Unicode("linter", default=None, optional=True),
+ )
+ def lint(self, data):
+ data["level"] = level_name
+ self._log_data("lint", data)
+
+ lint.__doc__ = """Log an error resulting from a failed lint check
+
+ :param linter: name of the linter that flagged this error
+ :param path: path to the file containing the error
+ :param message: text describing the error
+ :param lineno: line number that contains the error
+ :param column: column containing the error
+ :param hint: suggestion for fixing the error (optional)
+ :param source: source code context of the error (optional)
+ :param rule: name of the rule that was violated (optional)
+ :param lineoffset: denotes an error spans multiple lines, of the form
+ (<lineno offset>, <num lines>) (optional)
+ """
+ lint.__name__ = str("lint_%s" % level_name)
+ return lint
+
+
+# Create all the methods on StructuredLog for log/lint levels
+for level_name in log_levels:
+ setattr(StructuredLogger, level_name.lower(), _log_func(level_name))
+
+for level_name in lint_levels:
+ level_name = level_name.lower()
+ name = "lint_%s" % level_name
+ setattr(StructuredLogger, name, _lint_func(level_name))
+
+
+class StructuredLogFileLike(object):
+ """Wrapper for file-like objects to redirect writes to logger
+ instead. Each call to `write` becomes a single log entry of type `log`.
+
+ When using this it is important that the callees i.e. the logging
+ handlers do not themselves try to write to the wrapped file as this
+ will cause infinite recursion.
+
+ :param logger: `StructuredLogger` to which to redirect the file write operations.
+ :param level: log level to use for each write.
+ :param prefix: String prefix to prepend to each log entry.
+ """
+
+ def __init__(self, logger, level="info", prefix=None):
+ self.logger = logger
+ self.log_func = getattr(self.logger, level)
+ self.prefix = prefix
+
+ def write(self, data):
+ if data.endswith("\n"):
+ data = data[:-1]
+ if data.endswith("\r"):
+ data = data[:-1]
+ if self.prefix is not None:
+ data = "%s: %s" % (self.prefix, data)
+ self.log_func(data)
+
+ def flush(self):
+ pass
diff --git a/testing/mozbase/mozlog/mozlog/unstructured/__init__.py b/testing/mozbase/mozlog/mozlog/unstructured/__init__.py
new file mode 100644
index 0000000000..24b8f5b405
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/unstructured/__init__.py
@@ -0,0 +1,8 @@
+# flake8: noqa
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from .logger import *
+from .loggingmixin import LoggingMixin
+from .loglistener import LogMessageServer
diff --git a/testing/mozbase/mozlog/mozlog/unstructured/logger.py b/testing/mozbase/mozlog/mozlog/unstructured/logger.py
new file mode 100644
index 0000000000..db436c9d11
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/unstructured/logger.py
@@ -0,0 +1,191 @@
+# flake8: noqa
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+
+# Some of the build slave environments don't see the following when doing
+# 'from logging import *'
+# see https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c35
+from logging import *
+from logging import addLevelName, basicConfig, debug
+from logging import getLogger as getSysLogger
+from logging import getLoggerClass, info, setLoggerClass, shutdown
+
+_default_level = INFO
+_LoggerClass = getLoggerClass()
+
+# Define mozlog specific log levels
+START = _default_level + 1
+END = _default_level + 2
+PASS = _default_level + 3
+KNOWN_FAIL = _default_level + 4
+FAIL = _default_level + 5
+CRASH = _default_level + 6
+# Define associated text of log levels
+addLevelName(START, "TEST-START")
+addLevelName(END, "TEST-END")
+addLevelName(PASS, "TEST-PASS")
+addLevelName(KNOWN_FAIL, "TEST-KNOWN-FAIL")
+addLevelName(FAIL, "TEST-UNEXPECTED-FAIL")
+addLevelName(CRASH, "PROCESS-CRASH")
+
+
+class MozLogger(_LoggerClass):
+ """
+ MozLogger class which adds some convenience log levels
+ related to automated testing in Mozilla and ability to
+ output structured log messages.
+ """
+
+ def testStart(self, message, *args, **kwargs):
+ """Logs a test start message"""
+ self.log(START, message, *args, **kwargs)
+
+ def testEnd(self, message, *args, **kwargs):
+ """Logs a test end message"""
+ self.log(END, message, *args, **kwargs)
+
+ def testPass(self, message, *args, **kwargs):
+ """Logs a test pass message"""
+ self.log(PASS, message, *args, **kwargs)
+
+ def testFail(self, message, *args, **kwargs):
+ """Logs a test fail message"""
+ self.log(FAIL, message, *args, **kwargs)
+
+ def testKnownFail(self, message, *args, **kwargs):
+ """Logs a test known fail message"""
+ self.log(KNOWN_FAIL, message, *args, **kwargs)
+
+ def processCrash(self, message, *args, **kwargs):
+ """Logs a process crash message"""
+ self.log(CRASH, message, *args, **kwargs)
+
+ def log_structured(self, action, params=None):
+ """Logs a structured message object."""
+ if params is None:
+ params = {}
+
+ level = params.get("_level", _default_level)
+ if isinstance(level, int):
+ params["_level"] = getLevelName(level)
+ else:
+ params["_level"] = level
+ level = getLevelName(level.upper())
+
+ # If the logger is fed a level number unknown to the logging
+ # module, getLevelName will return a string. Unfortunately,
+ # the logging module will raise a type error elsewhere if
+ # the level is not an integer.
+ if not isinstance(level, int):
+ level = _default_level
+
+ params["action"] = action
+
+ # The message can be None. This is expected, and shouldn't cause
+ # unstructured formatters to fail.
+ message = params.get("_message")
+
+ self.log(level, message, extra={"params": params})
+
+
+class JSONFormatter(Formatter):
+ """Log formatter for emitting structured JSON entries."""
+
+ def format(self, record):
+ # Default values determined by logger metadata
+ # pylint: disable=W1633
+ output = {
+ "_time": int(round(record.created * 1000, 0)),
+ "_namespace": record.name,
+ "_level": getLevelName(record.levelno),
+ }
+
+ # If this message was created by a call to log_structured,
+ # anything specified by the caller's params should act as
+ # an override.
+ output.update(getattr(record, "params", {}))
+
+ if record.msg and output.get("_message") is None:
+ # For compatibility with callers using the printf like
+ # API exposed by python logging, call the default formatter.
+ output["_message"] = Formatter.format(self, record)
+
+ return json.dumps(output, indent=output.get("indent"))
+
+
+class MozFormatter(Formatter):
+ """
+ MozFormatter class used to standardize formatting
+ If a different format is desired, this can be explicitly
+ overridden with the log handler's setFormatter() method
+ """
+
+ level_length = 0
+ max_level_length = len("TEST-START")
+
+ def __init__(self, include_timestamp=False):
+ """
+ Formatter.__init__ has fmt and datefmt parameters that won't have
+ any effect on a MozFormatter instance.
+
+ :param include_timestamp: if True, include formatted time at the
+ beginning of the message
+ """
+ self.include_timestamp = include_timestamp
+ Formatter.__init__(self)
+
+ def format(self, record):
+ # Handles padding so record levels align nicely
+ if len(record.levelname) > self.level_length:
+ pad = 0
+ if len(record.levelname) <= self.max_level_length:
+ self.level_length = len(record.levelname)
+ else:
+ pad = self.level_length - len(record.levelname) + 1
+ sep = "|".rjust(pad)
+ fmt = "%(name)s %(levelname)s " + sep + " %(message)s"
+ if self.include_timestamp:
+ fmt = "%(asctime)s " + fmt
+ # this protected member is used to define the format
+ # used by the base Formatter's method
+ self._fmt = fmt
+ return Formatter(fmt=fmt).format(record)
+
+
+def getLogger(name, handler=None):
+ """
+ Returns the logger with the specified name.
+ If the logger doesn't exist, it is created.
+ If handler is specified, adds it to the logger. Otherwise a default handler
+ that logs to standard output will be used.
+
+ :param name: The name of the logger to retrieve
+ :param handler: A handler to add to the logger. If the logger already exists,
+ and a handler is specified, an exception will be raised. To
+ add a handler to an existing logger, call that logger's
+ addHandler method.
+ """
+ setLoggerClass(MozLogger)
+
+ if name in Logger.manager.loggerDict:
+ if handler:
+ raise ValueError(
+ "The handler parameter requires "
+ + "that a logger by this name does "
+ + "not already exist"
+ )
+ return Logger.manager.loggerDict[name]
+
+ logger = getSysLogger(name)
+ logger.setLevel(_default_level)
+
+ if handler is None:
+ handler = StreamHandler()
+ handler.setFormatter(MozFormatter())
+
+ logger.addHandler(handler)
+ logger.propagate = False
+ return logger
diff --git a/testing/mozbase/mozlog/mozlog/unstructured/loggingmixin.py b/testing/mozbase/mozlog/mozlog/unstructured/loggingmixin.py
new file mode 100644
index 0000000000..727e8f1a7d
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/unstructured/loggingmixin.py
@@ -0,0 +1,42 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from .logger import Logger, getLogger
+
+
+class LoggingMixin(object):
+ """Expose a subset of logging functions to an inheriting class."""
+
+ def set_logger(self, logger_instance=None, name=None):
+ """Method for setting the underlying logger instance to be used."""
+
+ if logger_instance and not isinstance(logger_instance, Logger):
+ raise ValueError("logger_instance must be an instance of Logger")
+
+ if name is None:
+ name = ".".join([self.__module__, self.__class__.__name__])
+
+ self._logger = logger_instance or getLogger(name)
+
+ def _log_msg(self, cmd, *args, **kwargs):
+ if not hasattr(self, "_logger"):
+ self._logger = getLogger(
+ ".".join([self.__module__, self.__class__.__name__])
+ )
+ getattr(self._logger, cmd)(*args, **kwargs)
+
+ def log(self, *args, **kwargs):
+ self._log_msg("log", *args, **kwargs)
+
+ def info(self, *args, **kwargs):
+ self._log_msg("info", *args, **kwargs)
+
+ def error(self, *args, **kwargs):
+ self._log_msg("error", *args, **kwargs)
+
+ def warn(self, *args, **kwargs):
+ self._log_msg("warn", *args, **kwargs)
+
+ def log_structured(self, *args, **kwargs):
+ self._log_msg("log_structured", *args, **kwargs)
diff --git a/testing/mozbase/mozlog/mozlog/unstructured/loglistener.py b/testing/mozbase/mozlog/mozlog/unstructured/loglistener.py
new file mode 100644
index 0000000000..0d57061236
--- /dev/null
+++ b/testing/mozbase/mozlog/mozlog/unstructured/loglistener.py
@@ -0,0 +1,50 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import socket
+
+from six.moves import socketserver
+
+
+class LogMessageServer(socketserver.TCPServer):
+ def __init__(self, server_address, logger, message_callback=None, timeout=3):
+ socketserver.TCPServer.__init__(self, server_address, LogMessageHandler)
+ self._logger = logger
+ self._message_callback = message_callback
+ self.timeout = timeout
+
+
+class LogMessageHandler(socketserver.BaseRequestHandler):
+ """Processes output from a connected log source, logging to an
+ existing logger upon receipt of a well-formed log message."""
+
+ def handle(self):
+ """Continually listens for log messages."""
+ self._partial_message = ""
+ self.request.settimeout(self.server.timeout)
+
+ while True:
+ try:
+ data = self.request.recv(1024)
+ if not data:
+ return
+ self.process_message(data.decode())
+ except socket.timeout:
+ return
+
+ def process_message(self, data):
+ """Processes data from a connected log source. Messages are assumed
+ to be newline delimited, and generally well-formed JSON."""
+ for part in data.split("\n"):
+ msg_string = self._partial_message + part
+ try:
+ msg = json.loads(msg_string)
+ self._partial_message = ""
+ self.server._logger.log_structured(msg.get("action", "UNKNOWN"), msg)
+ if self.server._message_callback:
+ self.server._message_callback()
+
+ except ValueError:
+ self._partial_message = msg_string
diff --git a/testing/mozbase/mozlog/setup.cfg b/testing/mozbase/mozlog/setup.cfg
new file mode 100644
index 0000000000..3c6e79cf31
--- /dev/null
+++ b/testing/mozbase/mozlog/setup.cfg
@@ -0,0 +1,2 @@
+[bdist_wheel]
+universal=1
diff --git a/testing/mozbase/mozlog/setup.py b/testing/mozbase/mozlog/setup.py
new file mode 100644
index 0000000000..3283374363
--- /dev/null
+++ b/testing/mozbase/mozlog/setup.py
@@ -0,0 +1,43 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from setuptools import find_packages, setup
+
+PACKAGE_NAME = "mozlog"
+PACKAGE_VERSION = "8.0.0"
+DEPS = [
+ "blessed>=1.19.1",
+ "mozterm",
+ "mozfile",
+ "six >= 1.13.0",
+]
+
+
+setup(
+ name=PACKAGE_NAME,
+ version=PACKAGE_VERSION,
+ description="Robust log handling specialized for logging in the Mozilla universe",
+ long_description="see https://firefox-source-docs.mozilla.org/mozbase/index.html",
+ author="Mozilla Automation and Testing Team",
+ author_email="tools@lists.mozilla.org",
+ url="https://wiki.mozilla.org/Auto-tools/Projects/Mozbase",
+ license="Mozilla Public License 2.0 (MPL 2.0)",
+ packages=find_packages(),
+ zip_safe=False,
+ install_requires=DEPS,
+ tests_require=["mozfile"],
+ platforms=["Any"],
+ classifiers=[
+ "Development Status :: 4 - Beta",
+ "Environment :: Console",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3.5",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ package_data={"mozlog": ["formatters/html/main.js", "formatters/html/style.css"]},
+ entry_points={"console_scripts": ["structlog = mozlog.scripts:main"]},
+)
diff --git a/testing/mozbase/mozlog/tests/conftest.py b/testing/mozbase/mozlog/tests/conftest.py
new file mode 100644
index 0000000000..eaf79897e4
--- /dev/null
+++ b/testing/mozbase/mozlog/tests/conftest.py
@@ -0,0 +1,24 @@
+import pytest
+from mozlog.formatters import ErrorSummaryFormatter, MachFormatter
+from mozlog.handlers import StreamHandler
+from mozlog.structuredlog import StructuredLogger
+from six import StringIO
+
+
+@pytest.fixture
+def get_logger():
+ # Ensure a new state instance is created for each test function.
+ StructuredLogger._logger_states = {}
+ formatters = {
+ "mach": MachFormatter,
+ "errorsummary": ErrorSummaryFormatter,
+ }
+
+ def inner(name, **fmt_args):
+ buf = StringIO()
+ fmt = formatters[name](**fmt_args)
+ logger = StructuredLogger("test_logger")
+ logger.add_handler(StreamHandler(buf, fmt))
+ return logger
+
+ return inner
diff --git a/testing/mozbase/mozlog/tests/manifest.toml b/testing/mozbase/mozlog/tests/manifest.toml
new file mode 100644
index 0000000000..6154550965
--- /dev/null
+++ b/testing/mozbase/mozlog/tests/manifest.toml
@@ -0,0 +1,16 @@
+[DEFAULT]
+subsuite = "mozbase"
+
+["test_capture.py"]
+
+["test_errorsummary.py"]
+
+["test_formatters.py"]
+
+["test_logger.py"]
+
+["test_logtypes.py"]
+
+["test_structured.py"]
+
+["test_terminal_colors.py"]
diff --git a/testing/mozbase/mozlog/tests/test_capture.py b/testing/mozbase/mozlog/tests/test_capture.py
new file mode 100644
index 0000000000..6e14e155aa
--- /dev/null
+++ b/testing/mozbase/mozlog/tests/test_capture.py
@@ -0,0 +1,37 @@
+import sys
+import unittest
+
+import mozunit
+from mozlog import capture, structuredlog
+from test_structured import TestHandler
+
+
+class TestCaptureIO(unittest.TestCase):
+ """Tests expected logging output of CaptureIO"""
+
+ def setUp(self):
+ self.logger = structuredlog.StructuredLogger("test")
+ self.handler = TestHandler()
+ self.logger.add_handler(self.handler)
+
+ def test_captureio_log(self):
+ """
+ CaptureIO takes in two arguments. The second argument must
+ be truthy in order for the code to run. Hence, the string
+ "capture_stdio" has been used in this test case.
+ """
+ with capture.CaptureIO(self.logger, "capture_stdio"):
+ print("message 1")
+ sys.stdout.write("message 2")
+ sys.stderr.write("message 3")
+ sys.stdout.write("\xff")
+ log = self.handler.items
+ messages = [item["message"] for item in log]
+ self.assertIn("STDOUT: message 1", messages)
+ self.assertIn("STDOUT: message 2", messages)
+ self.assertIn("STDERR: message 3", messages)
+ self.assertIn("STDOUT: \xff", messages)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozlog/tests/test_errorsummary.py b/testing/mozbase/mozlog/tests/test_errorsummary.py
new file mode 100644
index 0000000000..30a5a304b2
--- /dev/null
+++ b/testing/mozbase/mozlog/tests/test_errorsummary.py
@@ -0,0 +1,262 @@
+# -*- coding: utf-8 -*-
+
+import json
+import time
+
+import mozunit
+import pytest
+
+# flake8: noqa
+
+
+@pytest.mark.parametrize(
+ "logs,expected",
+ (
+ pytest.param(
+ [
+ (
+ "suite_start",
+ {
+ "manifestA": ["test_foo", "test_bar", "test_baz"],
+ "manifestB": ["test_something"],
+ },
+ ),
+ ("test_start", "test_foo"),
+ ("test_end", "test_foo", "SKIP"),
+ ("test_start", "test_bar"),
+ ("test_end", "test_bar", "OK"),
+ ("test_start", "test_something"),
+ ("test_end", "test_something", "OK"),
+ ("test_start", "test_baz"),
+ ("test_end", "test_baz", "PASS", "FAIL"),
+ ("suite_end",),
+ ],
+ """
+ {"groups": ["manifestA", "manifestB"], "action": "test_groups", "line": 0}
+ {"test": "test_baz", "subtest": null, "group": "manifestA", "status": "PASS", "expected": "FAIL", "message": null, "stack": null, "known_intermittent": [], "action": "test_result", "line": 8}
+ {"group": "manifestA", "status": "ERROR", "duration": 20, "action": "group_result", "line": 9}
+ {"group": "manifestB", "status": "OK", "duration": 10, "action": "group_result", "line": 9}
+ """.strip(),
+ id="basic",
+ ),
+ pytest.param(
+ [
+ ("suite_start", {"manifest": ["test_foo"]}),
+ ("test_start", "test_foo"),
+ ("suite_end",),
+ ],
+ """
+ {"groups": ["manifest"], "action": "test_groups", "line": 0}
+ {"group": "manifest", "status": null, "duration": 0, "action": "group_result", "line": 2}
+ """.strip(),
+ id="missing_test_end",
+ ),
+ pytest.param(
+ [
+ ("suite_start", {"manifest": ["test_foo"]}),
+ ("test_start", "test_foo"),
+ ("test_status", "test_foo", "subtest", "PASS"),
+ ("suite_end",),
+ ],
+ """
+ {"groups": ["manifest"], "action": "test_groups", "line": 0}
+ {"group": "manifest", "status": "ERROR", "duration": null, "action": "group_result", "line": 3}
+ """.strip(),
+ id="missing_test_end_with_test_status_ok",
+ marks=pytest.mark.xfail, # status is OK but should be ERROR
+ ),
+ pytest.param(
+ [
+ (
+ "suite_start",
+ {
+ "manifestA": ["test_foo", "test_bar", "test_baz"],
+ "manifestB": ["test_something"],
+ },
+ ),
+ ("test_start", "test_foo"),
+ ("test_end", "test_foo", "SKIP"),
+ ("test_start", "test_bar"),
+ ("test_end", "test_bar", "CRASH"),
+ ("test_start", "test_something"),
+ ("test_end", "test_something", "OK"),
+ ("test_start", "test_baz"),
+ ("test_end", "test_baz", "FAIL", "FAIL"),
+ ("suite_end",),
+ ],
+ """
+ {"groups": ["manifestA", "manifestB"], "action": "test_groups", "line": 0}
+ {"test": "test_bar", "subtest": null, "group": "manifestA", "status": "CRASH", "expected": "OK", "message": null, "stack": null, "known_intermittent": [], "action": "test_result", "line": 4}
+ {"group": "manifestA", "status": "ERROR", "duration": 20, "action": "group_result", "line": 9}
+ {"group": "manifestB", "status": "OK", "duration": 10, "action": "group_result", "line": 9}
+ """.strip(),
+ id="crash_and_group_status",
+ ),
+ pytest.param(
+ [
+ (
+ "suite_start",
+ {
+ "manifestA": ["test_foo", "test_bar", "test_baz"],
+ "manifestB": ["test_something"],
+ },
+ ),
+ ("test_start", "test_foo"),
+ ("test_end", "test_foo", "SKIP"),
+ ("test_start", "test_bar"),
+ ("test_end", "test_bar", "OK"),
+ ("test_start", "test_something"),
+ ("test_end", "test_something", "OK"),
+ ("test_start", "test_baz"),
+ ("test_status", "test_baz", "subtest", "FAIL", "FAIL"),
+ ("test_end", "test_baz", "OK"),
+ ("suite_end",),
+ ],
+ """
+ {"groups": ["manifestA", "manifestB"], "action": "test_groups", "line": 0}
+ {"group": "manifestA", "status": "OK", "duration": 29, "action": "group_result", "line": 10}
+ {"group": "manifestB", "status": "OK", "duration": 10, "action": "group_result", "line": 10}
+ """.strip(),
+ id="fail_expected_fail",
+ ),
+ pytest.param(
+ [
+ (
+ "suite_start",
+ {
+ "manifestA": ["test_foo", "test_bar", "test_baz"],
+ "manifestB": ["test_something"],
+ },
+ ),
+ ("test_start", "test_foo"),
+ ("test_end", "test_foo", "SKIP"),
+ ("test_start", "test_bar"),
+ ("test_end", "test_bar", "OK"),
+ ("test_start", "test_something"),
+ ("test_end", "test_something", "OK"),
+ ("test_start", "test_baz"),
+ ("test_status", "test_baz", "Test timed out", "FAIL", "PASS"),
+ ("test_status", "test_baz", "", "TIMEOUT", "PASS"),
+ ("crash", "", "signature", "manifestA"),
+ ("test_end", "test_baz", "OK"),
+ ("suite_end",),
+ ],
+ """
+ {"groups": ["manifestA", "manifestB"], "action": "test_groups", "line": 0}
+ {"test": "test_baz", "subtest": "Test timed out", "group": "manifestA", "status": "FAIL", "expected": "PASS", "message": null, "stack": null, "known_intermittent": [], "action": "test_result", "line": 8}
+ {"test": "test_baz", "subtest": "", "group": "manifestA", "status": "TIMEOUT", "expected": "PASS", "message": null, "stack": null, "known_intermittent": [], "action": "test_result", "line": 9}
+ {"test": "manifestA", "group": "manifestA", "signature": "signature", "stackwalk_stderr": null, "stackwalk_stdout": null, "action": "crash", "line": 10}
+ {"group": "manifestA", "status": "ERROR", "duration": 49, "action": "group_result", "line": 12}
+ {"group": "manifestB", "status": "OK", "duration": 10, "action": "group_result", "line": 12}
+ """.strip(),
+ id="timeout_and_crash",
+ ),
+ pytest.param(
+ [
+ (
+ "suite_start",
+ {
+ "manifestA": ["test_foo", "test_bar", "test_baz"],
+ "manifestB": ["test_something"],
+ },
+ ),
+ ("test_start", "test_foo"),
+ ("test_end", "test_foo", "SKIP"),
+ ("test_start", "test_bar"),
+ ("test_end", "test_bar", "CRASH", "CRASH"),
+ ("test_start", "test_something"),
+ ("test_end", "test_something", "OK"),
+ ("test_start", "test_baz"),
+ ("test_end", "test_baz", "FAIL", "FAIL"),
+ ("suite_end",),
+ ],
+ """
+ {"groups": ["manifestA", "manifestB"], "action": "test_groups", "line": 0}
+ {"group": "manifestA", "status": "OK", "duration": 20, "action": "group_result", "line": 9}
+ {"group": "manifestB", "status": "OK", "duration": 10, "action": "group_result", "line": 9}
+ """.strip(),
+ id="crash_expected_crash",
+ ),
+ pytest.param(
+ [
+ (
+ "suite_start",
+ {
+ "manifestA": ["test_foo", "test_bar", "test_baz"],
+ "manifestB": ["test_something"],
+ },
+ ),
+ ("test_start", "test_foo"),
+ ("test_end", "test_foo", "SKIP"),
+ ("test_start", "test_bar"),
+ ("test_end", "test_bar", "OK"),
+ ("test_start", "test_something"),
+ ("test_end", "test_something", "OK"),
+ ("test_start", "test_baz"),
+ ("test_end", "test_baz", "FAIL", "FAIL"),
+ ("crash", "", "", "manifestA"),
+ ("suite_end",),
+ ],
+ """
+ {"groups": ["manifestA", "manifestB"], "action": "test_groups", "line": 0}
+ {"test": "manifestA", "group": "manifestA", "signature": "", "stackwalk_stderr": null, "stackwalk_stdout": null, "action": "crash", "line": 9}
+ {"group": "manifestA", "status": "ERROR", "duration": 20, "action": "group_result", "line": 10}
+ {"group": "manifestB", "status": "OK", "duration": 10, "action": "group_result", "line": 10}
+ """.strip(),
+ id="assertion_crash_on_shutdown",
+ ),
+ pytest.param(
+ [
+ (
+ "suite_start",
+ {
+ "manifestA": ["test_foo", "test_bar", "test_baz"],
+ "manifestB": ["test_something"],
+ },
+ ),
+ ("test_start", "test_foo"),
+ ("test_end", "test_foo", "SKIP"),
+ ("test_start", "test_bar"),
+ ("test_end", "test_bar", "OK"),
+ ("test_start", "test_something"),
+ ("test_end", "test_something", "OK"),
+ ("test_start", "test_baz"),
+ ("test_end", "test_baz", "FAIL"),
+ ],
+ """
+ {"groups": ["manifestA", "manifestB"], "action": "test_groups", "line": 0}
+ {"test": "test_baz", "group": "manifestA", "status": "FAIL", "expected": "OK", "subtest": null, "message": null, "stack": null, "known_intermittent": [], "action": "test_result", "line": 8}
+ """.strip(),
+ id="timeout_no_group_status",
+ ),
+ ),
+)
+def test_errorsummary(monkeypatch, get_logger, logs, expected):
+ ts = {"ts": 0.0} # need to use dict since 'nonlocal' doesn't exist on PY2
+
+ def fake_time():
+ ts["ts"] += 0.01
+ return ts["ts"]
+
+ monkeypatch.setattr(time, "time", fake_time)
+ logger = get_logger("errorsummary")
+
+ for log in logs:
+ getattr(logger, log[0])(*log[1:])
+
+ buf = logger.handlers[0].stream
+ result = buf.getvalue()
+ print("Dumping result for copy/paste:")
+ print(result)
+
+ expected = expected.split("\n")
+ for i, line in enumerate(result.split("\n")):
+ if not line:
+ continue
+
+ data = json.loads(line)
+ assert data == json.loads(expected[i])
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozlog/tests/test_formatters.py b/testing/mozbase/mozlog/tests/test_formatters.py
new file mode 100644
index 0000000000..e0e3a51d97
--- /dev/null
+++ b/testing/mozbase/mozlog/tests/test_formatters.py
@@ -0,0 +1,767 @@
+# encoding: utf-8
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import signal
+import unittest
+import xml.etree.ElementTree as ET
+from textwrap import dedent
+
+import mozunit
+import pytest
+from mozlog.formatters import (
+ GroupingFormatter,
+ HTMLFormatter,
+ MachFormatter,
+ TbplFormatter,
+ XUnitFormatter,
+)
+from mozlog.handlers import StreamHandler
+from mozlog.structuredlog import StructuredLogger
+from six import StringIO, ensure_text, unichr
+
+FORMATS = {
+ # A list of tuples consisting of (name, options, expected string).
+ "PASS": [
+ (
+ "mach",
+ {},
+ dedent(
+ """
+ 0:00.00 SUITE_START: running 3 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: OK
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 TEST_END: Test OK. Subtests passed 1/1. Unexpected 0
+ 0:00.00 TEST_START: test_baz
+ 0:00.00 TEST_END: FAIL
+ 0:00.00 SUITE_END
+
+ suite 1
+ ~~~~~~~
+ Ran 4 checks (1 subtests, 3 tests)
+ Expected results: 4
+ Unexpected results: 0
+ OK
+ """
+ ).lstrip("\n"),
+ ),
+ (
+ "mach",
+ {"verbose": True},
+ dedent(
+ """
+ 0:00.00 SUITE_START: running 3 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: OK
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 PASS a subtest
+ 0:00.00 TEST_END: Test OK. Subtests passed 1/1. Unexpected 0
+ 0:00.00 TEST_START: test_baz
+ 0:00.00 TEST_END: FAIL
+ 0:00.00 SUITE_END
+
+ suite 1
+ ~~~~~~~
+ Ran 4 checks (1 subtests, 3 tests)
+ Expected results: 4
+ Unexpected results: 0
+ OK
+ """
+ ).lstrip("\n"),
+ ),
+ ],
+ "FAIL": [
+ (
+ "mach",
+ {},
+ dedent(
+ """
+ 0:00.00 SUITE_START: running 3 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: FAIL, expected PASS - expected 0 got 1
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 TEST_END: Test OK. Subtests passed 0/2. Unexpected 2
+ FAIL a subtest - expected 0 got 1
+ SimpleTest.is@SimpleTest/SimpleTest.js:312:5
+ @caps/tests/mochitest/test_bug246699.html:53:1
+ TIMEOUT another subtest
+ 0:00.00 TEST_START: test_baz
+ 0:00.00 TEST_END: PASS, expected FAIL
+ 0:00.00 SUITE_END
+
+ suite 1
+ ~~~~~~~
+ Ran 5 checks (2 subtests, 3 tests)
+ Expected results: 1
+ Unexpected results: 4
+ test: 2 (1 fail, 1 pass)
+ subtest: 2 (1 fail, 1 timeout)
+
+ Unexpected Results
+ ------------------
+ test_foo
+ FAIL test_foo - expected 0 got 1
+ test_bar
+ FAIL a subtest - expected 0 got 1
+ SimpleTest.is@SimpleTest/SimpleTest.js:312:5
+ @caps/tests/mochitest/test_bug246699.html:53:1
+ TIMEOUT another subtest
+ test_baz
+ UNEXPECTED-PASS test_baz
+ """
+ ).lstrip("\n"),
+ ),
+ (
+ "mach",
+ {"verbose": True},
+ dedent(
+ """
+ 0:00.00 SUITE_START: running 3 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: FAIL, expected PASS - expected 0 got 1
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 FAIL a subtest - expected 0 got 1
+ SimpleTest.is@SimpleTest/SimpleTest.js:312:5
+ @caps/tests/mochitest/test_bug246699.html:53:1
+ 0:00.00 TIMEOUT another subtest
+ 0:00.00 TEST_END: Test OK. Subtests passed 0/2. Unexpected 2
+ 0:00.00 TEST_START: test_baz
+ 0:00.00 TEST_END: PASS, expected FAIL
+ 0:00.00 SUITE_END
+
+ suite 1
+ ~~~~~~~
+ Ran 5 checks (2 subtests, 3 tests)
+ Expected results: 1
+ Unexpected results: 4
+ test: 2 (1 fail, 1 pass)
+ subtest: 2 (1 fail, 1 timeout)
+
+ Unexpected Results
+ ------------------
+ test_foo
+ FAIL test_foo - expected 0 got 1
+ test_bar
+ FAIL a subtest - expected 0 got 1
+ SimpleTest.is@SimpleTest/SimpleTest.js:312:5
+ @caps/tests/mochitest/test_bug246699.html:53:1
+ TIMEOUT another subtest
+ test_baz
+ UNEXPECTED-PASS test_baz
+ """
+ ).lstrip("\n"),
+ ),
+ ],
+ "PRECONDITION_FAILED": [
+ (
+ "mach",
+ {},
+ dedent(
+ """
+ 0:00.00 SUITE_START: running 2 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: PRECONDITION_FAILED, expected OK
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 TEST_END: Test OK. Subtests passed 1/2. Unexpected 1
+ PRECONDITION_FAILED another subtest
+ 0:00.00 SUITE_END
+
+ suite 1
+ ~~~~~~~
+ Ran 4 checks (2 subtests, 2 tests)
+ Expected results: 2
+ Unexpected results: 2
+ test: 1 (1 precondition_failed)
+ subtest: 1 (1 precondition_failed)
+
+ Unexpected Results
+ ------------------
+ test_foo
+ PRECONDITION_FAILED test_foo
+ test_bar
+ PRECONDITION_FAILED another subtest
+ """
+ ).lstrip("\n"),
+ ),
+ (
+ "mach",
+ {"verbose": True},
+ dedent(
+ """
+ 0:00.00 SUITE_START: running 2 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: PRECONDITION_FAILED, expected OK
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 PASS a subtest
+ 0:00.00 PRECONDITION_FAILED another subtest
+ 0:00.00 TEST_END: Test OK. Subtests passed 1/2. Unexpected 1
+ 0:00.00 SUITE_END
+
+ suite 1
+ ~~~~~~~
+ Ran 4 checks (2 subtests, 2 tests)
+ Expected results: 2
+ Unexpected results: 2
+ test: 1 (1 precondition_failed)
+ subtest: 1 (1 precondition_failed)
+
+ Unexpected Results
+ ------------------
+ test_foo
+ PRECONDITION_FAILED test_foo
+ test_bar
+ PRECONDITION_FAILED another subtest
+ """
+ ).lstrip("\n"),
+ ),
+ ],
+ "KNOWN-INTERMITTENT": [
+ (
+ "mach",
+ {},
+ dedent(
+ """
+ 0:00.00 SUITE_START: running 3 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: FAIL
+ KNOWN-INTERMITTENT-FAIL test_foo
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 TEST_END: Test OK. Subtests passed 1/1. Unexpected 0
+ KNOWN-INTERMITTENT-PASS a subtest
+ 0:00.00 TEST_START: test_baz
+ 0:00.00 TEST_END: FAIL
+ 0:00.00 SUITE_END
+
+ suite 1
+ ~~~~~~~
+ Ran 4 checks (1 subtests, 3 tests)
+ Expected results: 4 (2 known intermittents)
+ Unexpected results: 0
+
+ Known Intermittent Results
+ --------------------------
+ test_foo
+ KNOWN-INTERMITTENT-FAIL test_foo
+ test_bar
+ KNOWN-INTERMITTENT-PASS a subtest
+ OK
+ """
+ ).lstrip("\n"),
+ ),
+ (
+ "mach",
+ {"verbose": True},
+ dedent(
+ """
+ 0:00.00 SUITE_START: running 3 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: FAIL
+ KNOWN-INTERMITTENT-FAIL test_foo
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 KNOWN-INTERMITTENT-PASS a subtest
+ 0:00.00 TEST_END: Test OK. Subtests passed 1/1. Unexpected 0
+ KNOWN-INTERMITTENT-PASS a subtest
+ 0:00.00 TEST_START: test_baz
+ 0:00.00 TEST_END: FAIL
+ 0:00.00 SUITE_END
+
+ suite 1
+ ~~~~~~~
+ Ran 4 checks (1 subtests, 3 tests)
+ Expected results: 4 (2 known intermittents)
+ Unexpected results: 0
+
+ Known Intermittent Results
+ --------------------------
+ test_foo
+ KNOWN-INTERMITTENT-FAIL test_foo
+ test_bar
+ KNOWN-INTERMITTENT-PASS a subtest
+ OK
+ """
+ ).lstrip("\n"),
+ ),
+ ],
+}
+
+
+def ids(test):
+ ids = []
+ for value in FORMATS[test]:
+ args = ", ".join(["{}={}".format(k, v) for k, v in value[1].items()])
+ if args:
+ args = "-{}".format(args)
+ ids.append("{}{}".format(value[0], args))
+ return ids
+
+
+@pytest.fixture(autouse=True)
+def timestamp(monkeypatch):
+ def fake_time(*args, **kwargs):
+ return 0
+
+ monkeypatch.setattr(MachFormatter, "_time", fake_time)
+
+
+@pytest.mark.parametrize("name,opts,expected", FORMATS["PASS"], ids=ids("PASS"))
+def test_pass(get_logger, name, opts, expected):
+ logger = get_logger(name, **opts)
+
+ logger.suite_start(["test_foo", "test_bar", "test_baz"])
+ logger.test_start("test_foo")
+ logger.test_end("test_foo", "OK")
+ logger.test_start("test_bar")
+ logger.test_status("test_bar", "a subtest", "PASS")
+ logger.test_end("test_bar", "OK")
+ logger.test_start("test_baz")
+ logger.test_end("test_baz", "FAIL", "FAIL", "expected 0 got 1")
+ logger.suite_end()
+
+ buf = logger.handlers[0].stream
+ result = buf.getvalue()
+ print("Dumping result for copy/paste:")
+ print(result)
+ assert result == expected
+
+
+@pytest.mark.parametrize("name,opts,expected", FORMATS["FAIL"], ids=ids("FAIL"))
+def test_fail(get_logger, name, opts, expected):
+ stack = """
+ SimpleTest.is@SimpleTest/SimpleTest.js:312:5
+ @caps/tests/mochitest/test_bug246699.html:53:1
+""".strip(
+ "\n"
+ )
+
+ logger = get_logger(name, **opts)
+
+ logger.suite_start(["test_foo", "test_bar", "test_baz"])
+ logger.test_start("test_foo")
+ logger.test_end("test_foo", "FAIL", "PASS", "expected 0 got 1")
+ logger.test_start("test_bar")
+ logger.test_status(
+ "test_bar", "a subtest", "FAIL", "PASS", "expected 0 got 1", stack
+ )
+ logger.test_status("test_bar", "another subtest", "TIMEOUT")
+ logger.test_end("test_bar", "OK")
+ logger.test_start("test_baz")
+ logger.test_end("test_baz", "PASS", "FAIL")
+ logger.suite_end()
+
+ buf = logger.handlers[0].stream
+ result = buf.getvalue()
+ print("Dumping result for copy/paste:")
+ print(result)
+ assert result == expected
+
+
@pytest.mark.parametrize(
    "name,opts,expected", FORMATS["PRECONDITION_FAILED"], ids=ids("PRECONDITION_FAILED")
)
def test_precondition_failed(get_logger, name, opts, expected):
    """Check formatting of PRECONDITION_FAILED at both the test and the
    subtest level against the golden output string."""
    logger = get_logger(name, **opts)

    logger.suite_start(["test_foo", "test_bar"])
    logger.test_start("test_foo")
    logger.test_end("test_foo", "PRECONDITION_FAILED")
    logger.test_start("test_bar")
    logger.test_status("test_bar", "a subtest", "PASS")
    logger.test_status("test_bar", "another subtest", "PRECONDITION_FAILED")
    logger.test_end("test_bar", "OK")
    logger.suite_end()

    buf = logger.handlers[0].stream
    result = buf.getvalue()
    # Printed so a failing run's output can be copy/pasted into FORMATS.
    print("Dumping result for copy/paste:")
    print(result)
    assert result == expected
+
+
@pytest.mark.parametrize(
    "name,opts,expected", FORMATS["KNOWN-INTERMITTENT"], ids=ids("KNOWN-INTERMITTENT")
)
def test_known_intermittent(get_logger, name, opts, expected):
    """Check formatting of results covered by a known_intermittent list, at
    both the test and the subtest level, against the golden output string."""
    logger = get_logger(name, **opts)

    logger.suite_start(["test_foo", "test_bar", "test_baz"])
    logger.test_start("test_foo")
    # FAIL is listed as a known intermittent, so this is not "unexpected".
    logger.test_end("test_foo", "FAIL", "PASS", known_intermittent=["FAIL"])
    logger.test_start("test_bar")
    logger.test_status(
        "test_bar", "a subtest", "PASS", "FAIL", known_intermittent=["PASS"]
    )
    logger.test_end("test_bar", "OK")
    logger.test_start("test_baz")
    # Expected failure with an unrelated known_intermittent entry.
    logger.test_end(
        "test_baz", "FAIL", "FAIL", "expected 0 got 1", known_intermittent=["PASS"]
    )
    logger.suite_end()

    buf = logger.handlers[0].stream
    result = buf.getvalue()
    # Printed so a failing run's output can be copy/pasted into FORMATS.
    print("Dumping result for copy/paste:")
    print(result)
    assert result == expected
+
+
class FormatterTest(unittest.TestCase):
    """Base class wiring a StructuredLogger to the formatter under test.

    Subclasses override get_formatter(); assertions read the formatted
    output back through the ``loglines`` property.
    """

    def setUp(self):
        self.position = 0
        self.logger = StructuredLogger("test_%s" % type(self).__name__)
        self.output_file = StringIO()
        self.handler = StreamHandler(self.output_file, self.get_formatter())
        self.logger.add_handler(self.handler)

    def set_position(self, pos=None):
        # Remember an offset into the output stream; ``loglines`` only
        # returns text written after this point.
        if pos is None:
            pos = self.output_file.tell()
        self.position = pos

    def get_formatter(self):
        # Each concrete formatter test class must supply its formatter.
        raise NotImplementedError(
            "FormatterTest subclasses must implement get_formatter"
        )

    @property
    def loglines(self):
        # Formatted output since the last set_position(), as a list of
        # right-stripped text lines.
        self.output_file.seek(self.position)
        return [ensure_text(line.rstrip()) for line in self.output_file.readlines()]
+
+
class TestHTMLFormatter(FormatterTest):
    """Checks that HTMLFormatter embeds test_end ``extra`` data as a
    base64 data: URI in the generated report."""

    def get_formatter(self):
        return HTMLFormatter()

    def test_base64_string(self):
        self.logger.suite_start([])
        self.logger.test_start("string_test")
        self.logger.test_end("string_test", "FAIL", extra={"data": "foobar"})
        self.logger.suite_end()
        # The debug link sits three lines from the end of the document.
        self.assertIn("data:text/html;charset=utf-8;base64,Zm9vYmFy", self.loglines[-3])

    def test_base64_unicode(self):
        self.logger.suite_start([])
        self.logger.test_start("unicode_test")
        # NOTE(review): ``unichr`` is a py2 name; presumably provided by a
        # compat import (six) at the top of this file -- confirm.
        self.logger.test_end("unicode_test", "FAIL", extra={"data": unichr(0x02A9)})
        self.logger.suite_end()
        self.assertIn("data:text/html;charset=utf-8;base64,yqk=", self.loglines[-3])

    def test_base64_other(self):
        # Non-string extra data is JSON-serialized before base64 encoding
        # (the expected payload decodes to '{"foo": "bar"}').
        self.logger.suite_start([])
        self.logger.test_start("int_test")
        self.logger.test_end("int_test", "FAIL", extra={"data": {"foo": "bar"}})
        self.logger.suite_end()
        self.assertIn(
            "data:text/html;charset=utf-8;base64,eyJmb28iOiAiYmFyIn0=",
            self.loglines[-3],
        )
+
+
class TestTBPLFormatter(FormatterTest):
    """Line-oriented TBPL (Treeherder) formatter output."""

    def get_formatter(self):
        return TbplFormatter()

    def test_unexpected_message(self):
        self.logger.suite_start([])
        self.logger.test_start("timeout_test")
        self.logger.test_end("timeout_test", "TIMEOUT", message="timed out")
        self.assertIn(
            "TEST-UNEXPECTED-TIMEOUT | timeout_test | timed out", self.loglines
        )
        self.logger.suite_end()

    def test_default_unexpected_end_message(self):
        # With no explicit message, the expected status is reported instead.
        self.logger.suite_start([])
        self.logger.test_start("timeout_test")
        self.logger.test_end("timeout_test", "TIMEOUT")
        self.assertIn(
            "TEST-UNEXPECTED-TIMEOUT | timeout_test | expected OK", self.loglines
        )
        self.logger.suite_end()

    def test_default_unexpected_status_message(self):
        # Subtests default to an expectation of PASS (vs. OK for tests).
        self.logger.suite_start([])
        self.logger.test_start("timeout_test")
        self.logger.test_status("timeout_test", "subtest", status="TIMEOUT")
        self.assertIn(
            "TEST-UNEXPECTED-TIMEOUT | timeout_test | subtest - expected PASS",
            self.loglines,
        )
        self.logger.test_end("timeout_test", "OK")
        self.logger.suite_end()

    def test_known_intermittent_end(self):
        self.logger.suite_start([])
        self.logger.test_start("intermittent_test")
        self.logger.test_end(
            "intermittent_test",
            status="FAIL",
            expected="PASS",
            known_intermittent=["FAIL"],
        )
        # test_end log format:
        # "TEST-KNOWN-INTERMITTENT-<STATUS> | <test> | took <duration>ms"
        # where duration may be different each time
        self.assertIn(
            "TEST-KNOWN-INTERMITTENT-FAIL | intermittent_test | took ", self.loglines[2]
        )
        self.assertIn("ms", self.loglines[2])
        self.logger.suite_end()

    def test_known_intermittent_status(self):
        self.logger.suite_start([])
        self.logger.test_start("intermittent_test")
        self.logger.test_status(
            "intermittent_test",
            "subtest",
            status="FAIL",
            expected="PASS",
            known_intermittent=["FAIL"],
        )
        self.assertIn(
            "TEST-KNOWN-INTERMITTENT-FAIL | intermittent_test | subtest", self.loglines
        )
        self.logger.test_end("intermittent_test", "OK")
        self.logger.suite_end()

    def test_single_newline(self):
        self.logger.suite_start([])
        self.logger.test_start("test1")
        self.set_position()
        self.logger.test_status("test1", "subtest", status="PASS", expected="FAIL")
        self.logger.test_end("test1", "OK")
        self.logger.suite_end()

        # This sequence should not produce blanklines
        for line in self.loglines:
            self.assertNotEqual("", line)

    def test_process_exit(self):
        self.logger.process_exit(1234, 0)
        self.assertIn("TEST-INFO | 1234: exit 0", self.loglines)

    @unittest.skipUnless(os.name == "posix", "posix only")
    def test_process_exit_with_sig(self):
        # subprocess return code is negative when process
        # has been killed by signal on posix.
        self.logger.process_exit(1234, -signal.SIGTERM)
        self.assertIn("TEST-INFO | 1234: killed by SIGTERM", self.loglines)
+
+
class TestTBPLFormatterWithShutdown(FormatterTest):
    """TBPL formatter configured to emit its summary at shutdown."""

    def get_formatter(self):
        return TbplFormatter(summary_on_shutdown=True)

    def test_suite_summary_on_shutdown(self):
        self.logger.suite_start([])
        self.logger.test_start("summary_test")
        self.logger.test_status(
            "summary_test", "subtest", "FAIL", "PASS", known_intermittent=["FAIL"]
        )
        self.logger.test_end("summary_test", "FAIL", "OK", known_intermittent=["FAIL"])
        self.logger.suite_end()
        # The summary section is only written once shutdown() is called.
        self.logger.shutdown()

        self.assertIn("suite 1: 2/2 (2 known intermittent tests)", self.loglines)
        self.assertIn("Known Intermittent tests:", self.loglines)
        self.assertIn(
            "TEST-KNOWN-INTERMITTENT-FAIL | summary_test | subtest", self.loglines
        )
+
+
class TestMachFormatter(FormatterTest):
    """End-of-suite summary and process-event output of MachFormatter."""

    def get_formatter(self):
        return MachFormatter(disable_colors=True)

    def test_summary(self):
        self.logger.suite_start([])

        # Some tests that pass
        self.logger.test_start("test1")
        self.logger.test_end("test1", status="PASS", expected="PASS")

        self.logger.test_start("test2")
        self.logger.test_end("test2", status="PASS", expected="TIMEOUT")

        self.logger.test_start("test3")
        self.logger.test_end("test3", status="FAIL", expected="PASS")

        # Only inspect output produced from here on (the summary section).
        self.set_position()
        self.logger.suite_end()

        self.assertIn("Ran 3 checks (3 tests)", self.loglines)
        self.assertIn("Expected results: 1", self.loglines)
        self.assertIn(
            """
Unexpected results: 2
  test: 2 (1 fail, 1 pass)
""".strip(),
            "\n".join(self.loglines),
        )
        # Expected results are not repeated in the summary.
        self.assertNotIn("test1", self.loglines)
        self.assertIn("UNEXPECTED-PASS test2", self.loglines)
        self.assertIn("FAIL test3", self.loglines)

    def test_summary_subtests(self):
        self.logger.suite_start([])

        self.logger.test_start("test1")
        self.logger.test_status("test1", "subtest1", status="PASS")
        self.logger.test_status("test1", "subtest2", status="FAIL")
        self.logger.test_end("test1", status="OK", expected="OK")

        self.logger.test_start("test2")
        self.logger.test_status("test2", "subtest1", status="TIMEOUT", expected="PASS")
        self.logger.test_end("test2", status="TIMEOUT", expected="OK")

        self.set_position()
        self.logger.suite_end()

        # Subtests are counted separately from their parent tests.
        self.assertIn("Ran 5 checks (3 subtests, 2 tests)", self.loglines)
        self.assertIn("Expected results: 2", self.loglines)
        self.assertIn(
            """
Unexpected results: 3
  test: 1 (1 timeout)
  subtest: 2 (1 fail, 1 timeout)
""".strip(),
            "\n".join(self.loglines),
        )

    def test_summary_ok(self):
        self.logger.suite_start([])

        self.logger.test_start("test1")
        self.logger.test_status("test1", "subtest1", status="PASS")
        self.logger.test_status("test1", "subtest2", status="PASS")
        self.logger.test_end("test1", status="OK", expected="OK")

        self.logger.test_start("test2")
        self.logger.test_status("test2", "subtest1", status="PASS", expected="PASS")
        self.logger.test_end("test2", status="OK", expected="OK")

        self.set_position()
        self.logger.suite_end()

        self.assertIn("OK", self.loglines)
        self.assertIn("Expected results: 5", self.loglines)
        self.assertIn("Unexpected results: 0", self.loglines)

    def test_process_start(self):
        self.logger.process_start(1234)
        self.assertIn("Started process `1234`", self.loglines[0])

    def test_process_start_with_command(self):
        self.logger.process_start(1234, command="test cmd")
        self.assertIn("Started process `1234` (test cmd)", self.loglines[0])

    def test_process_exit(self):
        self.logger.process_exit(1234, 0)
        self.assertIn("1234: exit 0", self.loglines[0])

    @unittest.skipUnless(os.name == "posix", "posix only")
    def test_process_exit_with_sig(self):
        # subprocess return code is negative when process
        # has been killed by signal on posix.
        self.logger.process_exit(1234, -signal.SIGTERM)
        self.assertIn("1234: killed by SIGTERM", self.loglines[0])
+
+
class TestGroupingFormatter(FormatterTest):
    """Final summary lines emitted by GroupingFormatter."""

    def get_formatter(self):
        return GroupingFormatter()

    def test_results_total(self):
        self.logger.suite_start([])

        self.logger.test_start("test1")
        self.logger.test_status("test1", "subtest1", status="PASS")
        self.logger.test_status("test1", "subtest1", status="PASS")
        self.logger.test_end("test1", status="OK")

        self.logger.test_start("test2")
        self.logger.test_status(
            "test2",
            "subtest2",
            status="FAIL",
            expected="PASS",
            known_intermittent=["FAIL"],
        )
        self.logger.test_end("test2", status="FAIL", expected="OK")

        self.set_position()
        self.logger.suite_end()

        # Elapsed time is 0.0 because the autouse timestamp fixture pins
        # the formatter clock.
        self.assertIn("Ran 2 tests finished in 0.0 seconds.", self.loglines)
        self.assertIn(" \u2022 1 ran as expected. 0 tests skipped.", self.loglines)
        self.assertIn(" \u2022 1 known intermittent results.", self.loglines)
        self.assertIn(" \u2022 1 tests failed unexpectedly", self.loglines)
        self.assertIn(" \u25B6 FAIL [expected OK] test2", self.loglines)
        # NOTE(review): the bracket in this expected line is unbalanced
        # ("[expected PASS, ... [FAIL]" never closes the first "[");
        # presumably it mirrors the formatter's literal output -- confirm
        # against GroupingFormatter before "fixing".
        self.assertIn(
            " \u25B6 FAIL [expected PASS, known intermittent [FAIL] test2, subtest2",
            self.loglines,
        )
+
+
class TestXUnitFormatter(FormatterTest):
    """Checks the xUnit XML document produced by XUnitFormatter."""

    def get_formatter(self):
        return XUnitFormatter()

    def log_as_xml(self):
        # Parse everything logged so far as one XML document.
        return ET.fromstring("\n".join(self.loglines))

    def test_stacktrace_is_present(self):
        self.logger.suite_start([])
        self.logger.test_start("test1")
        self.logger.test_end(
            "test1", "fail", message="Test message", stack="this\nis\na\nstack"
        )
        self.logger.suite_end()

        root = self.log_as_xml()
        self.assertIn("this\nis\na\nstack", root.find("testcase/failure").text)

    def test_failure_message(self):
        self.logger.suite_start([])
        self.logger.test_start("test1")
        self.logger.test_end("test1", "fail", message="Test message")
        self.logger.suite_end()

        root = self.log_as_xml()
        self.assertEqual(
            "Expected OK, got FAIL", root.find("testcase/failure").get("message")
        )

    def test_suite_attrs(self):
        self.logger.suite_start([])
        self.logger.test_start("test1")
        self.logger.test_end("test1", "ok", message="Test message")
        self.logger.suite_end()

        root = self.log_as_xml()
        self.assertEqual(root.get("skips"), "0")
        self.assertEqual(root.get("failures"), "0")
        self.assertEqual(root.get("errors"), "0")
        self.assertEqual(root.get("tests"), "1")

    def test_time_is_not_rounded(self):
        # call formatter directly, it is easier here
        formatter = self.get_formatter()
        formatter.suite_start(dict(time=55000))
        formatter.test_start(dict(time=55100))
        formatter.test_end(
            dict(time=55558, test="id", message="message", status="PASS")
        )
        xml_string = formatter.suite_end(dict(time=55559))

        # Timestamps above are milliseconds; durations should come out in
        # seconds with two decimals, not rounded to whole seconds.
        root = ET.fromstring(xml_string)
        self.assertEqual(root.get("time"), "0.56")
        self.assertEqual(root.find("testcase").get("time"), "0.46")
+
+
if __name__ == "__main__":
    # mozunit dispatches to the appropriate runner when executed directly.
    mozunit.main()
diff --git a/testing/mozbase/mozlog/tests/test_logger.py b/testing/mozbase/mozlog/tests/test_logger.py
new file mode 100644
index 0000000000..0776d87000
--- /dev/null
+++ b/testing/mozbase/mozlog/tests/test_logger.py
@@ -0,0 +1,303 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import datetime
+import json
+import socket
+import threading
+import time
+import unittest
+
+import mozfile
+import mozlog.unstructured as mozlog
+import mozunit
+import six
+
+
class ListHandler(mozlog.Handler):
    """Mock handler that stores each formatted record in a list so tests
    can inspect the emitted messages afterwards."""

    def __init__(self):
        super(ListHandler, self).__init__()
        self.messages = []

    def emit(self, record):
        self.messages.append(self.format(record))
+
+
class TestLogging(unittest.TestCase):
    """Tests behavior of basic mozlog api."""

    def test_logger_defaults(self):
        """Tests the default logging format and behavior."""

        # With no handler argument a StreamHandler is attached.
        default_logger = mozlog.getLogger("default.logger")
        self.assertEqual(default_logger.name, "default.logger")
        self.assertEqual(len(default_logger.handlers), 1)
        self.assertTrue(isinstance(default_logger.handlers[0], mozlog.StreamHandler))

        f = mozfile.NamedTemporaryFile()
        list_logger = mozlog.getLogger(
            "file.logger", handler=mozlog.FileHandler(f.name)
        )
        self.assertEqual(len(list_logger.handlers), 1)
        self.assertTrue(isinstance(list_logger.handlers[0], mozlog.FileHandler))
        f.close()

        # Requesting an existing logger with a different handler is an error.
        self.assertRaises(
            ValueError, mozlog.getLogger, "file.logger", handler=ListHandler()
        )

    def test_timestamps(self):
        """Verifies that timestamps are included when asked for."""
        log_name = "test"
        handler = ListHandler()
        handler.setFormatter(mozlog.MozFormatter())
        log = mozlog.getLogger(log_name, handler=handler)
        log.info("no timestamp")
        self.assertTrue(handler.messages[-1].startswith("%s " % log_name))
        handler.setFormatter(mozlog.MozFormatter(include_timestamp=True))
        log.info("timestamp")
        # Just verify that this raises no exceptions.
        datetime.datetime.strptime(handler.messages[-1][:23], "%Y-%m-%d %H:%M:%S,%f")
+
+
class TestStructuredLogging(unittest.TestCase):
    """Tests structured output in mozlog."""

    def setUp(self):
        # JSON-formatting handler so each record can be parsed back below.
        self.handler = ListHandler()
        self.handler.setFormatter(mozlog.JSONFormatter())
        self.logger = mozlog.MozLogger("test.Logger")
        self.logger.addHandler(self.handler)
        self.logger.setLevel(mozlog.DEBUG)

    def check_messages(self, expected, actual):
        """Checks actual for equality with corresponding fields in expected.
        The actual message should contain all fields in expected, and
        should be identical, with the exception of the timestamp field.
        The actual message should contain no fields other than the timestamp
        field and those present in expected."""

        self.assertTrue(isinstance(actual["_time"], six.integer_types))

        for k, v in expected.items():
            self.assertEqual(v, actual[k])

        for k in actual.keys():
            if k != "_time":
                # NOTE(review): this also rejects keys that expected
                # legitimately maps to None; assertIn(k, expected) would
                # state the membership check more directly -- confirm no
                # caller relies on the stricter behavior before changing.
                self.assertTrue(expected.get(k) is not None)

    def test_structured_output(self):
        self.logger.log_structured(
            "test_message", {"_level": mozlog.INFO, "_message": "message one"}
        )
        self.logger.log_structured(
            "test_message", {"_level": mozlog.INFO, "_message": "message two"}
        )
        self.logger.log_structured(
            "error_message", {"_level": mozlog.ERROR, "diagnostic": "unexpected error"}
        )

        message_one_expected = {
            "_namespace": "test.Logger",
            "_level": "INFO",
            "_message": "message one",
            "action": "test_message",
        }
        message_two_expected = {
            "_namespace": "test.Logger",
            "_level": "INFO",
            "_message": "message two",
            "action": "test_message",
        }
        message_three_expected = {
            "_namespace": "test.Logger",
            "_level": "ERROR",
            "diagnostic": "unexpected error",
            "action": "error_message",
        }

        message_one_actual = json.loads(self.handler.messages[0])
        message_two_actual = json.loads(self.handler.messages[1])
        message_three_actual = json.loads(self.handler.messages[2])

        self.check_messages(message_one_expected, message_one_actual)
        self.check_messages(message_two_expected, message_two_actual)
        self.check_messages(message_three_expected, message_three_actual)

    def test_unstructured_conversion(self):
        """Tests that logging to a logger with a structured formatter
        via the traditional logging interface works as expected."""
        self.logger.info("%s %s %d", "Message", "number", 1)
        self.logger.error("Message number 2")
        # ``extra["params"]`` entries are merged into the structured record.
        self.logger.debug(
            "Message with %s",
            "some extras",
            extra={"params": {"action": "mozlog_test_output", "is_failure": False}},
        )
        message_one_expected = {
            "_namespace": "test.Logger",
            "_level": "INFO",
            "_message": "Message number 1",
        }
        message_two_expected = {
            "_namespace": "test.Logger",
            "_level": "ERROR",
            "_message": "Message number 2",
        }
        message_three_expected = {
            "_namespace": "test.Logger",
            "_level": "DEBUG",
            "_message": "Message with some extras",
            "action": "mozlog_test_output",
            "is_failure": False,
        }

        message_one_actual = json.loads(self.handler.messages[0])
        message_two_actual = json.loads(self.handler.messages[1])
        message_three_actual = json.loads(self.handler.messages[2])

        self.check_messages(message_one_expected, message_one_actual)
        self.check_messages(message_two_expected, message_two_actual)
        self.check_messages(message_three_expected, message_three_actual)

    def message_callback(self):
        # Invoked by LogMessageServer per received message; the checks only
        # run once all three messages have arrived.
        if len(self.handler.messages) == 3:
            message_one_expected = {
                "_namespace": "test.Logger",
                "_level": "DEBUG",
                "_message": "socket message one",
                "action": "test_message",
            }
            message_two_expected = {
                "_namespace": "test.Logger",
                "_level": "DEBUG",
                "_message": "socket message two",
                "action": "test_message",
            }
            message_three_expected = {
                "_namespace": "test.Logger",
                "_level": "DEBUG",
                "_message": "socket message three",
                "action": "test_message",
            }

            message_one_actual = json.loads(self.handler.messages[0])

            message_two_actual = json.loads(self.handler.messages[1])

            message_three_actual = json.loads(self.handler.messages[2])

            self.check_messages(message_one_expected, message_one_actual)
            self.check_messages(message_two_expected, message_two_actual)
            self.check_messages(message_three_expected, message_three_actual)

    def test_log_listener(self):
        # Port 0 lets the OS pick a free port; read it back below.
        connection = "127.0.0.1", 0
        self.log_server = mozlog.LogMessageServer(
            connection, self.logger, message_callback=self.message_callback, timeout=0.5
        )

        message_string_one = json.dumps(
            {
                "_message": "socket message one",
                "action": "test_message",
                "_level": "DEBUG",
            }
        )
        message_string_two = json.dumps(
            {
                "_message": "socket message two",
                "action": "test_message",
                "_level": "DEBUG",
            }
        )

        message_string_three = json.dumps(
            {
                "_message": "socket message three",
                "action": "test_message",
                "_level": "DEBUG",
            }
        )

        # Newline-delimited wire format.
        message_string = (
            message_string_one
            + "\n"
            + message_string_two
            + "\n"
            + message_string_three
            + "\n"
        )

        server_thread = threading.Thread(target=self.log_server.handle_request)
        server_thread.start()

        host, port = self.log_server.server_address

        sock = socket.socket()
        sock.connect((host, port))

        # Sleeps prevent listener from receiving entire message in a single call
        # to recv in order to test reconstruction of partial messages.
        sock.sendall(message_string[:8].encode())
        time.sleep(0.01)
        sock.sendall(message_string[8:32].encode())
        time.sleep(0.01)
        sock.sendall(message_string[32:64].encode())
        time.sleep(0.01)
        sock.sendall(message_string[64:128].encode())
        time.sleep(0.01)
        sock.sendall(message_string[128:].encode())

        server_thread.join()
+
+
class Loggable(mozlog.LoggingMixin):
    """Trivial class inheriting from LoggingMixin, used to exercise the
    mixin's lazily-created logger below."""

    pass
+
+
class TestLoggingMixin(unittest.TestCase):
    """Tests basic use of LoggingMixin"""

    def test_mixin(self):
        loggable = Loggable()
        # The logger is created lazily on first use.
        self.assertTrue(not hasattr(loggable, "_logger"))
        loggable.log(mozlog.INFO, "This will instantiate the logger")
        self.assertTrue(hasattr(loggable, "_logger"))
        # The default logger name combines the defining module and class.
        self.assertEqual(loggable._logger.name, "test_logger.Loggable")

        self.assertRaises(ValueError, loggable.set_logger, "not a logger")

        logger = mozlog.MozLogger("test.mixin")
        handler = ListHandler()
        logger.addHandler(handler)
        loggable.set_logger(logger)
        self.assertTrue(isinstance(loggable._logger.handlers[0], ListHandler))
        self.assertEqual(loggable._logger.name, "test.mixin")

        loggable.log(mozlog.WARN, 'message for "log" method')
        loggable.info('message for "info" method')
        loggable.error('message for "error" method')
        loggable.log_structured(
            "test_message",
            params={"_message": "message for " + '"log_structured" method'},
        )

        expected_messages = [
            'message for "log" method',
            'message for "info" method',
            'message for "error" method',
            'message for "log_structured" method',
        ]

        actual_messages = loggable._logger.handlers[0].messages
        self.assertEqual(expected_messages, actual_messages)
+
+
if __name__ == "__main__":
    # mozunit dispatches to the appropriate runner when executed directly.
    mozunit.main()
diff --git a/testing/mozbase/mozlog/tests/test_logtypes.py b/testing/mozbase/mozlog/tests/test_logtypes.py
new file mode 100644
index 0000000000..177d25cdb0
--- /dev/null
+++ b/testing/mozbase/mozlog/tests/test_logtypes.py
@@ -0,0 +1,106 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+
+import mozunit
+from mozlog.logtypes import Any, Dict, Int, List, TestList, Tuple, Unicode
+
+
class TestContainerTypes(unittest.TestCase):
    """Validation behavior of the container log types (Dict, List, Tuple)."""

    def test_dict_type_basic(self):
        """A Dict with no item type rejects everything; Any accepts all."""
        d = Dict("name")
        with self.assertRaises(ValueError):
            d({"foo": "bar"})

        d = Dict(Any, "name")
        d({"foo": "bar"})  # doesn't raise

    def test_dict_type_with_dictionary_item_type(self):
        """A {key_type: value_type} spec checks both sides of each item."""
        d = Dict({Int: Int}, "name")
        with self.assertRaises(ValueError):
            d({"foo": 1})

        with self.assertRaises(ValueError):
            d({1: "foo"})

        d({1: 2})  # doesn't raise

    def test_dict_type_with_recursive_item_types(self):
        """Item types may nest arbitrarily; validation recurses into them."""
        d = Dict(Dict({Unicode: List(Int)}), "name")
        with self.assertRaises(ValueError):
            d({"foo": "bar"})

        with self.assertRaises(ValueError):
            d({"foo": {"bar": "baz"}})

        with self.assertRaises(ValueError):
            d({"foo": {"bar": ["baz"]}})

        d({"foo": {"bar": [1]}})  # doesn't raise

    def test_list_type_basic(self):
        """A List with no item type rejects everything; Any accepts all."""
        # Local renamed from ``l``: single-letter l is easily confused with
        # the digit 1 (PEP 8 / E741).
        lst = List("name")
        with self.assertRaises(ValueError):
            lst(["foo"])

        lst = List(Any, "name")
        lst(["foo", 1])  # doesn't raise

    def test_list_type_with_recursive_item_types(self):
        """List item types may nest through Dict and Tuple."""
        lst = List(Dict(List(Tuple((Unicode, Int)))), "name")
        with self.assertRaises(ValueError):
            lst(["foo"])

        with self.assertRaises(ValueError):
            lst([{"foo": "bar"}])

        with self.assertRaises(ValueError):
            lst([{"foo": ["bar"]}])

        lst([{"foo": [("bar", 1)]}])  # doesn't raise

    def test_tuple_type_basic(self):
        """A Tuple with no item type rejects everything; Any accepts all."""
        t = Tuple("name")
        with self.assertRaises(ValueError):
            t((1,))

        t = Tuple(Any, "name")
        t((1,))  # doesn't raise

    def test_tuple_type_with_tuple_item_type(self):
        """A tuple item spec checks each position against its own type."""
        t = Tuple((Unicode, Int))
        with self.assertRaises(ValueError):
            t(("foo", "bar"))

        t(("foo", 1))  # doesn't raise

    def test_tuple_type_with_recursive_item_types(self):
        """Tuple positions may themselves hold nested container types."""
        t = Tuple((Dict(List(Any)), List(Dict(Any)), Unicode), "name")
        with self.assertRaises(ValueError):
            t(({"foo": "bar"}, [{"foo": "bar"}], "foo"))

        with self.assertRaises(ValueError):
            t(({"foo": ["bar"]}, ["foo"], "foo"))

        t(({"foo": ["bar"]}, [{"foo": "bar"}], "foo"))  # doesn't raise
+
+
class TestDataTypes(unittest.TestCase):
    """Validation behavior of the TestList data type."""

    def test_test_list(self):
        """TestList rejects scalars and malformed dicts, and accepts either
        a plain list of tests or a {group: [tests]} mapping."""
        t = TestList("name")
        with self.assertRaises(ValueError):
            t("foo")

        with self.assertRaises(ValueError):
            t({"foo": 1})

        d1 = t({"default": ["bar"]})  # doesn't raise
        d2 = t(["bar"])  # doesn't raise

        # assertDictContainsSubset was deprecated in 3.2 and removed in
        # Python 3.12; assert the subset relation directly via dict items
        # views instead (works with unhashable values such as lists).
        self.assertLessEqual(d1.items(), d2.items())
+
+
if __name__ == "__main__":
    # mozunit dispatches to the appropriate runner when executed directly.
    mozunit.main()
diff --git a/testing/mozbase/mozlog/tests/test_structured.py b/testing/mozbase/mozlog/tests/test_structured.py
new file mode 100644
index 0000000000..4dc0993263
--- /dev/null
+++ b/testing/mozbase/mozlog/tests/test_structured.py
@@ -0,0 +1,1162 @@
+# -*- coding: utf-8 -*-
+
+import argparse
+import json
+import optparse
+import os
+import sys
+import unittest
+
+import mozfile
+import mozunit
+import six
+from mozlog import commandline, formatters, handlers, reader, stdadapter, structuredlog
+from six import StringIO
+
+
+class TestHandler(object):
+ def __init__(self):
+ self.items = []
+
+ def __call__(self, data):
+ self.items.append(data)
+
+ @property
+ def last_item(self):
+ return self.items[-1]
+
+ @property
+ def empty(self):
+ return not self.items
+
+
+class BaseStructuredTest(unittest.TestCase):
+ def setUp(self):
+ self.logger = structuredlog.StructuredLogger("test")
+ self.handler = TestHandler()
+ self.logger.add_handler(self.handler)
+
+ def pop_last_item(self):
+ return self.handler.items.pop()
+
+ def assert_log_equals(self, expected, actual=None):
+ if actual is None:
+ actual = self.pop_last_item()
+
+ all_expected = {"pid": os.getpid(), "thread": "MainThread", "source": "test"}
+ specials = set(["time"])
+
+ all_expected.update(expected)
+ for key, value in six.iteritems(all_expected):
+ self.assertEqual(actual[key], value)
+
+ self.assertEqual(set(all_expected.keys()) | specials, set(actual.keys()))
+
+
+class TestStatusHandler(BaseStructuredTest):
+ def setUp(self):
+ super(TestStatusHandler, self).setUp()
+ self.handler = handlers.StatusHandler()
+ self.logger.add_handler(self.handler)
+
+ def test_failure_run(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status("test1", "sub1", status="PASS")
+ self.logger.test_status("test1", "sub2", status="TIMEOUT")
+ self.logger.test_status(
+ "test1", "sub3", status="FAIL", expected="PASS", known_intermittent=["FAIL"]
+ )
+ self.logger.test_end("test1", status="OK")
+ self.logger.suite_end()
+ summary = self.handler.summarize()
+ self.assertIn("TIMEOUT", summary.unexpected_statuses)
+ self.assertEqual(1, summary.unexpected_statuses["TIMEOUT"])
+ self.assertIn("PASS", summary.expected_statuses)
+ self.assertEqual(1, summary.expected_statuses["PASS"])
+ self.assertIn("OK", summary.expected_statuses)
+ self.assertEqual(1, summary.expected_statuses["OK"])
+ self.assertIn("FAIL", summary.expected_statuses)
+ self.assertEqual(1, summary.expected_statuses["FAIL"])
+ self.assertIn("FAIL", summary.known_intermittent_statuses)
+ self.assertEqual(1, summary.known_intermittent_statuses["FAIL"])
+ self.assertEqual(3, summary.action_counts["test_status"])
+ self.assertEqual(1, summary.action_counts["test_end"])
+
+ def test_precondition_failed_run(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_end("test1", status="PRECONDITION_FAILED")
+ self.logger.test_start("test2")
+ self.logger.test_status("test2", "sub1", status="PRECONDITION_FAILED")
+ self.logger.test_end("test2", status="OK")
+ self.logger.suite_end()
+ summary = self.handler.summarize()
+ self.assertEqual(1, summary.expected_statuses["OK"])
+ self.assertEqual(2, summary.unexpected_statuses["PRECONDITION_FAILED"])
+
+ def test_error_run(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.error("ERRR!")
+ self.logger.test_end("test1", status="OK")
+ self.logger.test_start("test2")
+ self.logger.test_end("test2", status="OK")
+ self.logger.suite_end()
+ summary = self.handler.summarize()
+ self.assertIn("ERROR", summary.log_level_counts)
+ self.assertEqual(1, summary.log_level_counts["ERROR"])
+ self.assertIn("OK", summary.expected_statuses)
+ self.assertEqual(2, summary.expected_statuses["OK"])
+
+
+class TestSummaryHandler(BaseStructuredTest):
+ def setUp(self):
+ super(TestSummaryHandler, self).setUp()
+ self.handler = handlers.SummaryHandler()
+ self.logger.add_handler(self.handler)
+
+ def test_failure_run(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status("test1", "sub1", status="PASS")
+ self.logger.test_status("test1", "sub2", status="TIMEOUT")
+ self.logger.assertion_count("test1", 5, 1, 10)
+ self.logger.assertion_count("test1", 5, 10, 15)
+ self.logger.test_end("test1", status="OK")
+ self.logger.suite_end()
+
+ counts = self.handler.current["counts"]
+ self.assertIn("timeout", counts["subtest"]["unexpected"])
+ self.assertEqual(1, counts["subtest"]["unexpected"]["timeout"])
+ self.assertIn("pass", counts["subtest"]["expected"])
+ self.assertEqual(1, counts["subtest"]["expected"]["pass"])
+ self.assertIn("ok", counts["test"]["expected"])
+ self.assertEqual(1, counts["test"]["expected"]["ok"])
+ self.assertIn("pass", counts["assert"]["unexpected"])
+ self.assertEqual(1, counts["assert"]["unexpected"]["pass"])
+ self.assertIn("fail", counts["assert"]["expected"])
+ self.assertEqual(1, counts["assert"]["expected"]["fail"])
+
+ logs = self.handler.current["unexpected_logs"]
+ self.assertEqual(1, len(logs))
+ self.assertIn("test1", logs)
+ self.assertEqual(1, len(logs["test1"]))
+ self.assertEqual("sub2", logs["test1"][0]["subtest"])
+
+ def test_precondition_failed_run(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status("test1", "sub1", status="PASS")
+ self.logger.test_end("test1", status="PRECONDITION_FAILED")
+ self.logger.test_start("test2")
+ self.logger.test_status("test2", "sub1", status="PRECONDITION_FAILED")
+ self.logger.test_status("test2", "sub2", status="PRECONDITION_FAILED")
+ self.logger.test_end("test2", status="OK")
+ self.logger.suite_end()
+
+ counts = self.handler.current["counts"]
+ self.assertIn("precondition_failed", counts["test"]["unexpected"])
+ self.assertEqual(1, counts["test"]["unexpected"]["precondition_failed"])
+ self.assertIn("pass", counts["subtest"]["expected"])
+ self.assertEqual(1, counts["subtest"]["expected"]["pass"])
+ self.assertIn("ok", counts["test"]["expected"])
+ self.assertEqual(1, counts["test"]["expected"]["ok"])
+ self.assertIn("precondition_failed", counts["subtest"]["unexpected"])
+ self.assertEqual(2, counts["subtest"]["unexpected"]["precondition_failed"])
+
+
+class TestStructuredLog(BaseStructuredTest):
+ def test_suite_start(self):
+ self.logger.suite_start(["test"], "logtest")
+ self.assert_log_equals(
+ {"action": "suite_start", "name": "logtest", "tests": {"default": ["test"]}}
+ )
+ self.logger.suite_end()
+
+ def test_suite_end(self):
+ self.logger.suite_start([])
+ self.logger.suite_end()
+ self.assert_log_equals({"action": "suite_end"})
+
+ def test_add_subsuite(self):
+ self.logger.suite_start([])
+ self.logger.add_subsuite("other")
+ self.assert_log_equals(
+ {
+ "action": "add_subsuite",
+ "name": "other",
+ "run_info": {"subsuite": "other"},
+ }
+ )
+ self.logger.suite_end()
+
+ def test_add_subsuite_duplicate(self):
+ self.logger.suite_start([])
+ self.logger.add_subsuite("other")
+ # This should be a no-op
+ self.logger.add_subsuite("other")
+ self.assert_log_equals(
+ {
+ "action": "add_subsuite",
+ "name": "other",
+ "run_info": {"subsuite": "other"},
+ }
+ )
+ self.assert_log_equals({"action": "suite_start", "tests": {"default": []}})
+
+ self.logger.suite_end()
+
+ def test_start(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.assert_log_equals({"action": "test_start", "test": "test1"})
+
+ self.logger.test_start(("test1", "==", "test1-ref"), path="path/to/test")
+ self.assert_log_equals(
+ {
+ "action": "test_start",
+ "test": ("test1", "==", "test1-ref"),
+ "path": "path/to/test",
+ }
+ )
+ self.logger.suite_end()
+
+ def test_start_inprogress(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_start("test1")
+ self.assert_log_equals(
+ {
+ "action": "log",
+ "message": "test_start for test1 logged while in progress.",
+ "level": "ERROR",
+ }
+ )
+ self.logger.suite_end()
+
+ def test_start_inprogress_subsuite(self):
+ self.logger.suite_start([])
+ self.logger.add_subsuite("other")
+ self.logger.test_start("test1")
+ self.logger.test_start("test1", subsuite="other")
+ self.assert_log_equals(
+ {
+ "action": "test_start",
+ "test": "test1",
+ "subsuite": "other",
+ }
+ )
+ self.logger.suite_end()
+
+ def test_status(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status(
+ "test1", "subtest name", "fail", expected="FAIL", message="Test message"
+ )
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "subtest": "subtest name",
+ "status": "FAIL",
+ "message": "Test message",
+ "test": "test1",
+ }
+ )
+ self.logger.test_end("test1", "OK")
+ self.logger.suite_end()
+
+ def test_status_1(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status("test1", "subtest name", "fail")
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "subtest": "subtest name",
+ "status": "FAIL",
+ "expected": "PASS",
+ "test": "test1",
+ }
+ )
+ self.logger.test_end("test1", "OK")
+ self.logger.suite_end()
+
+ def test_status_2(self):
+ self.assertRaises(
+ ValueError,
+ self.logger.test_status,
+ "test1",
+ "subtest name",
+ "XXXUNKNOWNXXX",
+ )
+
+ def test_status_extra(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status(
+ "test1", "subtest name", "FAIL", expected="PASS", extra={"data": 42}
+ )
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "subtest": "subtest name",
+ "status": "FAIL",
+ "expected": "PASS",
+ "test": "test1",
+ "extra": {"data": 42},
+ }
+ )
+ self.logger.test_end("test1", "OK")
+ self.logger.suite_end()
+
+ def test_status_stack(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status(
+ "test1",
+ "subtest name",
+ "FAIL",
+ expected="PASS",
+ stack="many\nlines\nof\nstack",
+ )
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "subtest": "subtest name",
+ "status": "FAIL",
+ "expected": "PASS",
+ "test": "test1",
+ "stack": "many\nlines\nof\nstack",
+ }
+ )
+ self.logger.test_end("test1", "OK")
+ self.logger.suite_end()
+
+ def test_status_known_intermittent(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status(
+ "test1", "subtest name", "fail", known_intermittent=["FAIL"]
+ )
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "subtest": "subtest name",
+ "status": "FAIL",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"],
+ "test": "test1",
+ }
+ )
+ self.logger.test_end("test1", "OK")
+ self.logger.suite_end()
+
+ def test_status_not_started(self):
+ self.logger.test_status("test_UNKNOWN", "subtest", "PASS")
+ self.assertTrue(
+ self.pop_last_item()["message"].startswith(
+ "test_status for test_UNKNOWN logged while not in progress. Logged with data: {"
+ )
+ )
+
+ def test_remove_optional_defaults(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status(
+ "test1", "subtest name", "fail", message=None, stack=None
+ )
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "subtest": "subtest name",
+ "status": "FAIL",
+ "expected": "PASS",
+ "test": "test1",
+ }
+ )
+ self.logger.test_end("test1", "OK")
+ self.logger.suite_end()
+
+ def test_remove_optional_defaults_raw_log(self):
+ self.logger.log_raw({"action": "suite_start", "tests": [1], "name": None})
+ self.assert_log_equals({"action": "suite_start", "tests": {"default": ["1"]}})
+ self.logger.suite_end()
+
+ def test_end(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_end("test1", "fail", message="Test message")
+ self.assert_log_equals(
+ {
+ "action": "test_end",
+ "status": "FAIL",
+ "expected": "OK",
+ "message": "Test message",
+ "test": "test1",
+ }
+ )
+ self.logger.suite_end()
+
+ def test_end_1(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_end("test1", "PASS", expected="PASS", extra={"data": 123})
+ self.assert_log_equals(
+ {
+ "action": "test_end",
+ "status": "PASS",
+ "extra": {"data": 123},
+ "test": "test1",
+ }
+ )
+ self.logger.suite_end()
+
+ def test_end_2(self):
+ self.assertRaises(ValueError, self.logger.test_end, "test1", "XXXUNKNOWNXXX")
+
+ def test_end_stack(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_end(
+ "test1", "PASS", expected="PASS", stack="many\nlines\nof\nstack"
+ )
+ self.assert_log_equals(
+ {
+ "action": "test_end",
+ "status": "PASS",
+ "test": "test1",
+ "stack": "many\nlines\nof\nstack",
+ }
+ )
+ self.logger.suite_end()
+
+ def test_end_no_start(self):
+ self.logger.test_end("test1", "PASS", expected="PASS")
+ self.assertTrue(
+ self.pop_last_item()["message"].startswith(
+ "test_end for test1 logged while not in progress. Logged with data: {"
+ )
+ )
+ self.logger.suite_end()
+
+ def test_end_no_start_subsuite(self):
+ self.logger.suite_start([])
+ self.logger.add_subsuite("other")
+ self.logger.test_start("test1", subsuite="other")
+ self.logger.test_end("test1", "PASS", expected="PASS")
+ self.assertTrue(
+ self.pop_last_item()["message"].startswith(
+ "test_end for test1 logged while not in progress. Logged with data: {"
+ )
+ )
+ self.logger.test_end("test1", "OK", subsuite="other")
+ self.assert_log_equals(
+ {
+ "action": "test_end",
+ "status": "OK",
+ "test": "test1",
+ "subsuite": "other",
+ }
+ )
+ self.logger.suite_end()
+
+ def test_end_twice(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test2")
+ self.logger.test_end("test2", "PASS", expected="PASS")
+ self.assert_log_equals(
+ {"action": "test_end", "status": "PASS", "test": "test2"}
+ )
+ self.logger.test_end("test2", "PASS", expected="PASS")
+ last_item = self.pop_last_item()
+ self.assertEqual(last_item["action"], "log")
+ self.assertEqual(last_item["level"], "ERROR")
+ self.assertTrue(
+ last_item["message"].startswith(
+ "test_end for test2 logged while not in progress. Logged with data: {"
+ )
+ )
+ self.logger.suite_end()
+
+ def test_suite_start_twice(self):
+ self.logger.suite_start([])
+ self.assert_log_equals({"action": "suite_start", "tests": {"default": []}})
+ self.logger.suite_start([])
+ last_item = self.pop_last_item()
+ self.assertEqual(last_item["action"], "log")
+ self.assertEqual(last_item["level"], "ERROR")
+ self.logger.suite_end()
+
+ def test_suite_end_no_start(self):
+ self.logger.suite_start([])
+ self.assert_log_equals({"action": "suite_start", "tests": {"default": []}})
+ self.logger.suite_end()
+ self.assert_log_equals({"action": "suite_end"})
+ self.logger.suite_end()
+ last_item = self.pop_last_item()
+ self.assertEqual(last_item["action"], "log")
+ self.assertEqual(last_item["level"], "ERROR")
+
+ def test_multiple_loggers_suite_start(self):
+ logger1 = structuredlog.StructuredLogger("test")
+ self.logger.suite_start([])
+ logger1.suite_start([])
+ last_item = self.pop_last_item()
+ self.assertEqual(last_item["action"], "log")
+ self.assertEqual(last_item["level"], "ERROR")
+
+ def test_multiple_loggers_test_start(self):
+ logger1 = structuredlog.StructuredLogger("test")
+ self.logger.suite_start([])
+ self.logger.test_start("test")
+ logger1.test_start("test")
+ last_item = self.pop_last_item()
+ self.assertEqual(last_item["action"], "log")
+ self.assertEqual(last_item["level"], "ERROR")
+
+ def test_process(self):
+ self.logger.process_output(1234, "test output")
+ self.assert_log_equals(
+ {"action": "process_output", "process": "1234", "data": "test output"}
+ )
+
+ def test_process_start(self):
+ self.logger.process_start(1234)
+ self.assert_log_equals({"action": "process_start", "process": "1234"})
+
+ def test_process_exit(self):
+ self.logger.process_exit(1234, 0)
+ self.assert_log_equals(
+ {"action": "process_exit", "process": "1234", "exitcode": 0}
+ )
+
+ def test_log(self):
+ for level in ["critical", "error", "warning", "info", "debug"]:
+ getattr(self.logger, level)("message")
+ self.assert_log_equals(
+ {"action": "log", "level": level.upper(), "message": "message"}
+ )
+
+ def test_logging_adapter(self):
+ import logging
+
+ logging.basicConfig(level="DEBUG")
+ old_level = logging.root.getEffectiveLevel()
+ logging.root.setLevel("DEBUG")
+
+ std_logger = logging.getLogger("test")
+ std_logger.setLevel("DEBUG")
+
+ logger = stdadapter.std_logging_adapter(std_logger)
+
+ try:
+ for level in ["critical", "error", "warning", "info", "debug"]:
+ getattr(logger, level)("message")
+ self.assert_log_equals(
+ {"action": "log", "level": level.upper(), "message": "message"}
+ )
+ finally:
+ logging.root.setLevel(old_level)
+
+ def test_add_remove_handlers(self):
+ handler = TestHandler()
+ self.logger.add_handler(handler)
+ self.logger.info("test1")
+
+ self.assert_log_equals({"action": "log", "level": "INFO", "message": "test1"})
+
+ self.assert_log_equals(
+ {"action": "log", "level": "INFO", "message": "test1"},
+ actual=handler.last_item,
+ )
+
+ self.logger.remove_handler(handler)
+ self.logger.info("test2")
+
+ self.assert_log_equals({"action": "log", "level": "INFO", "message": "test2"})
+
+ self.assert_log_equals(
+ {"action": "log", "level": "INFO", "message": "test1"},
+ actual=handler.last_item,
+ )
+
+ def test_wrapper(self):
+ file_like = structuredlog.StructuredLogFileLike(self.logger)
+
+ file_like.write("line 1")
+
+ self.assert_log_equals({"action": "log", "level": "INFO", "message": "line 1"})
+
+ file_like.write("line 2\n")
+
+ self.assert_log_equals({"action": "log", "level": "INFO", "message": "line 2"})
+
+ file_like.write("line 3\r")
+
+ self.assert_log_equals({"action": "log", "level": "INFO", "message": "line 3"})
+
+ file_like.write("line 4\r\n")
+
+ self.assert_log_equals({"action": "log", "level": "INFO", "message": "line 4"})
+
+ def test_shutdown(self):
+ # explicit shutdown
+ log = structuredlog.StructuredLogger("test 1")
+ log.add_handler(self.handler)
+ log.info("line 1")
+ self.assert_log_equals(
+ {"action": "log", "level": "INFO", "message": "line 1", "source": "test 1"}
+ )
+ log.shutdown()
+ self.assert_log_equals({"action": "shutdown", "source": "test 1"})
+ with self.assertRaises(structuredlog.LoggerShutdownError):
+ log.info("bad log")
+ with self.assertRaises(structuredlog.LoggerShutdownError):
+ log.log_raw({"action": "log", "level": "info", "message": "bad log"})
+
+ # shutdown still applies to new instances
+ del log
+ log = structuredlog.StructuredLogger("test 1")
+ with self.assertRaises(structuredlog.LoggerShutdownError):
+ log.info("bad log")
+
+ # context manager shutdown
+ with structuredlog.StructuredLogger("test 2") as log:
+ log.add_handler(self.handler)
+ log.info("line 2")
+ self.assert_log_equals(
+ {
+ "action": "log",
+ "level": "INFO",
+ "message": "line 2",
+ "source": "test 2",
+ }
+ )
+ self.assert_log_equals({"action": "shutdown", "source": "test 2"})
+
+ # shutdown prevents logging across instances
+ log1 = structuredlog.StructuredLogger("test 3")
+ log2 = structuredlog.StructuredLogger("test 3", component="bar")
+ log1.shutdown()
+ with self.assertRaises(structuredlog.LoggerShutdownError):
+ log2.info("line 3")
+
+
+class TestTypeConversions(BaseStructuredTest):
+ def test_raw(self):
+ self.logger.log_raw({"action": "suite_start", "tests": [1], "time": "1234"})
+ self.assert_log_equals(
+ {"action": "suite_start", "tests": {"default": ["1"]}, "time": 1234}
+ )
+ self.logger.suite_end()
+
+ def test_tuple(self):
+ self.logger.suite_start([])
+ if six.PY3:
+ self.logger.test_start(
+ (
+ b"\xf0\x90\x8d\x84\xf0\x90\x8c\xb4\xf0\x90"
+ b"\x8d\x83\xf0\x90\x8d\x84".decode(),
+ 42,
+ "\u16a4",
+ )
+ )
+ else:
+ self.logger.test_start(
+ (
+ "\xf0\x90\x8d\x84\xf0\x90\x8c\xb4\xf0\x90"
+ "\x8d\x83\xf0\x90\x8d\x84",
+ 42,
+ "\u16a4",
+ )
+ )
+ self.assert_log_equals(
+ {
+ "action": "test_start",
+ "test": ("\U00010344\U00010334\U00010343\U00010344", "42", "\u16a4"),
+ }
+ )
+ self.logger.suite_end()
+
+ def test_non_string_messages(self):
+ self.logger.suite_start([])
+ self.logger.info(1)
+ self.assert_log_equals({"action": "log", "message": "1", "level": "INFO"})
+ self.logger.info([1, (2, "3"), "s", "s" + chr(255)])
+ if six.PY3:
+ self.assert_log_equals(
+ {
+ "action": "log",
+ "message": "[1, (2, '3'), 's', 's\xff']",
+ "level": "INFO",
+ }
+ )
+ else:
+ self.assert_log_equals(
+ {
+ "action": "log",
+ "message": "[1, (2, '3'), 's', 's\\xff']",
+ "level": "INFO",
+ }
+ )
+
+ self.logger.suite_end()
+
+ def test_utf8str_write(self):
+ with mozfile.NamedTemporaryFile() as logfile:
+ _fmt = formatters.TbplFormatter()
+ _handler = handlers.StreamHandler(logfile, _fmt)
+ self.logger.add_handler(_handler)
+ self.logger.suite_start([])
+ self.logger.info("☺")
+ logfile.seek(0)
+ data = logfile.readlines()[-1].strip()
+ if six.PY3:
+ self.assertEqual(data.decode(), "☺")
+ else:
+ self.assertEqual(data, "☺")
+ self.logger.suite_end()
+ self.logger.remove_handler(_handler)
+
+ def test_arguments(self):
+ self.logger.info(message="test")
+ self.assert_log_equals({"action": "log", "message": "test", "level": "INFO"})
+
+ self.logger.suite_start([], run_info={})
+ self.assert_log_equals(
+ {"action": "suite_start", "tests": {"default": []}, "run_info": {}}
+ )
+ self.logger.test_start(test="test1")
+ self.logger.test_status("subtest1", "FAIL", test="test1", status="PASS")
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "test": "test1",
+ "subtest": "subtest1",
+ "status": "PASS",
+ "expected": "FAIL",
+ }
+ )
+ self.logger.process_output(123, "data", "test")
+ self.assert_log_equals(
+ {
+ "action": "process_output",
+ "process": "123",
+ "command": "test",
+ "data": "data",
+ }
+ )
+ self.assertRaises(
+ TypeError,
+ self.logger.test_status,
+ subtest="subtest2",
+ status="FAIL",
+ expected="PASS",
+ )
+ self.assertRaises(
+ TypeError,
+ self.logger.test_status,
+ "test1",
+ "subtest1",
+ "group1",
+ "PASS",
+ "FAIL",
+ "message",
+ "stack",
+ {},
+ [],
+ None,
+ "unexpected",
+ )
+ self.assertRaises(TypeError, self.logger.test_status, "test1", test="test2")
+ self.logger.suite_end()
+
+
+class TestComponentFilter(BaseStructuredTest):
+ def test_filter_component(self):
+ component_logger = structuredlog.StructuredLogger(
+ self.logger.name, "test_component"
+ )
+ component_logger.component_filter = handlers.LogLevelFilter(lambda x: x, "info")
+
+ self.logger.debug("Test")
+ self.assertFalse(self.handler.empty)
+ self.assert_log_equals({"action": "log", "level": "DEBUG", "message": "Test"})
+ self.assertTrue(self.handler.empty)
+
+ component_logger.info("Test 1")
+ self.assertFalse(self.handler.empty)
+ self.assert_log_equals(
+ {
+ "action": "log",
+ "level": "INFO",
+ "message": "Test 1",
+ "component": "test_component",
+ }
+ )
+
+ component_logger.debug("Test 2")
+ self.assertTrue(self.handler.empty)
+
+ component_logger.component_filter = None
+
+ component_logger.debug("Test 3")
+ self.assertFalse(self.handler.empty)
+ self.assert_log_equals(
+ {
+ "action": "log",
+ "level": "DEBUG",
+ "message": "Test 3",
+ "component": "test_component",
+ }
+ )
+
+ def test_filter_default_component(self):
+ component_logger = structuredlog.StructuredLogger(
+ self.logger.name, "test_component"
+ )
+
+ self.logger.debug("Test")
+ self.assertFalse(self.handler.empty)
+ self.assert_log_equals({"action": "log", "level": "DEBUG", "message": "Test"})
+
+ self.logger.component_filter = handlers.LogLevelFilter(lambda x: x, "info")
+
+ self.logger.debug("Test 1")
+ self.assertTrue(self.handler.empty)
+
+ component_logger.debug("Test 2")
+ self.assertFalse(self.handler.empty)
+ self.assert_log_equals(
+ {
+ "action": "log",
+ "level": "DEBUG",
+ "message": "Test 2",
+ "component": "test_component",
+ }
+ )
+
+ self.logger.component_filter = None
+
+ self.logger.debug("Test 3")
+ self.assertFalse(self.handler.empty)
+ self.assert_log_equals({"action": "log", "level": "DEBUG", "message": "Test 3"})
+
+ def test_filter_message_mutate(self):
+ def filter_mutate(msg):
+ if msg["action"] == "log":
+ msg["message"] = "FILTERED! %s" % msg["message"]
+ return msg
+
+ self.logger.component_filter = filter_mutate
+ self.logger.debug("Test")
+ self.assert_log_equals(
+ {"action": "log", "level": "DEBUG", "message": "FILTERED! Test"}
+ )
+ self.logger.component_filter = None
+
+
+class TestCommandline(unittest.TestCase):
+ def setUp(self):
+ self.logfile = mozfile.NamedTemporaryFile()
+
+ @property
+ def loglines(self):
+ self.logfile.seek(0)
+ return [line.rstrip() for line in self.logfile.readlines()]
+
+ def test_setup_logging(self):
+ parser = argparse.ArgumentParser()
+ commandline.add_logging_group(parser)
+ args = parser.parse_args(["--log-raw=-"])
+ logger = commandline.setup_logging("test_setup_logging", args, {})
+ self.assertEqual(len(logger.handlers), 1)
+
+ def test_setup_logging_optparse(self):
+ parser = optparse.OptionParser()
+ commandline.add_logging_group(parser)
+ args, _ = parser.parse_args(["--log-raw=-"])
+ logger = commandline.setup_logging("test_optparse", args, {})
+ self.assertEqual(len(logger.handlers), 1)
+ self.assertIsInstance(logger.handlers[0], handlers.StreamHandler)
+
+ def test_limit_formatters(self):
+ parser = argparse.ArgumentParser()
+ commandline.add_logging_group(parser, include_formatters=["raw"])
+ other_formatters = [fmt for fmt in commandline.log_formatters if fmt != "raw"]
+ # check that every formatter other than raw has been removed
+ for fmt in other_formatters:
+ with self.assertRaises(SystemExit):
+ parser.parse_args(["--log-%s=-" % fmt])
+ with self.assertRaises(SystemExit):
+ parser.parse_args(["--log-%s-level=error" % fmt])
+ # raw is still ok
+ args = parser.parse_args(["--log-raw=-"])
+ logger = commandline.setup_logging("test_setup_logging2", args, {})
+ self.assertEqual(len(logger.handlers), 1)
+
+ def test_setup_logging_optparse_unicode(self):
+ parser = optparse.OptionParser()
+ commandline.add_logging_group(parser)
+ args, _ = parser.parse_args(["--log-raw=-"])
+ logger = commandline.setup_logging("test_optparse_unicode", args, {})
+ self.assertEqual(len(logger.handlers), 1)
+ self.assertEqual(logger.handlers[0].stream, sys.stdout)
+ self.assertIsInstance(logger.handlers[0], handlers.StreamHandler)
+
+ def test_logging_defaultlevel(self):
+ parser = argparse.ArgumentParser()
+ commandline.add_logging_group(parser)
+
+ args = parser.parse_args(["--log-tbpl=%s" % self.logfile.name])
+ logger = commandline.setup_logging("test_fmtopts", args, {})
+ logger.info("INFO message")
+ logger.debug("DEBUG message")
+ logger.error("ERROR message")
+ # The debug level is not logged by default.
+ self.assertEqual([b"INFO message", b"ERROR message"], self.loglines)
+
+ def test_logging_errorlevel(self):
+ parser = argparse.ArgumentParser()
+ commandline.add_logging_group(parser)
+ args = parser.parse_args(
+ ["--log-tbpl=%s" % self.logfile.name, "--log-tbpl-level=error"]
+ )
+ logger = commandline.setup_logging("test_fmtopts", args, {})
+ logger.info("INFO message")
+ logger.debug("DEBUG message")
+ logger.error("ERROR message")
+
+ # Only the error level and above were requested.
+ self.assertEqual([b"ERROR message"], self.loglines)
+
+ def test_logging_debuglevel(self):
+ parser = argparse.ArgumentParser()
+ commandline.add_logging_group(parser)
+ args = parser.parse_args(
+ ["--log-tbpl=%s" % self.logfile.name, "--log-tbpl-level=debug"]
+ )
+ logger = commandline.setup_logging("test_fmtopts", args, {})
+ logger.info("INFO message")
+ logger.debug("DEBUG message")
+ logger.error("ERROR message")
+ # Requesting a lower log level than default works as expected.
+ self.assertEqual(
+ [b"INFO message", b"DEBUG message", b"ERROR message"], self.loglines
+ )
+
+ def test_unused_options(self):
+ parser = argparse.ArgumentParser()
+ commandline.add_logging_group(parser)
+ args = parser.parse_args(["--log-tbpl-level=error"])
+ self.assertRaises(
+ ValueError, commandline.setup_logging, "test_fmtopts", args, {}
+ )
+
+
+class TestBuffer(BaseStructuredTest):
+ def assert_log_equals(self, expected, actual=None):
+ if actual is None:
+ actual = self.pop_last_item()
+
+ all_expected = {
+ "pid": os.getpid(),
+ "thread": "MainThread",
+ "source": "testBuffer",
+ }
+ specials = set(["time"])
+
+ all_expected.update(expected)
+ for key, value in six.iteritems(all_expected):
+ self.assertEqual(actual[key], value)
+
+ self.assertEqual(set(all_expected.keys()) | specials, set(actual.keys()))
+
+ def setUp(self):
+ self.logger = structuredlog.StructuredLogger("testBuffer")
+ self.handler = handlers.BufferHandler(TestHandler(), message_limit=4)
+ self.logger.add_handler(self.handler)
+
+ def tearDown(self):
+ self.logger.remove_handler(self.handler)
+
+ def pop_last_item(self):
+ return self.handler.inner.items.pop()
+
+ def test_buffer_messages(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.send_message("buffer", "off")
+ self.logger.test_status("test1", "sub1", status="PASS")
+ # Even for buffered actions, the buffer does not interfere if
+ # buffering is turned off.
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "test": "test1",
+ "status": "PASS",
+ "subtest": "sub1",
+ }
+ )
+ self.logger.send_message("buffer", "on")
+ self.logger.test_status("test1", "sub2", status="PASS")
+ self.logger.test_status("test1", "sub3", status="PASS")
+ self.logger.test_status("test1", "sub4", status="PASS")
+ self.logger.test_status("test1", "sub5", status="PASS")
+ self.logger.test_status("test1", "sub6", status="PASS")
+ self.logger.test_status("test1", "sub7", status="PASS")
+ self.logger.test_end("test1", status="OK")
+ self.logger.send_message("buffer", "clear")
+ self.assert_log_equals({"action": "test_end", "test": "test1", "status": "OK"})
+ self.logger.suite_end()
+
+ def test_buffer_size(self):
+ self.logger.suite_start([])
+ self.logger.test_start("test1")
+ self.logger.test_status("test1", "sub1", status="PASS")
+ self.logger.test_status("test1", "sub2", status="PASS")
+ self.logger.test_status("test1", "sub3", status="PASS")
+ self.logger.test_status("test1", "sub4", status="PASS")
+ self.logger.test_status("test1", "sub5", status="PASS")
+ self.logger.test_status("test1", "sub6", status="PASS")
+ self.logger.test_status("test1", "sub7", status="PASS")
+
+ # No test status messages made it to the underlying handler.
+ self.assert_log_equals({"action": "test_start", "test": "test1"})
+
+ # The buffer's actual size never grows beyond the specified limit.
+ self.assertEqual(len(self.handler._buffer), 4)
+
+ self.logger.test_status("test1", "sub8", status="FAIL")
+ # The number of messages deleted comes back in a list.
+ self.assertEqual([4], self.logger.send_message("buffer", "flush"))
+
+ # When the buffer is dumped, the failure is the last thing logged
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "test": "test1",
+ "subtest": "sub8",
+ "status": "FAIL",
+ "expected": "PASS",
+ }
+ )
+ # Three additional messages should have been retained for context
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "test": "test1",
+ "status": "PASS",
+ "subtest": "sub7",
+ }
+ )
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "test": "test1",
+ "status": "PASS",
+ "subtest": "sub6",
+ }
+ )
+ self.assert_log_equals(
+ {
+ "action": "test_status",
+ "test": "test1",
+ "status": "PASS",
+ "subtest": "sub5",
+ }
+ )
+ self.assert_log_equals({"action": "suite_start", "tests": {"default": []}})
+
+
+class TestReader(unittest.TestCase):
+ def to_file_like(self, obj):
+ data_str = "\n".join(json.dumps(item) for item in obj)
+ return StringIO(data_str)
+
+ def test_read(self):
+ data = [
+ {"action": "action_0", "data": "data_0"},
+ {"action": "action_1", "data": "data_1"},
+ ]
+
+ f = self.to_file_like(data)
+ self.assertEqual(data, list(reader.read(f)))
+
+ def test_imap_log(self):
+ data = [
+ {"action": "action_0", "data": "data_0"},
+ {"action": "action_1", "data": "data_1"},
+ ]
+
+ f = self.to_file_like(data)
+
+ def f_action_0(item):
+ return ("action_0", item["data"])
+
+ def f_action_1(item):
+ return ("action_1", item["data"])
+
+ res_iter = reader.imap_log(
+ reader.read(f), {"action_0": f_action_0, "action_1": f_action_1}
+ )
+ self.assertEqual(
+ [("action_0", "data_0"), ("action_1", "data_1")], list(res_iter)
+ )
+
+ def test_each_log(self):
+ data = [
+ {"action": "action_0", "data": "data_0"},
+ {"action": "action_1", "data": "data_1"},
+ ]
+
+ f = self.to_file_like(data)
+
+ count = {"action_0": 0, "action_1": 0}
+
+ def f_action_0(item):
+ count[item["action"]] += 1
+
+ def f_action_1(item):
+ count[item["action"]] += 2
+
+ reader.each_log(
+ reader.read(f), {"action_0": f_action_0, "action_1": f_action_1}
+ )
+
+ self.assertEqual({"action_0": 1, "action_1": 2}, count)
+
+ def test_handler(self):
+ data = [
+ {"action": "action_0", "data": "data_0"},
+ {"action": "action_1", "data": "data_1"},
+ ]
+
+ f = self.to_file_like(data)
+
+ test = self
+
+ class ReaderTestHandler(reader.LogHandler):
+ def __init__(self):
+ self.action_0_count = 0
+ self.action_1_count = 0
+
+ def action_0(self, item):
+ test.assertEqual(item["action"], "action_0")
+ self.action_0_count += 1
+
+ def action_1(self, item):
+ test.assertEqual(item["action"], "action_1")
+ self.action_1_count += 1
+
+ handler = ReaderTestHandler()
+ reader.handle_log(reader.read(f), handler)
+
+ self.assertEqual(handler.action_0_count, 1)
+ self.assertEqual(handler.action_1_count, 1)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/mozbase/mozlog/tests/test_terminal_colors.py b/testing/mozbase/mozlog/tests/test_terminal_colors.py
new file mode 100644
index 0000000000..2dd72b7d53
--- /dev/null
+++ b/testing/mozbase/mozlog/tests/test_terminal_colors.py
@@ -0,0 +1,62 @@
+# encoding: utf-8
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+from io import StringIO
+
+import mozunit
+import pytest
+from mozterm import Terminal
+
+
+@pytest.fixture
+def terminal():
+ blessed = pytest.importorskip("blessed")
+
+ kind = "xterm-256color"
+ try:
+ term = Terminal(stream=StringIO(), force_styling=True, kind=kind)
+ except blessed.curses.error:
+ pytest.skip("terminal '{}' not found".format(kind))
+
+ return term
+
+
+EXPECTED_DICT = {
+ "log_test_status_fail": "\x1b[31mlog_test_status_fail\x1b(B\x1b[m",
+ "log_process_output": "\x1b[34mlog_process_output\x1b(B\x1b[m",
+ "log_test_status_pass": "\x1b[32mlog_test_status_pass\x1b(B\x1b[m",
+ "log_test_status_unexpected_fail": "\x1b[31mlog_test_status_unexpected_fail\x1b(B\x1b[m",
+ "log_test_status_known_intermittent": "\x1b[33mlog_test_status_known_intermittent\x1b(B\x1b[m",
+ "time": "\x1b[36mtime\x1b(B\x1b[m",
+ "action": "\x1b[33maction\x1b(B\x1b[m",
+ "pid": "\x1b[36mpid\x1b(B\x1b[m",
+ "heading": "\x1b[1m\x1b[33mheading\x1b(B\x1b[m",
+ "sub_heading": "\x1b[33msub_heading\x1b(B\x1b[m",
+ "error": "\x1b[31merror\x1b(B\x1b[m",
+ "warning": "\x1b[33mwarning\x1b(B\x1b[m",
+ "bold": "\x1b[1mbold\x1b(B\x1b[m",
+ "grey": "\x1b[38;2;190;190;190mgrey\x1b(B\x1b[m",
+ "normal": "\x1b[90mnormal\x1b(B\x1b[m",
+ "bright_black": "\x1b[90mbright_black\x1b(B\x1b[m",
+}
+
+
+@pytest.mark.skipif(
+ not sys.platform.startswith("win"),
+ reason="Only do ANSI Escape Sequence comparisons on Windows.",
+)
+def test_terminal_colors(terminal):
+ from mozlog.formatters.machformatter import TerminalColors, color_dict
+
+ actual_dict = TerminalColors(terminal, color_dict)
+
+ for key in color_dict:
+ assert getattr(actual_dict, key)(key) == EXPECTED_DICT[key]
+
+
+if __name__ == "__main__":
+ mozunit.main()