path: root/testing/raptor/raptor/raptor_profiling.py
blob: 289a160bb3e386b440201baa3bc0a2238cb79756
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""
Superclass to handle profiling in Raptor-Browsertime.
"""

import gzip
import json
import os
import tempfile

from logger.logger import RaptorLogger

here = os.path.dirname(os.path.realpath(__file__))
LOG = RaptorLogger(component="raptor-profiling")


class RaptorProfiling:
    """
    Superclass for handling profiling for Firefox and Chrom* applications.
    """

    def __init__(self, upload_dir, raptor_config, test_config):
        self.upload_dir = upload_dir
        self.raptor_config = raptor_config
        self.test_config = test_config

        # Create a temporary directory into which the tests can put
        # their profiles. These files will be assembled into one big
        # zip file later on, which is put into the MOZ_UPLOAD_DIR.
        self.temp_profile_dir = tempfile.mkdtemp()

    def _open_profile_file(self, profile_path):
        """Open a profile file given a path and return the contents."""
        if profile_path.endswith(".gz"):
            with gzip.open(profile_path, "r") as profile_file:
                profile = json.load(profile_file)
        else:
            with open(profile_path, "r", encoding="utf-8") as profile_file:
                profile = json.load(profile_file)
        return profile
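
    # Illustrative usage (the path is hypothetical): gzipped and plain
    # profiles both come back as the same parsed JSON object, e.g.
    #   profile = self._open_profile_file("/tmp/profiles/profile_0.json.gz")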

    def collect_profiles(self):
        """Collect and return all profile files"""

        def __get_test_type():
            """Returns the type of test that was run.

            For benchmark/scenario tests, we return those specific types,
            but for pageloads we return cold or warm depending on the --cold
            flag.
            """
            if self.test_config.get("type", "pageload") not in (
                "benchmark",
                "scenario",
            ):
                return "cold" if self.raptor_config.get("cold", False) else "warm"
            else:
                return self.test_config.get("type", "benchmark")

        res = []
        if self.raptor_config.get("browsertime"):
            topdir = self.raptor_config.get("browsertime_result_dir")

            # Get the browsertime.json file, along with the cold/warm splits
            # if they exist (i.e. from a chimera test).
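            #
            # An illustrative result layout (the file names are exactly the
            # ones matched below; the nesting is an assumption drawn from
            # how result_dir is chosen):
            #   <browsertime_result_dir>/
            #       browsertime.json        (plain pageload/benchmark run)
            #       cold-browsertime.json   (chimera cold split)
            #       warm-browsertime.json   (chimera warm split)
            #       profiling/              (searched instead on extra
            #                                profiler runs)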
            results = {"main": None, "cold": None, "warm": None}
            profiling_dir = os.path.join(topdir, "profiling")
            result_dir = profiling_dir if self._is_extra_profiler_run else topdir

            if not os.path.isdir(result_dir):
                # Result directory not found. Return early. The caller will
                # decide whether this should raise an error or not.
                LOG.info("Could not find the result directory.")
                return []
            for filename in os.listdir(result_dir):
                if filename == "browsertime.json":
                    results["main"] = os.path.join(result_dir, filename)
                elif filename == "cold-browsertime.json":
                    results["cold"] = os.path.join(result_dir, filename)
                elif filename == "warm-browsertime.json":
                    results["warm"] = os.path.join(result_dir, filename)
                if all(results.values()):
                    break

            if not any(results.values()):
                if self._is_extra_profiler_run:
                    LOG.info(
                        "Could not find any browsertime result JSONs in the artifacts "
                        " for the extra profiler run"
                    )
                    return []
                else:
                    raise Exception(
                        "Could not find any browsertime result JSONs in the artifacts"
                    )

            profile_locations = []
            if self.raptor_config.get("chimera", False):
                if results["warm"] is None or results["cold"] is None:
                    if self._is_extra_profiler_run:
                        LOG.info(
                            "The test ran in chimera mode but we found no cold "
                            "and warm browsertime JSONs. Cannot collect profiles. "
                            "Failing silently because this is an extra profiler run."
                        )
                        return []
                    else:
                        raise Exception(
                            "The test ran in chimera mode but we found no cold "
                            "and warm browsertime JSONs. Cannot collect profiles."
                        )
                profile_locations.extend(
                    [("cold", results["cold"]), ("warm", results["warm"])]
                )
            else:
                # When we don't run in chimera mode, it means that we either
                # ran a benchmark, a scenario test, or separate warm/cold
                # pageload tests.
                profile_locations.append(
                    (
                        __get_test_type(),
                        results["main"],
                    )
                )

            for testtype, results_json in profile_locations:
                with open(results_json, encoding="utf-8") as f:
                    data = json.load(f)
                results_dir = os.path.dirname(results_json)
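                # Each browsertime.json entry is expected to hold a "files"
                # mapping whose profile paths are relative to the JSON file
                # itself. Illustrative shape (the key comes from the
                # subclass):
                #   {"files": {self.profile_entry_string: ["profile_0.json"]}}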
                for entry in data:
                    try:
                        for rel_profile_path in entry["files"][
                            self.profile_entry_string
                        ]:
                            res.append(
                                {
                                    "path": os.path.join(results_dir, rel_profile_path),
                                    "type": testtype,
                                }
                            )
                    except KeyError:
                        if self._is_extra_profiler_run:
                            LOG.info("Failed to find profiles for extra profiler run.")
                        else:
                            LOG.error("Failed to find profiles.")
        else:
            # Raptor-webext stores its profiles in the self.temp_profile_dir
            # directory
            for profile in os.listdir(self.temp_profile_dir):
                res.append(
                    {
                        "path": os.path.join(self.temp_profile_dir, profile),
                        "type": __get_test_type(),
                    }
                )

        LOG.info("Found %s profiles: %s" % (len(res), str(res)))
        return res
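

# Illustrative sketch only: RaptorProfiling references `profile_entry_string`
# and `_is_extra_profiler_run` without defining them, so concrete subclasses
# (the Firefox and Chrom* handlers) are expected to supply both. The class
# name, key name, and config key below are assumptions for demonstration.
class _ExampleProfiling(RaptorProfiling):
    # Assumed key under each browsertime.json entry's "files" mapping.
    profile_entry_string = "profileFiles"

    def __init__(self, upload_dir, raptor_config, test_config):
        super().__init__(upload_dir, raptor_config, test_config)
        # Assumed flag marking a secondary, profiler-enabled run of the test.
        self._is_extra_profiler_run = raptor_config.get(
            "extra_profiler_run", False
        )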