path: root/taskcluster/gecko_taskgraph/loader/test.py
blob: c97acecd1aeab5689ab75aa7ebc14bbc6075ef4d (plain)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.


import logging

from taskgraph.util.yaml import load_yaml

from gecko_taskgraph.util.copy_task import copy_task

from .transform import loader as transform_loader

logger = logging.getLogger(__name__)


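# The loader combines two YAML files read from the kind directory (`path`).
# The shapes below are illustrative only; the platform and suite names are
# hypothetical examples, not real configuration.
#
#   test-platforms.yml maps a test platform to its build platform and test sets:
#       {"linux64-qr/opt": {"build-platform": "linux64/opt",
#                           "test-sets": ["mochitest", "reftest"]}}
#
#   test-sets.yml maps a test-set name to a list of test names:
#       {"mochitest": ["mochitest-plain", "mochitest-chrome"],
#        "reftest": ["reftest", "crashtest"]}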
def loader(kind, path, config, params, loaded_tasks):
    """
    Generate tasks implementing Gecko tests.
    """

    builds_by_platform = get_builds_by_platform(
        dep_kind="build", loaded_tasks=loaded_tasks
    )
    signed_builds_by_platform = get_builds_by_platform(
        dep_kind="build-signing", loaded_tasks=loaded_tasks
    )

    # get the test platforms for those build tasks
    test_platforms_cfg = load_yaml(path, "test-platforms.yml")
    test_platforms = get_test_platforms(
        test_platforms_cfg, builds_by_platform, signed_builds_by_platform
    )

    # expand the test sets for each of those platforms
    test_sets_cfg = load_yaml(path, "test-sets.yml")
    test_platforms = expand_tests(test_sets_cfg, test_platforms)

    # load the test descriptions
    tests = transform_loader(kind, path, config, params, loaded_tasks)
    test_descriptions = {t.pop("name"): t for t in tests}

    # generate all tests for all test platforms
    for test_platform_name, test_platform in test_platforms.items():
        for test_name in test_platform["test-names"]:
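            # copy_task gives us our own copy of the shared description, so the
            # per-platform fields set below don't leak into other platforms.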
            test = copy_task(test_descriptions[test_name])
            test["build-platform"] = test_platform["build-platform"]
            test["test-platform"] = test_platform_name
            test["build-label"] = test_platform["build-label"]
            if test_platform.get("build-signing-label", None):
                test["build-signing-label"] = test_platform["build-signing-label"]

            test["build-attributes"] = test_platform["build-attributes"]
            test["test-name"] = test_name
            if test_platform.get("shippable"):
                test.setdefault("attributes", {})["shippable"] = True
                test["attributes"]["shipping_product"] = test_platform[
                    "shipping_product"
                ]

            logger.debug(
                "Generating tasks for test {} on platform {}".format(
                    test_name, test["test-platform"]
                )
            )
            yield test


def get_builds_by_platform(dep_kind, loaded_tasks):
    """Find the build tasks on which tests will depend, keyed by
    platform/type.  Returns a dictionary mapping build platform to task."""
    builds_by_platform = {}
    for task in loaded_tasks:
        if task.kind != dep_kind:
            continue

        build_platform = task.attributes.get("build_platform")
        build_type = task.attributes.get("build_type")
        if not build_platform or not build_type:
            continue
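        # Builds are keyed as "<build_platform>/<build_type>", e.g. "linux64/debug".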
        platform = f"{build_platform}/{build_type}"
        if platform in builds_by_platform:
            raise Exception("multiple build jobs for " + platform)
        builds_by_platform[platform] = task
    return builds_by_platform


def get_test_platforms(
    test_platforms_cfg, builds_by_platform, signed_builds_by_platform={}
):
    """Get the test platforms for which test tasks should be generated,
    based on the available build platforms.  Returns a dictionary mapping
    test platform to {test-set, build-platform, build-label}."""
    test_platforms = {}
    for test_platform, cfg in test_platforms_cfg.items():
        build_platform = cfg["build-platform"]
        if build_platform not in builds_by_platform:
            logger.warning(
                "No build task with platform {}; ignoring test platform {}".format(
                    build_platform, test_platform
                )
            )
            continue
        test_platforms[test_platform] = {
            "build-platform": build_platform,
            "build-label": builds_by_platform[build_platform].label,
            "build-attributes": builds_by_platform[build_platform].attributes,
        }

        # If a signed build exists for this platform, record its label so the
        # loader can expose it as build-signing-label on the generated tests.
        if build_platform in signed_builds_by_platform:
            test_platforms[test_platform]["build-signing-label"] = (
                signed_builds_by_platform[build_platform].label
            )

        if builds_by_platform[build_platform].attributes.get("shippable"):
            test_platforms[test_platform]["shippable"] = builds_by_platform[
                build_platform
            ].attributes["shippable"]
            test_platforms[test_platform]["shipping_product"] = builds_by_platform[
                build_platform
            ].attributes["shipping_product"]

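        # Fold the raw per-platform configuration (build-platform, test-sets, ...)
        # into the result so expand_tests can see the test sets.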
        test_platforms[test_platform].update(cfg)

    return test_platforms


def expand_tests(test_sets_cfg, test_platforms):
    """Expand the test sets in `test_platforms` out to sets of test names.
    Returns a dictionary like `get_test_platforms`, with an additional
    `test-names` key for each test platform, containing a set of test
    names."""
    rv = {}
    for test_platform, cfg in test_platforms.items():
        test_sets = cfg["test-sets"]
        if not set(test_sets) <= set(test_sets_cfg):
            raise Exception(
                "Test sets {} for test platform {} are not defined".format(
                    ", ".join(test_sets), test_platform
                )
            )
        test_names = set()
        for test_set in test_sets:
            test_names.update(test_sets_cfg[test_set])
        rv[test_platform] = cfg.copy()
        rv[test_platform]["test-names"] = test_names
    return rv
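

# Illustrative example of how expand_tests behaves (toy data; the suite and
# platform names are hypothetical, not real configuration):
#
#   test_sets_cfg = {"mochitest": ["mochitest-plain", "mochitest-chrome"]}
#   test_platforms = {
#       "linux64-qr/opt": {"build-platform": "linux64/opt",
#                          "test-sets": ["mochitest"]},
#   }
#   expand_tests(test_sets_cfg, test_platforms)
#   # => {"linux64-qr/opt": {"build-platform": "linux64/opt",
#   #                        "test-sets": ["mochitest"],
#   #                        "test-names": {"mochitest-plain", "mochitest-chrome"}}}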