Diffstat (limited to 'taskcluster/gecko_taskgraph/util')
-rw-r--r--   taskcluster/gecko_taskgraph/util/backstop.py   28
-rw-r--r--   taskcluster/gecko_taskgraph/util/chunking.py     2
-rw-r--r--   taskcluster/gecko_taskgraph/util/hg.py           2
-rw-r--r--   taskcluster/gecko_taskgraph/util/perfile.py     13
4 files changed, 19 insertions(+), 26 deletions(-)
diff --git a/taskcluster/gecko_taskgraph/util/backstop.py b/taskcluster/gecko_taskgraph/util/backstop.py
index 26c9a4fb91..18c9166083 100644
--- a/taskcluster/gecko_taskgraph/util/backstop.py
+++ b/taskcluster/gecko_taskgraph/util/backstop.py
@@ -37,22 +37,17 @@ def is_backstop(
         return True
 
     project = params["project"]
-    pushid = int(params["pushlog_id"])
-    pushdate = int(params["pushdate"])
-
     if project in TRY_PROJECTS:
         return False
 
     if project not in integration_projects:
         return True
 
-    # On every Nth push, want to run all tasks.
-    if pushid % push_interval == 0:
-        return True
-
-    if time_interval <= 0:
+    # This push was explicitly set to run nothing (e.g via DONTBUILD), so
+    # shouldn't be a backstop candidate.
+    if params["target_tasks_method"] == "nothing":
         return False
 
-    # We also want to ensure we run all tasks at least once per N minutes.
+    # Find the last backstop to compute push and time intervals.
     subs = {"trust-domain": trust_domain, "project": project}
     index = BACKSTOP_INDEX.format(**subs)
@@ -67,9 +62,7 @@
         return True
 
     try:
-        last_pushdate = get_artifact(last_backstop_id, "public/parameters.yml")[
-            "pushdate"
-        ]
+        last_params = get_artifact(last_backstop_id, "public/parameters.yml")
     except HTTPError as e:
         # If the last backstop decision task exists in the index, but
         # parameters.yml isn't available yet, it means the decision task is
@@ -79,6 +72,15 @@
             return False
         raise
 
-    if (pushdate - last_pushdate) / 60 >= time_interval:
+    # On every Nth push, want to run all tasks.
+    if int(params["pushlog_id"]) - int(last_params["pushlog_id"]) >= push_interval:
+        return True
+
+    if time_interval <= 0:
+        return False
+
+    # We also want to ensure we run all tasks at least once per N minutes.
+    if (params["pushdate"] - last_params["pushdate"]) / 60 >= time_interval:
         return True
+
     return False
diff --git a/taskcluster/gecko_taskgraph/util/chunking.py b/taskcluster/gecko_taskgraph/util/chunking.py
index a0ed56de78..a8ae4d8b6b 100644
--- a/taskcluster/gecko_taskgraph/util/chunking.py
+++ b/taskcluster/gecko_taskgraph/util/chunking.py
@@ -101,8 +101,8 @@ def guess_mozinfo_from_task(task, repo=""):
         ("linux", "1804"): "18.04",
         ("macosx", "1015"): "10.15",
         ("macosx", "1100"): "11.00",
-        ("windows", "7"): "6.1",
         ("windows", "10"): "10.0",
+        ("windows", "11"): "11.0",
     }
     for (name, old_ver), new_ver in os_versions.items():
         if p_os["name"] == name and p_os["version"] == old_ver:
diff --git a/taskcluster/gecko_taskgraph/util/hg.py b/taskcluster/gecko_taskgraph/util/hg.py
index 18a92fbd0d..17d341cdc0 100644
--- a/taskcluster/gecko_taskgraph/util/hg.py
+++ b/taskcluster/gecko_taskgraph/util/hg.py
@@ -101,7 +101,7 @@ def get_json_automationrelevance(repository, revision):
     logger.debug("Querying version control for metadata: %s", url)
 
     def get_automationrelevance():
-        response = requests.get(url, timeout=30)
+        response = requests.get(url, timeout=60)
         return response.json()
 
     return retry(get_automationrelevance, attempts=10, sleeptime=10)
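Note on the backstop.py change above: both intervals are now measured from the last recorded backstop (read back from its parameters.yml artifact) rather than from absolute pushlog ids, and pushes that were explicitly set to run nothing are excluded as candidates. A minimal sketch of the resulting decision logic, with the parameter plumbing stripped out (function name and default intervals here are illustrative, not taken from the patch):

    # Sketch only: "params" are the current decision task's parameters,
    # "last_params" the parameters.yml artifact of the last backstop.
    def should_backstop(params, last_params, push_interval=20, time_interval=240):
        # Run all tasks once every N pushes *since the last backstop*.
        if int(params["pushlog_id"]) - int(last_params["pushlog_id"]) >= push_interval:
            return True
        # A non-positive time interval disables the time-based fallback.
        if time_interval <= 0:
            return False
        # Otherwise, also run all tasks at least once per N minutes.
        return (params["pushdate"] - last_params["pushdate"]) / 60 >= time_interval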
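In chunking.py, the OS-version table drops Windows 7 (NT 6.1) and adds Windows 11. For illustration, this is how the table rewrites a parsed platform into mozinfo form; the `p_os` value and the assignment in the loop body are assumed from context, since the hunk ends at the match check:

    os_versions = {
        ("linux", "1804"): "18.04",
        ("macosx", "1015"): "10.15",
        ("macosx", "1100"): "11.00",
        ("windows", "10"): "10.0",
        ("windows", "11"): "11.0",
    }

    p_os = {"name": "windows", "version": "11"}  # e.g. parsed from "windows11-64"
    for (name, old_ver), new_ver in os_versions.items():
        if p_os["name"] == name and p_os["version"] == old_ver:
            p_os["version"] = new_ver  # assumed consequent; yields "11.0"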
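The hg.py change only raises the per-request timeout from 30 to 60 seconds, but it interacts with the surrounding retry loop: the worst case before giving up grows from roughly 10 x (30s + 10s sleep) to 10 x (60s + 10s sleep). A self-contained sketch of that interaction (the URL is illustrative, and the `redo.retry` import is an assumption about where `retry` comes from):

    import requests
    from redo import retry  # assumption: the retry helper used by hg.py

    url = "https://hg.mozilla.org/mozilla-central/json-automationrelevance/tip"  # illustrative

    def get_automationrelevance():
        # Each attempt now waits up to 60s (was 30s) before requests raises
        # Timeout; retry() then sleeps and tries again, up to 10 attempts.
        response = requests.get(url, timeout=60)
        return response.json()

    data = retry(get_automationrelevance, attempts=10, sleeptime=10)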
diff --git a/taskcluster/gecko_taskgraph/util/perfile.py b/taskcluster/gecko_taskgraph/util/perfile.py
index 4e82d87dad..4c18ca98be 100644
--- a/taskcluster/gecko_taskgraph/util/perfile.py
+++ b/taskcluster/gecko_taskgraph/util/perfile.py
@@ -12,15 +12,12 @@ import taskgraph
 from mozbuild.util import memoize
 from mozpack.path import match as mozpackmatch
 
-from gecko_taskgraph import files_changed
-
-from .. import GECKO
-
 logger = logging.getLogger(__name__)
 
 
 @memoize
-def perfile_number_of_chunks(is_try, try_task_config, head_repository, head_rev, type):
+def perfile_number_of_chunks(is_try, try_task_config, files_changed, type):
+    changed_files = set(files_changed)
     if taskgraph.fast and not is_try:
         # When iterating on taskgraph changes, the exact number of chunks that
         # test-verify runs usually isn't important, so skip it when going fast.
@@ -55,17 +52,11 @@ def perfile_number_of_chunks(is_try, try_task_config, head_repository, head_rev,
         # Returning 0 means no tests to run, this captures non test-verify tasks
         return 1
 
-    changed_files = set()
     if try_task_config:
         suite_to_paths = json.loads(try_task_config)
         specified_files = itertools.chain.from_iterable(suite_to_paths.values())
         changed_files.update(specified_files)
 
-    if is_try:
-        changed_files.update(files_changed.get_locally_changed_files(GECKO))
-    else:
-        changed_files.update(files_changed.get_changed_files(head_repository, head_rev))
-
     test_count = 0
     for pattern in file_patterns:
         for path in changed_files:
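With the changed files passed in as a parameter, perfile.py no longer needs GECKO or the head repository/revision, and the @memoize cache keys on the actual changed-file input rather than a repo/rev pair. A hypothetical call site (names and values illustrative; note that mozbuild's memoize caches on positional arguments, so `files_changed` should be hashable, e.g. a tuple or frozenset):

    from gecko_taskgraph.util.perfile import perfile_number_of_chunks

    files_changed = ("dom/base/Document.cpp", "dom/base/test/test_doc.html")  # illustrative
    chunks = perfile_number_of_chunks(
        True,            # is_try
        "",              # try_task_config: JSON mapping of suite -> paths, empty here
        files_changed,   # hashable, so @memoize can cache on it
        "test-verify",   # task type
    )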