From 8dd16259287f58f9273002717ec4d27e97127719 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 12 Jun 2024 07:43:14 +0200
Subject: Merging upstream version 127.0.

Signed-off-by: Daniel Baumann
---
 .../taskcluster_taskgraph-6.3.0.dist-info/LICENSE | 373 -----------------
 .../taskcluster_taskgraph-6.3.0.dist-info/METADATA | 28 --
 .../taskcluster_taskgraph-6.3.0.dist-info/RECORD | 80 ----
 .../taskcluster_taskgraph-6.3.0.dist-info/WHEEL | 5 -
 .../entry_points.txt | 2 -
 .../top_level.txt | 1 -
 .../taskcluster_taskgraph-8.0.1.dist-info/LICENSE | 373 +++++++++++++++++
 .../taskcluster_taskgraph-8.0.1.dist-info/METADATA | 123 ++++++
 .../taskcluster_taskgraph-8.0.1.dist-info/RECORD | 79 ++++
 .../taskcluster_taskgraph-8.0.1.dist-info/WHEEL | 5 +
 .../entry_points.txt | 2 +
 .../top_level.txt | 1 +
 .../taskcluster_taskgraph/taskgraph/__init__.py | 2 +-
 .../taskgraph/actions/add_new_jobs.py | 2 +-
 .../taskgraph/actions/cancel.py | 4 +-
 .../taskgraph/actions/cancel_all.py | 4 +-
 .../taskgraph/actions/rebuild_cached_tasks.py | 2 +-
 .../taskgraph/actions/registry.py | 34 +-
 .../taskgraph/actions/retrigger.py | 26 +-
 .../taskgraph/actions/util.py | 15 +-
 .../taskcluster_taskgraph/taskgraph/config.py | 20 +-
 .../taskcluster_taskgraph/taskgraph/create.py | 2 +-
 .../taskcluster_taskgraph/taskgraph/decision.py | 23 +-
 .../taskcluster_taskgraph/taskgraph/docker.py | 48 ++-
 .../taskgraph/files_changed.py | 91 -----
 .../taskcluster_taskgraph/taskgraph/generator.py | 8 +-
 .../taskgraph/loader/default.py | 4 +-
 .../python/taskcluster_taskgraph/taskgraph/main.py | 83 +++-
 .../taskcluster_taskgraph/taskgraph/morph.py | 1 +
 .../taskgraph/optimize/base.py | 12 +
 .../taskgraph/optimize/strategies.py | 16 +-
 .../taskcluster_taskgraph/taskgraph/parameters.py | 5 +-
 .../taskgraph/run-task/run-task | 14 +-
 .../taskgraph/target_tasks.py | 8 +-
 .../taskgraph/transforms/__init__.py | 3 -
 .../taskgraph/transforms/base.py | 2 +-
 .../taskgraph/transforms/code_review.py | 10 +-
 .../taskgraph/transforms/docker_image.py | 16 +-
 .../taskgraph/transforms/fetch.py | 65 +--
 .../taskgraph/transforms/from_deps.py | 21 +-
 .../taskgraph/transforms/job/__init__.py | 453 ---------------------
 .../taskgraph/transforms/job/common.py | 171 --------
 .../taskgraph/transforms/job/index_search.py | 37 --
 .../taskgraph/transforms/job/run_task.py | 231 -----------
 .../taskgraph/transforms/job/toolchain.py | 175 --------
 .../taskgraph/transforms/run/__init__.py | 451 ++++++++++++++++++++
 .../taskgraph/transforms/run/common.py | 165 ++++++++
 .../taskgraph/transforms/run/index_search.py | 37 ++
 .../taskgraph/transforms/run/run_task.py | 231 +++++++++++
 .../taskgraph/transforms/run/toolchain.py | 175 ++++++++
 .../taskgraph/transforms/task.py | 61 +--
 .../taskgraph/transforms/task_context.py | 10 +-
 .../taskgraph/util/archive.py | 52 ++-
 .../taskgraph/util/cached_tasks.py | 45 +-
 .../taskgraph/util/decision.py | 79 ----
 .../taskcluster_taskgraph/taskgraph/util/docker.py | 21 +-
 .../taskcluster_taskgraph/taskgraph/util/hash.py | 11 +-
 .../taskgraph/util/keyed_by.py | 16 +-
 .../taskgraph/util/memoize.py | 35 +-
 .../taskgraph/util/parameterization.py | 21 +-
 .../taskcluster_taskgraph/taskgraph/util/schema.py | 12 +-
 .../taskgraph/util/set_name.py | 34 ++
 .../taskcluster_taskgraph/taskgraph/util/shell.py | 2 +-
 .../taskgraph/util/taskcluster.py | 71 ++--
 .../taskcluster_taskgraph/taskgraph/util/time.py | 4 +-
 .../taskgraph/util/treeherder.py | 15 +-
 .../taskcluster_taskgraph/taskgraph/util/vcs.py | 36 +-
 .../taskcluster_taskgraph/taskgraph/util/verify.py | 27 +-
 .../taskcluster_taskgraph/taskgraph/util/yaml.py | 5 +-
 69 files changed, 2192 insertions(+), 2104 deletions(-)
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/LICENSE
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/METADATA
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/RECORD
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/WHEEL
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/entry_points.txt
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/top_level.txt
 create mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/LICENSE
 create mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/METADATA
 create mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/RECORD
 create mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/WHEEL
 create mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/entry_points.txt
 create mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/top_level.txt
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/files_changed.py
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/index_search.py
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py
 create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/__init__.py
 create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/common.py
 create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/index_search.py
 create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/run_task.py
 create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/toolchain.py
 delete mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/util/decision.py
 create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/util/set_name.py

diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/LICENSE b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/LICENSE
deleted file mode 100644
index a612ad9813..0000000000
--- a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/LICENSE
+++ /dev/null
@@ -1,373 +0,0 @@
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
-    means each individual or legal entity that creates, contributes to
-    the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-    means the combination of the Contributions of others (if any) used
-    by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-    means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-    means Source Code Form to which the initial Contributor has attached
-    the notice in Exhibit A, the Executable Form of such Source Code
-    Form, and Modifications of such Source Code Form, in each case
-    including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
-    means
-
-    (a) that the initial Contributor has attached the notice described
-        in Exhibit B to the Covered Software; or
-
-    (b) that the Covered Software was made available under the terms of
-        version 1.1 or earlier of the License, but not also under the
-        terms of a Secondary License.
-
-1.6. "Executable Form"
-    means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-    means a work that combines Covered Software with other material, in
-    a separate file or files, that is not Covered Software.
-
-1.8. "License"
-    means this document.
-
-1.9. "Licensable"
-    means having the right to grant, to the maximum extent possible,
-    whether at the time of the initial grant or subsequently, any and
-    all of the rights conveyed by this License.
-
-1.10. "Modifications"
-    means any of the following:
-
-    (a) any file in Source Code Form that results from an addition to,
-        deletion from, or modification of the contents of Covered
-        Software; or
-
-    (b) any new file in Source Code Form that contains any Covered
-        Software.
-
-1.11. "Patent Claims" of a Contributor
-    means any patent claim(s), including without limitation, method,
-    process, and apparatus claims, in any patent Licensable by such
-    Contributor that would be infringed, but for the grant of the
-    License, by the making, using, selling, offering for sale, having
-    made, import, or transfer of either its Contributions or its
-    Contributor Version.
-
-1.12. "Secondary License"
-    means either the GNU General Public License, Version 2.0, the GNU
-    Lesser General Public License, Version 2.1, the GNU Affero General
-    Public License, Version 3.0, or any later versions of those
-    licenses.
-
-1.13. "Source Code Form"
-    means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-    means an individual or a legal entity exercising rights under this
-    License. For legal entities, "You" includes any entity that
-    controls, is controlled by, or is under common control with You. For
-    purposes of this definition, "control" means (a) the power, direct
-    or indirect, to cause the direction or management of such entity,
-    whether by contract or otherwise, or (b) ownership of more than
-    fifty percent (50%) of the outstanding shares or beneficial
-    ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
-    Licensable by such Contributor to use, reproduce, make available,
-    modify, display, perform, distribute, and otherwise exploit its
-    Contributions, either on an unmodified basis, with Modifications, or
-    as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
-    for sale, have made, import, and otherwise transfer either its
-    Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
-    or
-
-(b) for infringements caused by: (i) Your and any other third party's
-    modifications of Covered Software, or (ii) the combination of its
-    Contributions with other software (except as part of its Contributor
-    Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
-    its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
-    Form, as described in Section 3.1, and You must inform recipients of
-    the Executable Form how they can obtain a copy of such Source Code
-    Form by reasonable means in a timely manner, at a charge no more
-    than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
-    License, or sublicense it under different terms, provided that the
-    license for the Executable Form does not attempt to limit or alter
-    the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-*                                                                      *
-*  6. Disclaimer of Warranty                                           *
-*  -------------------------                                           *
-*                                                                      *
-*  Covered Software is provided under this License on an "as is"       *
-*  basis, without warranty of any kind, either expressed, implied, or  *
-*  statutory, including, without limitation, warranties that the       *
-*  Covered Software is free of defects, merchantable, fit for a        *
-*  particular purpose or non-infringing. The entire risk as to the     *
-*  quality and performance of the Covered Software is with You.        *
-*  Should any Covered Software prove defective in any respect, You     *
-*  (not any Contributor) assume the cost of any necessary servicing,   *
-*  repair, or correction. This disclaimer of warranty constitutes an   *
-*  essential part of this License. No use of any Covered Software is   *
-*  authorized under this License except under this disclaimer.         *
-*                                                                      *
-************************************************************************
-
-************************************************************************
-*                                                                      *
-*  7. Limitation of Liability                                          *
-*  --------------------------                                          *
-*                                                                      *
-*  Under no circumstances and under no legal theory, whether tort      *
-*  (including negligence), contract, or otherwise, shall any           *
-*  Contributor, or anyone who distributes Covered Software as          *
-*  permitted above, be liable to You for any direct, indirect,         *
-*  special, incidental, or consequential damages of any character      *
-*  including, without limitation, damages for lost profits, loss of    *
-*  goodwill, work stoppage, computer failure or malfunction, or any    *
-*  and all other commercial damages or losses, even if such party      *
-*  shall have been informed of the possibility of such damages. This   *
-*  limitation of liability shall not apply to liability for death or   *
-*  personal injury resulting from such party's negligence to the       *
-*  extent applicable law prohibits such limitation. Some               *
-*  jurisdictions do not allow the exclusion or limitation of           *
-*  incidental or consequential damages, so this exclusion and          *
-*  limitation may not apply to You.                                    *
-*                                                                      *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
-  This Source Code Form is subject to the terms of the Mozilla Public
-  License, v. 2.0. If a copy of the MPL was not distributed with this
-  file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
-  This Source Code Form is "Incompatible With Secondary Licenses", as
-  defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/METADATA b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/METADATA
deleted file mode 100644
index 536b4274f6..0000000000
--- a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/METADATA
+++ /dev/null
@@ -1,28 +0,0 @@
-Metadata-Version: 2.1
-Name: taskcluster-taskgraph
-Version: 6.3.0
-Summary: Build taskcluster taskgraphs
-Home-page: https://github.com/taskcluster/taskgraph
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Console
-Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Topic :: Software Development
-License-File: LICENSE
-Requires-Dist: appdirs (>=1.4)
-Requires-Dist: cookiecutter (~=2.1)
-Requires-Dist: json-e (>=2.7)
-Requires-Dist: mozilla-repo-urls
-Requires-Dist: PyYAML (>=5.3.1)
-Requires-Dist: redo (>=2.0)
-Requires-Dist: requests (>=2.25)
-Requires-Dist: slugid (>=2.0)
-Requires-Dist: taskcluster-urls (>=11.0)
-Requires-Dist: voluptuous (>=0.12.1)
-Provides-Extra: load-image
-Requires-Dist: zstandard ; extra == 'load-image'
-
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/RECORD b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/RECORD
deleted file mode 100644
index 3a6dfdfc35..0000000000
--- a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/RECORD
+++ /dev/null
@@ -1,80 +0,0 @@
-taskgraph/__init__.py,sha256=ILqRnb_Cy7WBFggPsK8BML-nmWySW-capstDs3pWb-c,729
-taskgraph/config.py,sha256=XJYKaA9Egn7aiyZ0v70VCq3Kc-XkK08CK2LDsDfsDR8,4822
-taskgraph/create.py,sha256=MeWVr5gKJefjwK_3_xZUcDDu2NVH97gbUuu1dw_I9hA,5184
-taskgraph/decision.py,sha256=qARBTlLYJ7NVw3aflrspRn_hFmvKcrXJ058yao_4b7A,12882
-taskgraph/docker.py,sha256=6tdGVrKFNonznRJSP4IDZEhKnjV-wYKsR0nXnoDOvZk,7924
-taskgraph/files_changed.py,sha256=W3_gEgUT-mVH9DaaU_8X6gYpftrqBU3kgveGbzPLziU,2793
-taskgraph/filter_tasks.py,sha256=R7tYXiaVPGIkQ6O1c9-QJrKZ59m9pFXCloUlPraVnZU,866
-taskgraph/generator.py,sha256=AmkMCVNmj5spJhRfpSx7-zP3v8OU7i8zAbGMROLLEG8,15668
-taskgraph/graph.py,sha256=bHUsv2pPa2SSaWgBY-ItIj7REPd0o4fFYrwoQbwFKTY,4680
-taskgraph/main.py,sha256=UHSywURHwD56w2vGHgjA8O7K1yaCltgMXlJuuFfFjvY,26802
-taskgraph/morph.py,sha256=Q6weAi-xpJM4XoKA2mM6gVXQYLnE1YSws53vTZygMkY,9192
-taskgraph/parameters.py,sha256=xaEUElvdKhxHeJNRMF-6JBFDFiVO1Es2fm38PJQ1JA4,12134
-taskgraph/target_tasks.py,sha256=41BIVwiATy8DCQujPduTtnFmgHlKOfw6RPGL4b20WO8,3324
-taskgraph/task.py,sha256=tRr7WhJ2qjYXi-77wva17CpfK53m6W_cl-xzks_GGaQ,3240
-taskgraph/taskgraph.py,sha256=Fh5cX8LrgYmkpVP_uhpfRgHSKHfZjO-VGSmnFUjEru0,2434
-taskgraph/actions/__init__.py,sha256=lVP1e0YyELg7-_42MWWDbT0cKv_p53BApVE6vWOiPww,416
-taskgraph/actions/add_new_jobs.py,sha256=HAfuRDzFti_YmeudxqVl6hgrEbm-ki5-jSCDMC0HBDE,1836
-taskgraph/actions/cancel.py,sha256=UQSt_6y3S6PXNmUo_mNaUOuDvK2bixWjzdjTKXieEEg,1309
-taskgraph/actions/cancel_all.py,sha256=zrKgnW63gMGS5yldJieDt-GAR_XTiGRgybWAipIUCqQ,1941
-taskgraph/actions/rebuild_cached_tasks.py,sha256=UrVAvTmkkF4TAB5vNSpK1kJqMhMkKAMGmrifxH9kQJQ,1086
-taskgraph/actions/registry.py,sha256=xmhoEGMyYj6TTRFwMowZAUp0aqvtLvdVfmRWM7Yh7xo,13122
-taskgraph/actions/retrigger.py,sha256=wF08p_CgsfqraYelc3JLmPcqBFcO-Yt8gZZLlJZBixQ,9387
-taskgraph/actions/util.py,sha256=TxWxMWiKZeuKRwqiUawzjzpa5VF5AWgAKCLy7YaKG80,10661
-taskgraph/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-taskgraph/loader/default.py,sha256=ND_Sx7yx7io1B-6rWEGkg3UIy7iO3DvSLMXwcEqF1N8,1185
-taskgraph/loader/transform.py,sha256=olUBPjxk3eEIg25sduxlcyqhjoig4ts5kPlT_zs6g9g,2147
-taskgraph/optimize/__init__.py,sha256=Oqpq1RW8QzOcu7zaMlNQ3BHT9ws9e_93FWfCqzNcQps,123
-taskgraph/optimize/base.py,sha256=WvoDNewyHG46IQbG3th-aau9OxSKegsYNfvdOEmunbA,18341
-taskgraph/optimize/strategies.py,sha256=IifMlxppVrIABsvn6UBwQYBFUdxkmyZz_FOtK6yNPps,2380
-taskgraph/run-task/fetch-content,sha256=G1aAvZlTg0yWHqxhSxi4RvfxW-KBJ5JwnGtWRqfH_bg,29990
-taskgraph/run-task/hgrc,sha256=BybWLDR89bWi3pE5T05UqmDHs02CbLypE-omLZWU6Uk,896
-taskgraph/run-task/robustcheckout.py,sha256=vPKvHb3fIIJli9ZVZG88XYoa8Sohy2JrpmH6pDgBDHI,30813
-taskgraph/run-task/run-task,sha256=Mpr195iq9eOh6B4MBpPzEDlxeNyJq0Fa2yrtlJunlXE,45434
-taskgraph/transforms/__init__.py,sha256=aw1dz2sRWZcbTILl6SVDuqIEw0mDdjSYu3LCVs-RLXE,110
-taskgraph/transforms/base.py,sha256=LFw2NwhrSriI3vbcCttArTFb7uHxckQpHeFZmatofvM,5146
-taskgraph/transforms/cached_tasks.py,sha256=Z10VD1kEBVXJvj8qSsNTq2mYpklh0V1EN8OT6QK3v_E,2607
-taskgraph/transforms/chunking.py,sha256=7z9oXiA2dDguYwJPaZYCi-fEzbc--O9avZAFS3vP_kg,2592
-taskgraph/transforms/code_review.py,sha256=eE2xrDtdD_n3HT3caQ2HGAkPm6Uutdm4hDCpCoFjEps,707
-taskgraph/transforms/docker_image.py,sha256=AUuWMx43FcQfgbXy4_2Sjae0cWrh5XWMMcJ3ItcoKes,7606
-taskgraph/transforms/fetch.py,sha256=ORnxpVidOQtI1q1xeHl1c1jlShXD8R_jTGC2CX3lLM4,10479
-taskgraph/transforms/from_deps.py,sha256=1mdjIWYshVI2zBywzB3JEqOyvqgVjFvarcQt9PLDSc4,8950
-taskgraph/transforms/notify.py,sha256=0sga-Ls9dhWLAsL0FBjXmVbbduee8LAZp_1pHBQR0iI,6019
-taskgraph/transforms/task.py,sha256=0oQYH7Upjus0-gzCrYbE0tUKZQUEv6Uq1adGBqiNM60,52254
-taskgraph/transforms/task_context.py,sha256=FxZwT69ozierogtlCTNvk7zCW52d0HdhCaJN7EDmI1s,4272
-taskgraph/transforms/job/__init__.py,sha256=JbNpqdoJRId24QVGe821r6u7Zvm2fTNvME_PMGunaoU,17706
-taskgraph/transforms/job/common.py,sha256=ldlbRI8sdEd-eUcre4GtXMerUg0RQZ_XSe9GwAkfI3I,5897
-taskgraph/transforms/job/index_search.py,sha256=Ngh9FFu1bx2kHVTChW2vcrbnb3SzMneRHopXk18RfB4,1220
-taskgraph/transforms/job/run_task.py,sha256=s9gq1bPdzBB0j2OguXJpWn1-S5Ctltqo4aLsB4kzpUc,8385
-taskgraph/transforms/job/toolchain.py,sha256=GOqIvp1MgtV-6whi2ofgSCFB7GolikZbfLXz0C1h0vc,6015
-taskgraph/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-taskgraph/util/archive.py,sha256=nzYn8cQ3NfLAeV-2SuTNoeQ6hg8m40f6FQcSTyVIKwQ,2855
-taskgraph/util/attributes.py,sha256=pPOFmwkDQQ-IqfDpVghZ10YI_qXRY4Bi5JP3xr6XVvc,2964
-taskgraph/util/cached_tasks.py,sha256=o-yJ91wlWbzoDB2GvKPpGcDE27_IEMgczp_figEBjV8,3406
-taskgraph/util/decision.py,sha256=uTC143FpTKQkGff5jIz3voWRYXBCHgx-XAm7FMW53hE,2433
-taskgraph/util/dependencies.py,sha256=3Qba3zI87JYR5fk5FndGzEVW-5NIzzZrBf9rVYcnLD0,2734
-taskgraph/util/docker.py,sha256=rTbzUt8S6s3N1r8gmwHrqsIY9VZ7TDWBM-jZQ5w0P_U,7762
-taskgraph/util/hash.py,sha256=31sQmDwQOavA5hWsmzWDNFoFTaTp5a7qLSQLNTEALD8,1661
-taskgraph/util/keyed_by.py,sha256=cgBH4tG8eH5UUrm5q4ODG7A4fzkGAOI7feVoZy3V8Ho,3419
-taskgraph/util/memoize.py,sha256=XDlwc-56gzoY8QTwOoiCOYL-igX7JoMcY-9Ih80Euc8,1331
-taskgraph/util/parameterization.py,sha256=dzxh8Bc8MBKoDMwj2V2AQab9UrC-JcM3tg0hDVTWpjc,3184
-taskgraph/util/path.py,sha256=e-JloOQV2-Oua_pe335bv4xWAB07vb82TKpu_zCOl0w,4466
-taskgraph/util/python_path.py,sha256=ed4F5z2mId56LauVczgxm_LGxgQi8XlxlYDgXOPZyII,1576
-taskgraph/util/readonlydict.py,sha256=XzTG-gqGqWVlSkDxSyOL6Ur7Z0ONhIJ9DVLWV3q4q1w,787
-taskgraph/util/schema.py,sha256=JGd0Imjfv6JKCY_tjJtOYwI6uwKUaNgzAcvcZj5WE6A,8323
-taskgraph/util/shell.py,sha256=MB9zHVSvxgOuszgmKr2rWUDahANZkbHHNkjjagZG_3I,1317
-taskgraph/util/taskcluster.py,sha256=cGUGvkrefRHngjyZm_iQRYKRlGi4jMIr7ky0fi_YBrg,12445
-taskgraph/util/taskgraph.py,sha256=ecKEvTfmLVvEKLPO_0g34CqVvc0iCzuNMh3064BZNrE,1969
-taskgraph/util/templates.py,sha256=HGTaIKCpAwEzBDHq0cDai1HJjPJrdnHsjJz6N4LVpKI,2139
-taskgraph/util/time.py,sha256=pNFcTH-iYRfm2-okm1lMATc4B5wO-_FXbOFXEtXD27g,3390
-taskgraph/util/treeherder.py,sha256=A3rpPUQB60Gn1Yx-OZgKuWWGJ8x0-6tcdeeslzco9ag,2687
-taskgraph/util/vcs.py,sha256=54Haq2XyC5CmPnjrPRQZY5wUeoFsaV9pWTYvBjPcVMA,18917
-taskgraph/util/verify.py,sha256=cSd7EeP9hUvp-5WOvKDHrvpFAGb_LuiNPxPp0-YmNEA,8947
-taskgraph/util/workertypes.py,sha256=1wgM6vLrlgtyv8854anVIs0Bx11kV8JJJaKcOHJc2j0,2498
-taskgraph/util/yaml.py,sha256=hfKI_D8Q7dimq4_VvO3WEh8CJsTrsIMwN6set7HIQbY,990
-taskcluster_taskgraph-6.3.0.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
-taskcluster_taskgraph-6.3.0.dist-info/METADATA,sha256=MgIgtvNBRjc0CjnoD-7KHLPpz3sGlja2CZU3GzUMW84,1046
-taskcluster_taskgraph-6.3.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-taskcluster_taskgraph-6.3.0.dist-info/entry_points.txt,sha256=2hxDzE3qq_sHh-J3ROqwpxgQgxO-196phWAQREl2-XA,50
-taskcluster_taskgraph-6.3.0.dist-info/top_level.txt,sha256=3JNeYn_hNiNXC7DrdH_vcv-WYSE7QdgGjdvUYvSjVp0,10
-taskcluster_taskgraph-6.3.0.dist-info/RECORD,,
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/WHEEL b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/WHEEL
deleted file mode 100644
index becc9a66ea..0000000000
--- a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.37.1)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/entry_points.txt b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/entry_points.txt
deleted file mode 100644
index dec40df69f..0000000000
--- a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/entry_points.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-[console_scripts]
-taskgraph = taskgraph.main:main
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/top_level.txt b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/top_level.txt
deleted file mode 100644
index f3840b68ef..0000000000
--- a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-6.3.0.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-taskgraph
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/LICENSE b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+    means each individual or legal entity that creates, contributes to
+    the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+    means the combination of the Contributions of others (if any) used
+    by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+    means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+    means Source Code Form to which the initial Contributor has attached
+    the notice in Exhibit A, the Executable Form of such Source Code
+    Form, and Modifications of such Source Code Form, in each case
+    including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+    means
+
+    (a) that the initial Contributor has attached the notice described
+        in Exhibit B to the Covered Software; or
+
+    (b) that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the
+        terms of a Secondary License.
+
+1.6. "Executable Form"
+    means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+    means a work that combines Covered Software with other material, in
+    a separate file or files, that is not Covered Software.
+
+1.8. "License"
+    means this document.
+
+1.9. "Licensable"
+    means having the right to grant, to the maximum extent possible,
+    whether at the time of the initial grant or subsequently, any and
+    all of the rights conveyed by this License.
+
+1.10. "Modifications"
+    means any of the following:
+
+    (a) any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered
+        Software; or
+
+    (b) any new file in Source Code Form that contains any Covered
+        Software.
+
+1.11. "Patent Claims" of a Contributor
+    means any patent claim(s), including without limitation, method,
+    process, and apparatus claims, in any patent Licensable by such
+    Contributor that would be infringed, but for the grant of the
+    License, by the making, using, selling, offering for sale, having
+    made, import, or transfer of either its Contributions or its
+    Contributor Version.
+
+1.12. "Secondary License"
+    means either the GNU General Public License, Version 2.0, the GNU
+    Lesser General Public License, Version 2.1, the GNU Affero General
+    Public License, Version 3.0, or any later versions of those
+    licenses.
+
+1.13. "Source Code Form"
+    means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+    means an individual or a legal entity exercising rights under this
+    License. For legal entities, "You" includes any entity that
+    controls, is controlled by, or is under common control with You. For
+    purposes of this definition, "control" means (a) the power, direct
+    or indirect, to cause the direction or management of such entity,
+    whether by contract or otherwise, or (b) ownership of more than
+    fifty percent (50%) of the outstanding shares or beneficial
+    ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+    Licensable by such Contributor to use, reproduce, make available,
+    modify, display, perform, distribute, and otherwise exploit its
+    Contributions, either on an unmodified basis, with Modifications, or
+    as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+    for sale, have made, import, and otherwise transfer either its
+    Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+    or
+
+(b) for infringements caused by: (i) Your and any other third party's
+    modifications of Covered Software, or (ii) the combination of its
+    Contributions with other software (except as part of its Contributor
+    Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+    its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+    Form, as described in Section 3.1, and You must inform recipients of
+    the Executable Form how they can obtain a copy of such Source Code
+    Form by reasonable means in a timely manner, at a charge no more
+    than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+    License, or sublicense it under different terms, provided that the
+    license for the Executable Form does not attempt to limit or alter
+    the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+*                                                                      *
+*  6. Disclaimer of Warranty                                           *
+*  -------------------------                                           *
+*                                                                      *
+*  Covered Software is provided under this License on an "as is"       *
+*  basis, without warranty of any kind, either expressed, implied, or  *
+*  statutory, including, without limitation, warranties that the       *
+*  Covered Software is free of defects, merchantable, fit for a        *
+*  particular purpose or non-infringing. The entire risk as to the     *
+*  quality and performance of the Covered Software is with You.        *
+*  Should any Covered Software prove defective in any respect, You     *
+*  (not any Contributor) assume the cost of any necessary servicing,   *
+*  repair, or correction. This disclaimer of warranty constitutes an   *
+*  essential part of this License. No use of any Covered Software is   *
+*  authorized under this License except under this disclaimer.         *
+*                                                                      *
+************************************************************************
+
+************************************************************************
+*                                                                      *
+*  7. Limitation of Liability                                          *
+*  --------------------------                                          *
+*                                                                      *
+*  Under no circumstances and under no legal theory, whether tort      *
+*  (including negligence), contract, or otherwise, shall any           *
+*  Contributor, or anyone who distributes Covered Software as          *
+*  permitted above, be liable to You for any direct, indirect,         *
+*  special, incidental, or consequential damages of any character      *
+*  including, without limitation, damages for lost profits, loss of    *
+*  goodwill, work stoppage, computer failure or malfunction, or any    *
+*  and all other commercial damages or losses, even if such party      *
+*  shall have been informed of the possibility of such damages. This   *
+*  limitation of liability shall not apply to liability for death or   *
+*  personal injury resulting from such party's negligence to the       *
+*  extent applicable law prohibits such limitation. Some               *
+*  jurisdictions do not allow the exclusion or limitation of           *
+*  incidental or consequential damages, so this exclusion and          *
+*  limitation may not apply to You.                                    *
+*                                                                      *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/METADATA b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/METADATA
new file mode 100644
index 0000000000..e549db9aa3
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/METADATA
@@ -0,0 +1,123 @@
+Metadata-Version: 2.1
+Name: taskcluster-taskgraph
+Version: 8.0.1
+Summary: Build taskcluster taskgraphs
+Home-page: https://github.com/taskcluster/taskgraph
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development
+License-File: LICENSE
+Requires-Dist: appdirs >=1.4
+Requires-Dist: cookiecutter ~=2.1
+Requires-Dist: json-e >=2.7
+Requires-Dist: mozilla-repo-urls
+Requires-Dist: PyYAML >=5.3.1
+Requires-Dist: redo >=2.0
+Requires-Dist: requests >=2.25
+Requires-Dist: slugid >=2.0
+Requires-Dist: taskcluster-urls >=11.0
+Requires-Dist: voluptuous >=0.12.1
+Provides-Extra: load-image
+Requires-Dist: zstandard ; extra == 'load-image'
+
+
+.. image:: https://firefox-ci-tc.services.mozilla.com/api/github/v1/repository/taskcluster/taskgraph/main/badge.svg
+   :target: https://firefox-ci-tc.services.mozilla.com/api/github/v1/repository/taskcluster/taskgraph/main/latest
+   :alt: Task Status
+
+.. image:: https://results.pre-commit.ci/badge/github/taskcluster/taskgraph/main.svg
+   :target: https://results.pre-commit.ci/latest/github/taskcluster/taskgraph/main
+   :alt: pre-commit.ci status
+
+.. image:: https://codecov.io/gh/taskcluster/taskgraph/branch/main/graph/badge.svg?token=GJIV52ZQNP
+   :target: https://codecov.io/gh/taskcluster/taskgraph
+   :alt: Code Coverage
+
+.. image:: https://badge.fury.io/py/taskcluster-taskgraph.svg
+   :target: https://badge.fury.io/py/taskcluster-taskgraph
+   :alt: Pypi Version
+
+.. image:: https://readthedocs.org/projects/taskcluster-taskgraph/badge/?version=latest
+   :target: https://taskcluster-taskgraph.readthedocs.io/en/latest/?badge=latest
+   :alt: Documentation Status
+
+.. image:: https://img.shields.io/badge/license-MPL%202.0-orange.svg
+   :target: http://mozilla.org/MPL/2.0
+   :alt: License
+
+Taskgraph
+=========
+
+Taskgraph is a Python library to generate graphs of tasks for the `Taskcluster
+CI`_ service. It is the recommended approach for configuring tasks once your
+project outgrows a single `.taskcluster.yml`_ file and is what powers the over
+30,000 tasks and counting that make up Firefox's CI.
+
+For more information and usage instructions, `see the docs`_.
+
+How It Works
+------------
+
+Taskgraph leverages the fact that Taskcluster is a generic task execution
+platform. This means that tasks can be scheduled via its `comprehensive API`_,
+and aren't limited to being triggered in response to supported events.
+
+Taskgraph leverages this execution platform to allow CI systems to scale to any
+size or complexity.
+
+1. A *decision task* is created via Taskcluster's normal `.taskcluster.yml`_
+   file. This task invokes ``taskgraph``.
+2. Taskgraph evaluates a series of yaml based task definitions (similar to
+   those other CI offerings provide).
+3. Taskgraph applies transforms on top of these task definitions. Transforms
+   are Python functions that can programmatically alter or even clone a task
+   definition.
+4. Taskgraph applies some optional optimization logic to remove unnecessary
+   tasks.
+5. Taskgraph submits the resulting *task graph* to Taskcluster via its API.
+
+Taskgraph's combination of declarative task configuration combined with
+programmatic alteration are what allow it to support CI systems of any scale.
+Taskgraph is the library that powers the 30,000+ tasks making up `Firefox's
+CI`_.
+
+.. _Taskcluster CI: https://taskcluster.net/
+.. _comprehensive API: https://docs.taskcluster.net/docs/reference/platform/queue/api
+.. _.taskcluster.yml: https://docs.taskcluster.net/docs/reference/integrations/github/taskcluster-yml-v1
+.. _Firefox's CI: https://treeherder.mozilla.org/jobs?repo=mozilla-central
+.. _see the docs: https://taskcluster-taskgraph.readthedocs.io
+
+Installation
+------------
+
+Taskgraph supports Python 3.8 and up, and can be installed from Pypi:
+
+.. code-block::
+
+   pip install taskcluster-taskgraph
+
+
+Alternatively, the repo can be cloned and installed directly:
+
+.. code-block::
+
+   git clone https://github.com/taskcluster/taskgraph
+   cd taskgraph
+   python setup.py install
+
+In both cases, it's recommended to use a Python `virtual environment`_.
+
+.. _virtual environment: https://docs.python.org/3/tutorial/venv.html
+
+Get Involved
+------------
+
+If you'd like to get involved, please see our `contributing docs`_!
+
+.. _contributing docs: https://github.com/taskcluster/taskgraph/blob/main/CONTRIBUTING.rst
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/RECORD b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/RECORD
new file mode 100644
index 0000000000..c04e803ff2
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/RECORD
@@ -0,0 +1,79 @@
+taskgraph/__init__.py,sha256=hCl3NLzC-cVXlKhuzf0-_0wd0gYmNA3oshXfTaa9DNQ,729
+taskgraph/config.py,sha256=8vntWUrPwGds22mFKYAgcsD4Mr8hoONTv2ssGBcClLw,5108
+taskgraph/create.py,sha256=_zokjSM3ZaO04l2LiMhenE8qXDZVfYvueIIu5hGUhzc,5185
+taskgraph/decision.py,sha256=sG0CIj9OSOdfN65LSt6dRYFWbns9_JraVC5fQU1_7oc,13012
+taskgraph/docker.py,sha256=rk-tAMycHnapFyR2Q-XJXzC2A4uv0i-VykLZfwl-pRo,8417
+taskgraph/filter_tasks.py,sha256=R7tYXiaVPGIkQ6O1c9-QJrKZ59m9pFXCloUlPraVnZU,866
+taskgraph/generator.py,sha256=zrH1zfy-8akksKTSOf6e4FEsdOd5y7-h1Jne_2Jabcc,15703
+taskgraph/graph.py,sha256=bHUsv2pPa2SSaWgBY-ItIj7REPd0o4fFYrwoQbwFKTY,4680
+taskgraph/main.py,sha256=tgfAEcNUJfmADteL24yJR5u7tzU4v3mzmxiogVSCK8Y,29072
+taskgraph/morph.py,sha256=bwkaSGdTZLcK_rhF2st2mCGv9EHN5WdbnDeuZcqp9UA,9208
+taskgraph/parameters.py,sha256=hrwUHHu4PS79w-fQ3qNnLSyjRto1EDlidE8e1GzIy8U,12272
+taskgraph/target_tasks.py,sha256=9_v66bzmQFELPsfIDGITXrqzsmEiLq1EeuJFhycKL0M,3356
+taskgraph/task.py,sha256=tRr7WhJ2qjYXi-77wva17CpfK53m6W_cl-xzks_GGaQ,3240
+taskgraph/taskgraph.py,sha256=Fh5cX8LrgYmkpVP_uhpfRgHSKHfZjO-VGSmnFUjEru0,2434
+taskgraph/actions/__init__.py,sha256=lVP1e0YyELg7-_42MWWDbT0cKv_p53BApVE6vWOiPww,416
+taskgraph/actions/add_new_jobs.py,sha256=c8vGWGXMr4qqW2Axz9rbBrDopabZB3gf3SVFLBZH8ak,1865
+taskgraph/actions/cancel.py,sha256=xrIzlB5KzcnQ4_HultoIcnlxtbQhUi7723g5K2iQoY0,1263
+taskgraph/actions/cancel_all.py,sha256=zNiHtOiSQQxLyNJYtaW0JKPazHXSgZrq1C6o8DGYxG8,1887
+taskgraph/actions/rebuild_cached_tasks.py,sha256=r1QTri2ey30TdEztUgc-nkiHdJPe8Sbn7FvKeR_kt0Y,1115
+taskgraph/actions/registry.py,sha256=hubblOhL3fbWDRtKv7_6HiD0P94hzQrpjdMkj23CGCg,13564
+taskgraph/actions/retrigger.py,sha256=MKkoZDAe0SKIq6fHqwAc1Ici_wIGRd7MxeBNhwoDEGE,9388
+taskgraph/actions/util.py,sha256=gB8MZb8juP1S7EsLHJivr6BBY2bf5IUiIpN7Mq9-kXo,10964
+taskgraph/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskgraph/loader/default.py,sha256=_bBJG6l04v44Jm5HSIEnVndC05NpNmq5L28QfJHk0wo,1185
+taskgraph/loader/transform.py,sha256=olUBPjxk3eEIg25sduxlcyqhjoig4ts5kPlT_zs6g9g,2147
+taskgraph/optimize/__init__.py,sha256=Oqpq1RW8QzOcu7zaMlNQ3BHT9ws9e_93FWfCqzNcQps,123
+taskgraph/optimize/base.py,sha256=wTViUwVmY9sZvlzSuGwkVrETCo0v2OfyNxFFgzJrDNc,18982
+taskgraph/optimize/strategies.py,sha256=UryFI5TizzEF_2NO8MyuKwqVektHfJeG_t0_zZwxEds,2577
+taskgraph/run-task/fetch-content,sha256=G1aAvZlTg0yWHqxhSxi4RvfxW-KBJ5JwnGtWRqfH_bg,29990
+taskgraph/run-task/hgrc,sha256=BybWLDR89bWi3pE5T05UqmDHs02CbLypE-omLZWU6Uk,896
+taskgraph/run-task/robustcheckout.py,sha256=vPKvHb3fIIJli9ZVZG88XYoa8Sohy2JrpmH6pDgBDHI,30813
+taskgraph/run-task/run-task,sha256=ev64Ud2X3482B05aurUcWD93_sZS1aW2N-eVutRHF5k,45753
+taskgraph/transforms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskgraph/transforms/base.py,sha256=z20Yh619srbwuQJWASRtG2_j6NUbWlCujTTCHWLa0GY,5147
+taskgraph/transforms/cached_tasks.py,sha256=Z10VD1kEBVXJvj8qSsNTq2mYpklh0V1EN8OT6QK3v_E,2607
+taskgraph/transforms/chunking.py,sha256=7z9oXiA2dDguYwJPaZYCi-fEzbc--O9avZAFS3vP_kg,2592
+taskgraph/transforms/code_review.py,sha256=tevRFQli3MkzW_0Zhr-hwlVti8hFaXEz94llwhBu_ns,713
+taskgraph/transforms/docker_image.py,sha256=GScS7Lld3YcS57eC30wp3DJM_ATLrmmVfZzINKgC1fM,7546
+taskgraph/transforms/fetch.py,sha256=u1M57LQOi0kHz6FFP1qah3yJh15eXYqQCF_F6r5qjh0,10662
+taskgraph/transforms/from_deps.py,sha256=_cdIefdRkZYWaFJaWpsglivvG8bBGWd4beg7QgNl0Jc,8885
+taskgraph/transforms/notify.py,sha256=0sga-Ls9dhWLAsL0FBjXmVbbduee8LAZp_1pHBQR0iI,6019
+taskgraph/transforms/task.py,sha256=nRzNAxLjA6BsFktZAA9Upqb_pSFNhjoCzKm0QDxvVgM,52586
+taskgraph/transforms/task_context.py,sha256=9v3ke967atAYCtQxIblSFucJA1tum9Q8QpXQeTwNIzU,4278
+taskgraph/transforms/run/__init__.py,sha256=gVJ4eNquKNlygX18OtWTDnl6FFsZlA12bxfvB3kZz14,17761
+taskgraph/transforms/run/common.py,sha256=G3WdMHU5YWUfk1uR6xsxWY7MQKjU9tnqtRDmGttUqt4,5626
+taskgraph/transforms/run/index_search.py,sha256=ABIaX2FFx02o1StZgNAB_ZDXc1lTFO2aUIBH5BuUjtA,1224
+taskgraph/transforms/run/run_task.py,sha256=0GI8syzGtRDT07g_6SXG99JtxDBe09zsW5ltL-aUhYU,8403
+taskgraph/transforms/run/toolchain.py,sha256=KiuBfJ6CShwGYIIljy4i7iYSHFFXF_A_zSvRGUgYboA,6033
+taskgraph/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskgraph/util/archive.py,sha256=NIqg2Su5PUqOv4JM60uFixsMsOXN26u5paB1Uh6foSI,4707
+taskgraph/util/attributes.py,sha256=pPOFmwkDQQ-IqfDpVghZ10YI_qXRY4Bi5JP3xr6XVvc,2964
+taskgraph/util/cached_tasks.py,sha256=-AqmOjrkI5PMAlAlQl1wShCrs0HA5lWLBgkxTcFstFM,4150
+taskgraph/util/dependencies.py,sha256=3Qba3zI87JYR5fk5FndGzEVW-5NIzzZrBf9rVYcnLD0,2734
+taskgraph/util/docker.py,sha256=ffQ6KloQNz_kwYemSZEkh4xUMVMeotnnwphWZth1PqQ,8112
+taskgraph/util/hash.py,sha256=U5h6WwC3zs0ooX8odc7AjgPQKKFpDXL7PemoyENPJYo,1644
+taskgraph/util/keyed_by.py,sha256=EMWNRRqYB0AS7A4Y4lthYf2HB7G2ercGFf4hN9zwyaY,3348
+taskgraph/util/memoize.py,sha256=CvCGl-_qft062b3GZC4aHbPfEOPtqR9oOUEqvk9aojQ,294
+taskgraph/util/parameterization.py,sha256=DiPE-4jappGMPljDhhZI52BP7dLBGZHu5EI1cW4aRYg,3392
+taskgraph/util/path.py,sha256=e-JloOQV2-Oua_pe335bv4xWAB07vb82TKpu_zCOl0w,4466
+taskgraph/util/python_path.py,sha256=ed4F5z2mId56LauVczgxm_LGxgQi8XlxlYDgXOPZyII,1576 +taskgraph/util/readonlydict.py,sha256=XzTG-gqGqWVlSkDxSyOL6Ur7Z0ONhIJ9DVLWV3q4q1w,787 +taskgraph/util/schema.py,sha256=HmbbJ_i5uxZZHZSJ8sVWaD-VMhZI4ymx0STNcjO5t2M,8260 +taskgraph/util/set_name.py,sha256=cha9awo2nMQ9jfSEcbyNkZkCq_1Yg_kKJTfvDzabHSc,1134 +taskgraph/util/shell.py,sha256=nf__ly0Ikhj92AiEBCQtvyyckm8UfO_3DSgz0SU-7QA,1321 +taskgraph/util/taskcluster.py,sha256=LScpZknMycOOneIcRMf236rCTMRHHGxFTc9Lh7mRKaI,13057 +taskgraph/util/taskgraph.py,sha256=ecKEvTfmLVvEKLPO_0g34CqVvc0iCzuNMh3064BZNrE,1969 +taskgraph/util/templates.py,sha256=HGTaIKCpAwEzBDHq0cDai1HJjPJrdnHsjJz6N4LVpKI,2139 +taskgraph/util/time.py,sha256=XauJ0DbU0fyFvHLzJLG4ehHv9KaKixxETro89GPC1yk,3350 +taskgraph/util/treeherder.py,sha256=kc8jCy_lYduBxVMYOQzWpmI_6i2bRmkQLKq5DGmbiDI,2721 +taskgraph/util/vcs.py,sha256=FjS82fiTsoQ_ArjTCDOtDGfNdVUp_8zvVKB9SoAG3Rs,18019 +taskgraph/util/verify.py,sha256=htrNX7aXMMDzxymsFVcs0kaO5gErFHd62g9cQsZI_WE,8518 +taskgraph/util/workertypes.py,sha256=1wgM6vLrlgtyv8854anVIs0Bx11kV8JJJaKcOHJc2j0,2498 +taskgraph/util/yaml.py,sha256=-LaIf3RROuaSWckOOGN5Iviu-DHWxIChgHn9a7n6ec4,1059 +taskcluster_taskgraph-8.0.1.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725 +taskcluster_taskgraph-8.0.1.dist-info/METADATA,sha256=qg-m62f4BGLh2jBAr_-OQZhraOSciTrv5EyNY0Wwq8I,4688 +taskcluster_taskgraph-8.0.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +taskcluster_taskgraph-8.0.1.dist-info/entry_points.txt,sha256=2hxDzE3qq_sHh-J3ROqwpxgQgxO-196phWAQREl2-XA,50 +taskcluster_taskgraph-8.0.1.dist-info/top_level.txt,sha256=3JNeYn_hNiNXC7DrdH_vcv-WYSE7QdgGjdvUYvSjVp0,10 +taskcluster_taskgraph-8.0.1.dist-info/RECORD,, diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/WHEEL b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/WHEEL new file mode 100644 index 0000000000..bab98d6758 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/entry_points.txt b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/entry_points.txt new file mode 100644 index 0000000000..dec40df69f --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +taskgraph = taskgraph.main:main diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/top_level.txt b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/top_level.txt new file mode 100644 index 0000000000..f3840b68ef --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-8.0.1.dist-info/top_level.txt @@ -0,0 +1 @@ +taskgraph diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py index 81cc763230..0bd794101c 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py @@ -2,7 +2,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-__version__ = "6.3.0" +__version__ = "8.0.1" # Maximum number of dependencies a single task can have # https://docs.taskcluster.net/reference/platform/taskcluster-queue/references/api#createTask diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py index c5e1821546..f635250086 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py @@ -40,7 +40,7 @@ from taskgraph.actions.util import ( ) def add_new_jobs_action(parameters, graph_config, input, task_group_id, task_id): decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels( - parameters, graph_config + parameters, graph_config, task_group_id=task_group_id ) to_run = [] diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel.py index 03788c6538..33a5394e68 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel.py @@ -34,9 +34,7 @@ def cancel_action(parameters, graph_config, input, task_group_id, task_id): # cannot be cancelled at this time, but it's also not running # anymore, so we can ignore this error. logger.info( - 'Task "{}" is past its deadline and cannot be cancelled.'.format( - task_id - ) + f'Task "{task_id}" is past its deadline and cannot be cancelled.' ) return raise diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py index d3e0440839..55453b7624 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py @@ -43,9 +43,7 @@ def cancel_all_action(parameters, graph_config, input, task_group_id, task_id): # cannot be cancelled at this time, but it's also not running # anymore, so we can ignore this error. logger.info( - "Task {} is past its deadline and cannot be cancelled.".format( - task_id - ) + f"Task {task_id} is past its deadline and cannot be cancelled." 
) return raise diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/rebuild_cached_tasks.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/rebuild_cached_tasks.py index 2b88e6a698..8ea2e37150 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/rebuild_cached_tasks.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/rebuild_cached_tasks.py @@ -18,7 +18,7 @@ def rebuild_cached_tasks_action( parameters, graph_config, input, task_group_id, task_id ): decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels( - parameters, graph_config + parameters, graph_config, task_group_id=task_group_id ) cached_tasks = [ label diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/registry.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/registry.py index 1e909d30c7..20955bd3f2 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/registry.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/registry.py @@ -154,9 +154,7 @@ def register_callback_action( ], "register_callback_action must be used as decorator" if not cb_name: cb_name = name - assert cb_name not in callbacks, "callback name {} is not unique".format( - cb_name - ) + assert cb_name not in callbacks, f"callback name {cb_name} is not unique" def action_builder(parameters, graph_config, decision_task_id): if not available(parameters): @@ -165,11 +163,11 @@ def register_callback_action( actionPerm = "generic" if generic else cb_name # gather up the common decision-task-supplied data for this action - repo_param = "head_repository" repository = { - "url": parameters[repo_param], + "url": parameters["head_repository"], "project": parameters["project"], "level": parameters["level"], + "base_url": parameters["base_repository"], } revision = parameters["head_rev"] @@ -181,6 +179,9 @@ def register_callback_action( branch = parameters.get("head_ref") if branch: push["branch"] = branch + base_branch = parameters.get("base_ref") + if base_branch and branch != base_branch: + push["base_branch"] = base_branch action = { "name": name, @@ -215,13 +216,16 @@ def register_callback_action( if "/" in actionPerm: raise Exception("`/` is not allowed in action names; use `-`") + if parameters["tasks_for"].startswith("github-pull-request"): + hookId = f"in-tree-pr-action-{level}-{actionPerm}/{tcyml_hash}" + else: + hookId = f"in-tree-action-{level}-{actionPerm}/{tcyml_hash}" + rv.update( { "kind": "hook", "hookGroupId": f"project-{trustDomain}", - "hookId": "in-tree-action-{}-{}/{}".format( - level, actionPerm, tcyml_hash - ), + "hookId": hookId, "hookPayload": { # provide the decision-task parameters as context for triggerHook "decision": { @@ -297,16 +301,20 @@ def sanity_check_task_scope(callback, parameters, graph_config): actionPerm = "generic" if action.generic else action.cb_name - repo_param = "head_repository" - raw_url = parameters[repo_param] + raw_url = parameters["base_repository"] parsed_url = parse(raw_url) - expected_scope = f"assume:{parsed_url.taskcluster_role_prefix}:action:{actionPerm}" + action_scope = f"assume:{parsed_url.taskcluster_role_prefix}:action:{actionPerm}" + pr_action_scope = ( + f"assume:{parsed_url.taskcluster_role_prefix}:pr-action:{actionPerm}" + ) # the scope should appear literally; no need for a satisfaction check. The use of # get_current_scopes here calls the auth service through the Taskcluster Proxy, giving # the precise scopes available to this task. 
-    if expected_scope not in taskcluster.get_current_scopes():
-        raise ValueError(f"Expected task scope {expected_scope} for this action")
+    if not set((action_scope, pr_action_scope)) & set(taskcluster.get_current_scopes()):
+        raise ValueError(
+            f"Expected task scope {action_scope} or {pr_action_scope} for this action"
+        )
 
 
 def trigger_action_callback(
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py
index fd488b35fc..6c6091a47a 100644
--- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py
@@ -33,9 +33,7 @@ def _should_retrigger(task_graph, label):
     """
     if label not in task_graph:
         logger.info(
-            "Task {} not in full taskgraph, assuming task should not be retriggered.".format(
-                label
-            )
+            f"Task {label} not in full taskgraph, assuming task should not be retriggered."
         )
         return False
     return task_graph[label].attributes.get("retrigger", False)
@@ -67,7 +65,9 @@ def retrigger_decision_action(parameters, graph_config, input, task_group_id, ta
     # absolute timestamps relative to the current time.
     task = taskcluster.get_task_definition(task_id)
     task = relativize_datestamps(task)
-    create_task_from_def(slugid(), task, parameters["level"])
+    create_task_from_def(
+        slugid(), task, parameters["level"], graph_config["trust-domain"]
+    )
 
 
 @register_callback_action(
@@ -144,7 +144,7 @@ def retrigger_decision_action(parameters, graph_config, input, task_group_id, ta
 )
 def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
     decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
-        parameters, graph_config
+        parameters, graph_config, task_group_id=task_group_id
     )
 
     task = taskcluster.get_task_definition(task_id)
@@ -155,8 +155,8 @@ def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
 
     if not input.get("force", None) and not _should_retrigger(full_task_graph, label):
         logger.info(
-            "Not retriggering task {}, task should not be retrigged "
-            "and force not specified.".format(label)
+            f"Not retriggering task {label}, task should not be retriggered "
+            "and force not specified."
         )
         sys.exit(1)
@@ -201,14 +201,12 @@ def rerun_action(parameters, graph_config, input, task_group_id, task_id):
     task = taskcluster.get_task_definition(task_id)
     parameters = dict(parameters)
     decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
-        parameters, graph_config
+        parameters, graph_config, task_group_id=task_group_id
     )
     label = task["metadata"]["name"]
     if task_id not in label_to_taskid.values():
         logger.error(
-            "Refusing to rerun {}: taskId {} not in decision task {} label_to_taskid!".format(
-                label, task_id, decision_task_id
-            )
+            f"Refusing to rerun {label}: taskId {task_id} not in decision task {decision_task_id} label_to_taskid!"
         )
 
     _rerun_task(task_id, label)
@@ -218,9 +216,7 @@ def _rerun_task(task_id, label):
     state = taskcluster.state_task(task_id)
     if state not in RERUN_STATES:
         logger.warning(
-            "No need to rerun {}: state '{}' not in {}!".format(
-                label, state, RERUN_STATES
-            )
+            f"No need to rerun {label}: state '{state}' not in {RERUN_STATES}!"
) return taskcluster.rerun_task(task_id) @@ -261,7 +257,7 @@ def _rerun_task(task_id, label): ) def retrigger_multiple(parameters, graph_config, input, task_group_id, task_id): decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels( - parameters, graph_config + parameters, graph_config, task_group_id=task_group_id ) suffixes = [] diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py index cf81029da2..41e3b035de 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py @@ -32,8 +32,15 @@ def get_parameters(decision_task_id): return get_artifact(decision_task_id, "public/parameters.yml") -def fetch_graph_and_labels(parameters, graph_config): - decision_task_id = find_decision_task(parameters, graph_config) +def fetch_graph_and_labels(parameters, graph_config, task_group_id=None): + try: + # Look up the decision_task id in the index + decision_task_id = find_decision_task(parameters, graph_config) + except KeyError: + if not task_group_id: + raise + # Not found (e.g. from github-pull-request), fall back to the task group id. + decision_task_id = task_group_id # First grab the graph and labels generated during the initial decision task full_task_graph = get_artifact(decision_task_id, "public/full-task-graph.json") @@ -90,7 +97,7 @@ def fetch_graph_and_labels(parameters, graph_config): return (decision_task_id, full_task_graph, label_to_taskid) -def create_task_from_def(task_id, task_def, level): +def create_task_from_def(task_id, task_def, level, trust_domain): """Create a new task from a definition rather than from a label that is already in the full-task-graph. The task definition will have {relative-datestamp': '..'} rendered just like in a decision task. @@ -98,7 +105,7 @@ def create_task_from_def(task_id, task_def, level): It is useful if you want to "edit" the full_task_graph and then hand it to this function. No dependencies will be scheduled. You must handle this yourself. Seeing how create_tasks handles it might prove helpful.""" - task_def["schedulerId"] = f"gecko-level-{level}" + task_def["schedulerId"] = f"{trust_domain}-level-{level}" label = task_def["metadata"]["name"] session = get_session() create.create_task(session, task_id, label, task_def) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/config.py b/third_party/python/taskcluster_taskgraph/taskgraph/config.py index 7ea7dc7b33..ac384eab86 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/config.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/config.py @@ -40,6 +40,11 @@ graph_config_schema = Schema( description="Default 'deadline' for tasks, in relative date format. " "Eg: '1 week'", ): optionally_keyed_by("project", str), + Optional( + "task-expires-after", + description="Default 'expires-after' for level 1 tasks, in relative date format. " + "Eg: '90 days'", + ): str, Required("workers"): { Required("aliases"): { str: { @@ -61,6 +66,10 @@ graph_config_schema = Schema( description="The taskcluster index prefix to use for caching tasks. 
" "Defaults to `trust-domain`.", ): str, + Optional( + "cache-pull-requests", + description="Should tasks from pull requests populate the cache", + ): bool, Optional( "index-path-regexes", description="Regular expressions matching index paths to be summarized.", @@ -102,28 +111,27 @@ class GraphConfig: Add the project's taskgraph directory to the python path, and register any extensions present. """ - modify_path = os.path.dirname(self.root_dir) if GraphConfig._PATH_MODIFIED: - if GraphConfig._PATH_MODIFIED == modify_path: + if GraphConfig._PATH_MODIFIED == self.root_dir: # Already modified path with the same root_dir. # We currently need to do this to enable actions to call # taskgraph_decision, e.g. relpro. return raise Exception("Can't register multiple directories on python path.") - GraphConfig._PATH_MODIFIED = modify_path - sys.path.insert(0, modify_path) + GraphConfig._PATH_MODIFIED = self.root_dir + sys.path.insert(0, self.root_dir) register_path = self["taskgraph"].get("register") if register_path: find_object(register_path)(self) @property def vcs_root(self): - if path.split(self.root_dir)[-2:] != ["taskcluster", "ci"]: + if path.split(self.root_dir)[-1:] != ["taskcluster"]: raise Exception( "Not guessing path to vcs root. " "Graph config in non-standard location." ) - return os.path.dirname(os.path.dirname(self.root_dir)) + return os.path.dirname(self.root_dir) @property def taskcluster_yml(self): diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/create.py b/third_party/python/taskcluster_taskgraph/taskgraph/create.py index deb1ac5348..e8baabb8a8 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/create.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/create.py @@ -104,7 +104,7 @@ def create_tasks(graph_config, taskgraph, label_to_taskid, params, decision_task def create_task(session, task_id, label, task_def): # create the task using 'http://taskcluster/queue', which is proxied to the queue service - # with credentials appropriate to this job. + # with credentials appropriate to this task. 
# Resolve timestamps now = current_json_time(datetime_format=True) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/decision.py b/third_party/python/taskcluster_taskgraph/taskgraph/decision.py index ed412f4473..d9eb9f3e90 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/decision.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/decision.py @@ -46,21 +46,21 @@ try_task_config_schema_v2 = Schema( ) -def full_task_graph_to_runnable_jobs(full_task_json): - runnable_jobs = {} +def full_task_graph_to_runnable_tasks(full_task_json): + runnable_tasks = {} for label, node in full_task_json.items(): if not ("extra" in node["task"] and "treeherder" in node["task"]["extra"]): continue th = node["task"]["extra"]["treeherder"] - runnable_jobs[label] = {"symbol": th["symbol"]} + runnable_tasks[label] = {"symbol": th["symbol"]} for i in ("groupName", "groupSymbol", "collection"): if i in th: - runnable_jobs[label][i] = th[i] + runnable_tasks[label][i] = th[i] if th.get("machine", {}).get("platform"): - runnable_jobs[label]["platform"] = th["machine"]["platform"] - return runnable_jobs + runnable_tasks[label]["platform"] = th["machine"]["platform"] + return runnable_tasks def taskgraph_decision(options, parameters=None): @@ -104,7 +104,7 @@ def taskgraph_decision(options, parameters=None): # write out the public/runnable-jobs.json file write_artifact( - "runnable-jobs.json", full_task_graph_to_runnable_jobs(full_task_json) + "runnable-jobs.json", full_task_graph_to_runnable_tasks(full_task_json) ) # this is just a test to check whether the from_json() function is working @@ -185,6 +185,9 @@ def get_decision_parameters(graph_config, options): # Define default filter list, as most configurations shouldn't need # custom filters. + parameters["files_changed"] = repo.get_changed_files( + rev=parameters["head_rev"], base_rev=parameters["base_rev"] + ) parameters["filters"] = [ "target_tasks_method", ] @@ -214,9 +217,9 @@ def get_decision_parameters(graph_config, options): parameters.update(PER_PROJECT_PARAMETERS[project]) except KeyError: logger.warning( - "using default project parameters; add {} to " - "PER_PROJECT_PARAMETERS in {} to customize behavior " - "for this project".format(project, __file__) + f"using default project parameters; add {project} to " + f"PER_PROJECT_PARAMETERS in {__file__} to customize behavior " + "for this project" ) parameters.update(PER_PROJECT_PARAMETERS["default"]) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/docker.py b/third_party/python/taskcluster_taskgraph/taskgraph/docker.py index 23897cbbee..9f849525fc 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/docker.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/docker.py @@ -18,6 +18,22 @@ except ImportError as e: from taskgraph.util import docker from taskgraph.util.taskcluster import get_artifact_url, get_session +DEPLOY_WARNING = """ +***************************************************************** +WARNING: Image is not suitable for deploying/pushing. + +To automatically tag the image the following files are required: +- {image_dir}/REGISTRY +- {image_dir}/VERSION + +The REGISTRY file contains the Docker registry hosting the image. +A default REGISTRY file may also be defined in the parent docker +directory. + +The VERSION file contains the version of the image. 
+***************************************************************** +""" + def get_image_digest(image_name): from taskgraph.generator import load_tasks_for_kind @@ -34,7 +50,7 @@ def get_image_digest(image_name): def load_image_by_name(image_name, tag=None): from taskgraph.generator import load_tasks_for_kind - from taskgraph.optimize import IndexSearch + from taskgraph.optimize.strategies import IndexSearch from taskgraph.parameters import Parameters params = Parameters( @@ -43,8 +59,9 @@ def load_image_by_name(image_name, tag=None): ) tasks = load_tasks_for_kind(params, "docker-image") task = tasks[f"build-docker-image-{image_name}"] + deadline = None task_id = IndexSearch().should_replace_task( - task, {}, task.optimization.get("index-search", []) + task, {}, deadline, task.optimization.get("index-search", []) ) if task_id in (True, False): @@ -52,8 +69,10 @@ def load_image_by_name(image_name, tag=None): "Could not find artifacts for a docker image " "named `{image_name}`. Local commits and other changes " "in your checkout may cause this error. Try " - "updating to a fresh checkout of mozilla-central " - "to download image.".format(image_name=image_name) + "updating to a fresh checkout of {project} " + "to download image.".format( + image_name=image_name, project=params["project"] + ) ) return False @@ -102,19 +121,18 @@ def build_image(name, tag, args=None): buf = BytesIO() docker.stream_context_tar(".", image_dir, buf, "", args) - subprocess.run( - ["docker", "image", "build", "--no-cache", "-t", tag, "-"], input=buf.getvalue() - ) + cmdargs = ["docker", "image", "build", "--no-cache", "-"] + if tag: + cmdargs.insert(-1, f"-t={tag}") + subprocess.run(cmdargs, input=buf.getvalue()) - print(f"Successfully built {name} and tagged with {tag}") + msg = f"Successfully built {name}" + if tag: + msg += f" and tagged with {tag}" + print(msg) - if tag.endswith(":latest"): - print("*" * 50) - print("WARNING: no VERSION file found in image directory.") - print("Image is not suitable for deploying/pushing.") - print("Create an image suitable for deploying/pushing by creating") - print("a VERSION file in the image directory.") - print("*" * 50) + if not tag or tag.endswith(":latest"): + print(DEPLOY_WARNING.format(image_dir=os.path.relpath(image_dir), image=name)) def load_image(url, imageName=None, imageTag=None): diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/files_changed.py b/third_party/python/taskcluster_taskgraph/taskgraph/files_changed.py deleted file mode 100644 index 6be6e5eeee..0000000000 --- a/third_party/python/taskcluster_taskgraph/taskgraph/files_changed.py +++ /dev/null @@ -1,91 +0,0 @@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. - -""" -Support for optimizing tasks based on the set of files that have changed. -""" - - -import logging -import os - -import requests -from redo import retry - -from .util.memoize import memoize -from .util.path import match as match_path -from .util.vcs import get_repository - -logger = logging.getLogger(__name__) - - -@memoize -def get_changed_files(head_repository_url, head_rev, base_rev=None): - """ - Get the set of files changed between revisions. - Responses are cached, so multiple calls with the same arguments are OK. 
- """ - repo_path = os.getcwd() - repository = get_repository(repo_path) - - if repository.tool == "hg": - # TODO Use VCS version once tested enough - return _get_changed_files_json_automationrelevance( - head_repository_url, head_rev - ) - - return repository.get_changed_files(rev=head_rev, base_rev=base_rev) - - -def _get_changed_files_json_automationrelevance(head_repository_url, head_rev): - """ - Get the set of files changed in the push headed by the given revision. - """ - url = "{}/json-automationrelevance/{}".format( - head_repository_url.rstrip("/"), head_rev - ) - logger.debug("Querying version control for metadata: %s", url) - - def get_automationrelevance(): - response = requests.get(url, timeout=30) - return response.json() - - contents = retry(get_automationrelevance, attempts=10, sleeptime=10) - - logger.debug( - "{} commits influencing task scheduling:".format(len(contents["changesets"])) - ) - changed_files = set() - for c in contents["changesets"]: - desc = "" # Support empty desc - if c["desc"]: - desc = c["desc"].splitlines()[0].encode("ascii", "ignore") - logger.debug(" {cset} {desc}".format(cset=c["node"][0:12], desc=desc)) - changed_files |= set(c["files"]) - - return changed_files - - -def check(params, file_patterns): - """Determine whether any of the files changed between 2 revisions - match any of the given file patterns.""" - - head_repository_url = params.get("head_repository") - head_rev = params.get("head_rev") - if not head_repository_url or not head_rev: - logger.warning( - "Missing `head_repository` or `head_rev` parameters; " - "assuming all files have changed" - ) - return True - - base_rev = params.get("base_rev") - changed_files = get_changed_files(head_repository_url, head_rev, base_rev) - - for pattern in file_patterns: - for path in changed_files: - if match_path(path, pattern): - return True - - return False diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/generator.py b/third_party/python/taskcluster_taskgraph/taskgraph/generator.py index 4ed2a41520..d649b91706 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/generator.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/generator.py @@ -91,7 +91,7 @@ class Kind: @classmethod def load(cls, root_dir, graph_config, kind_name): - path = os.path.join(root_dir, kind_name) + path = os.path.join(root_dir, "kinds", kind_name) kind_yml = os.path.join(path, "kind.yml") if not os.path.exists(kind_yml): raise KindNotFound(kind_yml) @@ -125,13 +125,13 @@ class TaskGraphGenerator: write_artifacts=False, ): """ - @param root_dir: root directory, with subdirectories for each kind + @param root_dir: root directory containing the Taskgraph config.yml file @param parameters: parameters for this task-graph generation, or callable taking a `GraphConfig` and returning parameters @type parameters: Union[Parameters, Callable[[GraphConfig], Parameters]] """ if root_dir is None: - root_dir = "taskcluster/ci" + root_dir = "taskcluster" self.root_dir = root_dir self._parameters = parameters self._decision_task_id = decision_task_id @@ -243,7 +243,7 @@ class TaskGraphGenerator: yield kind queue.extend(kind.config.get("kind-dependencies", [])) else: - for kind_name in os.listdir(self.root_dir): + for kind_name in os.listdir(os.path.join(self.root_dir, "kinds")): try: yield Kind.load(self.root_dir, graph_config, kind_name) except KindNotFound: diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/loader/default.py 
b/third_party/python/taskcluster_taskgraph/taskgraph/loader/default.py index 5b2c258917..f060a1d92d 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/loader/default.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/loader/default.py @@ -11,7 +11,7 @@ logger = logging.getLogger(__name__) DEFAULT_TRANSFORMS = [ - "taskgraph.transforms.job:transforms", + "taskgraph.transforms.run:transforms", "taskgraph.transforms.task:transforms", ] @@ -20,7 +20,7 @@ def loader(kind, path, config, params, loaded_tasks): """ This default loader builds on the `transform` loader by providing sensible default transforms that the majority of simple tasks will need. - Specifically, `job` and `task` transforms will be appended to the end of the + Specifically, `run` and `task` transforms will be appended to the end of the list of transforms in the kind being loaded. """ transform_refs = config.setdefault("transforms", []) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/main.py b/third_party/python/taskcluster_taskgraph/taskgraph/main.py index 88a4e2539b..e68cd5a787 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/main.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/main.py @@ -18,6 +18,7 @@ from concurrent.futures import ProcessPoolExecutor, as_completed from pathlib import Path from textwrap import dedent from typing import Any, List +from urllib.parse import urlparse import appdirs import yaml @@ -95,7 +96,7 @@ def get_filtered_taskgraph(taskgraph, tasksregex, exclude_keys): for key in exclude_keys: obj = task attrs = key.split(".") - while attrs[0] in obj: + while obj and attrs[0] in obj: if len(attrs) == 1: del obj[attrs[0]] break @@ -120,7 +121,7 @@ def get_taskgraph_generator(root, parameters): return TaskGraphGenerator(root_dir=root, parameters=parameters) -def format_taskgraph(options, parameters, logfile=None): +def format_taskgraph(options, parameters, overrides, logfile=None): import taskgraph from taskgraph.parameters import parameters_loader @@ -138,7 +139,7 @@ def format_taskgraph(options, parameters, logfile=None): if isinstance(parameters, str): parameters = parameters_loader( parameters, - overrides={"target-kinds": options.get("target_kinds")}, + overrides=overrides, strict=False, ) @@ -172,7 +173,7 @@ def dump_output(out, path=None, params_spec=None): print(out + "\n", file=fh) -def generate_taskgraph(options, parameters, logdir): +def generate_taskgraph(options, parameters, overrides, logdir): from taskgraph.parameters import Parameters def logfile(spec): @@ -188,14 +189,16 @@ def generate_taskgraph(options, parameters, logdir): # tracebacks a little more readable and avoids additional process overhead. 
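+    # (When several parameter specs are given, the ProcessPoolExecutor below
+    # fans the generations out across worker processes instead.)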
if len(parameters) == 1: spec = parameters[0] - out = format_taskgraph(options, spec, logfile(spec)) + out = format_taskgraph(options, spec, overrides, logfile(spec)) dump_output(out, options["output_file"]) return 0 futures = {} with ProcessPoolExecutor(max_workers=options["max_workers"]) as executor: for spec in parameters: - f = executor.submit(format_taskgraph, options, spec, logfile(spec)) + f = executor.submit( + format_taskgraph, options, spec, overrides, logfile(spec) + ) futures[f] = spec returncode = 0 @@ -292,6 +295,15 @@ def generate_taskgraph(options, parameters, logdir): "generations will happen from the same invocation (one per parameters " "specified).", ) +@argument( + "--force-local-files-changed", + default=False, + action="store_true", + help="Compute the 'files-changed' parameter from local version control, " + "even when explicitly using a parameter set that already has it defined. " + "Note that this is already the default behaviour when no parameters are " + "specified.", +) @argument( "--no-optimize", dest="optimize", @@ -366,9 +378,11 @@ def show_taskgraph(options): diffdir = None output_file = options["output_file"] - if options["diff"]: + if options["diff"] or options["force_local_files_changed"]: repo = get_repository(os.getcwd()) + if options["diff"]: + assert repo is not None if not repo.working_directory_clean(): print( "abort: can't diff taskgraph with dirty working directory", @@ -392,15 +406,22 @@ def show_taskgraph(options): ) print(f"Generating {options['graph_attr']} @ {cur_rev}", file=sys.stderr) + overrides = { + "target-kinds": options.get("target_kinds"), + } parameters: List[Any[str, Parameters]] = options.pop("parameters") if not parameters: - overrides = { - "target-kinds": options.get("target_kinds"), - } parameters = [ parameters_loader(None, strict=False, overrides=overrides) ] # will use default values + # This is the default behaviour anyway, so no need to re-compute. + options["force_local_files_changed"] = False + + elif options["force_local_files_changed"]: + assert repo is not None + overrides["files-changed"] = sorted(repo.get_changed_files("AM")) + for param in parameters[:]: if isinstance(param, str) and os.path.isdir(param): parameters.remove(param) @@ -426,7 +447,7 @@ def show_taskgraph(options): # to setup its `mach` based logging. setup_logging() - ret = generate_taskgraph(options, parameters, logdir) + ret = generate_taskgraph(options, parameters, overrides, logdir) if options["diff"]: assert diffdir is not None @@ -450,7 +471,7 @@ def show_taskgraph(options): diffdir, f"{options['graph_attr']}_{base_rev_file}" ) print(f"Generating {options['graph_attr']} @ {base_rev}", file=sys.stderr) - ret |= generate_taskgraph(options, parameters, logdir) + ret |= generate_taskgraph(options, parameters, overrides, logdir) finally: repo.update(cur_rev) @@ -463,6 +484,8 @@ def show_taskgraph(options): f"--label={options['graph_attr']}@{cur_rev}", ] + non_fatal_failures = [] + for spec in parameters: base_path = os.path.join( diffdir, f"{options['graph_attr']}_{base_rev_file}" @@ -475,7 +498,20 @@ def show_taskgraph(options): base_path += f"_{params_name}" cur_path += f"_{params_name}" + # If the base or cur files are missing it means that generation + # failed. If one of them failed but not the other, the failure is + # likely due to the patch making changes to taskgraph in modules + # that don't get reloaded (safe to ignore). If both generations + # failed, there's likely a real issue. 
+ base_missing = not os.path.isfile(base_path) + cur_missing = not os.path.isfile(cur_path) + if base_missing != cur_missing: # != is equivalent to XOR for booleans + non_fatal_failures.append(os.path.basename(base_path)) + continue + try: + # If the output file(s) are missing, this command will raise + # CalledProcessError with a returncode > 1. proc = subprocess.run( diffcmd + [base_path, cur_path], capture_output=True, @@ -500,6 +536,16 @@ def show_taskgraph(options): params_spec=spec if len(parameters) > 1 else None, ) + if non_fatal_failures: + failstr = "\n ".join(sorted(non_fatal_failures)) + print( + "WARNING: Diff skipped for the following generation{s} " + "due to failures:\n {failstr}".format( + s="s" if len(non_fatal_failures) > 1 else "", failstr=failstr + ), + file=sys.stderr, + ) + if options["format"] != "json": print( "If you were expecting differences in task bodies " @@ -661,7 +707,7 @@ def decision(options): @argument( "--root", "-r", - default="taskcluster/ci", + default="taskcluster", help="root of the taskgraph definition relative to topsrcdir", ) def action_callback(options): @@ -697,7 +743,7 @@ def action_callback(options): @argument( "--root", "-r", - default="taskcluster/ci", + default="taskcluster", help="root of the taskgraph definition relative to topsrcdir", ) @argument( @@ -835,6 +881,10 @@ def init_taskgraph(options): ) return 1 + context["repo_name"] = urlparse(repo_url).path.rsplit("/", 1)[-1] + if context["repo_name"].endswith(".git"): + context["repo_name"] = context["repo_name"][: -len(".git")] + # Generate the project. cookiecutter( options["template"], @@ -867,6 +917,11 @@ def setup_logging(): def main(args=sys.argv[1:]): setup_logging() parser = create_parser() + + if not args: + parser.print_help() + sys.exit(1) + args = parser.parse_args(args) try: return args.command(vars(args)) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/morph.py b/third_party/python/taskcluster_taskgraph/taskgraph/morph.py index bfa1560270..e4bb268ab8 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/morph.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/morph.py @@ -38,6 +38,7 @@ registered_morphs = [] def register_morph(func): registered_morphs.append(func) + return func def amend_taskgraph(taskgraph, label_to_taskid, to_add): diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/optimize/base.py b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/base.py index 367b94e1de..e5477d35b7 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/optimize/base.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/base.py @@ -271,14 +271,19 @@ def replace_tasks( dependencies_of = target_task_graph.graph.links_dict() for label in target_task_graph.graph.visit_postorder(): + logger.debug(f"replace_tasks: {label}") # if we're not allowed to optimize, that's easy.. 
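+        # (Labels land in do_not_optimize via the `do_not_optimize` parameter
+        # declared in the base parameter schema.)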
if label in do_not_optimize: + logger.debug(f"replace_tasks: {label} is in do_not_optimize") continue # if this task depends on un-replaced, un-removed tasks, do not replace if any( l not in replaced and l not in removed_tasks for l in dependencies_of[label] ): + logger.debug( + f"replace_tasks: {label} depends on an unreplaced or unremoved task" + ) continue # if the task already exists, that's an easy replacement @@ -287,6 +292,7 @@ def replace_tasks( label_to_taskid[label] = repl replaced.add(label) opt_counts["existing_tasks"] += 1 + logger.debug(f"replace_tasks: {label} replaced from existing_tasks") continue # call the optimization strategy @@ -304,14 +310,20 @@ def replace_tasks( repl = opt.should_replace_task(task, params, deadline, arg) if repl: if repl is True: + logger.debug(f"replace_tasks: {label} removed by optimization strategy") # True means remove this task; get_subgraph will catch any # problems with removed tasks being depended on removed_tasks.add(label) else: + logger.debug( + f"replace_tasks: {label} replaced by optimization strategy" + ) label_to_taskid[label] = repl replaced.add(label) opt_counts[opt_by] += 1 continue + else: + logger.debug(f"replace_tasks: {label} kept by optimization strategy") _log_optimization("replaced", opt_counts) return replaced diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py index 973b550632..5baecfe645 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py @@ -1,8 +1,8 @@ import logging from datetime import datetime -from taskgraph import files_changed from taskgraph.optimize.base import OptimizationStrategy, register_strategy +from taskgraph.util.path import match as match_path from taskgraph.util.taskcluster import find_task_id, status_task logger = logging.getLogger(__name__) @@ -48,17 +48,23 @@ class IndexSearch(OptimizationStrategy): @register_strategy("skip-unless-changed") class SkipUnlessChanged(OptimizationStrategy): + + def check(self, files_changed, patterns): + for pattern in patterns: + for path in files_changed: + if match_path(path, pattern): + return True + return False + def should_remove_task(self, task, params, file_patterns): # pushlog_id == -1 - this is the case when run from a cron.yml job or on a git repository if params.get("repository_type") == "hg" and params.get("pushlog_id") == -1: return False - changed = files_changed.check(params, file_patterns) + changed = self.check(params["files_changed"], file_patterns) if not changed: logger.debug( - 'no files found matching a pattern in `skip-unless-changed` for "{}"'.format( - task.label - ) + f'no files found matching a pattern in `skip-unless-changed` for "{task.label}"' ) return True return False diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/parameters.py b/third_party/python/taskcluster_taskgraph/taskgraph/parameters.py index 48571d97ad..c69b201e34 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/parameters.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/parameters.py @@ -40,6 +40,7 @@ base_schema = Schema( Required("do_not_optimize"): [str], Required("enable_always_target"): Any(bool, [str]), Required("existing_tasks"): {str: str}, + Required("files_changed"): [str], Required("filters"): [str], Required("head_ref"): str, Required("head_repository"): str, @@ -86,6 +87,7 @@ def 
_get_defaults(repo_root=None):
     # Use fake values if no repo is detected.
     repo = Mock(branch="", head_rev="", tool="git")
     repo.get_url.return_value = ""
+    repo.get_changed_files.return_value = []
 
     try:
         repo_url = repo.get_url()
@@ -108,6 +110,7 @@ def _get_defaults(repo_root=None):
         "do_not_optimize": [],
         "enable_always_target": True,
         "existing_tasks": {},
+        "files_changed": repo.get_changed_files("AM"),
         "filters": ["target_tasks_method"],
         "head_ref": repo.branch or repo.head_rev,
         "head_repository": repo_url,
@@ -284,7 +287,7 @@ class Parameters(ReadOnlyDict):
             else:
                 raise ParameterMismatch(
                     "Don't know how to determine file URL for non-github"
-                    "repo: {}".format(repo)
+                    f" repo: {repo}"
                 )
         else:
             raise RuntimeError(
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task
index 267b5283ea..f3a343de33 100755
--- a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task
@@ -1,4 +1,4 @@
-#!/usr/bin/python3 -u
+#!/usr/bin/env -S python3 -u
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
@@ -624,6 +624,11 @@ def git_checkout(
             "Must specify both ssh_key_file and ssh_known_hosts_file, if either are specified",
         )
 
+    # Bypass Git's "safe directory" feature as the destination could be
+    # coming from a cache and therefore cloned by a different user.
+    args = ["git", "config", "--global", "--add", "safe.directory", Path(destination_path).as_posix()]
+    retry_required_command(b"vcs", args, extra_env=env)
+
     if not os.path.exists(destination_path):
         # Repository doesn't already exist, needs to be cloned
         args = [
@@ -782,9 +787,7 @@ def hg_checkout(
     branch: Optional[str],
     revision: Optional[str],
 ):
-    if IS_MACOSX:
-        hg_bin = "/tools/python27-mercurial/bin/hg"
-    elif IS_POSIX:
+    if IS_MACOSX or IS_POSIX:
         hg_bin = "hg"
     elif IS_WINDOWS:
         # This is where OCC installs it in the AMIs.
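An illustrative aside, not part of the patch: the `safe.directory` addition to
git_checkout above can be reproduced in isolation. A minimal sketch, where
`allow_cached_checkout` and `destination_path` are hypothetical names standing
in for run-task's own logic:

    import subprocess
    from pathlib import Path

    def allow_cached_checkout(destination_path: str) -> None:
        # A checkout restored from a worker cache may be owned by a different
        # user; unless the path is marked safe, recent versions of git refuse
        # to operate on it.
        subprocess.run(
            ["git", "config", "--global", "--add", "safe.directory",
             Path(destination_path).as_posix()],
            check=True,
        )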
@@ -1007,7 +1010,8 @@ def install_pip_requirements(repositories): if not requirements: return - cmd = [sys.executable, "-mpip", "install"] + # TODO: Stop using system Python (#381) + cmd = [sys.executable, "-mpip", "install", "--break-system-packages"] if os.environ.get("PIP_DISABLE_REQUIRE_HASHES") != "1": cmd.append("--require-hashes") diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/target_tasks.py b/third_party/python/taskcluster_taskgraph/taskgraph/target_tasks.py index 1119a1c960..7f44b6ab60 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/target_tasks.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/target_tasks.py @@ -14,7 +14,7 @@ _target_task_methods = {} _GIT_REFS_HEADS_PREFIX = "refs/heads/" -def _target_task(name): +def register_target_task(name): def wrap(func): _target_task_methods[name] = func return func @@ -81,7 +81,7 @@ def standard_filter(task, parameters): ) -@_target_task("default") +@register_target_task("default") def target_tasks_default(full_task_graph, parameters, graph_config): """Target the tasks which have indicated they should be run on this project via the `run_on_projects` attributes.""" @@ -90,7 +90,7 @@ def target_tasks_default(full_task_graph, parameters, graph_config): ] -@_target_task("codereview") +@register_target_task("codereview") def target_tasks_codereview(full_task_graph, parameters, graph_config): """Target the tasks which have indicated they should be run on this project via the `run_on_projects` attributes.""" @@ -101,7 +101,7 @@ def target_tasks_codereview(full_task_graph, parameters, graph_config): ] -@_target_task("nothing") +@register_target_task("nothing") def target_tasks_nothing(full_task_graph, parameters, graph_config): """Select nothing, for DONTBUILD pushes""" return [] diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py index 4fa7b5fc0c..e69de29bb2 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py @@ -1,3 +0,0 @@ -from taskgraph.transforms import ( # noqa: Added for backwards compat - notify as release_notifications, -) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py index e6fcd2400c..fda0c584fc 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py @@ -147,7 +147,7 @@ class ValidateSchema: kind=config.kind, name=task["name"] ) elif "label" in task: - error = "In job {label!r}:".format(label=task["label"]) + error = "In task {label!r}:".format(label=task["label"]) elif "primary-dependency" in task: error = "In {kind} kind task for {dependency!r}:".format( kind=config.kind, dependency=task["primary-dependency"].label diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/code_review.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/code_review.py index bdb655b97d..2c859c36f6 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/code_review.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/code_review.py @@ -12,12 +12,12 @@ transforms = TransformSequence() @transforms.add -def add_dependencies(config, jobs): - for job in jobs: - job.setdefault("soft-dependencies", []) - 
job["soft-dependencies"] += [ +def add_dependencies(config, tasks): + for task in tasks: + task.setdefault("soft-dependencies", []) + task["soft-dependencies"] += [ dep_task.label for dep_task in config.kind_dependencies_tasks.values() if dep_task.attributes.get("code-review") is True ] - yield job + yield task diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py index d0c5b9c97b..b58320092b 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py @@ -92,9 +92,7 @@ def fill_template(config, tasks): for p in packages: if p not in available_packages: raise Exception( - "Missing package job for {}-{}: {}".format( - config.kind, image_name, p - ) + f"Missing package job for {config.kind}-{image_name}: {p}" ) if not taskgraph.fast: @@ -119,9 +117,7 @@ def fill_template(config, tasks): digest_data += [json.dumps(args, sort_keys=True)] context_hashes[image_name] = context_hash - description = "Build the docker image {} for use by dependent tasks".format( - image_name - ) + description = f"Build the docker image {image_name} for use by dependent tasks" args["DOCKER_IMAGE_PACKAGES"] = " ".join(f"<{p}>" for p in packages) @@ -132,6 +128,8 @@ def fill_template(config, tasks): # burn more CPU once to reduce image size. zstd_level = "3" if int(config.params["level"]) == 1 else "10" + expires = config.graph_config._config.get("task-expires-after", "28 days") + # include some information that is useful in reconstructing this task # from JSON taskdesc = { @@ -142,7 +140,7 @@ def fill_template(config, tasks): "artifact_prefix": "public", }, "always-target": True, - "expires-after": "28 days" if config.params.is_try() else "1 year", + "expires-after": expires if config.params.is_try() else "1 year", "scopes": [], "run-on-projects": [], "worker-type": "images", @@ -158,9 +156,7 @@ def fill_template(config, tasks): ], "env": { "CONTEXT_TASK_ID": {"task-reference": ""}, - "CONTEXT_PATH": "public/docker-contexts/{}.tar.gz".format( - image_name - ), + "CONTEXT_PATH": f"public/docker-contexts/{image_name}.tar.gz", "HASH": context_hash, "PROJECT": config.params["project"], "IMAGE_NAME": image_name, diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py index bcb8ff38a6..0e1b739677 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py @@ -32,11 +32,12 @@ FETCH_SCHEMA = Schema( Optional("task-from"): str, # Description of the task. Required("description"): str, + Optional("expires-after"): str, Optional("docker-image"): object, Optional( "fetch-alias", - description="An alias that can be used instead of the real fetch job name in " - "fetch stanzas for jobs.", + description="An alias that can be used instead of the real fetch task name in " + "fetch stanzas for tasks.", ): str, Optional( "artifact-prefix", @@ -78,20 +79,20 @@ transforms.add_validate(FETCH_SCHEMA) @transforms.add -def process_fetch_job(config, jobs): - # Converts fetch-url entries to the job schema. - for job in jobs: - typ = job["fetch"]["type"] - name = job["name"] - fetch = job.pop("fetch") +def process_fetch_task(config, tasks): + # Converts fetch-url entries to the run schema. 
+ for task in tasks: + typ = task["fetch"]["type"] + name = task["name"] + fetch = task.pop("fetch") if typ not in fetch_builders: raise Exception(f"Unknown fetch type {typ} in fetch {name}") validate_schema(fetch_builders[typ].schema, fetch, f"In task.fetch {name!r}:") - job.update(configure_fetch(config, typ, name, fetch)) + task.update(configure_fetch(config, typ, name, fetch)) - yield job + yield task def configure_fetch(config, typ, name, fetch): @@ -103,41 +104,41 @@ def configure_fetch(config, typ, name, fetch): @transforms.add -def make_task(config, jobs): +def make_task(config, tasks): # Fetch tasks are idempotent and immutable. Have them live for # essentially forever. if config.params["level"] == "3": expires = "1000 years" else: - expires = "28 days" + expires = config.graph_config._config.get("task-expires-after", "28 days") - for job in jobs: - name = job["name"] - artifact_prefix = job.get("artifact-prefix", "public") - env = job.get("env", {}) + for task in tasks: + name = task["name"] + artifact_prefix = task.get("artifact-prefix", "public") + env = task.get("env", {}) env.update({"UPLOAD_DIR": "/builds/worker/artifacts"}) - attributes = job.get("attributes", {}) - attributes["fetch-artifact"] = path.join(artifact_prefix, job["artifact_name"]) - alias = job.get("fetch-alias") + attributes = task.get("attributes", {}) + attributes["fetch-artifact"] = path.join(artifact_prefix, task["artifact_name"]) + alias = task.get("fetch-alias") if alias: attributes["fetch-alias"] = alias - task = { + task_desc = { "attributes": attributes, "name": name, - "description": job["description"], - "expires-after": expires, + "description": task["description"], + "expires-after": task.get("expires-after", expires), "label": "fetch-%s" % name, "run-on-projects": [], "run": { "using": "run-task", "checkout": False, - "command": job["command"], + "command": task["command"], }, "worker-type": "images", "worker": { "chain-of-trust": True, - "docker-image": job.get("docker-image", {"in-tree": "fetch"}), + "docker-image": task.get("docker-image", {"in-tree": "fetch"}), "env": env, "max-run-time": 900, "artifacts": [ @@ -151,29 +152,29 @@ def make_task(config, jobs): } if "treeherder" in config.graph_config: - task["treeherder"] = { + task_desc["treeherder"] = { "symbol": join_symbol("Fetch", name), "kind": "build", "platform": "fetch/opt", "tier": 1, } - if job.get("secret", None): - task["scopes"] = ["secrets:get:" + job.get("secret")] - task["worker"]["taskcluster-proxy"] = True + if task.get("secret", None): + task_desc["scopes"] = ["secrets:get:" + task.get("secret")] + task_desc["worker"]["taskcluster-proxy"] = True if not taskgraph.fast: - cache_name = task["label"].replace(f"{config.kind}-", "", 1) + cache_name = task_desc["label"].replace(f"{config.kind}-", "", 1) # This adds the level to the index path automatically. 
add_optimization(
             config,
-            task,
+            task_desc,
             cache_type=CACHE_TYPE,
             cache_name=cache_name,
-            digest_data=job["digest_data"],
+            digest_data=task["digest_data"],
         )
 
-    yield task
+    yield task_desc
 
 
 @fetch_builder(
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/from_deps.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/from_deps.py
index 337d68e4ba..191ef7d56a 100644
--- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/from_deps.py
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/from_deps.py
@@ -16,10 +16,11 @@ from textwrap import dedent
 from voluptuous import Any, Extra, Optional, Required
 
 from taskgraph.transforms.base import TransformSequence
-from taskgraph.transforms.job import fetches_schema
+from taskgraph.transforms.run import fetches_schema
 from taskgraph.util.attributes import attrmatch
 from taskgraph.util.dependencies import GROUP_BY_MAP, get_dependencies
 from taskgraph.util.schema import Schema, validate_schema
+from taskgraph.util.set_name import SET_NAME_MAP
 
 FROM_DEPS_SCHEMA = Schema(
     {
@@ -41,12 +42,14 @@ FROM_DEPS_SCHEMA = Schema(
             "set-name",
             description=dedent(
                 """
-                When True, `from_deps` will derive a name for the generated
-                tasks from the name of the primary dependency. Defaults to
-                True.
+                A function from `taskgraph.util.set_name.SET_NAME_MAP` used
+                to derive a name for the generated tasks from their primary
+                dependency (e.g. "strip-kind"). Set to `null` to disable
+                name derivation. Defaults to "strip-kind".
                 """.lstrip()
             ),
-        ): bool,
+        ): Any(
+            None,
+            *SET_NAME_MAP,
+            {Any(*SET_NAME_MAP): object},
+        ),
         Optional(
             "with-attributes",
             description=dedent(
                 """
@@ -170,7 +173,7 @@ def from_deps(config, tasks):
         groups = func(config, deps)
 
         # Split the task, one per group.
-        set_name = from_deps.get("set-name", True)
+        set_name = from_deps.get("set-name", "strip-kind")
         copy_attributes = from_deps.get("copy-attributes", False)
         unique_kinds = from_deps.get("unique-kinds", True)
         fetches = from_deps.get("fetches", [])
@@ -203,10 +206,8 @@ def from_deps(config, tasks):
             primary_dep = [dep for dep in group if dep.kind == primary_kind][0]
 
             if set_name:
-                if primary_dep.label.startswith(primary_kind):
-                    new_task["name"] = primary_dep.label[len(primary_kind) + 1 :]
-                else:
-                    new_task["name"] = primary_dep.label
+                func = SET_NAME_MAP[set_name]
+                new_task["name"] = func(config, deps, primary_dep, primary_kind)
 
             if copy_attributes:
                 attrs = new_task.setdefault("attributes", {})
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py
deleted file mode 100644
index 06978ff46d..0000000000
--- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py
+++ /dev/null
@@ -1,453 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-"""
-Convert a job description into a task description.
-
-Jobs descriptions are similar to task descriptions, but they specify how to run
-the job at a higher level, using a "run" field that can be interpreted by
-run-using handlers in `taskcluster/taskgraph/transforms/job`.
-""" - - -import copy -import json -import logging - -from voluptuous import Any, Exclusive, Extra, Optional, Required - -from taskgraph.transforms.base import TransformSequence -from taskgraph.transforms.cached_tasks import order_tasks -from taskgraph.transforms.task import task_description_schema -from taskgraph.util import path as mozpath -from taskgraph.util.python_path import import_sibling_modules -from taskgraph.util.schema import Schema, validate_schema -from taskgraph.util.taskcluster import get_artifact_prefix -from taskgraph.util.workertypes import worker_type_implementation - -logger = logging.getLogger(__name__) - -# Fetches may be accepted in other transforms and eventually passed along -# to a `job` (eg: from_deps). Defining this here allows them to re-use -# the schema and avoid duplication. -fetches_schema = { - Required("artifact"): str, - Optional("dest"): str, - Optional("extract"): bool, - Optional("verify-hash"): bool, -} - -# Schema for a build description -job_description_schema = Schema( - { - # The name of the job and the job's label. At least one must be specified, - # and the label will be generated from the name if necessary, by prepending - # the kind. - Optional("name"): str, - Optional("label"): str, - # the following fields are passed directly through to the task description, - # possibly modified by the run implementation. See - # taskcluster/taskgraph/transforms/task.py for the schema details. - Required("description"): task_description_schema["description"], - Optional("attributes"): task_description_schema["attributes"], - Optional("task-from"): task_description_schema["task-from"], - Optional("dependencies"): task_description_schema["dependencies"], - Optional("soft-dependencies"): task_description_schema["soft-dependencies"], - Optional("if-dependencies"): task_description_schema["if-dependencies"], - Optional("requires"): task_description_schema["requires"], - Optional("expires-after"): task_description_schema["expires-after"], - Optional("routes"): task_description_schema["routes"], - Optional("scopes"): task_description_schema["scopes"], - Optional("tags"): task_description_schema["tags"], - Optional("extra"): task_description_schema["extra"], - Optional("treeherder"): task_description_schema["treeherder"], - Optional("index"): task_description_schema["index"], - Optional("run-on-projects"): task_description_schema["run-on-projects"], - Optional("run-on-tasks-for"): task_description_schema["run-on-tasks-for"], - Optional("run-on-git-branches"): task_description_schema["run-on-git-branches"], - Optional("shipping-phase"): task_description_schema["shipping-phase"], - Optional("always-target"): task_description_schema["always-target"], - Exclusive("optimization", "optimization"): task_description_schema[ - "optimization" - ], - Optional("needs-sccache"): task_description_schema["needs-sccache"], - # The "when" section contains descriptions of the circumstances under which - # this task should be included in the task graph. This will be converted - # into an optimization, so it cannot be specified in a job description that - # also gives 'optimization'. - Exclusive("when", "optimization"): { - # This task only needs to be run if a file matching one of the given - # patterns has changed in the push. The patterns use the mozpack - # match function (python/mozbuild/mozpack/path.py). - Optional("files-changed"): [str], - }, - # A list of artifacts to install from 'fetch' tasks. 
- Optional("fetches"): { - Any("toolchain", "fetch"): [str], - str: [ - str, - fetches_schema, - ], - }, - # A description of how to run this job. - "run": { - # The key to a job implementation in a peer module to this one - "using": str, - # Base work directory used to set up the task. - Optional("workdir"): str, - # Any remaining content is verified against that job implementation's - # own schema. - Extra: object, - }, - Required("worker-type"): task_description_schema["worker-type"], - # This object will be passed through to the task description, with additions - # provided by the job's run-using function - Optional("worker"): dict, - } -) - -transforms = TransformSequence() -transforms.add_validate(job_description_schema) - - -@transforms.add -def rewrite_when_to_optimization(config, jobs): - for job in jobs: - when = job.pop("when", {}) - if not when: - yield job - continue - - files_changed = when.get("files-changed") - - # implicitly add task config directory. - files_changed.append(f"{config.path}/**") - - # "only when files changed" implies "skip if files have not changed" - job["optimization"] = {"skip-unless-changed": files_changed} - - assert "when" not in job - yield job - - -@transforms.add -def set_implementation(config, jobs): - for job in jobs: - impl, os = worker_type_implementation(config.graph_config, job["worker-type"]) - if os: - job.setdefault("tags", {})["os"] = os - if impl: - job.setdefault("tags", {})["worker-implementation"] = impl - worker = job.setdefault("worker", {}) - assert "implementation" not in worker - worker["implementation"] = impl - if os: - worker["os"] = os - yield job - - -@transforms.add -def set_label(config, jobs): - for job in jobs: - if "label" not in job: - if "name" not in job: - raise Exception("job has neither a name nor a label") - job["label"] = "{}-{}".format(config.kind, job["name"]) - if job.get("name"): - del job["name"] - yield job - - -@transforms.add -def add_resource_monitor(config, jobs): - for job in jobs: - if job.get("attributes", {}).get("resource-monitor"): - worker_implementation, worker_os = worker_type_implementation( - config.graph_config, job["worker-type"] - ) - # Normalise worker os so that linux-bitbar and similar use linux tools. 
- worker_os = worker_os.split("-")[0] - if "win7" in job["worker-type"]: - arch = "32" - else: - arch = "64" - job.setdefault("fetches", {}) - job["fetches"].setdefault("toolchain", []) - job["fetches"]["toolchain"].append(f"{worker_os}{arch}-resource-monitor") - - if worker_implementation == "docker-worker": - artifact_source = "/builds/worker/monitoring/resource-monitor.json" - else: - artifact_source = "monitoring/resource-monitor.json" - job["worker"].setdefault("artifacts", []) - job["worker"]["artifacts"].append( - { - "name": "public/monitoring/resource-monitor.json", - "type": "file", - "path": artifact_source, - } - ) - # Set env for output file - job["worker"].setdefault("env", {}) - job["worker"]["env"]["RESOURCE_MONITOR_OUTPUT"] = artifact_source - - yield job - - -def get_attribute(dict, key, attributes, attribute_name): - """Get `attribute_name` from the given `attributes` dict, and if there - is a corresponding value, set `key` in `dict` to that value.""" - value = attributes.get(attribute_name) - if value: - dict[key] = value - - -@transforms.add -def use_fetches(config, jobs): - artifact_names = {} - aliases = {} - extra_env = {} - - if config.kind in ("toolchain", "fetch"): - jobs = list(jobs) - for job in jobs: - run = job.get("run", {}) - label = job["label"] - get_attribute(artifact_names, label, run, "toolchain-artifact") - value = run.get(f"{config.kind}-alias") - if value: - aliases[f"{config.kind}-{value}"] = label - - for task in config.kind_dependencies_tasks.values(): - if task.kind in ("fetch", "toolchain"): - get_attribute( - artifact_names, - task.label, - task.attributes, - f"{task.kind}-artifact", - ) - get_attribute(extra_env, task.label, task.attributes, f"{task.kind}-env") - value = task.attributes.get(f"{task.kind}-alias") - if value: - aliases[f"{task.kind}-{value}"] = task.label - - artifact_prefixes = {} - for job in order_tasks(config, jobs): - artifact_prefixes[job["label"]] = get_artifact_prefix(job) - - fetches = job.pop("fetches", None) - if not fetches: - yield job - continue - - job_fetches = [] - name = job.get("name", job.get("label")) - dependencies = job.setdefault("dependencies", {}) - worker = job.setdefault("worker", {}) - env = worker.setdefault("env", {}) - prefix = get_artifact_prefix(job) - for kind in sorted(fetches): - artifacts = fetches[kind] - if kind in ("fetch", "toolchain"): - for fetch_name in sorted(artifacts): - label = f"{kind}-{fetch_name}" - label = aliases.get(label, label) - if label not in artifact_names: - raise Exception( - "Missing fetch job for {kind}-{name}: {fetch}".format( - kind=config.kind, name=name, fetch=fetch_name - ) - ) - if label in extra_env: - env.update(extra_env[label]) - - path = artifact_names[label] - - dependencies[label] = label - job_fetches.append( - { - "artifact": path, - "task": f"<{label}>", - "extract": True, - } - ) - else: - if kind not in dependencies: - raise Exception( - "{name} can't fetch {kind} artifacts because " - "it has no {kind} dependencies!".format(name=name, kind=kind) - ) - dep_label = dependencies[kind] - if dep_label in artifact_prefixes: - prefix = artifact_prefixes[dep_label] - else: - dep_tasks = [ - task - for label, task in config.kind_dependencies_tasks.items() - if label == dep_label - ] - if len(dep_tasks) != 1: - raise Exception( - "{name} can't fetch {kind} artifacts because " - "there are {tasks} with label {label} in kind dependencies!".format( - name=name, - kind=kind, - label=dependencies[kind], - tasks="no tasks" - if len(dep_tasks) == 0 - else 
"multiple tasks", - ) - ) - - prefix = get_artifact_prefix(dep_tasks[0]) - - def cmp_artifacts(a): - if isinstance(a, str): - return a - else: - return a["artifact"] - - for artifact in sorted(artifacts, key=cmp_artifacts): - if isinstance(artifact, str): - path = artifact - dest = None - extract = True - verify_hash = False - else: - path = artifact["artifact"] - dest = artifact.get("dest") - extract = artifact.get("extract", True) - verify_hash = artifact.get("verify-hash", False) - - fetch = { - "artifact": f"{prefix}/{path}", - "task": f"<{kind}>", - "extract": extract, - } - if dest is not None: - fetch["dest"] = dest - if verify_hash: - fetch["verify-hash"] = verify_hash - job_fetches.append(fetch) - - job_artifact_prefixes = { - mozpath.dirname(fetch["artifact"]) - for fetch in job_fetches - if not fetch["artifact"].startswith("public/") - } - if job_artifact_prefixes: - # Use taskcluster-proxy and request appropriate scope. For example, add - # 'scopes: [queue:get-artifact:path/to/*]' for 'path/to/artifact.tar.xz'. - worker["taskcluster-proxy"] = True - for prefix in sorted(job_artifact_prefixes): - scope = f"queue:get-artifact:{prefix}/*" - if scope not in job.setdefault("scopes", []): - job["scopes"].append(scope) - - env["MOZ_FETCHES"] = {"task-reference": json.dumps(job_fetches, sort_keys=True)} - - env.setdefault("MOZ_FETCHES_DIR", "fetches") - - yield job - - -@transforms.add -def make_task_description(config, jobs): - """Given a build description, create a task description""" - # import plugin modules first, before iterating over jobs - import_sibling_modules(exceptions=("common.py",)) - - for job in jobs: - # always-optimized tasks never execute, so have no workdir - if job["worker"]["implementation"] in ("docker-worker", "generic-worker"): - job["run"].setdefault("workdir", "/builds/worker") - - taskdesc = copy.deepcopy(job) - - # fill in some empty defaults to make run implementations easier - taskdesc.setdefault("attributes", {}) - taskdesc.setdefault("dependencies", {}) - taskdesc.setdefault("soft-dependencies", []) - taskdesc.setdefault("routes", []) - taskdesc.setdefault("scopes", []) - taskdesc.setdefault("extra", {}) - - # give the function for job.run.using on this worker implementation a - # chance to set up the task description. - configure_taskdesc_for_run( - config, job, taskdesc, job["worker"]["implementation"] - ) - del taskdesc["run"] - - # yield only the task description, discarding the job description - yield taskdesc - - -# A registry of all functions decorated with run_job_using -registry = {} - - -def run_job_using(worker_implementation, run_using, schema=None, defaults={}): - """Register the decorated function as able to set up a task description for - jobs with the given worker implementation and `run.using` property. If - `schema` is given, the job's run field will be verified to match it. - - The decorated function should have the signature `using_foo(config, job, taskdesc)` - and should modify the task description in-place. 
The skeleton of - the task description is already set up, but without a payload.""" - - def wrap(func): - for_run_using = registry.setdefault(run_using, {}) - if worker_implementation in for_run_using: - raise Exception( - "run_job_using({!r}, {!r}) already exists: {!r}".format( - run_using, - worker_implementation, - for_run_using[worker_implementation], - ) - ) - for_run_using[worker_implementation] = (func, schema, defaults) - return func - - return wrap - - -@run_job_using( - "always-optimized", "always-optimized", Schema({"using": "always-optimized"}) -) -def always_optimized(config, job, taskdesc): - pass - - -def configure_taskdesc_for_run(config, job, taskdesc, worker_implementation): - """ - Run the appropriate function for this job against the given task - description. - - This will raise an appropriate error if no function exists, or if the job's - run is not valid according to the schema. - """ - run_using = job["run"]["using"] - if run_using not in registry: - raise Exception(f"no functions for run.using {run_using!r}") - - if worker_implementation not in registry[run_using]: - raise Exception( - "no functions for run.using {!r} on {!r}".format( - run_using, worker_implementation - ) - ) - - func, schema, defaults = registry[run_using][worker_implementation] - for k, v in defaults.items(): - job["run"].setdefault(k, v) - - if schema: - validate_schema( - schema, - job["run"], - "In job.run using {!r}/{!r} for job {!r}:".format( - job["run"]["using"], worker_implementation, job["label"] - ), - ) - func(config, job, taskdesc) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py deleted file mode 100644 index 04708daf81..0000000000 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py +++ /dev/null @@ -1,171 +0,0 @@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -""" -Common support for various job types. These functions are all named after the -worker implementation they operate on, and take the same three parameters, for -consistency. -""" - - -import hashlib -import json - -from taskgraph.util.taskcluster import get_artifact_prefix - - -def get_vcsdir_name(os): - if os == "windows": - return "src" - else: - return "vcs" - - -def add_cache(job, taskdesc, name, mount_point, skip_untrusted=False): - """Adds a cache based on the worker's implementation. - - Args: - job (dict): Task's job description. - taskdesc (dict): Target task description to modify. - name (str): Name of the cache. - mount_point (path): Path on the host to mount the cache. - skip_untrusted (bool): Whether cache is used in untrusted environments - (default: False). Only applies to docker-worker. 
- """ - if not job["run"].get("use-caches", True): - return - - worker = job["worker"] - - if worker["implementation"] == "docker-worker": - taskdesc["worker"].setdefault("caches", []).append( - { - "type": "persistent", - "name": name, - "mount-point": mount_point, - "skip-untrusted": skip_untrusted, - } - ) - - elif worker["implementation"] == "generic-worker": - taskdesc["worker"].setdefault("mounts", []).append( - { - "cache-name": name, - "directory": mount_point, - } - ) - - else: - # Caches not implemented - pass - - -def add_artifacts(config, job, taskdesc, path): - taskdesc["worker"].setdefault("artifacts", []).append( - { - "name": get_artifact_prefix(taskdesc), - "path": path, - "type": "directory", - } - ) - - -def docker_worker_add_artifacts(config, job, taskdesc): - """Adds an artifact directory to the task""" - path = "{workdir}/artifacts/".format(**job["run"]) - taskdesc["worker"]["env"]["UPLOAD_DIR"] = path - add_artifacts(config, job, taskdesc, path) - - -def generic_worker_add_artifacts(config, job, taskdesc): - """Adds an artifact directory to the task""" - # The path is the location on disk; it doesn't necessarily - # mean the artifacts will be public or private; that is set via the name - # attribute in add_artifacts. - add_artifacts(config, job, taskdesc, path=get_artifact_prefix(taskdesc)) - - -def support_vcs_checkout(config, job, taskdesc, repo_configs, sparse=False): - """Update a job/task with parameters to enable a VCS checkout. - - This can only be used with ``run-task`` tasks, as the cache name is - reserved for ``run-task`` tasks. - """ - worker = job["worker"] - is_mac = worker["os"] == "macosx" - is_win = worker["os"] == "windows" - is_linux = worker["os"] == "linux" - is_docker = worker["implementation"] == "docker-worker" - assert is_mac or is_win or is_linux - - if is_win: - checkoutdir = "./build" - hgstore = "y:/hg-shared" - elif is_docker: - checkoutdir = "{workdir}/checkouts".format(**job["run"]) - hgstore = f"{checkoutdir}/hg-store" - else: - checkoutdir = "./checkouts" - hgstore = f"{checkoutdir}/hg-shared" - - vcsdir = checkoutdir + "/" + get_vcsdir_name(worker["os"]) - cache_name = "checkouts" - - # Robust checkout does not clean up subrepositories, so ensure that tasks - # that checkout different sets of paths have separate caches. - # See https://bugzilla.mozilla.org/show_bug.cgi?id=1631610 - if len(repo_configs) > 1: - checkout_paths = { - "\t".join([repo_config.path, repo_config.prefix]) - for repo_config in sorted( - repo_configs.values(), key=lambda repo_config: repo_config.path - ) - } - checkout_paths_str = "\n".join(checkout_paths).encode("utf-8") - digest = hashlib.sha256(checkout_paths_str).hexdigest() - cache_name += f"-repos-{digest}" - - # Sparse checkouts need their own cache because they can interfere - # with clients that aren't sparse aware. - if sparse: - cache_name += "-sparse" - - # Workers using Mercurial >= 5.8 will enable revlog-compression-zstd, which - # workers using older versions can't understand, so they can't share cache. - # At the moment, only docker workers use the newer version. 
- if is_docker: - cache_name += "-hg58" - - add_cache(job, taskdesc, cache_name, checkoutdir) - - env = taskdesc["worker"].setdefault("env", {}) - env.update( - { - "HG_STORE_PATH": hgstore, - "REPOSITORIES": json.dumps( - {repo.prefix: repo.name for repo in repo_configs.values()} - ), - "VCS_PATH": vcsdir, - } - ) - for repo_config in repo_configs.values(): - env.update( - { - f"{repo_config.prefix.upper()}_{key}": value - for key, value in { - "BASE_REPOSITORY": repo_config.base_repository, - "HEAD_REPOSITORY": repo_config.head_repository, - "HEAD_REV": repo_config.head_rev, - "HEAD_REF": repo_config.head_ref, - "REPOSITORY_TYPE": repo_config.type, - "SSH_SECRET_NAME": repo_config.ssh_secret_name, - }.items() - if value is not None - } - ) - if repo_config.ssh_secret_name: - taskdesc["scopes"].append(f"secrets:get:{repo_config.ssh_secret_name}") - - # only some worker platforms have taskcluster-proxy enabled - if job["worker"]["implementation"] in ("docker-worker",): - taskdesc["worker"]["taskcluster-proxy"] = True diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/index_search.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/index_search.py deleted file mode 100644 index 09b48fe594..0000000000 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/index_search.py +++ /dev/null @@ -1,37 +0,0 @@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. - -""" -This transform allows including indexed tasks from other projects in the -current taskgraph. The transform takes a list of indexes, and the optimization -phase will replace the task with the task from the other graph. -""" - - -from voluptuous import Required - -from taskgraph.transforms.base import TransformSequence -from taskgraph.transforms.job import run_job_using -from taskgraph.util.schema import Schema - -transforms = TransformSequence() - -run_task_schema = Schema( - { - Required("using"): "index-search", - Required( - "index-search", - "A list of indexes in decreasing order of priority at which to lookup for this " - "task. This is interpolated with the graph parameters.", - ): [str], - } -) - - -@run_job_using("always-optimized", "index-search", schema=run_task_schema) -def fill_template(config, job, taskdesc): - run = job["run"] - taskdesc["optimization"] = { - "index-search": [index.format(**config.params) for index in run["index-search"]] - } diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py deleted file mode 100644 index 6337673611..0000000000 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py +++ /dev/null @@ -1,231 +0,0 @@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -""" -Support for running jobs that are invoked via the `run-task` script. 
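Before the schema below, a concrete (hypothetical) shape of a run-task stanza may help; `{checkout}` in `cwd` is interpolated with the checkout path, and a plain string command is wrapped by `exec-with` (bash by default):

    # Hypothetical `run` section for a run-task job; the keys follow
    # run_task_schema below, the values are invented.
    example_run = {
        "using": "run-task",
        "checkout": True,
        "cwd": "{checkout}",        # becomes the path to the VCS checkout
        "command": "./mach build",  # wrapped as: bash -cx './mach build'
        "sparse-profile": None,
    }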
-""" - -import dataclasses -import os - -from voluptuous import Any, Optional, Required - -from taskgraph.transforms.job import run_job_using -from taskgraph.transforms.job.common import support_vcs_checkout -from taskgraph.transforms.task import taskref_or_string -from taskgraph.util import path, taskcluster -from taskgraph.util.schema import Schema - -EXEC_COMMANDS = { - "bash": ["bash", "-cx"], - "powershell": ["powershell.exe", "-ExecutionPolicy", "Bypass"], -} - -run_task_schema = Schema( - { - Required("using"): "run-task", - # if true, add a cache at ~worker/.cache, which is where things like pip - # tend to hide their caches. This cache is never added for level-1 jobs. - # TODO Once bug 1526028 is fixed, this and 'use-caches' should be merged. - Required("cache-dotcache"): bool, - # Whether or not to use caches. - Optional("use-caches"): bool, - # if true (the default), perform a checkout on the worker - Required("checkout"): Any(bool, {str: dict}), - Optional( - "cwd", - description="Path to run command in. If a checkout is present, the path " - "to the checkout will be interpolated with the key `checkout`", - ): str, - # The sparse checkout profile to use. Value is the filename relative to the - # directory where sparse profiles are defined (build/sparse-profiles/). - Required("sparse-profile"): Any(str, None), - # The command arguments to pass to the `run-task` script, after the - # checkout arguments. If a list, it will be passed directly; otherwise - # it will be included in a single argument to the command specified by - # `exec-with`. - Required("command"): Any([taskref_or_string], taskref_or_string), - # What to execute the command with in the event command is a string. - Optional("exec-with"): Any(*list(EXEC_COMMANDS)), - # Command used to invoke the `run-task` script. Can be used if the script - # or Python installation is in a non-standard location on the workers. - Optional("run-task-command"): list, - # Base work directory used to set up the task. - Required("workdir"): str, - # Whether to run as root. 
(defaults to False) - Optional("run-as-root"): bool, - } -) - - -def common_setup(config, job, taskdesc, command): - run = job["run"] - if run["checkout"]: - repo_configs = config.repo_configs - if len(repo_configs) > 1 and run["checkout"] is True: - raise Exception("Must explicitly specify checkouts with multiple repos.") - elif run["checkout"] is not True: - repo_configs = { - repo: dataclasses.replace(repo_configs[repo], **config) - for (repo, config) in run["checkout"].items() - } - - support_vcs_checkout( - config, - job, - taskdesc, - repo_configs=repo_configs, - sparse=bool(run["sparse-profile"]), - ) - - vcs_path = taskdesc["worker"]["env"]["VCS_PATH"] - for repo_config in repo_configs.values(): - checkout_path = path.join(vcs_path, repo_config.path) - command.append(f"--{repo_config.prefix}-checkout={checkout_path}") - - if run["sparse-profile"]: - command.append( - "--{}-sparse-profile=build/sparse-profiles/{}".format( - repo_config.prefix, - run["sparse-profile"], - ) - ) - - if "cwd" in run: - run["cwd"] = path.normpath(run["cwd"].format(checkout=vcs_path)) - elif "cwd" in run and "{checkout}" in run["cwd"]: - raise Exception( - "Found `{{checkout}}` interpolation in `cwd` for task {name} " - "but the task doesn't have a checkout: {cwd}".format( - cwd=run["cwd"], name=job.get("name", job.get("label")) - ) - ) - - if "cwd" in run: - command.extend(("--task-cwd", run["cwd"])) - - taskdesc["worker"].setdefault("env", {})["MOZ_SCM_LEVEL"] = config.params["level"] - - -worker_defaults = { - "cache-dotcache": False, - "checkout": True, - "sparse-profile": None, - "run-as-root": False, -} - - -def script_url(config, script): - if "MOZ_AUTOMATION" in os.environ and "TASK_ID" not in os.environ: - raise Exception("TASK_ID must be defined to use run-task on generic-worker") - task_id = os.environ.get("TASK_ID", "") - # use_proxy = False to avoid having all generic-workers turn on proxy - # Assumes the cluster allows anonymous downloads of public artifacts - tc_url = taskcluster.get_root_url(False) - # TODO: Use util/taskcluster.py:get_artifact_url once hack for Bug 1405889 is removed - return f"{tc_url}/api/queue/v1/task/{task_id}/artifacts/public/{script}" - - -@run_job_using( - "docker-worker", "run-task", schema=run_task_schema, defaults=worker_defaults -) -def docker_worker_run_task(config, job, taskdesc): - run = job["run"] - worker = taskdesc["worker"] = job["worker"] - command = run.pop("run-task-command", ["/usr/local/bin/run-task"]) - common_setup(config, job, taskdesc, command) - - if run.get("cache-dotcache"): - worker["caches"].append( - { - "type": "persistent", - "name": "{project}-dotcache".format(**config.params), - "mount-point": "{workdir}/.cache".format(**run), - "skip-untrusted": True, - } - ) - - run_command = run["command"] - - # dict is for the case of `{'task-reference': str}`. 
- if isinstance(run_command, str) or isinstance(run_command, dict): - exec_cmd = EXEC_COMMANDS[run.pop("exec-with", "bash")] - run_command = exec_cmd + [run_command] - if run["run-as-root"]: - command.extend(("--user", "root", "--group", "root")) - command.append("--") - command.extend(run_command) - worker["command"] = command - - -@run_job_using( - "generic-worker", "run-task", schema=run_task_schema, defaults=worker_defaults -) -def generic_worker_run_task(config, job, taskdesc): - run = job["run"] - worker = taskdesc["worker"] = job["worker"] - is_win = worker["os"] == "windows" - is_mac = worker["os"] == "macosx" - is_bitbar = worker["os"] == "linux-bitbar" - - command = run.pop("run-task-command", None) - if not command: - if is_win: - command = ["C:/mozilla-build/python3/python3.exe", "run-task"] - elif is_mac: - command = ["/tools/python36/bin/python3", "run-task"] - else: - command = ["./run-task"] - - common_setup(config, job, taskdesc, command) - - worker.setdefault("mounts", []) - if run.get("cache-dotcache"): - worker["mounts"].append( - { - "cache-name": "{project}-dotcache".format(**config.params), - "directory": "{workdir}/.cache".format(**run), - } - ) - worker["mounts"].append( - { - "content": { - "url": script_url(config, "run-task"), - }, - "file": "./run-task", - } - ) - if worker.get("env", {}).get("MOZ_FETCHES"): - worker["mounts"].append( - { - "content": { - "url": script_url(config, "fetch-content"), - }, - "file": "./fetch-content", - } - ) - - run_command = run["command"] - - if isinstance(run_command, str): - if is_win: - run_command = f'"{run_command}"' - exec_cmd = EXEC_COMMANDS[run.pop("exec-with", "bash")] - run_command = exec_cmd + [run_command] - - if run["run-as-root"]: - command.extend(("--user", "root", "--group", "root")) - command.append("--") - if is_bitbar: - # Use the bitbar wrapper script which sets up the device and adb - # environment variables - command.append("/builds/taskcluster/script.py") - command.extend(run_command) - - if is_win: - worker["command"] = [" ".join(command)] - else: - worker["command"] = [ - ["chmod", "+x", "run-task"], - command, - ] diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py deleted file mode 100644 index c9c09542ff..0000000000 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py +++ /dev/null @@ -1,175 +0,0 @@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -""" -Support for running toolchain-building jobs via dedicated scripts -""" - -from voluptuous import ALLOW_EXTRA, Any, Optional, Required - -import taskgraph -from taskgraph.transforms.job import configure_taskdesc_for_run, run_job_using -from taskgraph.transforms.job.common import ( - docker_worker_add_artifacts, - generic_worker_add_artifacts, - get_vcsdir_name, -) -from taskgraph.util.hash import hash_paths -from taskgraph.util.schema import Schema -from taskgraph.util.shell import quote as shell_quote - -CACHE_TYPE = "toolchains.v3" - -toolchain_run_schema = Schema( - { - Required("using"): "toolchain-script", - # The script (in taskcluster/scripts/misc) to run. - Required("script"): str, - # Arguments to pass to the script. - Optional("arguments"): [str], - # Sparse profile to give to checkout using `run-task`. 
If given, - # a filename in `build/sparse-profiles`. Defaults to - # "toolchain-build", i.e., to - # `build/sparse-profiles/toolchain-build`. If `None`, instructs - # `run-task` to not use a sparse profile at all. - Required("sparse-profile"): Any(str, None), - # Paths/patterns pointing to files that influence the outcome of a - # toolchain build. - Optional("resources"): [str], - # Path to the artifact produced by the toolchain job - Required("toolchain-artifact"): str, - Optional( - "toolchain-alias", - description="An alias that can be used instead of the real toolchain job name in " - "fetch stanzas for jobs.", - ): Any(str, [str]), - Optional( - "toolchain-env", - description="Additional env variables to add to the worker when using this toolchain", - ): {str: object}, - # Base work directory used to set up the task. - Required("workdir"): str, - }, - extra=ALLOW_EXTRA, -) - - -def get_digest_data(config, run, taskdesc): - files = list(run.pop("resources", [])) - # The script - files.append("taskcluster/scripts/toolchain/{}".format(run["script"])) - - # Accumulate dependency hashes for index generation. - data = [hash_paths(config.graph_config.vcs_root, files)] - - data.append(taskdesc["attributes"]["toolchain-artifact"]) - - # If the task uses an in-tree docker image, we want it to influence - # the index path as well. Ideally, the content of the docker image itself - # should have an influence, but at the moment, we can't get that - # information here. So use the docker image name as a proxy. Not a lot of - # changes to docker images actually have an impact on the resulting - # toolchain artifact, so we'll just rely on such important changes to be - # accompanied with a docker image name change. - image = taskdesc["worker"].get("docker-image", {}).get("in-tree") - if image: - data.append(image) - - # Likewise script arguments should influence the index. - args = run.get("arguments") - if args: - data.extend(args) - return data - - -def common_toolchain(config, job, taskdesc, is_docker): - run = job["run"] - - worker = taskdesc["worker"] = job["worker"] - worker["chain-of-trust"] = True - - srcdir = get_vcsdir_name(worker["os"]) - - if is_docker: - # If the task doesn't have a docker-image, set a default - worker.setdefault("docker-image", {"in-tree": "toolchain-build"}) - - # Allow the job to specify where artifacts come from, but add - # public/build if it's not there already. - artifacts = worker.setdefault("artifacts", []) - if not any(artifact.get("name") == "public/build" for artifact in artifacts): - if is_docker: - docker_worker_add_artifacts(config, job, taskdesc) - else: - generic_worker_add_artifacts(config, job, taskdesc) - - env = worker["env"] - env.update( - { - "MOZ_BUILD_DATE": config.params["moz_build_date"], - "MOZ_SCM_LEVEL": config.params["level"], - } - ) - - attributes = taskdesc.setdefault("attributes", {}) - attributes["toolchain-artifact"] = run.pop("toolchain-artifact") - if "toolchain-alias" in run: - attributes["toolchain-alias"] = run.pop("toolchain-alias") - if "toolchain-env" in run: - attributes["toolchain-env"] = run.pop("toolchain-env") - - if not taskgraph.fast: - name = taskdesc["label"].replace(f"{config.kind}-", "", 1) - taskdesc["cache"] = { - "type": CACHE_TYPE, - "name": name, - "digest-data": get_digest_data(config, run, taskdesc), - } - - script = run.pop("script") - run["using"] = "run-task" - run["cwd"] = "{checkout}/.." 
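The two assignments just above are the pivot of this handler: the toolchain-specific `run` section is rewritten in place into a plain run-task invocation, which the run-task handler then processes. Schematically (the script name is invented; `srcdir` is "vcs" on non-Windows workers per get_vcsdir_name):

    # Roughly what `run` looks like after the rewrite above; a sketch,
    # not taken from this patch.
    rewritten_run = {
        "using": "run-task",
        "cwd": "{checkout}/..",
        "command": "vcs/taskcluster/scripts/toolchain/build-clang.sh",
    }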
- - if script.endswith(".ps1"): - run["exec-with"] = "powershell" - - command = [f"{srcdir}/taskcluster/scripts/toolchain/{script}"] + run.pop( - "arguments", [] - ) - - if not is_docker: - # Don't quote the first item in the command because it purposely contains - # an environment variable that is not meant to be quoted. - if len(command) > 1: - command = command[0] + " " + shell_quote(*command[1:]) - else: - command = command[0] - - run["command"] = command - - configure_taskdesc_for_run(config, job, taskdesc, worker["implementation"]) - - -toolchain_defaults = { - "sparse-profile": "toolchain-build", -} - - -@run_job_using( - "docker-worker", - "toolchain-script", - schema=toolchain_run_schema, - defaults=toolchain_defaults, -) -def docker_worker_toolchain(config, job, taskdesc): - common_toolchain(config, job, taskdesc, is_docker=True) - - -@run_job_using( - "generic-worker", - "toolchain-script", - schema=toolchain_run_schema, - defaults=toolchain_defaults, -) -def generic_worker_toolchain(config, job, taskdesc): - common_toolchain(config, job, taskdesc, is_docker=False) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/__init__.py new file mode 100644 index 0000000000..a783a0dc13 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/__init__.py @@ -0,0 +1,451 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +""" +Convert a run description into a task description. + +Run descriptions are similar to task descriptions, but they specify how to run +the task at a higher level, using a "run" field that can be interpreted by +run-using handlers in `taskcluster/taskgraph/transforms/run`. +""" + + +import copy +import json +import logging + +from voluptuous import Any, Exclusive, Extra, Optional, Required + +from taskgraph.transforms.base import TransformSequence +from taskgraph.transforms.cached_tasks import order_tasks +from taskgraph.transforms.task import task_description_schema +from taskgraph.util import path as mozpath +from taskgraph.util.python_path import import_sibling_modules +from taskgraph.util.schema import Schema, validate_schema +from taskgraph.util.taskcluster import get_artifact_prefix +from taskgraph.util.workertypes import worker_type_implementation + +logger = logging.getLogger(__name__) + +# Fetches may be accepted in other transforms and eventually passed along +# to a `task` (eg: from_deps). Defining this here allows them to reuse +# the schema and avoid duplication. +fetches_schema = { + Required("artifact"): str, + Optional("dest"): str, + Optional("extract"): bool, + Optional("verify-hash"): bool, +} + +# Schema for a build description +run_description_schema = Schema( + { + # The name of the task and the task's label. At least one must be specified, + # and the label will be generated from the name if necessary, by prepending + # the kind. + Optional("name"): str, + Optional("label"): str, + # the following fields are passed directly through to the task description, + # possibly modified by the run implementation. See + # taskcluster/taskgraph/transforms/task.py for the schema details. 
+ Required("description"): task_description_schema["description"], + Optional("attributes"): task_description_schema["attributes"], + Optional("task-from"): task_description_schema["task-from"], + Optional("dependencies"): task_description_schema["dependencies"], + Optional("soft-dependencies"): task_description_schema["soft-dependencies"], + Optional("if-dependencies"): task_description_schema["if-dependencies"], + Optional("requires"): task_description_schema["requires"], + Optional("deadline-after"): task_description_schema["deadline-after"], + Optional("expires-after"): task_description_schema["expires-after"], + Optional("routes"): task_description_schema["routes"], + Optional("scopes"): task_description_schema["scopes"], + Optional("tags"): task_description_schema["tags"], + Optional("extra"): task_description_schema["extra"], + Optional("treeherder"): task_description_schema["treeherder"], + Optional("index"): task_description_schema["index"], + Optional("run-on-projects"): task_description_schema["run-on-projects"], + Optional("run-on-tasks-for"): task_description_schema["run-on-tasks-for"], + Optional("run-on-git-branches"): task_description_schema["run-on-git-branches"], + Optional("shipping-phase"): task_description_schema["shipping-phase"], + Optional("always-target"): task_description_schema["always-target"], + Exclusive("optimization", "optimization"): task_description_schema[ + "optimization" + ], + Optional("needs-sccache"): task_description_schema["needs-sccache"], + # The "when" section contains descriptions of the circumstances under which + # this task should be included in the task graph. This will be converted + # into an optimization, so it cannot be specified in a run description that + # also gives 'optimization'. + Exclusive("when", "optimization"): { + # This task only needs to be run if a file matching one of the given + # patterns has changed in the push. The patterns use the mozpack + # match function (python/mozbuild/mozpack/path.py). + Optional("files-changed"): [str], + }, + # A list of artifacts to install from 'fetch' tasks. + Optional("fetches"): { + Any("toolchain", "fetch"): [str], + str: [ + str, + fetches_schema, + ], + }, + # A description of how to run this task. + "run": { + # The key to a run implementation in a peer module to this one + "using": str, + # Base work directory used to set up the task. + Optional("workdir"): str, + # Any remaining content is verified against that run implementation's + # own schema. + Extra: object, + }, + Required("worker-type"): task_description_schema["worker-type"], + # This object will be passed through to the task description, with additions + # provided by the task's run-using function + Optional("worker"): dict, + } +) + +transforms = TransformSequence() +transforms.add_validate(run_description_schema) + + +@transforms.add +def rewrite_when_to_optimization(config, tasks): + for task in tasks: + when = task.pop("when", {}) + if not when: + yield task + continue + + files_changed = when.get("files-changed") + + # implicitly add task config directory. 
+ files_changed.append(f"{config.path}/**") + + # "only when files changed" implies "skip if files have not changed" + task["optimization"] = {"skip-unless-changed": files_changed} + + assert "when" not in task + yield task + + +@transforms.add +def set_implementation(config, tasks): + for task in tasks: + impl, os = worker_type_implementation(config.graph_config, task["worker-type"]) + if os: + task.setdefault("tags", {})["os"] = os + if impl: + task.setdefault("tags", {})["worker-implementation"] = impl + worker = task.setdefault("worker", {}) + assert "implementation" not in worker + worker["implementation"] = impl + if os: + worker["os"] = os + yield task + + +@transforms.add +def set_label(config, tasks): + for task in tasks: + if "label" not in task: + if "name" not in task: + raise Exception("task has neither a name nor a label") + task["label"] = "{}-{}".format(config.kind, task["name"]) + if task.get("name"): + del task["name"] + yield task + + +@transforms.add +def add_resource_monitor(config, tasks): + for task in tasks: + if task.get("attributes", {}).get("resource-monitor"): + worker_implementation, worker_os = worker_type_implementation( + config.graph_config, task["worker-type"] + ) + # Normalise worker os so that linux-bitbar and similar use linux tools. + if worker_os: + worker_os = worker_os.split("-")[0] + if "win7" in task["worker-type"]: + arch = "32" + else: + arch = "64" + task.setdefault("fetches", {}) + task["fetches"].setdefault("toolchain", []) + task["fetches"]["toolchain"].append(f"{worker_os}{arch}-resource-monitor") + + if worker_implementation == "docker-worker": + artifact_source = "/builds/worker/monitoring/resource-monitor.json" + else: + artifact_source = "monitoring/resource-monitor.json" + task["worker"].setdefault("artifacts", []) + task["worker"]["artifacts"].append( + { + "name": "public/monitoring/resource-monitor.json", + "type": "file", + "path": artifact_source, + } + ) + # Set env for output file + task["worker"].setdefault("env", {}) + task["worker"]["env"]["RESOURCE_MONITOR_OUTPUT"] = artifact_source + + yield task + + +def get_attribute(dict, key, attributes, attribute_name): + """Get `attribute_name` from the given `attributes` dict, and if there + is a corresponding value, set `key` in `dict` to that value.""" + value = attributes.get(attribute_name) + if value: + dict[key] = value + + +@transforms.add +def use_fetches(config, tasks): + artifact_names = {} + aliases = {} + extra_env = {} + + if config.kind in ("toolchain", "fetch"): + tasks = list(tasks) + for task in tasks: + run = task.get("run", {}) + label = task["label"] + get_attribute(artifact_names, label, run, "toolchain-artifact") + value = run.get(f"{config.kind}-alias") + if value: + aliases[f"{config.kind}-{value}"] = label + + for task in config.kind_dependencies_tasks.values(): + if task.kind in ("fetch", "toolchain"): + get_attribute( + artifact_names, + task.label, + task.attributes, + f"{task.kind}-artifact", + ) + get_attribute(extra_env, task.label, task.attributes, f"{task.kind}-env") + value = task.attributes.get(f"{task.kind}-alias") + if value: + aliases[f"{task.kind}-{value}"] = task.label + + artifact_prefixes = {} + for task in order_tasks(config, tasks): + artifact_prefixes[task["label"]] = get_artifact_prefix(task) + + fetches = task.pop("fetches", None) + if not fetches: + yield task + continue + + task_fetches = [] + name = task.get("name", task.get("label")) + dependencies = task.setdefault("dependencies", {}) + worker = task.setdefault("worker", {}) + env = 
worker.setdefault("env", {}) + prefix = get_artifact_prefix(task) + for kind in sorted(fetches): + artifacts = fetches[kind] + if kind in ("fetch", "toolchain"): + for fetch_name in sorted(artifacts): + label = f"{kind}-{fetch_name}" + label = aliases.get(label, label) + if label not in artifact_names: + raise Exception( + f"Missing fetch task for {config.kind}-{name}: {fetch_name}" + ) + if label in extra_env: + env.update(extra_env[label]) + + path = artifact_names[label] + + dependencies[label] = label + task_fetches.append( + { + "artifact": path, + "task": f"<{label}>", + "extract": True, + } + ) + else: + if kind not in dependencies: + raise Exception( + f"{name} can't fetch {kind} artifacts because " + f"it has no {kind} dependencies!" + ) + dep_label = dependencies[kind] + if dep_label in artifact_prefixes: + prefix = artifact_prefixes[dep_label] + else: + dep_tasks = [ + task + for label, task in config.kind_dependencies_tasks.items() + if label == dep_label + ] + if len(dep_tasks) != 1: + raise Exception( + "{name} can't fetch {kind} artifacts because " + "there are {tasks} with label {label} in kind dependencies!".format( + name=name, + kind=kind, + label=dependencies[kind], + tasks=( + "no tasks" + if len(dep_tasks) == 0 + else "multiple tasks" + ), + ) + ) + + prefix = get_artifact_prefix(dep_tasks[0]) + + def cmp_artifacts(a): + if isinstance(a, str): + return a + else: + return a["artifact"] + + for artifact in sorted(artifacts, key=cmp_artifacts): + if isinstance(artifact, str): + path = artifact + dest = None + extract = True + verify_hash = False + else: + path = artifact["artifact"] + dest = artifact.get("dest") + extract = artifact.get("extract", True) + verify_hash = artifact.get("verify-hash", False) + + fetch = { + "artifact": f"{prefix}/{path}", + "task": f"<{kind}>", + "extract": extract, + } + if dest is not None: + fetch["dest"] = dest + if verify_hash: + fetch["verify-hash"] = verify_hash + task_fetches.append(fetch) + + task_artifact_prefixes = { + mozpath.dirname(fetch["artifact"]) + for fetch in task_fetches + if not fetch["artifact"].startswith("public/") + } + if task_artifact_prefixes: + # Use taskcluster-proxy and request appropriate scope. For example, add + # 'scopes: [queue:get-artifact:path/to/*]' for 'path/to/artifact.tar.xz'. 
+ worker["taskcluster-proxy"] = True + for prefix in sorted(task_artifact_prefixes): + scope = f"queue:get-artifact:{prefix}/*" + if scope not in task.setdefault("scopes", []): + task["scopes"].append(scope) + + env["MOZ_FETCHES"] = { + "task-reference": json.dumps(task_fetches, sort_keys=True) + } + + env.setdefault("MOZ_FETCHES_DIR", "fetches") + + yield task + + +@transforms.add +def make_task_description(config, tasks): + """Given a build description, create a task description""" + # import plugin modules first, before iterating over tasks + import_sibling_modules(exceptions=("common.py",)) + + for task in tasks: + # always-optimized tasks never execute, so have no workdir + if task["worker"]["implementation"] in ("docker-worker", "generic-worker"): + task["run"].setdefault("workdir", "/builds/worker") + + taskdesc = copy.deepcopy(task) + + # fill in some empty defaults to make run implementations easier + taskdesc.setdefault("attributes", {}) + taskdesc.setdefault("dependencies", {}) + taskdesc.setdefault("soft-dependencies", []) + taskdesc.setdefault("routes", []) + taskdesc.setdefault("scopes", []) + taskdesc.setdefault("extra", {}) + + # give the function for task.run.using on this worker implementation a + # chance to set up the task description. + configure_taskdesc_for_run( + config, task, taskdesc, task["worker"]["implementation"] + ) + del taskdesc["run"] + + # yield only the task description, discarding the task description + yield taskdesc + + +# A registry of all functions decorated with run_task_using +registry = {} + + +def run_task_using(worker_implementation, run_using, schema=None, defaults={}): + """Register the decorated function as able to set up a task description for + tasks with the given worker implementation and `run.using` property. If + `schema` is given, the task's run field will be verified to match it. + + The decorated function should have the signature `using_foo(config, task, taskdesc)` + and should modify the task description in-place. The skeleton of + the task description is already set up, but without a payload.""" + + def wrap(func): + for_run_using = registry.setdefault(run_using, {}) + if worker_implementation in for_run_using: + raise Exception( + f"run_task_using({run_using!r}, {worker_implementation!r}) already exists: {for_run_using[worker_implementation]!r}" + ) + for_run_using[worker_implementation] = (func, schema, defaults) + return func + + return wrap + + +@run_task_using( + "always-optimized", "always-optimized", Schema({"using": "always-optimized"}) +) +def always_optimized(config, task, taskdesc): + pass + + +def configure_taskdesc_for_run(config, task, taskdesc, worker_implementation): + """ + Run the appropriate function for this task against the given task + description. + + This will raise an appropriate error if no function exists, or if the task's + run is not valid according to the schema. 
+ """ + run_using = task["run"]["using"] + if run_using not in registry: + raise Exception(f"no functions for run.using {run_using!r}") + + if worker_implementation not in registry[run_using]: + raise Exception( + f"no functions for run.using {run_using!r} on {worker_implementation!r}" + ) + + func, schema, defaults = registry[run_using][worker_implementation] + for k, v in defaults.items(): + task["run"].setdefault(k, v) + + if schema: + validate_schema( + schema, + task["run"], + "In task.run using {!r}/{!r} for task {!r}:".format( + task["run"]["using"], worker_implementation, task["label"] + ), + ) + func(config, task, taskdesc) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/common.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/common.py new file mode 100644 index 0000000000..66466bc5f9 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/common.py @@ -0,0 +1,165 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +""" +Common support for various task types. These functions are all named after the +worker implementation they operate on, and take the same three parameters, for +consistency. +""" + + +import hashlib +import json + +from taskgraph.util.taskcluster import get_artifact_prefix + + +def get_vcsdir_name(os): + if os == "windows": + return "src" + else: + return "vcs" + + +def add_cache(task, taskdesc, name, mount_point, skip_untrusted=False): + """Adds a cache based on the worker's implementation. + + Args: + task (dict): Tasks object. + taskdesc (dict): Target task description to modify. + name (str): Name of the cache. + mount_point (path): Path on the host to mount the cache. + skip_untrusted (bool): Whether cache is used in untrusted environments + (default: False). Only applies to docker-worker. + """ + if not task["run"].get("use-caches", True): + return + + worker = task["worker"] + + if worker["implementation"] == "docker-worker": + taskdesc["worker"].setdefault("caches", []).append( + { + "type": "persistent", + "name": name, + "mount-point": mount_point, + "skip-untrusted": skip_untrusted, + } + ) + + elif worker["implementation"] == "generic-worker": + taskdesc["worker"].setdefault("mounts", []).append( + { + "cache-name": name, + "directory": mount_point, + } + ) + + else: + # Caches not implemented + pass + + +def add_artifacts(config, task, taskdesc, path): + taskdesc["worker"].setdefault("artifacts", []).append( + { + "name": get_artifact_prefix(taskdesc), + "path": path, + "type": "directory", + } + ) + + +def docker_worker_add_artifacts(config, task, taskdesc): + """Adds an artifact directory to the task""" + path = "{workdir}/artifacts/".format(**task["run"]) + taskdesc["worker"]["env"]["UPLOAD_DIR"] = path + add_artifacts(config, task, taskdesc, path) + + +def generic_worker_add_artifacts(config, task, taskdesc): + """Adds an artifact directory to the task""" + # The path is the location on disk; it doesn't necessarily + # mean the artifacts will be public or private; that is set via the name + # attribute in add_artifacts. + add_artifacts(config, task, taskdesc, path=get_artifact_prefix(taskdesc)) + + +def support_vcs_checkout(config, task, taskdesc, repo_configs, sparse=False): + """Update a task with parameters to enable a VCS checkout. 
+ + This can only be used with ``run-task`` tasks, as the cache name is + reserved for ``run-task`` tasks. + """ + worker = task["worker"] + is_mac = worker["os"] == "macosx" + is_win = worker["os"] == "windows" + is_linux = worker["os"] == "linux" + is_docker = worker["implementation"] == "docker-worker" + assert is_mac or is_win or is_linux + + if is_win: + checkoutdir = "./build" + hgstore = "y:/hg-shared" + elif is_docker: + checkoutdir = "{workdir}/checkouts".format(**task["run"]) + hgstore = f"{checkoutdir}/hg-store" + else: + checkoutdir = "./checkouts" + hgstore = f"{checkoutdir}/hg-shared" + + vcsdir = checkoutdir + "/" + get_vcsdir_name(worker["os"]) + cache_name = "checkouts" + + # Robust checkout does not clean up subrepositories, so ensure that tasks + # that checkout different sets of paths have separate caches. + # See https://bugzilla.mozilla.org/show_bug.cgi?id=1631610 + if len(repo_configs) > 1: + checkout_paths = { + "\t".join([repo_config.path, repo_config.prefix]) + for repo_config in sorted( + repo_configs.values(), key=lambda repo_config: repo_config.path + ) + } + checkout_paths_str = "\n".join(checkout_paths).encode("utf-8") + digest = hashlib.sha256(checkout_paths_str).hexdigest() + cache_name += f"-repos-{digest}" + + # Sparse checkouts need their own cache because they can interfere + # with clients that aren't sparse aware. + if sparse: + cache_name += "-sparse" + + add_cache(task, taskdesc, cache_name, checkoutdir) + + env = taskdesc["worker"].setdefault("env", {}) + env.update( + { + "HG_STORE_PATH": hgstore, + "REPOSITORIES": json.dumps( + {repo.prefix: repo.name for repo in repo_configs.values()} + ), + "VCS_PATH": vcsdir, + } + ) + for repo_config in repo_configs.values(): + env.update( + { + f"{repo_config.prefix.upper()}_{key}": value + for key, value in { + "BASE_REPOSITORY": repo_config.base_repository, + "HEAD_REPOSITORY": repo_config.head_repository, + "HEAD_REV": repo_config.head_rev, + "HEAD_REF": repo_config.head_ref, + "REPOSITORY_TYPE": repo_config.type, + "SSH_SECRET_NAME": repo_config.ssh_secret_name, + }.items() + if value is not None + } + ) + if repo_config.ssh_secret_name: + taskdesc["scopes"].append(f"secrets:get:{repo_config.ssh_secret_name}") + + # only some worker platforms have taskcluster-proxy enabled + if task["worker"]["implementation"] in ("docker-worker",): + taskdesc["worker"]["taskcluster-proxy"] = True diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/index_search.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/index_search.py new file mode 100644 index 0000000000..c25946980e --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/index_search.py @@ -0,0 +1,37 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +""" +This transform allows including indexed tasks from other projects in the +current taskgraph. The transform takes a list of indexes, and the optimization +phase will replace the task with the task from the other graph. 
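In practice a task produced by this transform carries only an index list; for example (a sketch with an invented index route, interpolated with the graph parameters as shown in fill_template below):

    # Hypothetical `run` section for an index-search task.
    example_run = {
        "using": "index-search",
        "index-search": [
            "my-project.v2.{project}.revision.{head_rev}.widgets",
        ],
    }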
+""" + + +from voluptuous import Required + +from taskgraph.transforms.base import TransformSequence +from taskgraph.transforms.run import run_task_using +from taskgraph.util.schema import Schema + +transforms = TransformSequence() + +run_task_schema = Schema( + { + Required("using"): "index-search", + Required( + "index-search", + "A list of indexes in decreasing order of priority at which to lookup for this " + "task. This is interpolated with the graph parameters.", + ): [str], + } +) + + +@run_task_using("always-optimized", "index-search", schema=run_task_schema) +def fill_template(config, task, taskdesc): + run = task["run"] + taskdesc["optimization"] = { + "index-search": [index.format(**config.params) for index in run["index-search"]] + } diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/run_task.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/run_task.py new file mode 100644 index 0000000000..c2fbef83b0 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/run_task.py @@ -0,0 +1,231 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +""" +Support for running tasks that are invoked via the `run-task` script. +""" + +import dataclasses +import os + +from voluptuous import Any, Optional, Required + +from taskgraph.transforms.run import run_task_using +from taskgraph.transforms.run.common import support_vcs_checkout +from taskgraph.transforms.task import taskref_or_string +from taskgraph.util import path, taskcluster +from taskgraph.util.schema import Schema + +EXEC_COMMANDS = { + "bash": ["bash", "-cx"], + "powershell": ["powershell.exe", "-ExecutionPolicy", "Bypass"], +} + +run_task_schema = Schema( + { + Required("using"): "run-task", + # if true, add a cache at ~worker/.cache, which is where things like pip + # tend to hide their caches. This cache is never added for level-1 tasks. + # TODO Once bug 1526028 is fixed, this and 'use-caches' should be merged. + Required("cache-dotcache"): bool, + # Whether or not to use caches. + Optional("use-caches"): bool, + # if true (the default), perform a checkout on the worker + Required("checkout"): Any(bool, {str: dict}), + Optional( + "cwd", + description="Path to run command in. If a checkout is present, the path " + "to the checkout will be interpolated with the key `checkout`", + ): str, + # The sparse checkout profile to use. Value is the filename relative to the + # directory where sparse profiles are defined (build/sparse-profiles/). + Required("sparse-profile"): Any(str, None), + # The command arguments to pass to the `run-task` script, after the + # checkout arguments. If a list, it will be passed directly; otherwise + # it will be included in a single argument to the command specified by + # `exec-with`. + Required("command"): Any([taskref_or_string], taskref_or_string), + # What to execute the command with in the event command is a string. + Optional("exec-with"): Any(*list(EXEC_COMMANDS)), + # Command used to invoke the `run-task` script. Can be used if the script + # or Python installation is in a non-standard location on the workers. + Optional("run-task-command"): list, + # Base work directory used to set up the task. + Required("workdir"): str, + # Whether to run as root. 
(defaults to False) + Optional("run-as-root"): bool, + } +) + + +def common_setup(config, task, taskdesc, command): + run = task["run"] + if run["checkout"]: + repo_configs = config.repo_configs + if len(repo_configs) > 1 and run["checkout"] is True: + raise Exception("Must explicitly specify checkouts with multiple repos.") + elif run["checkout"] is not True: + repo_configs = { + repo: dataclasses.replace(repo_configs[repo], **config) + for (repo, config) in run["checkout"].items() + } + + support_vcs_checkout( + config, + task, + taskdesc, + repo_configs=repo_configs, + sparse=bool(run["sparse-profile"]), + ) + + vcs_path = taskdesc["worker"]["env"]["VCS_PATH"] + for repo_config in repo_configs.values(): + checkout_path = path.join(vcs_path, repo_config.path) + command.append(f"--{repo_config.prefix}-checkout={checkout_path}") + + if run["sparse-profile"]: + command.append( + "--{}-sparse-profile=build/sparse-profiles/{}".format( + repo_config.prefix, + run["sparse-profile"], + ) + ) + + if "cwd" in run: + run["cwd"] = path.normpath(run["cwd"].format(checkout=vcs_path)) + elif "cwd" in run and "{checkout}" in run["cwd"]: + raise Exception( + "Found `{{checkout}}` interpolation in `cwd` for task {name} " + "but the task doesn't have a checkout: {cwd}".format( + cwd=run["cwd"], name=task.get("name", task.get("label")) + ) + ) + + if "cwd" in run: + command.extend(("--task-cwd", run["cwd"])) + + taskdesc["worker"].setdefault("env", {})["MOZ_SCM_LEVEL"] = config.params["level"] + + +worker_defaults = { + "cache-dotcache": False, + "checkout": True, + "sparse-profile": None, + "run-as-root": False, +} + + +def script_url(config, script): + if "MOZ_AUTOMATION" in os.environ and "TASK_ID" not in os.environ: + raise Exception("TASK_ID must be defined to use run-task on generic-worker") + task_id = os.environ.get("TASK_ID", "") + # use_proxy = False to avoid having all generic-workers turn on proxy + # Assumes the cluster allows anonymous downloads of public artifacts + tc_url = taskcluster.get_root_url(False) + # TODO: Use util/taskcluster.py:get_artifact_url once hack for Bug 1405889 is removed + return f"{tc_url}/api/queue/v1/task/{task_id}/artifacts/public/{script}" + + +@run_task_using( + "docker-worker", "run-task", schema=run_task_schema, defaults=worker_defaults +) +def docker_worker_run_task(config, task, taskdesc): + run = task["run"] + worker = taskdesc["worker"] = task["worker"] + command = run.pop("run-task-command", ["/usr/local/bin/run-task"]) + common_setup(config, task, taskdesc, command) + + if run.get("cache-dotcache"): + worker["caches"].append( + { + "type": "persistent", + "name": "{project}-dotcache".format(**config.params), + "mount-point": "{workdir}/.cache".format(**run), + "skip-untrusted": True, + } + ) + + run_command = run["command"] + + # dict is for the case of `{'task-reference': str}`. 
+ if isinstance(run_command, str) or isinstance(run_command, dict): + exec_cmd = EXEC_COMMANDS[run.pop("exec-with", "bash")] + run_command = exec_cmd + [run_command] + if run["run-as-root"]: + command.extend(("--user", "root", "--group", "root")) + command.append("--") + command.extend(run_command) + worker["command"] = command + + +@run_task_using( + "generic-worker", "run-task", schema=run_task_schema, defaults=worker_defaults +) +def generic_worker_run_task(config, task, taskdesc): + run = task["run"] + worker = taskdesc["worker"] = task["worker"] + is_win = worker["os"] == "windows" + is_mac = worker["os"] == "macosx" + is_bitbar = worker["os"] == "linux-bitbar" + + command = run.pop("run-task-command", None) + if not command: + if is_win: + command = ["C:/mozilla-build/python3/python3.exe", "run-task"] + elif is_mac: + command = ["/tools/python36/bin/python3", "run-task"] + else: + command = ["./run-task"] + + common_setup(config, task, taskdesc, command) + + worker.setdefault("mounts", []) + if run.get("cache-dotcache"): + worker["mounts"].append( + { + "cache-name": "{project}-dotcache".format(**config.params), + "directory": "{workdir}/.cache".format(**run), + } + ) + worker["mounts"].append( + { + "content": { + "url": script_url(config, "run-task"), + }, + "file": "./run-task", + } + ) + if worker.get("env", {}).get("MOZ_FETCHES"): + worker["mounts"].append( + { + "content": { + "url": script_url(config, "fetch-content"), + }, + "file": "./fetch-content", + } + ) + + run_command = run["command"] + + if isinstance(run_command, str): + if is_win: + run_command = f'"{run_command}"' + exec_cmd = EXEC_COMMANDS[run.pop("exec-with", "bash")] + run_command = exec_cmd + [run_command] + + if run["run-as-root"]: + command.extend(("--user", "root", "--group", "root")) + command.append("--") + if is_bitbar: + # Use the bitbar wrapper script which sets up the device and adb + # environment variables + command.append("/builds/taskcluster/script.py") + command.extend(run_command) + + if is_win: + worker["command"] = [" ".join(command)] + else: + worker["command"] = [ + ["chmod", "+x", "run-task"], + command, + ] diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/toolchain.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/toolchain.py new file mode 100644 index 0000000000..59e66cb973 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/run/toolchain.py @@ -0,0 +1,175 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +""" +Support for running toolchain-building tasks via dedicated scripts +""" + +from voluptuous import ALLOW_EXTRA, Any, Optional, Required + +import taskgraph +from taskgraph.transforms.run import configure_taskdesc_for_run, run_task_using +from taskgraph.transforms.run.common import ( + docker_worker_add_artifacts, + generic_worker_add_artifacts, + get_vcsdir_name, +) +from taskgraph.util.hash import hash_paths +from taskgraph.util.schema import Schema +from taskgraph.util.shell import quote as shell_quote + +CACHE_TYPE = "toolchains.v3" + +toolchain_run_schema = Schema( + { + Required("using"): "toolchain-script", + # The script (in taskcluster/scripts/misc) to run. + Required("script"): str, + # Arguments to pass to the script. + Optional("arguments"): [str], + # Sparse profile to give to checkout using `run-task`. 
If given, + # a filename in `build/sparse-profiles`. Defaults to + # "toolchain-build", i.e., to + # `build/sparse-profiles/toolchain-build`. If `None`, instructs + # `run-task` to not use a sparse profile at all. + Required("sparse-profile"): Any(str, None), + # Paths/patterns pointing to files that influence the outcome of a + # toolchain build. + Optional("resources"): [str], + # Path to the artifact produced by the toolchain task + Required("toolchain-artifact"): str, + Optional( + "toolchain-alias", + description="An alias that can be used instead of the real toolchain task name in " + "fetch stanzas for tasks.", + ): Any(str, [str]), + Optional( + "toolchain-env", + description="Additional env variables to add to the worker when using this toolchain", + ): {str: object}, + # Base work directory used to set up the task. + Required("workdir"): str, + }, + extra=ALLOW_EXTRA, +) + + +def get_digest_data(config, run, taskdesc): + files = list(run.pop("resources", [])) + # The script + files.append("taskcluster/scripts/toolchain/{}".format(run["script"])) + + # Accumulate dependency hashes for index generation. + data = [hash_paths(config.graph_config.vcs_root, files)] + + data.append(taskdesc["attributes"]["toolchain-artifact"]) + + # If the task uses an in-tree docker image, we want it to influence + # the index path as well. Ideally, the content of the docker image itself + # should have an influence, but at the moment, we can't get that + # information here. So use the docker image name as a proxy. Not a lot of + # changes to docker images actually have an impact on the resulting + # toolchain artifact, so we'll just rely on such important changes to be + # accompanied with a docker image name change. + image = taskdesc["worker"].get("docker-image", {}).get("in-tree") + if image: + data.append(image) + + # Likewise script arguments should influence the index. + args = run.get("arguments") + if args: + data.extend(args) + return data + + +def common_toolchain(config, task, taskdesc, is_docker): + run = task["run"] + + worker = taskdesc["worker"] = task["worker"] + worker["chain-of-trust"] = True + + srcdir = get_vcsdir_name(worker["os"]) + + if is_docker: + # If the task doesn't have a docker-image, set a default + worker.setdefault("docker-image", {"in-tree": "toolchain-build"}) + + # Allow the task to specify where artifacts come from, but add + # public/build if it's not there already. + artifacts = worker.setdefault("artifacts", []) + if not any(artifact.get("name") == "public/build" for artifact in artifacts): + if is_docker: + docker_worker_add_artifacts(config, task, taskdesc) + else: + generic_worker_add_artifacts(config, task, taskdesc) + + env = worker["env"] + env.update( + { + "MOZ_BUILD_DATE": config.params["moz_build_date"], + "MOZ_SCM_LEVEL": config.params["level"], + } + ) + + attributes = taskdesc.setdefault("attributes", {}) + attributes["toolchain-artifact"] = run.pop("toolchain-artifact") + if "toolchain-alias" in run: + attributes["toolchain-alias"] = run.pop("toolchain-alias") + if "toolchain-env" in run: + attributes["toolchain-env"] = run.pop("toolchain-env") + + if not taskgraph.fast: + name = taskdesc["label"].replace(f"{config.kind}-", "", 1) + taskdesc["cache"] = { + "type": CACHE_TYPE, + "name": name, + "digest-data": get_digest_data(config, run, taskdesc), + } + + script = run.pop("script") + run["using"] = "run-task" + run["cwd"] = "{checkout}/.." 
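+    # Illustrative sketch, not from the upstream source: for a hypothetical
+    # task with script "build-clang.sh", toolchain-artifact
+    # "public/build/clang.tar.xz", and the default in-tree "toolchain-build"
+    # image, get_digest_data() above returns roughly
+    #
+    #     [
+    #         hash_paths(vcs_root, ["taskcluster/scripts/toolchain/build-clang.sh"]),
+    #         "public/build/clang.tar.xz",
+    #         "toolchain-build",
+    #     ]
+    #
+    # plus any script arguments, so the cached toolchain is only rebuilt when
+    # one of those inputs changes.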
+ + if script.endswith(".ps1"): + run["exec-with"] = "powershell" + + command = [f"{srcdir}/taskcluster/scripts/toolchain/{script}"] + run.pop( + "arguments", [] + ) + + if not is_docker: + # Don't quote the first item in the command because it purposely contains + # an environment variable that is not meant to be quoted. + if len(command) > 1: + command = command[0] + " " + shell_quote(*command[1:]) + else: + command = command[0] + + run["command"] = command + + configure_taskdesc_for_run(config, task, taskdesc, worker["implementation"]) + + +toolchain_defaults = { + "sparse-profile": "toolchain-build", +} + + +@run_task_using( + "docker-worker", + "toolchain-script", + schema=toolchain_run_schema, + defaults=toolchain_defaults, +) +def docker_worker_toolchain(config, task, taskdesc): + common_toolchain(config, task, taskdesc, is_docker=True) + + +@run_task_using( + "generic-worker", + "toolchain-script", + schema=toolchain_run_schema, + defaults=toolchain_defaults, +) +def generic_worker_toolchain(config, task, taskdesc): + common_toolchain(config, task, taskdesc, is_docker=False) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py index c55de78513..168b8c00c9 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py @@ -110,7 +110,7 @@ task_description_schema = Schema( # section of the kind (delimited by "-") all smooshed together. # Eg: "test" becomes "T", "docker-image" becomes "DI", etc. "symbol": Optional(str), - # the job kind + # the task kind # If "build" or "test" is found in the kind name, this defaults # to the appropriate value. Otherwise, defaults to "other" "kind": Optional(Any("build", "test", "other")), @@ -129,7 +129,7 @@ task_description_schema = Schema( Optional("index"): { # the name of the product this build produces "product": str, - # the names to use for this job in the TaskCluster index + # the names to use for this task in the TaskCluster index "job-name": str, # Type of gecko v2 index to use "type": str, @@ -179,7 +179,7 @@ task_description_schema = Schema( # be substituted in this string: # {level} -- the scm level of this push "worker-type": str, - # Whether the job should use sccache compiler caching. + # Whether the task should use sccache compiler caching. Required("needs-sccache"): bool, # information specific to the worker implementation that will run this task Optional("worker"): { @@ -196,7 +196,7 @@ TC_TREEHERDER_SCHEMA_URL = ( UNKNOWN_GROUP_NAME = ( - "Treeherder group {} (from {}) has no name; " "add it to taskcluster/ci/config.yml" + "Treeherder group {} (from {}) has no name; " "add it to taskcluster/config.yml" ) V2_ROUTE_TEMPLATES = [ @@ -266,7 +266,7 @@ def index_builder(name): UNSUPPORTED_INDEX_PRODUCT_ERROR = """\ The index product {product} is not in the list of configured products in -`taskcluster/ci/config.yml'. +`taskcluster/config.yml'. 
""" @@ -317,7 +317,7 @@ def verify_index(config, index): { # only one type is supported by any of the workers right now "type": "persistent", - # name of the cache, allowing re-use by subsequent tasks naming the + # name of the cache, allowing reuse by subsequent tasks naming the # same cache "name": str, # location in the task image where the cache will be mounted @@ -364,6 +364,9 @@ def build_docker_worker_payload(config, task, task_def): if "in-tree" in image: name = image["in-tree"] docker_image_task = "build-docker-image-" + image["in-tree"] + assert "docker-image" not in task.get( + "dependencies", () + ), "docker-image key in dependencies object is reserved" task.setdefault("dependencies", {})["docker-image"] = docker_image_task image = { @@ -487,19 +490,19 @@ def build_docker_worker_payload(config, task, task_def): # run-task knows how to validate caches. # - # To help ensure new run-task features and bug fixes don't interfere - # with existing caches, we seed the hash of run-task into cache names. - # So, any time run-task changes, we should get a fresh set of caches. - # This means run-task can make changes to cache interaction at any time - # without regards for backwards or future compatibility. + # To help ensure new run-task features and bug fixes, as well as the + # versions of tools such as mercurial or git, don't interfere with + # existing caches, we seed the underlying docker-image task id into + # cache names, for tasks using in-tree Docker images. # # But this mechanism only works for in-tree Docker images that are built # with the current run-task! For out-of-tree Docker images, we have no # way of knowing their content of run-task. So, in addition to varying # cache names by the contents of run-task, we also take the Docker image - # name into consideration. This means that different Docker images will - # never share the same cache. This is a bit unfortunate. But it is the - # safest thing to do. Fortunately, most images are defined in-tree. + # name into consideration. + # + # This means that different Docker images will never share the same + # cache. This is a bit unfortunate, but is the safest thing to do. # # For out-of-tree Docker images, we don't strictly need to incorporate # the run-task content into the cache name. However, doing so preserves @@ -520,6 +523,8 @@ def build_docker_worker_payload(config, task, task_def): out_of_tree_image.encode("utf-8") ).hexdigest() suffix += name_hash[0:12] + else: + suffix += "-" else: suffix = cache_version @@ -539,13 +544,15 @@ def build_docker_worker_payload(config, task, task_def): suffix=suffix, ) caches[name] = cache["mount-point"] - task_def["scopes"].append("docker-worker:cache:%s" % name) + task_def["scopes"].append( + {"task-reference": "docker-worker:cache:%s" % name} + ) # Assertion: only run-task is interested in this. if run_task: payload["env"]["TASKCLUSTER_CACHES"] = ";".join(sorted(caches.values())) - payload["cache"] = caches + payload["cache"] = {"task-reference": caches} # And send down volumes information to run-task as well. 
if run_task and worker.get("volumes"): @@ -752,7 +759,7 @@ def build_generic_worker_payload(config, task, task_def): schema={ # the maximum time to run, in seconds Required("max-run-time"): int, - # locale key, if this is a locale beetmover job + # locale key, if this is a locale beetmover task Optional("locale"): str, Optional("partner-public"): bool, Required("release-properties"): { @@ -1075,7 +1082,11 @@ def build_task(config, tasks): extra["parent"] = os.environ.get("TASK_ID", "") if "expires-after" not in task: - task["expires-after"] = "28 days" if config.params.is_try() else "1 year" + task["expires-after"] = ( + config.graph_config._config.get("task-expires-after", "28 days") + if config.params.is_try() + else "1 year" + ) if "deadline-after" not in task: if "task-deadline-after" in config.graph_config: @@ -1142,9 +1153,9 @@ def build_task(config, tasks): config.params["project"] + th_project_suffix, branch_rev ) ) - task_def["metadata"]["description"] += " ([Treeherder push]({}))".format( - th_push_link - ) + task_def["metadata"][ + "description" + ] += f" ([Treeherder push]({th_push_link}))" # add the payload and adjust anything else as required (e.g., scopes) payload_builders[task["worker"]["implementation"]].builder( @@ -1288,7 +1299,7 @@ def check_caches_are_volumes(task): Caches and volumes are the only filesystem locations whose content isn't defined by the Docker image itself. Some caches are optional - depending on the job environment. We want paths that are potentially + depending on the task environment. We want paths that are potentially caches to have as similar behavior regardless of whether a cache is used. To help enforce this, we require that all paths used as caches to be declared as Docker volumes. This check won't catch all offenders. @@ -1343,7 +1354,9 @@ def check_run_task_caches(config, tasks): main_command = command[0] if isinstance(command[0], str) else "" run_task = main_command.endswith("run-task") - for cache in payload.get("cache", {}): + for cache in payload.get("cache", {}).get( + "task-reference", payload.get("cache", {}) + ): if not cache.startswith(cache_prefix): raise Exception( "{} is using a cache ({}) which is not appropriate " @@ -1364,7 +1377,7 @@ def check_run_task_caches(config, tasks): "cache name" ) - if not cache.endswith(suffix): + if suffix not in cache: raise Exception( f"{task['label']} is using a cache ({cache}) reserved for run-task " "but the cache name is not dependent on the contents " diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task_context.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task_context.py index 5c7ed6af80..bd36d827aa 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task_context.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task_context.py @@ -81,9 +81,9 @@ transforms.add_validate(SCHEMA) @transforms.add -def render_task(config, jobs): - for job in jobs: - sub_config = job.pop("task-context") +def render_task(config, tasks): + for task in tasks: + sub_config = task.pop("task-context") params_context = {} for var, path in sub_config.pop("from-parameters", {}).items(): if isinstance(path, str): @@ -111,11 +111,11 @@ def render_task(config, jobs): # Now that we have our combined context, we can substitute. for field in fields: - container, subfield = job, field + container, subfield = task, field while "." 
in subfield:
                f, subfield = subfield.split(".", 1)
                container = container[f]
            container[subfield] = substitute(container[subfield], **subs)
 
-        yield job
+        yield task
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/archive.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/archive.py
index ee59ba4548..261a031038 100644
--- a/third_party/python/taskcluster_taskgraph/taskgraph/util/archive.py
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/archive.py
@@ -12,6 +12,40 @@ import tarfile
 
 DEFAULT_MTIME = 1451606400
 
 
+# Python 3.9 contains this change:
+# https://github.com/python/cpython/commit/674935b8caf33e47c78f1b8e197b1b77a04992d2
+# which changes the output of tar creation compared to earlier versions.
+# This code is used to generate tar files that must be deterministic across
+# versions of Python: it is part of computing the hash of docker images, which
+# needs to be identical between CI (which uses Python 3.8) and developer
+# environments (which run arbitrary versions of Python, at this point most
+# probably more recent than 3.9).
+# We therefore subclass TarInfo so that, when used on Python >= 3.9, it
+# reproduces the behavior from Python < 3.9. It works as follows:
+# - Python >= 3.9 behaves the same as Python < 3.9 when the type encoded in
+#   the tarinfo is CHRTYPE or BLKTYPE.
+# - the value of the type is only compared when choosing which behavior to
+#   take.
+# - we replace the type with an object carrying the same value (so using the
+#   value changes nothing) that also compares equal to CHRTYPE, making the
+#   condition that enables the old behavior hold.
+class HackedType(bytes):
+    def __eq__(self, other):
+        if other == tarfile.CHRTYPE:
+            return True
+        return self == other
+
+
+class TarInfo(tarfile.TarInfo):
+    @staticmethod
+    def _create_header(info, format, encoding, errors):
+        info["type"] = HackedType(info["type"])
+        # ignore type checking because it looks like pyright complains because we're calling a
+        # non-public method
+        return tarfile.TarInfo._create_header(info, format, encoding, errors)  # type: ignore
+
+
 def create_tar_from_files(fp, files):
     """Create a tar file deterministically.
 
@@ -25,15 +59,23 @@ def create_tar_from_files(fp, files):
 
     FUTURE accept a filename argument (or create APIs to write files)
     """
-    with tarfile.open(name="", mode="w", fileobj=fp, dereference=True) as tf:
+    # The format is explicitly set to tarfile.GNU_FORMAT, because the default
+    # format changed in Python 3.8.
+    with tarfile.open(
+        name="", mode="w", fileobj=fp, dereference=True, format=tarfile.GNU_FORMAT
+    ) as tf:
         for archive_path, f in sorted(files.items()):
             if isinstance(f, str):
-                mode = os.stat(f).st_mode
+                s = os.stat(f)
+                mode = s.st_mode
+                size = s.st_size
                 f = open(f, "rb")
             else:
                 mode = 0o0644
+                size = len(f.read())
+                f.seek(0)
 
-            ti = tarfile.TarInfo(archive_path)
+            ti = TarInfo(archive_path)
             ti.mode = mode
             ti.type = tarfile.REGTYPE
@@ -56,9 +98,7 @@ def create_tar_from_files(fp, files):
             # Set mtime to a constant value.
             ti.mtime = DEFAULT_MTIME
 
-            f.seek(0, 2)
-            ti.size = f.tell()
-            f.seek(0, 0)
+            ti.size = size
 
             # tarfile wants to pass a size argument to read(). So just
             # wrap/buffer in a proper file object interface.
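             # Illustrative sketch, not from the upstream source: with the
             # archive format, mtime, and entry ordering all pinned, two calls
             # such as
             #
             #     buf1, buf2 = io.BytesIO(), io.BytesIO()
             #     create_tar_from_files(buf1, {"a.txt": "a.txt"})
             #     create_tar_from_files(buf2, {"a.txt": "a.txt"})
             #
             # produce byte-identical archives on Python 3.8 and 3.12 alike, so
             # hashlib.sha256(buf1.getvalue()).hexdigest() is stable across
             # environments -- which is what the docker image hashes rely on.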
tf.addfile(ti, f) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/cached_tasks.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/cached_tasks.py index 974b114902..1a3baad5be 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/cached_tasks.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/cached_tasks.py @@ -7,6 +7,7 @@ import hashlib import time TARGET_CACHE_INDEX = "{cache_prefix}.cache.level-{level}.{type}.{name}.hash.{digest}" +TARGET_PR_CACHE_INDEX = "{cache_prefix}.cache.pr.{type}.{name}.hash.{digest}" EXTRA_CACHE_INDEXES = [ "{cache_prefix}.cache.level-{level}.{type}.{name}.latest", "{cache_prefix}.cache.level-{level}.{type}.{name}.pushdate.{build_date_long}", @@ -53,31 +54,45 @@ def add_optimization( # We'll try to find a cached version of the toolchain at levels above and # including the current level, starting at the highest level. - # Chain-of-trust doesn't handle tasks not built on the tip of a - # pull-request, so don't look for level-1 tasks if building a pull-request. index_routes = [] min_level = int(config.params["level"]) - if config.params["tasks_for"] == "github-pull-request": - min_level = max(min_level, 3) for level in reversed(range(min_level, 4)): subs["level"] = level index_routes.append(TARGET_CACHE_INDEX.format(**subs)) - taskdesc["optimization"] = {"index-search": index_routes} + # Pull requests use a different target cache index route. This way we can + # be confident they won't be used by anything other than the pull request + # that created the cache in the first place. + if config.params["tasks_for"].startswith( + "github-pull-request" + ) and config.graph_config["taskgraph"].get("cache-pull-requests", True): + subs["head_ref"] = config.params["head_ref"] + if subs["head_ref"].startswith("refs/heads/"): + subs["head_ref"] = subs["head_ref"][11:] + index_routes.append(TARGET_PR_CACHE_INDEX.format(**subs)) + + taskdesc["optimization"] = {"index-search": index_routes} # ... and cache at the lowest level. subs["level"] = config.params["level"] - taskdesc.setdefault("routes", []).append( - f"index.{TARGET_CACHE_INDEX.format(**subs)}" - ) - # ... and add some extra routes for humans - subs["build_date_long"] = time.strftime( - "%Y.%m.%d.%Y%m%d%H%M%S", time.gmtime(config.params["build_date"]) - ) - taskdesc["routes"].extend( - [f"index.{route.format(**subs)}" for route in EXTRA_CACHE_INDEXES] - ) + if config.params["tasks_for"].startswith("github-pull-request"): + if config.graph_config["taskgraph"].get("cache-pull-requests", True): + taskdesc.setdefault("routes", []).append( + f"index.{TARGET_PR_CACHE_INDEX.format(**subs)}" + ) + else: + taskdesc.setdefault("routes", []).append( + f"index.{TARGET_CACHE_INDEX.format(**subs)}" + ) + + # ... and add some extra routes for humans + subs["build_date_long"] = time.strftime( + "%Y.%m.%d.%Y%m%d%H%M%S", time.gmtime(config.params["build_date"]) + ) + taskdesc["routes"].extend( + [f"index.{route.format(**subs)}" for route in EXTRA_CACHE_INDEXES] + ) taskdesc["attributes"]["cached_task"] = { "type": cache_type, diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/decision.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/decision.py deleted file mode 100644 index d0e1e1079f..0000000000 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/decision.py +++ /dev/null @@ -1,79 +0,0 @@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. 
If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-"""
-Utilities for generating a decision task from :file:`.taskcluster.yml`.
-"""
-
-
-import os
-
-import jsone
-import slugid
-import yaml
-
-from .templates import merge
-from .time import current_json_time
-from .vcs import find_hg_revision_push_info
-
-
-def make_decision_task(params, root, context, head_rev=None):
-    """Generate a basic decision task, based on the root .taskcluster.yml"""
-    with open(os.path.join(root, ".taskcluster.yml"), "rb") as f:
-        taskcluster_yml = yaml.safe_load(f)
-
-    if not head_rev:
-        head_rev = params["head_rev"]
-
-    if params["repository_type"] == "hg":
-        pushlog = find_hg_revision_push_info(params["repository_url"], head_rev)
-
-        hg_push_context = {
-            "pushlog_id": pushlog["pushid"],
-            "pushdate": pushlog["pushdate"],
-            "owner": pushlog["user"],
-        }
-    else:
-        hg_push_context = {}
-
-    slugids = {}
-
-    def as_slugid(name):
-        # https://github.com/taskcluster/json-e/issues/164
-        name = name[0]
-        if name not in slugids:
-            slugids[name] = slugid.nice()
-        return slugids[name]
-
-    # provide a similar JSON-e context to what mozilla-taskcluster provides:
-    # https://docs.taskcluster.net/reference/integrations/mozilla-taskcluster/docs/taskcluster-yml
-    # but with a different tasks_for and an extra `cron` section
-    context = merge(
-        {
-            "repository": {
-                "url": params["repository_url"],
-                "project": params["project"],
-                "level": params["level"],
-            },
-            "push": merge(
-                {
-                    "revision": params["head_rev"],
-                    # remainder are fake values, but the decision task expects them anyway
-                    "comment": " ",
-                },
-                hg_push_context,
-            ),
-            "now": current_json_time(),
-            "as_slugid": as_slugid,
-        },
-        context,
-    )
-
-    rendered = jsone.render(taskcluster_yml, context)
-    if len(rendered["tasks"]) != 1:
-        raise Exception("Expected .taskcluster.yml to only produce one cron task")
-    task = rendered["tasks"][0]
-
-    task_id = task.pop("taskId")
-    return (task_id, task)
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/docker.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/docker.py
index c37a69f98f..13815381ed 100644
--- a/third_party/python/taskcluster_taskgraph/taskgraph/util/docker.py
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/docker.py
@@ -7,6 +7,7 @@ import hashlib
 import io
 import os
 import re
+from typing import Optional
 
 from taskgraph.util.archive import create_tar_gz_from_files
 from taskgraph.util.memoize import memoize
@@ -16,17 +17,27 @@ IMAGE_DIR = os.path.join(".", "taskcluster", "docker")
 
 from .yaml import load_yaml
 
 
-def docker_image(name, by_tag=False):
+def docker_image(name: str, by_tag: bool = False) -> Optional[str]:
     """
     Resolve in-tree prebuilt docker image to ``<registry>/<name>@sha256:<digest>``,
     or ``<registry>/<name>:<version>`` if `by_tag` is `True`.
+
+    Args:
+        name (str): The image to resolve.
+        by_tag (bool): If True, will apply a tag based on VERSION file.
+            Otherwise will apply a hash based on HASH file.
+    Returns:
+        Optional[str]: Image if it can be resolved, otherwise None.
""" try: with open(os.path.join(IMAGE_DIR, name, "REGISTRY")) as f: registry = f.read().strip() except OSError: - with open(os.path.join(IMAGE_DIR, "REGISTRY")) as f: - registry = f.read().strip() + try: + with open(os.path.join(IMAGE_DIR, "REGISTRY")) as f: + registry = f.read().strip() + except OSError: + return None if not by_tag: hashfile = os.path.join(IMAGE_DIR, name, "HASH") @@ -34,7 +45,7 @@ def docker_image(name, by_tag=False): with open(hashfile) as f: return f"{registry}/{name}@{f.read().strip()}" except OSError: - raise Exception(f"Failed to read HASH file {hashfile}") + return None try: with open(os.path.join(IMAGE_DIR, name, "VERSION")) as f: @@ -197,7 +208,7 @@ def stream_context_tar(topsrcdir, context_dir, out_file, image_name=None, args=N @memoize def image_paths(): """Return a map of image name to paths containing their Dockerfile.""" - config = load_yaml("taskcluster", "ci", "docker-image", "kind.yml") + config = load_yaml("taskcluster", "kinds", "docker-image", "kind.yml") return { k: os.path.join(IMAGE_DIR, v.get("definition", k)) for k, v in config["tasks"].items() diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py index 5d884fc318..d42b2ecef9 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py @@ -39,10 +39,7 @@ def hash_paths(base_path, patterns): raise Exception("%s did not match anything" % pattern) for path in sorted(found): h.update( - "{} {}\n".format( - hash_path(mozpath.abspath(mozpath.join(base_path, path))), - mozpath.normsep(path), - ).encode("utf-8") + f"{hash_path(mozpath.abspath(mozpath.join(base_path, path)))} {mozpath.normsep(path)}\n".encode() ) return h.hexdigest() @@ -55,4 +52,8 @@ def _find_matching_files(base_path, pattern): @memoize def _get_all_files(base_path): - return [str(path) for path in Path(base_path).rglob("*") if path.is_file()] + return [ + mozpath.normsep(str(path)) + for path in Path(base_path).rglob("*") + if path.is_file() + ] diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/keyed_by.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/keyed_by.py index 9b0c5a44fb..00c84ba980 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/keyed_by.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/keyed_by.py @@ -66,8 +66,8 @@ def evaluate_keyed_by( # Error out when only 'default' is specified as only alternatives, # because we don't need to by-{keyed_by} there. 
raise Exception( - "Keyed-by '{}' unnecessary with only value 'default' " - "found, when determining item {}".format(keyed_by, item_name) + f"Keyed-by '{keyed_by}' unnecessary with only value 'default' " + f"found, when determining item {item_name}" ) if key is None: @@ -76,22 +76,20 @@ def evaluate_keyed_by( continue else: raise Exception( - "No attribute {} and no value for 'default' found " - "while determining item {}".format(keyed_by, item_name) + f"No attribute {keyed_by} and no value for 'default' found " + f"while determining item {item_name}" ) matches = keymatch(alternatives, key) if enforce_single_match and len(matches) > 1: raise Exception( - "Multiple matching values for {} {!r} found while " - "determining item {}".format(keyed_by, key, item_name) + f"Multiple matching values for {keyed_by} {key!r} found while " + f"determining item {item_name}" ) elif matches: value = matches[0] continue raise Exception( - "No {} matching {!r} nor 'default' found while determining item {}".format( - keyed_by, key, item_name - ) + f"No {keyed_by} matching {key!r} nor 'default' found while determining item {item_name}" ) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/memoize.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/memoize.py index 56b513e74c..a4bc50cc26 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/memoize.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/memoize.py @@ -2,39 +2,6 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. -# Imported from -# https://searchfox.org/mozilla-central/rev/c3ebaf6de2d481c262c04bb9657eaf76bf47e2ac/python/mozbuild/mozbuild/util.py#923-949 - - import functools - -class memoize(dict): - """A decorator to memoize the results of function calls depending - on its arguments. - Both functions and instance methods are handled, although in the - instance method case, the results are cache in the instance itself. 
- """ - - def __init__(self, func): - self.func = func - functools.update_wrapper(self, func) - - def __call__(self, *args): - if args not in self: - self[args] = self.func(*args) - return self[args] - - def method_call(self, instance, *args): - name = "_%s" % self.func.__name__ - if not hasattr(instance, name): - setattr(instance, name, {}) - cache = getattr(instance, name) - if args not in cache: - cache[args] = self.func(instance, *args) - return cache[args] - - def __get__(self, instance, cls): - return functools.update_wrapper( - functools.partial(self.method_call, instance), self.func - ) +memoize = functools.lru_cache(maxsize=None) # backwards compatibility shim diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/parameterization.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/parameterization.py index 6233a98a40..1973f6f7df 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/parameterization.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/parameterization.py @@ -20,6 +20,12 @@ def _recurse(val, param_fns): if len(val) == 1: for param_key, param_fn in param_fns.items(): if set(val.keys()) == {param_key}: + if isinstance(val[param_key], dict): + # handle `{"task-reference": {"": "bar"}}` + return { + param_fn(key): recurse(v) + for key, v in val[param_key].items() + } return param_fn(val[param_key]) return {k: recurse(v) for k, v in val.items()} else: @@ -74,17 +80,14 @@ def resolve_task_references(label, task_def, task_id, decision_task_id, dependen task_id = dependencies[dependency] except KeyError: raise KeyError( - "task '{}' has no dependency named '{}'".format( - label, dependency - ) + f"task '{label}' has no dependency named '{dependency}'" ) - assert artifact_name.startswith( - "public/" - ), "artifact-reference only supports public artifacts, not `{}`".format( - artifact_name - ) - return get_artifact_url(task_id, artifact_name) + use_proxy = False + if not artifact_name.startswith("public/"): + use_proxy = True + + return get_artifact_url(task_id, artifact_name, use_proxy=use_proxy) return ARTIFACT_REFERENCE_PATTERN.sub(repl, val) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/schema.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/schema.py index 3989f71182..02e79a3a27 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/schema.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/schema.py @@ -74,7 +74,7 @@ def resolve_keyed_by( For example, given item:: - job: + task: test-platform: linux128 chunks: by-test-platform: @@ -82,10 +82,10 @@ def resolve_keyed_by( win.*: 6 default: 12 - a call to `resolve_keyed_by(item, 'job.chunks', item['thing-name'])` + a call to `resolve_keyed_by(item, 'task.chunks', item['thing-name'])` would mutate item in-place to:: - job: + task: test-platform: linux128 chunks: 12 @@ -182,7 +182,7 @@ def check_schema(schema): if not identifier_re.match(k) and not excepted(path): raise RuntimeError( "YAML schemas should use dashed lower-case identifiers, " - "not {!r} @ {}".format(k, path) + f"not {k!r} @ {path}" ) elif isinstance(k, (voluptuous.Optional, voluptuous.Required)): check_identifier(path, k.schema) @@ -191,9 +191,7 @@ def check_schema(schema): check_identifier(path, v) elif not excepted(path): raise RuntimeError( - "Unexpected type in YAML schema: {} @ {}".format( - type(k).__name__, path - ) + f"Unexpected type in YAML schema: {type(k).__name__} @ {path}" ) if isinstance(sch, collections.abc.Mapping): diff --git 
a/third_party/python/taskcluster_taskgraph/taskgraph/util/set_name.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/set_name.py new file mode 100644 index 0000000000..4c27a9cca1 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/set_name.py @@ -0,0 +1,34 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# Define a collection of set_name functions +# Note: this is stored here instead of where it is used in the `from_deps` +# transform to give consumers a chance to register their own `set_name` +# handlers before the `from_deps` schema is created. +SET_NAME_MAP = {} + + +def set_name(name, schema=None): + def wrapper(func): + assert ( + name not in SET_NAME_MAP + ), f"duplicate set_name function name {name} ({func} and {SET_NAME_MAP[name]})" + SET_NAME_MAP[name] = func + func.schema = schema + return func + + return wrapper + + +@set_name("strip-kind") +def set_name_strip_kind(config, tasks, primary_dep, primary_kind): + if primary_dep.label.startswith(primary_kind): + return primary_dep.label[len(primary_kind) + 1 :] + else: + return primary_dep.label + + +@set_name("retain-kind") +def set_name_retain_kind(config, tasks, primary_dep, primary_kind): + return primary_dep.label diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/shell.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/shell.py index d695767f05..16b71b7d6a 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/shell.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/shell.py @@ -14,7 +14,7 @@ def _quote(s): As a special case, if given an int, returns a string containing the int, not enclosed in quotes. """ - if type(s) == int: + if isinstance(s, int): return "%d" % s # Empty strings need to be quoted to have any significance diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/taskcluster.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/taskcluster.py index a830a473b3..b467e98a97 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/taskcluster.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/taskcluster.py @@ -3,10 +3,12 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. +import copy import datetime import functools import logging import os +from typing import Dict, List, Union import requests import taskcluster_urls as liburls @@ -53,9 +55,11 @@ def get_root_url(use_proxy): logger.debug( "Running in Taskcluster instance {}{}".format( os.environ["TASKCLUSTER_ROOT_URL"], - " with taskcluster-proxy" - if "TASKCLUSTER_PROXY_URL" in os.environ - else "", + ( + " with taskcluster-proxy" + if "TASKCLUSTER_PROXY_URL" in os.environ + else "" + ), ) ) return liburls.normalize_root_url(os.environ["TASKCLUSTER_ROOT_URL"]) @@ -136,22 +140,9 @@ def _handle_artifact(path, response): def get_artifact_url(task_id, path, use_proxy=False): artifact_tmpl = liburls.api( - get_root_url(False), "queue", "v1", "task/{}/artifacts/{}" + get_root_url(use_proxy), "queue", "v1", "task/{}/artifacts/{}" ) - data = artifact_tmpl.format(task_id, path) - if use_proxy: - # Until Bug 1405889 is deployed, we can't download directly - # from the taskcluster-proxy. Work around by using the /bewit - # endpoint instead. - # The bewit URL is the body of a 303 redirect, which we don't - # want to follow (which fetches a potentially large resource). 
- response = _do_request( - os.environ["TASKCLUSTER_PROXY_URL"] + "/bewit", - data=data, - allow_redirects=False, - ) - return response.text - return data + return artifact_tmpl.format(task_id, path) def get_artifact(task_id, path, use_proxy=False): @@ -244,6 +235,7 @@ def get_task_url(task_id, use_proxy=False): return task_tmpl.format(task_id) +@memoize def get_task_definition(task_id, use_proxy=False): response = _do_request(get_task_url(task_id, use_proxy)) return response.json() @@ -327,11 +319,7 @@ def get_purge_cache_url(provisioner_id, worker_type, use_proxy=False): def purge_cache(provisioner_id, worker_type, cache_name, use_proxy=False): """Requests a cache purge from the purge-caches service.""" if testing: - logger.info( - "Would have purged {}/{}/{}.".format( - provisioner_id, worker_type, cache_name - ) - ) + logger.info(f"Would have purged {provisioner_id}/{worker_type}/{cache_name}.") else: logger.info(f"Purging {provisioner_id}/{worker_type}/{cache_name}.") purge_cache_url = get_purge_cache_url(provisioner_id, worker_type, use_proxy) @@ -371,3 +359,40 @@ def list_task_group_incomplete_tasks(task_group_id): params = {"continuationToken": resp.get("continuationToken")} else: break + + +@memoize +def _get_deps(task_ids, use_proxy): + upstream_tasks = {} + for task_id in task_ids: + task_def = get_task_definition(task_id, use_proxy) + upstream_tasks[task_def["metadata"]["name"]] = task_id + + upstream_tasks.update(_get_deps(tuple(task_def["dependencies"]), use_proxy)) + + return upstream_tasks + + +def get_ancestors( + task_ids: Union[List[str], str], use_proxy: bool = False +) -> Dict[str, str]: + """Gets the ancestor tasks of the given task_ids as a dictionary of label -> taskid. + + Args: + task_ids (str or [str]): A single task id or a list of task ids to find the ancestors of. + use_proxy (bool): See get_root_url. + + Returns: + dict: A dict whose keys are task labels and values are task ids. 
+ """ + upstream_tasks: Dict[str, str] = {} + + if isinstance(task_ids, str): + task_ids = [task_ids] + + for task_id in task_ids: + task_def = get_task_definition(task_id, use_proxy) + + upstream_tasks.update(_get_deps(tuple(task_def["dependencies"]), use_proxy)) + + return copy.deepcopy(upstream_tasks) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/time.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/time.py index e511978b5f..6639e5dddd 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/time.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/time.py @@ -73,9 +73,7 @@ def value_of(input_str): if unit not in ALIASES: raise UnknownTimeMeasurement( - "{} is not a valid time measure use one of {}".format( - unit, sorted(ALIASES.keys()) - ) + f"{unit} is not a valid time measure use one of {sorted(ALIASES.keys())}" ) return ALIASES[unit](value) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py index cff5f286cc..6bb6dbd137 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py @@ -42,22 +42,25 @@ def replace_group(treeherder_symbol, new_group): return join_symbol(new_group, symbol) -def inherit_treeherder_from_dep(job, dep_job): - """Inherit treeherder defaults from dep_job""" - treeherder = job.get("treeherder", {}) +def inherit_treeherder_from_dep(task, dep_task): + """Inherit treeherder defaults from dep_task""" + treeherder = task.get("treeherder", {}) dep_th_platform = ( - dep_job.task.get("extra", {}) + dep_task.task.get("extra", {}) .get("treeherder", {}) .get("machine", {}) .get("platform", "") ) dep_th_collection = list( - dep_job.task.get("extra", {}).get("treeherder", {}).get("collection", {}).keys() + dep_task.task.get("extra", {}) + .get("treeherder", {}) + .get("collection", {}) + .keys() )[0] treeherder.setdefault("platform", f"{dep_th_platform}/{dep_th_collection}") treeherder.setdefault( - "tier", dep_job.task.get("extra", {}).get("treeherder", {}).get("tier", 1) + "tier", dep_task.task.get("extra", {}).get("treeherder", {}).get("tier", 1) ) # Does not set symbol treeherder.setdefault("kind", "build") diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py index 2d967d2645..c2fd0d3236 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py @@ -10,9 +10,6 @@ import subprocess from abc import ABC, abstractmethod, abstractproperty from shutil import which -import requests -from redo import retry - from taskgraph.util.path import ancestors PUSHLOG_TMPL = "{}/json-pushes?version=2&changeset={}&tipsonly=1&full=1" @@ -21,7 +18,7 @@ logger = logging.getLogger(__name__) class Repository(ABC): - # Both mercurial and git use sha1 as revision idenfiers. Luckily, both define + # Both mercurial and git use sha1 as revision identifiers. Luckily, both define # the same value as the null revision. 
# # https://github.com/git/git/blob/dc04167d378fb29d30e1647ff6ff51dd182bc9a3/t/oid-info/hash-info#L7 @@ -519,34 +516,3 @@ def get_repository(path): return GitRepository(path) raise RuntimeError("Current directory is neither a git or hg repository") - - -def find_hg_revision_push_info(repository, revision): - """Given the parameters for this action and a revision, find the - pushlog_id of the revision.""" - pushlog_url = PUSHLOG_TMPL.format(repository, revision) - - def query_pushlog(url): - r = requests.get(pushlog_url, timeout=60) - r.raise_for_status() - return r - - r = retry( - query_pushlog, - args=(pushlog_url,), - attempts=5, - sleeptime=10, - ) - pushes = r.json()["pushes"] - if len(pushes) != 1: - raise RuntimeError( - "Unable to find a single pushlog_id for {} revision {}: {}".format( - repository, revision, pushes - ) - ) - pushid = list(pushes.keys())[0] - return { - "pushdate": pushes[pushid]["date"], - "pushid": pushid, - "user": pushes[pushid]["user"], - } diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py index e6705c16cf..b5bb0889ae 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py @@ -134,10 +134,8 @@ def verify_task_graph_symbol(task, taskgraph, scratch_pad, graph_config, paramet collection_keys = tuple(sorted(treeherder.get("collection", {}).keys())) if len(collection_keys) != 1: raise Exception( - "Task {} can't be in multiple treeherder collections " - "(the part of the platform after `/`): {}".format( - task.label, collection_keys - ) + f"Task {task.label} can't be in multiple treeherder collections " + f"(the part of the platform after `/`): {collection_keys}" ) platform = treeherder.get("machine", {}).get("platform") group_symbol = treeherder.get("groupSymbol") @@ -175,9 +173,7 @@ def verify_trust_domain_v2_routes( if route.startswith(route_prefix): if route in scratch_pad: raise Exception( - "conflict between {}:{} for route: {}".format( - task.label, scratch_pad[route], route - ) + f"conflict between {task.label}:{scratch_pad[route]} for route: {route}" ) else: scratch_pad[route] = task.label @@ -206,9 +202,7 @@ def verify_routes_notification_filters( route_filter = route.split(".")[-1] if route_filter not in valid_filters: raise Exception( - "{} has invalid notification filter ({})".format( - task.label, route_filter - ) + f"{task.label} has invalid notification filter ({route_filter})" ) @@ -235,12 +229,7 @@ def verify_dependency_tiers(task, taskgraph, scratch_pad, graph_config, paramete continue if tier < tiers[d]: raise Exception( - "{} (tier {}) cannot depend on {} (tier {})".format( - task.label, - printable_tier(tier), - d, - printable_tier(tiers[d]), - ) + f"{task.label} (tier {printable_tier(tier)}) cannot depend on {d} (tier {printable_tier(tiers[d])})" ) @@ -262,11 +251,7 @@ def verify_toolchain_alias(task, taskgraph, scratch_pad, graph_config, parameter if key in scratch_pad: raise Exception( "Duplicate toolchain-alias in tasks " - "`{}`and `{}`: {}".format( - task.label, - scratch_pad[key], - key, - ) + f"`{task.label}`and `{scratch_pad[key]}`: {key}" ) else: scratch_pad[key] = task.label diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/yaml.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/yaml.py index 141c7a16d3..a733521527 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/yaml.py +++ 
b/third_party/python/taskcluster_taskgraph/taskgraph/util/yaml.py @@ -5,7 +5,10 @@ import os -from yaml.loader import SafeLoader +try: + from yaml import CSafeLoader as SafeLoader +except ImportError: + from yaml import SafeLoader class UnicodeLoader(SafeLoader): -- cgit v1.2.3
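
A minimal sketch of the loader-selection pattern in the final yaml.py hunk
(assuming only that PyYAML is installed; `load_stream` is a hypothetical helper
name, not part of the module): the C-accelerated CSafeLoader is preferred when
PyYAML was built against libyaml, with a silent fallback to the pure-Python
SafeLoader.

    import yaml

    try:
        # Only available when PyYAML is compiled with libyaml bindings.
        from yaml import CSafeLoader as SafeLoader
    except ImportError:
        from yaml import SafeLoader

    def load_stream(stream):
        # Both loaders produce identical parse results; the C loader is just
        # significantly faster, which matters when a graph generation loads
        # many kind.yml files.
        return yaml.load(stream, Loader=SafeLoader)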