author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483 /third_party/python/taskcluster
parent     Initial commit. (diff)

Adding upstream version 124.0.1. (tag: upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/python/taskcluster')
-rw-r--r--  third_party/python/taskcluster/taskcluster-44.2.2.dist-info/LICENSE  373
-rw-r--r--  third_party/python/taskcluster/taskcluster-44.2.2.dist-info/METADATA  595
-rw-r--r--  third_party/python/taskcluster/taskcluster-44.2.2.dist-info/RECORD  92
-rw-r--r--  third_party/python/taskcluster/taskcluster-44.2.2.dist-info/WHEEL  5
-rw-r--r--  third_party/python/taskcluster/taskcluster-44.2.2.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/taskcluster/taskcluster/__init__.py  18
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/__init__.py  16
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/asyncclient.py  306
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/asyncutils.py  147
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/auth.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/authevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/awsprovisioner.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/download.py  191
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/ec2manager.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/github.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/githubevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/hooks.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/hooksevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/index.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/login.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/notify.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/notifyevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/purgecache.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/queue.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/queueevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/reader_writer.py  81
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/retry.py  41
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/secrets.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/upload.py  177
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/workermanager.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/aio/workermanagerevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/auth.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/authevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/awsprovisioner.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/client.py  711
-rw-r--r--  third_party/python/taskcluster/taskcluster/download.py  94
-rw-r--r--  third_party/python/taskcluster/taskcluster/ec2manager.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/exceptions.py  43
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/__init__.py  0
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/_client_importer.py  20
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/__init__.py  0
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/_client_importer.py  20
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/auth.py  781
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/authevents.py  180
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/github.py  197
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/githubevents.py  199
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/hooks.py  300
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/hooksevents.py  101
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/index.py  204
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/notify.py  207
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/notifyevents.py  68
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/object.py  187
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/purgecache.py  123
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/queue.py  1120
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/queueevents.py  719
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/secrets.py  143
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/workermanager.py  406
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/aio/workermanagerevents.py  91
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/auth.py  781
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/authevents.py  180
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/github.py  197
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/githubevents.py  199
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/hooks.py  300
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/hooksevents.py  101
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/index.py  204
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/notify.py  207
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/notifyevents.py  68
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/object.py  187
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/purgecache.py  123
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/queue.py  1120
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/queueevents.py  719
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/secrets.py  143
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/workermanager.py  406
-rw-r--r--  third_party/python/taskcluster/taskcluster/generated/workermanagerevents.py  91
-rw-r--r--  third_party/python/taskcluster/taskcluster/github.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/githubevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/helper.py  185
-rw-r--r--  third_party/python/taskcluster/taskcluster/hooks.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/hooksevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/index.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/login.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/notify.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/notifyevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/purgecache.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/queue.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/queueevents.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/retry.py  41
-rw-r--r--  third_party/python/taskcluster/taskcluster/secrets.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/upload.py  65
-rw-r--r--  third_party/python/taskcluster/taskcluster/utils.py  354
-rw-r--r--  third_party/python/taskcluster/taskcluster/workermanager.py  2
-rw-r--r--  third_party/python/taskcluster/taskcluster/workermanagerevents.py  2
92 files changed, 13700 insertions, 0 deletions
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/LICENSE b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/METADATA b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/METADATA
new file mode 100644
index 0000000000..5cd6e69c5d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/METADATA
@@ -0,0 +1,595 @@
+Metadata-Version: 2.1
+Name: taskcluster
+Version: 44.2.2
+Summary: Python client for Taskcluster
+Home-page: https://github.com/taskcluster/taskcluster
+Author: Mozilla Taskcluster and Release Engineering
+Author-email: release+python@mozilla.com
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: requests (>=2.4.3)
+Requires-Dist: mohawk (>=0.3.4)
+Requires-Dist: slugid (>=2)
+Requires-Dist: taskcluster-urls (>=12.1.0)
+Requires-Dist: six (>=1.10.0)
+Requires-Dist: aiohttp (>=3.7.4) ; python_version >= "3.6"
+Requires-Dist: async-timeout (>=2.0.0) ; python_version >= "3.6"
+Provides-Extra: test
+Requires-Dist: pytest ; extra == 'test'
+Requires-Dist: pytest-cov ; extra == 'test'
+Requires-Dist: pytest-mock ; extra == 'test'
+Requires-Dist: httmock ; extra == 'test'
+Requires-Dist: mock ; extra == 'test'
+Requires-Dist: setuptools-lint ; extra == 'test'
+Requires-Dist: flake8 ; extra == 'test'
+Requires-Dist: psutil ; extra == 'test'
+Requires-Dist: hypothesis ; extra == 'test'
+Requires-Dist: tox ; extra == 'test'
+Requires-Dist: coverage ; extra == 'test'
+Requires-Dist: python-dateutil ; extra == 'test'
+Requires-Dist: subprocess32 ; (python_version == "2.7") and extra == 'test'
+Requires-Dist: pytest-asyncio ; (python_version >= "3.6") and extra == 'test'
+Requires-Dist: aiofiles ; (python_version >= "3.6") and extra == 'test'
+Requires-Dist: httptest ; (python_version >= "3.6") and extra == 'test'
+
+# Taskcluster Client for Python
+
+[![Download](https://img.shields.io/badge/pypi-taskcluster-brightgreen)](https://pypi.python.org/pypi/taskcluster)
+[![License](https://img.shields.io/badge/license-MPL%202.0-orange.svg)](http://mozilla.org/MPL/2.0)
+
+**A Taskcluster client library for Python.**
+
+This library is a complete interface to Taskcluster in Python. It provides
+both synchronous and asynchronous interfaces for all Taskcluster API methods,
+in both Python-2 and Python-3 variants.
+
+## Usage
+
+For a general guide to using Taskcluster clients, see [Calling Taskcluster APIs](https://docs.taskcluster.net/docs/manual/using/api).
+
+### Setup
+
+Before calling an API end-point, you'll need to create a client instance.
+There is a class for each service, e.g., `Queue` and `Auth`. Each takes the
+same options, described below. Note that only `rootUrl` is
+required, and it's unusual to configure any other options aside from
+`credentials`.
+
+For each service, there are sync and async variants. The classes under
+`taskcluster` (e.g., `taskcluster.Queue`) are Python-2 compatible and operate
+synchronously. The classes under `taskcluster.aio` (e.g.,
+`taskcluster.aio.Queue`) require Python >= 3.6.
+
+#### Authentication Options
+
+Here is a simple set-up of an Index client:
+
+```python
+import taskcluster
+index = taskcluster.Index({
+ 'rootUrl': 'https://tc.example.com',
+ 'credentials': {'clientId': 'id', 'accessToken': 'accessToken'},
+})
+```
+
+The `rootUrl` option is required as it gives the Taskcluster deployment to
+which API requests should be sent. Credentials are only required if the
+request is to be authenticated -- many Taskcluster API methods do not require
+authentication.
+
+In most cases, the root URL and Taskcluster credentials should be provided in [standard environment variables](https://docs.taskcluster.net/docs/manual/design/env-vars). Use `taskcluster.optionsFromEnvironment()` to read these variables automatically:
+
+```python
+auth = taskcluster.Auth(taskcluster.optionsFromEnvironment())
+```
+
+Note that this function does not respect `TASKCLUSTER_PROXY_URL`. To use the Taskcluster Proxy from within a task:
+
+```python
+import os
+
+auth = taskcluster.Auth({'rootUrl': os.environ['TASKCLUSTER_PROXY_URL']})
+```
+
+#### Authorized Scopes
+
+If you wish to perform requests on behalf of a third party that has a
+smaller set of scopes than you do, you can specify [which scopes your
+request should be allowed to
+use](https://docs.taskcluster.net/docs/manual/design/apis/hawk/authorized-scopes)
+in the `authorizedScopes` option.
+
+```python
+opts = taskcluster.optionsFromEnvironment()
+opts['authorizedScopes'] = ['queue:create-task:highest:my-provisioner/my-worker-type']
+queue = taskcluster.Queue(opts)
+```
+
+#### Other Options
+
+The following additional options are accepted when constructing a client object:
+
+* `signedUrlExpiration` - default value for the `expiration` argument to `buildSignedUrl`
+* `maxRetries` - maximum number of times to retry a failed request
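+
+As a brief sketch, both options are passed in the same options dictionary
+as `rootUrl` (the rootUrl and values here are placeholders):
+
+```python
+import taskcluster
+
+queue = taskcluster.Queue({
+    'rootUrl': 'https://tc.example.com',
+    'maxRetries': 3,             # retry failed requests up to 3 times
+    'signedUrlExpiration': 300,  # signed URLs expire after 5 minutes
+})
+```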
+
+### Calling API Methods
+
+API methods are available as methods on the corresponding client object. For
+sync clients, these are sync methods, and for async clients they are async
+methods; the calling convention is the same in either case.
+
+There are four calling conventions for methods:
+
+```python
+client.method(v1, v2, payload)
+client.method(payload, k1=v1, k2=v2)
+client.method(payload=payload, query=query, params={k1: v1, k2: v2})
+client.method(v1, v2, payload=payload, query=query)
+```
+
+Here, `v1` and `v2` are URL parameters (named `k1` and `k2`), `payload` is the
+request payload, and `query` is a dictionary of query arguments.
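+
+As a concrete illustration, here is a sketch using the queue's `status`
+method, which takes a single URL parameter, `taskId`; the rootUrl and
+taskId are placeholders:
+
+```python
+import taskcluster
+
+queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})
+
+# These two calls are equivalent: the URL parameter may be given
+# positionally or by keyword.
+status = queue.status('JzTGxwxhQ76_Tt1dxkaG5g')
+status = queue.status(taskId='JzTGxwxhQ76_Tt1dxkaG5g')
+```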
+
+For example, in order to call an API method with query-string arguments:
+
+```python
+await queue.listTaskGroup('JzTGxwxhQ76_Tt1dxkaG5g',
+ query={'continuationToken': previousResponse.get('continuationToken')})
+```
+
+
+### Generating URLs
+
+It is often necessary to generate the URL for an API method without actually calling the method.
+To do so, use `buildUrl` or, for an API method that requires authentication, `buildSignedUrl`.
+
+```python
+import taskcluster
+
+index = taskcluster.Index(taskcluster.optionsFromEnvironment())
+print(index.buildUrl('findTask', 'builds.v1.latest'))
+secrets = taskcluster.Secrets(taskcluster.optionsFromEnvironment())
+print(secrets.buildSignedUrl('get', 'my-secret'))
+```
+
+Note that signed URLs are time-limited; the expiration can be set with the `signedUrlExpiration` option to the client constructor, or with the `expiration` keyword argument to `buildSignedUrl`, both given in seconds.
+
+### Generating Temporary Credentials
+
+If you have non-temporary taskcluster credentials you can generate a set of
+[temporary credentials](https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials) as follows. Note that the credentials cannot last more
+than 31 days, and you can only revoke them by revoking the credentials that
+were used to issue them (this takes up to one hour).
+
+It is not the responsibility of the caller to apply any clock drift adjustment
+to the start or expiry time - this is handled by the auth service directly.
+
+```python
+import datetime
+import taskcluster
+
+clientId = '...'      # issuing clientId (placeholder)
+accessToken = '...'   # issuing accessToken (placeholder)
+
+start = datetime.datetime.now()
+expiry = start + datetime.timedelta(minutes=1)
+scopes = ['ScopeA', 'ScopeB']
+name = 'foo'
+
+credentials = taskcluster.createTemporaryCredentials(
+    # issuing clientId
+    clientId,
+    # issuing accessToken
+    accessToken,
+    # validity of the temporary credentials starts here (datetime)
+    start,
+    # expiration of the temporary credentials (datetime)
+    expiry,
+    # scopes to grant the temporary credentials
+    scopes,
+    # credential name (optional)
+    name
+)
+```
+
+You cannot use temporary credentials to issue new temporary credentials. You
+must have `auth:create-client:<name>` to create a named temporary credential,
+but unnamed temporary credentials can be created regardless of your scopes.
+
+### Handling Timestamps
+Many Taskcluster APIs require ISO 8601 timestamps offset into the future
+as a way of providing expirations, deadlines, etc. These can be easily created
+using `datetime.datetime.isoformat()`; however, it can be rather error-prone
+and tedious to offset `datetime.datetime` objects into the future. Therefore
+this library comes with two utility functions for this purpose.
+
+```python
+dateObject = taskcluster.fromNow("2 days 3 hours 1 minute")
+ # -> datetime.datetime(2017, 1, 21, 17, 8, 1, 607929)
+dateString = taskcluster.fromNowJSON("2 days 3 hours 1 minute")
+ # -> '2017-01-21T17:09:23.240178Z'
+```
+
+By default the date-time is offset into the future; if the offset string is
+prefixed with minus (`-`), the date object is offset into the past instead.
+This is useful in some corner cases.
+
+```python
+dateObject = taskcluster.fromNow("- 1 year 2 months 3 weeks 5 seconds");
+ # -> datetime.datetime(2015, 10, 30, 18, 16, 50, 931161)
+```
+
+The offset string ignores whitespace and is case-insensitive. It may also
+optionally be prefixed with plus (`+`) if not prefixed with minus; any `+`
+prefix is ignored. However, entries in the offset string must be given in
+order from high to low, e.g., `2 years 1 day`. Additionally, various
+shorthands may be employed, as illustrated below.
+
+```
+ years, year, yr, y
+ months, month, mo
+ weeks, week, w
+ days, day, d
+ hours, hour, h
+ minutes, minute, min
+ seconds, second, sec, s
+```
+
+The `fromNow` method may also be given a date to be relative to, as a second
+argument. This is useful when offsetting the task expiration relative to the
+task deadline, or similar. This argument can also be passed as the kwarg
+`dateObj`:
+
+```python
+dateObject1 = taskcluster.fromNow("2 days 3 hours")
+dateObject2 = taskcluster.fromNow("1 year", dateObject1)
+taskcluster.fromNow("1 year", dateObj=dateObject1)
+ # -> datetime.datetime(2018, 1, 21, 17, 59, 0, 328934)
+```
+
+### Generating SlugIDs
+
+To generate slugIds (Taskcluster's client-generated unique IDs), use
+`taskcluster.slugId()`, which will return a unique slugId on each call.
+
+In some cases it is useful to be able to create a mapping from names to
+slugIds, with the ability to generate the same slugId multiple times.
+The `taskcluster.stableSlugId()` function returns a callable that does
+just this.
+
+```python
+gen = taskcluster.stableSlugId()
+sometask = gen('sometask')
+assert gen('sometask') == sometask # same input generates same output
+assert gen('sometask') != gen('othertask')
+
+gen2 = taskcluster.stableSlugId()
+sometask2 = gen2('sometask')
+assert sometask2 != sometask # but different slugId generators produce
+                             # different output
+```
+
+### Scope Analysis
+
+The `scopeMatch(assumedScopes, requiredScopeSets)` function determines
+whether one or more of a set of required scopes are satisfied by the assumed
+scopes, taking `*`-expansion into account. This is useful for making local
+decisions on scope satisfaction, but note that `assumedScopes` must be the
+*expanded* scopes, as this function cannot perform expansion.
+
+It takes a list of assumed scopes and a list of required scope sets in
+disjunctive normal form, and checks whether any of the required scope sets
+is satisfied.
+
+Example:
+
+```python
+requiredScopeSets = [
+ ["scopeA", "scopeB"],
+ ["scopeC:*"]
+]
+assert scopeMatch(['scopeA', 'scopeB'], requiredScopeSets)
+assert scopeMatch(['scopeC:xyz'], requiredScopeSets)
+assert not scopeMatch(['scopeA'], requiredScopeSets)
+assert not scopeMatch(['scopeC'], requiredScopeSets)
+```
+
+### Pagination
+
+Many Taskcluster API methods are paginated. There are two ways to handle
+pagination easily with the Python client. The first is to implement
+pagination in your code:
+
+```python
+import taskcluster
+queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})
+i = 0
+outcome = queue.listTaskGroup('JzTGxwxhQ76_Tt1dxkaG5g')
+tasks = len(outcome.get('tasks', []))  # count the first page too
+while outcome.get('continuationToken'):
+    print('Response %d gave us %d more tasks' % (i, len(outcome['tasks'])))
+    outcome = queue.listTaskGroup(
+        'JzTGxwxhQ76_Tt1dxkaG5g',
+        query={'continuationToken': outcome['continuationToken']})
+    i += 1
+    tasks += len(outcome.get('tasks', []))
+print('Task Group %s has %d tasks' % (outcome['taskGroupId'], tasks))
+```
+
+There's also an experimental feature to support built-in automatic pagination
+in the sync client. This feature allows passing a callback as the
+`paginationHandler` keyword argument. This function will be passed the
+response body of the API method as its sole positional argument.
+
+This example of the built-in pagination shows how a list of tasks could be
+built and then counted:
+
+```python
+import taskcluster
+queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})
+
+responses = []
+
+def handle_page(y):
+ print("%d tasks fetched" % len(y.get('tasks', [])))
+ responses.append(y)
+
+queue.listTaskGroup('JzTGxwxhQ76_Tt1dxkaG5g', paginationHandler=handle_page)
+
+tasks = 0
+for response in responses:
+ tasks += len(response.get('tasks', []))
+
+print("%d requests fetch %d tasks" % (len(responses), tasks))
+```
+
+### Pulse Events
+
+This library can generate exchange patterns for Pulse messages based on the
+Exchanges definitions provided by each service. This is done by instantiating
+a `<service>Events` class and calling a method with the name of the event.
+Options for the topic exchange methods can be in the form of either a single
+dictionary argument or keyword arguments. Only one form is allowed.
+
+```python
+from taskcluster import client
+qEvt = client.QueueEvents({'rootUrl': 'https://tc.example.com'})
+# The following calls are equivalent
+print(qEvt.taskCompleted({'taskId': 'atask'}))
+print(qEvt.taskCompleted(taskId='atask'))
+```
+
+Note that the client library does *not* provide support for interfacing with a Pulse server.
+
+### Logging
+
+Logging is set up in `taskcluster/__init__.py`. If the special
+`DEBUG_TASKCLUSTER_CLIENT` environment variable is set, the `__init__.py`
+module will set the `logging` module's level for its logger to `logging.DEBUG`
+and, if there are no existing handlers, add a `logging.StreamHandler()`
+instance. This is meant to assist those who do not wish to bother figuring out
+how to configure the Python logging module but do want debug messages.
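+
+A minimal sketch of enabling this debug output; the variable must be set
+before `taskcluster` is first imported, since the check happens at import
+time:
+
+```python
+import os
+
+# Enable debug logging for the taskcluster client.
+os.environ['DEBUG_TASKCLUSTER_CLIENT'] = '1'
+
+import taskcluster  # noqa: E402 -- logger is now at DEBUG with a StreamHandler
+```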
+
+## Uploading and Downloading Objects
+
+The Object service provides an API for reliable uploads and downloads of large objects.
+This library provides convenience methods to implement the client portion of those APIs, providing well-tested, resilient upload and download functionality.
+These methods will negotiate the appropriate method with the object service and perform the required steps to transfer the data.
+
+All methods are available in both sync and async versions, with identical APIs except for the `async`/`await` keywords.
+These methods are not available for Python-2.7.
+
+In either case, you will need to provide a configured `Object` instance with appropriate credentials for the operation.
+
+NOTE: There is a helper function to upload `s3` artifacts, `taskcluster.helper.upload_artifact`, but it is deprecated as it only supports the `s3` artifact type.
+
+### Uploads
+
+To upload, use any of the following:
+
+* `await taskcluster.aio.upload.uploadFromBuf(projectId=.., name=.., contentType=.., contentLength=.., uploadId=.., expires=.., maxRetries=.., objectService=.., data=..)` - asynchronously upload data from a buffer full of bytes.
+* `await taskcluster.aio.upload.uploadFromFile(projectId=.., name=.., contentType=.., contentLength=.., uploadId=.., expires=.., maxRetries=.., objectService=.., file=..)` - asynchronously upload data from a standard Python file.
+ Note that this is [probably what you want](https://github.com/python/asyncio/wiki/ThirdParty#filesystem), even in an async context.
+* `await taskcluster.aio.upload.upload(projectId=.., name=.., contentType=.., contentLength=.., expires=.., uploadId=.., maxRetries=.., objectService=.., readerFactory=..)` - asynchronously upload data from an async reader factory.
+* `taskcluster.upload.uploadFromBuf(projectId=.., name=.., contentType=.., contentLength=.., expires=.., uploadId=.., maxRetries=.., objectService=.., data=..)` - upload data from a buffer full of bytes.
+* `taskcluster.upload.uploadFromFile(projectId=.., name=.., contentType=.., contentLength=.., expires=.., uploadId=.., maxRetries=.., objectService=.., file=..)` - upload data from a standard Python file.
+* `taskcluster.upload.upload(projectId=.., name=.., contentType=.., contentLength=.., expires=.., uploadId=.., maxRetries=.., objectService=.., readerFactory=..)` - upload data from a sync reader factory.
+
+A "reader" is an object with a `read(max_size=-1)` method which reads and returns a chunk of 1 .. `max_size` bytes, or returns an empty string at EOF, async for the async functions and sync for the remainder.
+A "reader factory" is an async callable which returns a fresh reader, ready to read the first byte of the object.
+When uploads are retried, the reader factory may be called more than once.
+
+The `uploadId` parameter may be omitted, in which case a new slugId will be generated.
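+
+Here is a minimal sketch of a synchronous upload from a buffer; the
+projectId and object name are placeholders, and credentials are assumed to
+be available in the standard environment variables:
+
+```python
+import taskcluster
+from taskcluster import upload
+
+objectService = taskcluster.Object(taskcluster.optionsFromEnvironment())
+data = b'hello world'
+
+upload.uploadFromBuf(
+    projectId='my-project',         # placeholder project
+    name='my-project/some-object',  # placeholder object name
+    contentType='text/plain',
+    contentLength=len(data),
+    expires=taskcluster.fromNow('1 hour'),
+    maxRetries=5,
+    objectService=objectService,
+    data=data,
+)
+```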
+
+### Downloads
+
+To download, use any of the following:
+
+* `await taskcluster.aio.download.downloadToBuf(name=.., maxRetries=.., objectService=..)` - asynchronously download an object to an in-memory buffer, returning a tuple (buffer, content-type).
+ If the file is larger than available memory, this will crash.
+* `await taskcluster.aio.download.downloadToFile(name=.., maxRetries=.., objectService=.., file=..)` - asynchronously download an object to a standard Python file, returning the content type.
+* `await taskcluster.aio.download.download(name=.., maxRetries=.., objectService=.., writerFactory=..)` - asynchronously download an object to an async writer factory, returning the content type.
+* `taskcluster.download.downloadToBuf(name=.., maxRetries=.., objectService=..)` - download an object to an in-memory buffer, returning a tuple (buffer, content-type).
+ If the file is larger than available memory, this will crash.
+* `taskcluster.download.downloadToFile(name=.., maxRetries=.., objectService=.., file=..)` - download an object to a standard Python file, returning the content type.
+* `taskcluster.download.download(name=.., maxRetries=.., objectService=.., writerFactory=..)` - download an object to a sync writer factory, returning the content type.
+
+A "writer" is an object with a `write(data)` method which writes the given data, async for the async functions and sync for the remainder.
+A "writer factory" is a callable (again either async or sync) which returns a fresh writer, ready to write the first byte of the object.
+When downloads are retried, the writer factory may be called more than once.
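+
+A corresponding sketch of a synchronous download into memory; the object
+name is a placeholder:
+
+```python
+import taskcluster
+from taskcluster import download
+
+objectService = taskcluster.Object(taskcluster.optionsFromEnvironment())
+
+# Returns the object's bytes together with its content type.
+buf, contentType = download.downloadToBuf(
+    name='my-project/some-object',  # placeholder object name
+    maxRetries=5,
+    objectService=objectService,
+)
+print('fetched %d bytes of %s' % (len(buf), contentType))
+```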
+
+### Artifact Downloads
+
+Artifacts can be downloaded from the queue service with similar functions to those above.
+These functions support all of the queue's storage types, raising an error for `error` artifacts.
+In each case, if `runId` is omitted then the most recent run will be used.
+
+* `await taskcluster.aio.download.downloadArtifactToBuf(taskId=.., runId=.., name=.., maxRetries=.., queueService=..)` - asynchronously download an object to an in-memory buffer, returning a tuple (buffer, content-type).
+ If the file is larger than available memory, this will crash.
+* `await taskcluster.aio.download.downloadArtifactToFile(taskId=.., runId=.., name=.., maxRetries=.., queueService=.., file=..)` - asynchronously download an object to a standard Python file, returning the content type.
+* `await taskcluster.aio.download.downloadArtifact(taskId=.., runId=.., name=.., maxRetries=.., queueService=.., writerFactory=..)` - asynchronously download an object to an async writer factory, returning the content type.
+* `taskcluster.download.downloadArtifactToBuf(taskId=.., runId=.., name=.., maxRetries=.., queueService=..)` - download an object to an in-memory buffer, returning a tuple (buffer, content-type).
+ If the file is larger than available memory, this will crash.
+* `taskcluster.download.downloadArtifactToFile(taskId=.., runId=.., name=.., maxRetries=.., queueService=.., file=..)` - download an object to a standard Python file, returning the content type.
+* `taskcluster.download.downloadArtifact(taskId=.., runId=.., name=.., maxRetries=.., queueService=.., writerFactory=..)` - download an object to a sync writer factory, returning the content type.
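+
+For instance, a sketch downloading a task's log artifact to a local file;
+the rootUrl, taskId, and artifact name are placeholders:
+
+```python
+import taskcluster
+from taskcluster import download
+
+queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})
+
+with open('live.log', 'wb') as f:
+    # runId is omitted, so the most recent run is used.
+    contentType = download.downloadArtifactToFile(
+        taskId='JzTGxwxhQ76_Tt1dxkaG5g',  # placeholder taskId
+        name='public/logs/live.log',     # placeholder artifact name
+        maxRetries=5,
+        queueService=queue,
+        file=f,
+    )
+```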
+
+## Integration Helpers
+
+The Python Taskcluster client has a module, `taskcluster.helper`, with utilities that allow you to easily share authentication options across multiple services in your project.
+
+Generally a project using this library will face different use cases and authentication options:
+
+* No authentication for a new contributor without Taskcluster access,
+* Specific client credentials through environment variables on a developer's computer,
+* Taskcluster Proxy when running inside a task.
+
+### Shared authentication
+
+The class `taskcluster.helper.TaskclusterConfig` is made to be instantiated once in your project, usually in a top level module. That singleton is then accessed by different parts of your projects, whenever a Taskcluster service is needed.
+
+Here is a sample usage:
+
+1. in `project/__init__.py`, no call to Taskcluster is made at that point:
+
+```python
+from taskcluster.helper import TaskclusterConfig
+
+tc = TaskclusterConfig('https://community-tc.services.mozilla.com')
+```
+
+2. in `project/boot.py`, we authenticate on Taskcluster with provided credentials, or environment variables, or the taskcluster proxy (in that order):
+
+```python
+from project import tc
+
+tc.auth(client_id='XXX', access_token='YYY')
+```
+
+3. at that point, you can load any service using the authenticated wrapper from anywhere in your code:
+
+```python
+from project import tc
+
+def sync_usage():
+ queue = tc.get_service('queue')
+ queue.ping()
+
+async def async_usage():
+ hooks = tc.get_service('hooks', use_async=True) # Asynchronous service class
+ await hooks.ping()
+```
+
+Supported environment variables are:
+- `TASKCLUSTER_ROOT_URL` to specify your Taskcluster instance base URL. You can either use that variable or instantiate `TaskclusterConfig` with the base URL.
+- `TASKCLUSTER_CLIENT_ID` & `TASKCLUSTER_ACCESS_TOKEN` to specify your client credentials instead of providing them to `TaskclusterConfig.auth`
+- `TASKCLUSTER_PROXY_URL` to specify the proxy address used to reach Taskcluster in a task. It defaults to `http://taskcluster` when not specified.
+
+For more details on Taskcluster environment variables, [here is the documentation](https://docs.taskcluster.net/docs/manual/design/env-vars).
+
+### Loading secrets across multiple authentications
+
+Another available utility is `taskcluster.helper.load_secrets` which allows you to retrieve a secret using an authenticated `taskcluster.Secrets` instance (using `TaskclusterConfig.get_service` or the synchronous class directly).
+
+This utility loads a secret, but allows you to:
+1. share a secret across multiple projects, by using key prefixes inside the secret,
+2. check that some required keys are present in the secret,
+3. provide some default values,
+4. provide a local secret source instead of using the Taskcluster service (useful for local development or sharing _secrets_ with contributors)
+
+Let's say you have a secret on a Taskcluster instance named `project/foo/prod-config`, which is needed by a backend and some tasks. Here is its content:
+
+```yaml
+common:
+ environment: production
+ remote_log: https://log.xx.com/payload
+
+backend:
+ bugzilla_token: XXXX
+
+task:
+ backend_url: https://backend.foo.mozilla.com
+```
+
+In your backend, you would do:
+
+```python
+from taskcluster import Secrets
+from taskcluster.helper import load_secrets
+
+prod_config = load_secrets(
+ Secrets({...}),
+ 'project/foo/prod-config',
+
+ # We only need the common & backend parts
+ prefixes=['common', 'backend'],
+
+ # We absolutely need a bugzilla token to run
+ required=['bugzilla_token'],
+
+ # Let's provide some default value for the environment
+ existing={
+ 'environment': 'dev',
+ }
+)
+ # -> prod_config == {
+ # "environment": "production"
+ # "remote_log": "https://log.xx.com/payload",
+ # "bugzilla_token": "XXXX",
+ # }
+```
+
+In your task, you could do the following using the `TaskclusterConfig` mentioned above (the class has a shortcut to use an authenticated `Secrets` service automatically):
+
+```python
+from project import tc
+
+prod_config = tc.load_secrets(
+ 'project/foo/prod-config',
+
+    # We only need the common & task parts
+    prefixes=['common', 'task'],
+
+ # Let's provide some default value for the environment and backend_url
+ existing={
+ 'environment': 'dev',
+ 'backend_url': 'http://localhost:8000',
+ }
+)
+ # -> prod_config == {
+ # "environment": "production"
+ # "remote_log": "https://log.xx.com/payload",
+ # "backend_url": "https://backend.foo.mozilla.com",
+ # }
+```
+
+To provide local secrets values, you first need to load these values as a dictionary (usually by reading a local file in your format of choice: YAML, JSON, ...) and provide that dictionary to `load_secrets` via the `local_secrets` parameter:
+
+```python
+import os
+import yaml
+
+from taskcluster import Secrets
+from taskcluster.helper import load_secrets
+
+local_path = 'path/to/file.yml'
+
+prod_config = load_secrets(
+ Secrets({...}),
+ 'project/foo/prod-config',
+
+ # We support an optional local file to provide some configuration without reaching Taskcluster
+ local_secrets=yaml.safe_load(open(local_path)) if os.path.exists(local_path) else None,
+)
+```
+
+## Compatibility
+
+This library is co-versioned with Taskcluster itself.
+That is, a client with version x.y.z contains API methods corresponding to Taskcluster version x.y.z.
+Taskcluster is careful to maintain API compatibility, and guarantees it within a major version.
+That means that any client with version x.* will work against any Taskcluster services at version x.*, and is very likely to work for many other major versions of the Taskcluster services.
+Any incompatibilities are noted in the [Changelog](https://github.com/taskcluster/taskcluster/blob/main/CHANGELOG.md).
+
+
+
+
+
+
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/RECORD b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/RECORD
new file mode 100644
index 0000000000..126a485225
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/RECORD
@@ -0,0 +1,92 @@
+taskcluster/__init__.py,sha256=Y9afafyyNPMdTxXxk76Q1Yp6PwNnLKe85arUHbWyS4c,622
+taskcluster/auth.py,sha256=AME2kJmq42R7lQnxKioxrcbPHx-jM_HaNNbSHtPvfOQ,78
+taskcluster/authevents.py,sha256=cRnrErBR2FdevR4fapWFZejFKG8ZudqdxsI4fN-dcC0,84
+taskcluster/awsprovisioner.py,sha256=5-orMBEkSvOS-wjJOQEunkteyGhMiOsu1Ho-WsgSr14,88
+taskcluster/client.py,sha256=yrGFwp3hcxK2vLVifH97jD_K4ZAROdwHFN3aM3ptK7g,27026
+taskcluster/download.py,sha256=JNVLxj_MI0WjRHgz8Vx2ocEOWnszTF6vVgos-kpe7Fk,3797
+taskcluster/ec2manager.py,sha256=D7abzT8gwqNP-p3YsGLRwOf3HH2yVvwvQUIIIsS6YIc,84
+taskcluster/exceptions.py,sha256=Gf_YjoDj_fR9NUEBcfH142sAtIMBMDyoOu8Dv3a_yzU,1322
+taskcluster/github.py,sha256=77_G0H9vKy6-gqKw5vzU_Ny9l4iTQZYvPgVNUtMzc9M,80
+taskcluster/githubevents.py,sha256=fZPDLXcruH1-1V7EVAFxXNh5zabp3ifOvYtqH3Ql6Uo,86
+taskcluster/helper.py,sha256=F5rz9lbBictmZTBKW8iamf6UhFyYYgsqcZH1Ti2jj3M,6493
+taskcluster/hooks.py,sha256=EQAX26VkR-7vUE-8tWJGA20UzIRpNQFeu3B1X0IR4jw,79
+taskcluster/hooksevents.py,sha256=p4Vwj7cWE9dr7x7o2gx1vAC1p5rHyMlKi-65Yo_kgD4,85
+taskcluster/index.py,sha256=lia2B9-pISrK_r_wDAdxv9g2xdswfk7lgD3WkjefBAw,79
+taskcluster/login.py,sha256=8SxrTSYMkPTx0tMmj5ReHTe7qaHQlp-PIUm3HKUFR9o,79
+taskcluster/notify.py,sha256=Ug9IdJbfk1jDIPdN6AN2VCdEncFkNG6QuN19tJ3_k2o,80
+taskcluster/notifyevents.py,sha256=LcunL5OXFp1GrplOS4ibKXeoBPO5DUrFimUAR1TK43U,86
+taskcluster/purgecache.py,sha256=SIu9Q6Q4WtQneCv6VjWddUH8hXF8rDeWafEPuSZTXsM,84
+taskcluster/queue.py,sha256=Yq46lvAQ3jvdI_GujIvyrmzG8MiSqiEU0v4wEh0vwgI,79
+taskcluster/queueevents.py,sha256=AP1lMvDXeobiSY0zqTxAAKKWaFCiuMbxmEFM1Muw6-0,85
+taskcluster/retry.py,sha256=ppxRmAMTxyVvPD-0RQe2doih6tde4eD3FTa9mEOPRBg,1114
+taskcluster/secrets.py,sha256=k1ngR8DGCONOlAsLEz8VsnhuhdsoSHf2ycOYpSVUesE,81
+taskcluster/upload.py,sha256=S5TIV0F84b043Oic7nRTSC73a0FCb63hM4wrn7QVtww,2244
+taskcluster/utils.py,sha256=xRMFVykiWkCmzNE7V9v-cm5JMFVqSzLpaJsw37vVvME,11036
+taskcluster/workermanager.py,sha256=TKgO5XlL_awYPvK41O2cbHCE-WTApzR34uiJz8DfJlo,87
+taskcluster/workermanagerevents.py,sha256=AhgX5zop6vaczuMLh8XrF9GdIXblbnd7IMxvXJTqHRo,93
+taskcluster/aio/__init__.py,sha256=K9gSKiS7jUnTe_tO4nfFpVBah_TeecoEbZ-4L75_RVw,483
+taskcluster/aio/asyncclient.py,sha256=tAK-oiOMLaCRuTzH1C1_JIfBuOS6sGAvL7ygNWD5huM,10960
+taskcluster/aio/asyncutils.py,sha256=Ryf3MMSQzjApg6egeE6lcC6OfOgIqFZhBBwTg17xooM,5138
+taskcluster/aio/auth.py,sha256=sd5FVqwaRzJGQlAEImSsnPUZigVnHZkPWBFm1dxYLaY,83
+taskcluster/aio/authevents.py,sha256=57nValWTawxx-JgYLuxIY0kAoBZK0LgCCu2o9FfYgfs,89
+taskcluster/aio/awsprovisioner.py,sha256=gEK7O2Ptqm8PTnX2lghVdsUabH5zH8I4vAMevEpft3I,93
+taskcluster/aio/download.py,sha256=P87uBADRsmk5WD_G6Ad-GtmunAXn0djtJqJqKXbT1Zs,6966
+taskcluster/aio/ec2manager.py,sha256=k6EX4v-YtxTfKXfrW40z7Iuvnl6qdeYTkHXx5XJPG3o,89
+taskcluster/aio/github.py,sha256=iW2oYpf1AdMeWX-LP_bhLlX1swUglH_Z2V9kLz9y57M,85
+taskcluster/aio/githubevents.py,sha256=CAbBsqRAPvihfvCp2-juTQE0TybFEDtJcHYvXtsJ5mk,91
+taskcluster/aio/hooks.py,sha256=s4G5XHe_cnjqFyeSFAX6hXNPuaHISO7MIlwiKfG0kI4,84
+taskcluster/aio/hooksevents.py,sha256=4IULmioTI0cZhaTG5Pft80RJ9iv0ROdppS7XV0G5aWQ,90
+taskcluster/aio/index.py,sha256=hROSSbdy7B1_fSV2kGfz2_364xQPYLWVu81LxHz93bk,84
+taskcluster/aio/login.py,sha256=ZDaf8OT43EtHq2ub6w9oMY9bKDDZsIlBXyYbziuW8w4,84
+taskcluster/aio/notify.py,sha256=DNwTTRaIrqcYXte45QgxJJSWheHBN2pSIFIEjSpREUQ,85
+taskcluster/aio/notifyevents.py,sha256=tRQ5VfMIiUkkK0PcAHPybpXEZg_QSHypjZp7Y3ewA_I,91
+taskcluster/aio/purgecache.py,sha256=fwzKCePo1ZZ1SGYV7idms9-9tVog3mDY1Jp-WpXY46k,89
+taskcluster/aio/queue.py,sha256=sberomzhztT-2Fg--x1shyHnLjPvpDDIjpL6TlJzrJ0,84
+taskcluster/aio/queueevents.py,sha256=lv9B9fyRQaeicNCilsLKDPoLG3sTP2eeBBpLiPIwZgM,90
+taskcluster/aio/reader_writer.py,sha256=WDYwNeb-lyDDTh9Avq2pwBTX5C-zce9Yil8Pd3rEwEA,2236
+taskcluster/aio/retry.py,sha256=gln9WP1yJWzz5Scgt3FxwAH4I3ikOnRqiT9NRIKIqMI,1144
+taskcluster/aio/secrets.py,sha256=oZOlT1akPX_vsi1LmES7RHJqe_GxfmwCMgXwK8b_Kek,86
+taskcluster/aio/upload.py,sha256=ewvSnz2tzmVsiR7u0DJD-jlwVvfNNwVrd3V-unQIqvE,6006
+taskcluster/aio/workermanager.py,sha256=u9tF-rq3XT_HTT8xGQkAfjIAl8Zz3sc4PbCucJyPyy8,92
+taskcluster/aio/workermanagerevents.py,sha256=FTdOv2qGprRGgFefLFaTQJH-B0ZwRRaahOfYQZYAxq8,98
+taskcluster/generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskcluster/generated/_client_importer.py,sha256=PqxVoOGtpDYMgWj3I3SuwsvkmVMKMflbyN14aGay7fs,829
+taskcluster/generated/auth.py,sha256=_OSEEE-OikD-6lRszwfZwwYcdB82VMd1VimbZl2GT8w,27153
+taskcluster/generated/authevents.py,sha256=8utWiTCrQ7p-Dz0uYYXj2JXzKkOsWKQ78FQNJUyVbYE,5670
+taskcluster/generated/github.py,sha256=CWxs_8yzH7ybuIbR2r81ITyUZDgCuNHHM9Itf-aCs6E,5989
+taskcluster/generated/githubevents.py,sha256=NpiVaIlZKye5B-7VshxDcTJqdh8U4m3hqvYmheE1fDA,8162
+taskcluster/generated/hooks.py,sha256=PvsI6GBcXVV8_9OLWS576vEW_-qld52vzf_0I5RKq_4,9395
+taskcluster/generated/hooksevents.py,sha256=byRSNpQJmgkYjd8K14AgvoRei84gsYgI0S-LcgWp5y8,3933
+taskcluster/generated/index.py,sha256=Be_Fd93_-trPQpTeo05FkNGZl-CdMIODsXBs6DCHzu4,7022
+taskcluster/generated/notify.py,sha256=PQITG_sLvXmfx2NEiKUA7g7CDHGWMU7_yONj-1HzLi8,6395
+taskcluster/generated/notifyevents.py,sha256=7XZNazqU1acHhn6Krbvl1tGaS7xVDQywCdjD7s5LvVs,2201
+taskcluster/generated/object.py,sha256=k9dmS7vImQWBFs0RR1WQlaAz0-ATubvdiYcJf0EcBQo,6751
+taskcluster/generated/purgecache.py,sha256=EM4t3l6NKZrozBFQum3D4xXBAXSiN04aTy4pTAuWV5o,3761
+taskcluster/generated/queue.py,sha256=Tz-G3ZC5ONUhZ5Uin5s9FbPulhgJsF8igPVHxNYwz8A,44340
+taskcluster/generated/queueevents.py,sha256=fBXHthI0GuigfZPOI7Q3IJXIXA6vYuCA99r3YD4rH1U,27135
+taskcluster/generated/secrets.py,sha256=opcNJIDcy_as-Hzmik1YlGH7H2tj9bMKdFNESg6VIQw,4385
+taskcluster/generated/workermanager.py,sha256=QEALsnQrwljiE8_ly9LTmZ6GdmIfAmpKJkWDJibely8,14025
+taskcluster/generated/workermanagerevents.py,sha256=pqbn6QUJ-rkcb41m32myzg7lW4tlxrJ1zgmS5ZdnKwI,3396
+taskcluster/generated/aio/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskcluster/generated/aio/_client_importer.py,sha256=PqxVoOGtpDYMgWj3I3SuwsvkmVMKMflbyN14aGay7fs,829
+taskcluster/generated/aio/auth.py,sha256=E8DhNSLLXtgk3zbr-bfb7vX0r68woh9Z7BZo2zvNpxw,27573
+taskcluster/generated/aio/authevents.py,sha256=Xvhh2riCSXKUXu6lf-toLAnhDqiBwDg1PePolNvmdsw,5730
+taskcluster/generated/aio/github.py,sha256=mULWUV2qpn7177qCKqtKJSIhZ07cxb5FvkDkkEx8Uxc,6145
+taskcluster/generated/aio/githubevents.py,sha256=q0pOuUVSGQ64Rlw_FKkcwvXbsEcnH2r1AzIGFfWte6o,8222
+taskcluster/generated/aio/hooks.py,sha256=NgSC63oQ8J_krW4AFeUMcUSery2pUhmWCwLyurrC__A,9611
+taskcluster/generated/aio/hooksevents.py,sha256=i5fXbMPZDR7qXYMIkzFG0e_cGzHBs6Lpr8kAYo6Pe1g,3993
+taskcluster/generated/aio/index.py,sha256=HuYLtMag0SvYG5W1VY7ro5qDvfg1FOOP9RxalDZz3qk,7166
+taskcluster/generated/aio/notify.py,sha256=33iUtwzsXjdY3DS2-HdpHHB1Ve6rlKPgljf4c1e9iSI,6551
+taskcluster/generated/aio/notifyevents.py,sha256=XfBhEA_295uIzB1hzgscyTNw0JZURMetD80Wd6Q0l1U,2261
+taskcluster/generated/aio/object.py,sha256=u5Ws6jVulIMSk9U7cpxehNTPC7m0ilNod8NkshKDrGM,6883
+taskcluster/generated/aio/purgecache.py,sha256=3wgT_W0C_FOU5mJwZCBiXV-cAxbpxmvkeo4fIQfPyV8,3869
+taskcluster/generated/aio/queue.py,sha256=FslTOocclr5yO--Iti36Fif9elg2ac5Urb77-OoegQs,44856
+taskcluster/generated/aio/queueevents.py,sha256=2yG4WiPhkGE491iojMdyrgXVmrGkxer_92TuCYRLByw,27195
+taskcluster/generated/aio/secrets.py,sha256=T4kdIS6gPAIOieLNhSZdzc6sGs6d7VdBZy7IfMQfaQU,4505
+taskcluster/generated/aio/workermanager.py,sha256=DpSkv3jh5Bv4U8JzGnqYP4p62ZkbPxfLbAJHzVgpcts,14289
+taskcluster/generated/aio/workermanagerevents.py,sha256=xQObLJouXZF54K3aofo9I-czZ7joW-UiT9OQNZNGxes,3456
+taskcluster-44.2.2.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
+taskcluster-44.2.2.dist-info/METADATA,sha256=NmCX-DTWKlMsv_FrbtVbzKEQtHHoEL3IaxEmDw8t3CI,25736
+taskcluster-44.2.2.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+taskcluster-44.2.2.dist-info/top_level.txt,sha256=Uxnnep-l0fTSnwOst3XkLMA-KHfY5ONwwtSgRmcErXU,12
+taskcluster-44.2.2.dist-info/RECORD,,
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/WHEEL b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/WHEEL
new file mode 100644
index 0000000000..5bad85fdc1
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/top_level.txt b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/top_level.txt
new file mode 100644
index 0000000000..cb1e1bb482
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+taskcluster
diff --git a/third_party/python/taskcluster/taskcluster/__init__.py b/third_party/python/taskcluster/taskcluster/__init__.py
new file mode 100644
index 0000000000..55102dd5ff
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/__init__.py
@@ -0,0 +1,18 @@
+""" Python client for Taskcluster """
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import logging
+import os
+from .client import createSession # NOQA
+from .client import createTemporaryCredentials # NOQA
+from taskcluster.utils import * # NOQA
+from taskcluster.exceptions import * # NOQA
+from taskcluster.generated._client_importer import * # NOQA
+
+log = logging.getLogger(__name__)
+
+if os.environ.get("DEBUG_TASKCLUSTER_CLIENT"):
+ log.setLevel(logging.DEBUG)
+ if len(log.handlers) == 0:
+ log.addHandler(logging.StreamHandler())
+log.addHandler(logging.NullHandler())
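+
+# Editor's note: a minimal sketch of enabling the debug logging above. The
+# environment variable must be set before this module is first imported, e.g.:
+#
+#   import os
+#   os.environ['DEBUG_TASKCLUSTER_CLIENT'] = '1'
+#   import taskcluster  # the module-level logger is now at DEBUG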
diff --git a/third_party/python/taskcluster/taskcluster/aio/__init__.py b/third_party/python/taskcluster/taskcluster/aio/__init__.py
new file mode 100644
index 0000000000..a7d85f96c5
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/__init__.py
@@ -0,0 +1,16 @@
+""" Python client for Taskcluster """
+
+import logging
+import os
+from .asyncclient import createSession # NOQA
+from taskcluster.utils import * # NOQA
+from taskcluster.exceptions import * # NOQA
+from ..generated.aio._client_importer import * # NOQA
+
+log = logging.getLogger(__name__)
+
+if os.environ.get("DEBUG_TASKCLUSTER_CLIENT"):
+ log.setLevel(logging.DEBUG)
+ if len(log.handlers) == 0:
+ log.addHandler(logging.StreamHandler())
+log.addHandler(logging.NullHandler())
diff --git a/third_party/python/taskcluster/taskcluster/aio/asyncclient.py b/third_party/python/taskcluster/taskcluster/aio/asyncclient.py
new file mode 100644
index 0000000000..5882d81ad2
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/asyncclient.py
@@ -0,0 +1,306 @@
+"""This module is used to interact with taskcluster rest apis"""
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import logging
+from six.moves import urllib
+
+import mohawk
+import mohawk.bewit
+import aiohttp
+
+from .. import exceptions
+from .. import utils
+from ..client import BaseClient, createTemporaryCredentials
+from . import asyncutils, retry
+
+log = logging.getLogger(__name__)
+
+
+# Default configuration
+_defaultConfig = config = {
+ 'credentials': {
+ 'clientId': os.environ.get('TASKCLUSTER_CLIENT_ID'),
+ 'accessToken': os.environ.get('TASKCLUSTER_ACCESS_TOKEN'),
+ 'certificate': os.environ.get('TASKCLUSTER_CERTIFICATE'),
+ },
+ 'maxRetries': 5,
+ 'signedUrlExpiration': 15 * 60,
+}
+
+
+def createSession(*args, **kwargs):
+ """ Create a new aiohttp session. This passes through all positional and
+ keyword arguments to the asyncutils.createSession() constructor.
+
+ It's preferred to do something like
+
+ async with createSession(...) as session:
+ queue = Queue(session=session)
+ await queue.ping()
+
+ or
+
+ async with createSession(...) as session:
+ async with Queue(session=session) as queue:
+ await queue.ping()
+
+ in the client code.
+ """
+ return asyncutils.createSession(*args, **kwargs)
+
+
+class AsyncBaseClient(BaseClient):
+ """ Base Class for API Client Classes. Each individual Client class
+ needs to set up its own methods for REST endpoints and Topic Exchange
+ routing key patterns. The _makeApiCall() and _topicExchange() methods
+ help with this.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(AsyncBaseClient, self).__init__(*args, **kwargs)
+ self._implicitSession = False
+ if self.session is None:
+ self._implicitSession = True
+
+ def _createSession(self):
+ """ If self.session isn't set, don't create an implicit.
+
+ To avoid `session.close()` warnings at the end of tasks, and
+ various strongly-worded aiohttp warnings about using `async with`,
+ let's set `self.session` to `None` if no session is passed in to
+ `__init__`. The `asyncutils` functions will create a new session
+ per call in that case.
+ """
+ return None
+
+ async def _makeApiCall(self, entry, *args, **kwargs):
+ """ This function is used to dispatch calls to other functions
+ for a given API Reference entry"""
+
+ x = self._processArgs(entry, *args, **kwargs)
+ routeParams, payload, query, paginationHandler, paginationLimit = x
+ route = self._subArgsInRoute(entry, routeParams)
+
+ if paginationLimit and 'limit' in entry.get('query', []):
+ query['limit'] = paginationLimit
+
+ if query:
+ _route = route + '?' + urllib.parse.urlencode(query)
+ else:
+ _route = route
+ response = await self._makeHttpRequest(entry['method'], _route, payload)
+
+ if paginationHandler:
+ paginationHandler(response)
+ while response.get('continuationToken'):
+ query['continuationToken'] = response['continuationToken']
+ _route = route + '?' + urllib.parse.urlencode(query)
+ response = await self._makeHttpRequest(entry['method'], _route, payload)
+ paginationHandler(response)
+ else:
+ return response
+
+ async def _makeHttpRequest(self, method, route, payload):
+ """ Make an HTTP Request for the API endpoint. This method wraps
+ the logic about doing failure retry and passes off the actual work
+ of doing an HTTP request to another method."""
+
+ url = self._constructUrl(route)
+ log.debug('Full URL used is: %s', url)
+
+ hawkExt = self.makeHawkExt()
+
+ # Serialize payload if given
+ if payload is not None:
+ payload = utils.dumpJson(payload)
+
+ async def tryRequest(retryFor):
+ # Construct header
+ if self._hasCredentials():
+ sender = mohawk.Sender(
+ credentials={
+ 'id': self.options['credentials']['clientId'],
+ 'key': self.options['credentials']['accessToken'],
+ 'algorithm': 'sha256',
+ },
+ ext=hawkExt if hawkExt else {},
+ url=url,
+ content=payload if payload else '',
+ content_type='application/json' if payload else '',
+ method=method,
+ )
+
+ headers = {'Authorization': sender.request_header}
+ else:
+ log.debug('Not using hawk!')
+ headers = {}
+ if payload:
+ # Set header for JSON if payload is given, note that we serialize
+ # outside this loop.
+ headers['Content-Type'] = 'application/json'
+
+ try:
+ response = await asyncutils.makeSingleHttpRequest(
+ method, url, payload, headers, session=self.session
+ )
+ except aiohttp.ClientError as rerr:
+ return retryFor(exceptions.TaskclusterConnectionError(
+ "Failed to establish connection",
+ superExc=rerr
+ ))
+
+ status = response.status
+ if status == 204:
+ return None
+
+ # Catch retryable errors and go to the beginning of the loop
+ # to do the retry
+            if 500 <= status < 600:
+ try:
+ response.raise_for_status()
+ except Exception as exc:
+ return retryFor(exc)
+
+ # Throw errors for non-retryable errors
+ if status < 200 or status >= 300:
+ # Parse messages from errors
+ data = {}
+ try:
+ data = await response.json()
+ except Exception:
+ pass # Ignore JSON errors in error messages
+ # Find error message
+ message = "Unknown Server Error"
+ if isinstance(data, dict) and 'message' in data:
+ message = data['message']
+ else:
+ if status == 401:
+ message = "Authentication Error"
+ elif status == 500:
+ message = "Internal Server Error"
+ else:
+ message = "Unknown Server Error %s\n%s" % (str(status), str(data)[:1024])
+ # Raise TaskclusterAuthFailure if this is an auth issue
+ if status == 401:
+ raise exceptions.TaskclusterAuthFailure(
+ message,
+ status_code=status,
+ body=data,
+ superExc=None
+ )
+ # Raise TaskclusterRestFailure for all other issues
+ raise exceptions.TaskclusterRestFailure(
+ message,
+ status_code=status,
+ body=data,
+ superExc=None
+ )
+
+ # Try to load JSON
+ try:
+ await response.release()
+ return await response.json()
+ except (ValueError, aiohttp.client_exceptions.ContentTypeError):
+ return {"response": response}
+
+ return await retry.retry(self.options['maxRetries'], tryRequest)
+
+ async def __aenter__(self):
+ if self._implicitSession and not self.session:
+ self.session = createSession()
+ return self
+
+ async def __aexit__(self, *args):
+ if self._implicitSession and self.session:
+ await self.session.close()
+ self.session = None
+
+
+def createApiClient(name, api):
+ api = api['reference']
+
+ attributes = dict(
+ name=name,
+ __doc__=api.get('description'),
+ classOptions={},
+ funcinfo={},
+ )
+
+ # apply a default for apiVersion; this can be removed when all services
+ # have apiVersion
+ if 'apiVersion' not in api:
+ api['apiVersion'] = 'v1'
+
+ copiedOptions = ('exchangePrefix',)
+ for opt in copiedOptions:
+ if opt in api:
+ attributes['classOptions'][opt] = api[opt]
+
+ copiedProperties = ('serviceName', 'apiVersion')
+ for opt in copiedProperties:
+ if opt in api:
+ attributes[opt] = api[opt]
+
+ for entry in api['entries']:
+ if entry['type'] == 'function':
+ def addApiCall(e):
+ async def apiCall(self, *args, **kwargs):
+ return await self._makeApiCall(e, *args, **kwargs)
+ return apiCall
+ f = addApiCall(entry)
+
+ docStr = "Call the %s api's %s method. " % (name, entry['name'])
+
+            if entry['args']:
+ docStr += "This method takes:\n\n"
+ docStr += '\n'.join(['- ``%s``' % x for x in entry['args']])
+ docStr += '\n\n'
+ else:
+ docStr += "This method takes no arguments. "
+
+ if 'input' in entry:
+ docStr += "This method takes input ``%s``. " % entry['input']
+
+ if 'output' in entry:
+ docStr += "This method gives output ``%s``" % entry['output']
+
+ docStr += '\n\nThis method does a ``%s`` to ``%s``.' % (
+ entry['method'].upper(), entry['route'])
+
+ f.__doc__ = docStr
+ attributes['funcinfo'][entry['name']] = entry
+
+ elif entry['type'] == 'topic-exchange':
+ def addTopicExchange(e):
+ def topicExchange(self, *args, **kwargs):
+ return self._makeTopicExchange(e, *args, **kwargs)
+ return topicExchange
+
+ f = addTopicExchange(entry)
+
+ docStr = 'Generate a routing key pattern for the %s exchange. ' % entry['exchange']
+ docStr += 'This method takes a given routing key as a string or a '
+ docStr += 'dictionary. For each given dictionary key, the corresponding '
+ docStr += 'routing key token takes its value. For routing key tokens '
+ docStr += 'which are not specified by the dictionary, the * or # character '
+ docStr += 'is used depending on whether or not the key allows multiple words.\n\n'
+ docStr += 'This exchange takes the following keys:\n\n'
+ docStr += '\n'.join(['- ``%s``' % x['name'] for x in entry['routingKey']])
+
+ f.__doc__ = docStr
+
+ # Add whichever function we created
+ f.__name__ = str(entry['name'])
+ attributes[entry['name']] = f
+
+    # the generated apiCall methods are coroutines that await _makeApiCall,
+    # so the dynamic class must derive from AsyncBaseClient
+    return type(utils.toStr(name), (AsyncBaseClient,), attributes)
+
+
+__all__ = [
+ 'createTemporaryCredentials',
+ 'config',
+ 'BaseClient',
+ 'createApiClient',
+]
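+
+# Editor's sketch (not part of upstream): how createApiClient consumes an API
+# reference. The 'ping' service and its single entry here are hypothetical;
+# real references are generated from the Taskcluster service manifests.
+#
+#   reference = {'reference': {
+#       'serviceName': 'ping', 'apiVersion': 'v1', 'entries': [{
+#           'type': 'function', 'name': 'ping', 'method': 'get',
+#           'route': '/ping', 'args': [],
+#       }],
+#   }}
+#   Ping = createApiClient('Ping', reference)
+#
+#   async def main():
+#       async with Ping({'rootUrl': 'https://tc.example.com'}) as ping:
+#           await ping.ping()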
diff --git a/third_party/python/taskcluster/taskcluster/aio/asyncutils.py b/third_party/python/taskcluster/taskcluster/aio/asyncutils.py
new file mode 100644
index 0000000000..ce2b9f6945
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/asyncutils.py
@@ -0,0 +1,147 @@
+from __future__ import absolute_import, division, print_function
+import aiohttp
+import aiohttp.hdrs
+import asyncio
+import async_timeout
+import functools
+import logging
+import os
+import six
+
+import taskcluster.utils as utils
+import taskcluster.exceptions as exceptions
+
+log = logging.getLogger(__name__)
+
+
+def createSession(*args, **kwargs):
+ return aiohttp.ClientSession(*args, **kwargs)
+
+
+# Useful information: https://www.blog.pythonlibrary.org/2016/07/26/python-3-an-intro-to-asyncio/
+async def makeHttpRequest(method, url, payload, headers, retries=utils.MAX_RETRIES, session=None):
+ """ Make an HTTP request and retry it until success, return request """
+ retry = -1
+ response = None
+ implicit = False
+ if session is None:
+ implicit = True
+ session = aiohttp.ClientSession()
+
+ try:
+ while True:
+ retry += 1
+ # if this isn't the first retry then we sleep
+ if retry > 0:
+ snooze = float(retry * retry) / 10.0
+ log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
+ await asyncio.sleep(snooze)
+
+ # Seek payload to start, if it is a file
+ if hasattr(payload, 'seek'):
+ payload.seek(0)
+
+ log.debug('Making attempt %d', retry)
+ try:
+ with async_timeout.timeout(60):
+ response = await makeSingleHttpRequest(method, url, payload, headers, session)
+ except aiohttp.ClientError as rerr:
+ if retry < retries:
+ log.warn('Retrying because of: %s' % rerr)
+ continue
+ # raise a connection exception
+ raise rerr
+ except ValueError as rerr:
+ log.warn('ValueError from aiohttp: redirect to non-http or https')
+ raise rerr
+ except RuntimeError as rerr:
+ log.warn('RuntimeError from aiohttp: session closed')
+ raise rerr
+ # Handle non 2xx status code and retry if possible
+ status = response.status
+            if 500 <= status < 600:
+                if retry < retries:
+                    log.warn('Retrying because of: %d status' % status)
+                    continue
+                else:
+                    raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
+ return response
+    finally:
+        # close an implicitly-created session directly; calling
+        # loop.run_until_complete() here would fail inside a running loop
+        if implicit:
+            await session.close()
+ # This code-path should be unreachable
+ assert False, "Error from last retry should have been raised!"
+
+
+async def makeSingleHttpRequest(method, url, payload, headers, session=None):
+ method = method.upper()
+ log.debug('Making a %s request to %s', method, url)
+ log.debug('HTTP Headers: %s' % str(headers))
+ log.debug('HTTP Payload: %s (limit 100 char)' % str(payload)[:100])
+ implicit = False
+ if session is None:
+ implicit = True
+ session = aiohttp.ClientSession()
+
+ skip_auto_headers = [aiohttp.hdrs.CONTENT_TYPE]
+
+ try:
+ # https://docs.aiohttp.org/en/stable/client_quickstart.html#passing-parameters-in-urls
+ # we must avoid aiohttp's helpful "requoting" functionality, as it breaks Hawk signatures
+ url = aiohttp.client.URL(url, encoded=True)
+ async with session.request(
+ method, url, data=payload, headers=headers,
+ skip_auto_headers=skip_auto_headers, compress=False
+ ) as resp:
+ response_text = await resp.text()
+ log.debug('Received HTTP Status: %s' % resp.status)
+ log.debug('Received HTTP Headers: %s' % str(resp.headers))
+ log.debug('Received HTTP Payload: %s (limit 1024 char)' %
+ six.text_type(response_text)[:1024])
+ return resp
+ finally:
+ if implicit:
+ await session.close()
+
+
+async def putFile(filename, url, contentType, session=None):
+ with open(filename, 'rb') as f:
+ contentLength = os.fstat(f.fileno()).st_size
+ return await makeHttpRequest('put', url, f, headers={
+ 'Content-Length': str(contentLength),
+ 'Content-Type': contentType,
+ }, session=session)
+
+
+def runAsync(coro):
+ """
+    Replacement for asyncio.run, which doesn't exist in Python < 3.7.
+ """
+ asyncio.set_event_loop(asyncio.new_event_loop())
+ loop = asyncio.get_event_loop()
+ result = loop.run_until_complete(coro)
+ loop.close()
+ return result
+
+
+def ensureCoro(func):
+ """
+    If func is a regular function, return an async wrapper that executes it
+    in a thread-pool executor. If func is already an async function, return
+    it unchanged.
+ """
+ if asyncio.iscoroutinefunction(func):
+ return func
+
+ @functools.wraps(func)
+ async def coro(*args, **kwargs):
+ loop = asyncio.get_event_loop()
+ return await loop.run_in_executor(
+ None,
+ functools.partial(func, *args, **kwargs)
+ )
+ return coro
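+
+# Editor's sketch (not part of upstream): ensureCoro in use. A plain function
+# is wrapped so it runs in the default thread-pool executor:
+#
+#   import hashlib
+#
+#   def digest(path):
+#       with open(path, 'rb') as f:
+#           return hashlib.sha256(f.read()).hexdigest()
+#
+#   async def main():
+#       # digest() runs in a worker thread, not blocking the event loop
+#       return await ensureCoro(digest)('/etc/hostname')
+#
+#   runAsync(main())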
diff --git a/third_party/python/taskcluster/taskcluster/aio/auth.py b/third_party/python/taskcluster/taskcluster/aio/auth.py
new file mode 100644
index 0000000000..0e4d7be87e
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/auth.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.auth import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/authevents.py b/third_party/python/taskcluster/taskcluster/aio/authevents.py
new file mode 100644
index 0000000000..8f04d57072
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/authevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.authevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/awsprovisioner.py b/third_party/python/taskcluster/taskcluster/aio/awsprovisioner.py
new file mode 100644
index 0000000000..5095053d51
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/awsprovisioner.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.awsprovisioner import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/download.py b/third_party/python/taskcluster/taskcluster/aio/download.py
new file mode 100644
index 0000000000..4d2aceee1a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/download.py
@@ -0,0 +1,191 @@
+"""
+Support for downloading objects from the object service, following best
+practices for that service.
+
+Downloaded data is written to a "writer" provided by a "writer factory". A
+writer has an async `write` method which writes the entire passed buffer to
+storage. A writer factory is an async callable which returns a fresh writer,
+ready to write the first byte of the object. When downloads are retried, the
+writer factory may be called more than once.
+
+Note that `aiofile.open` returns a value suitable for use as a writer, if async
+file IO is important to the application.
+
+This module provides several pre-defined writers and writer factories for
+common cases.
+"""
+import six
+
+if six.PY2:
+ raise ImportError("download is only supported in Python 3")
+
+import aiohttp
+import contextlib
+
+from .asyncutils import ensureCoro
+from .reader_writer import streamingCopy, BufferWriter, FileWriter
+from .retry import retry
+from . import Object
+from ..exceptions import TaskclusterArtifactError, TaskclusterFailure
+
+
+async def downloadToBuf(**kwargs):
+ """
+ Convenience method to download data to an in-memory buffer and return the
+ downloaded data. Arguments are the same as `download`, except that
+ `writerFactory` should not be supplied. Returns a tuple (buffer, contentType).
+ """
+ writer = None
+
+ async def writerFactory():
+ nonlocal writer
+ writer = BufferWriter()
+ return writer
+
+ contentType = await download(writerFactory=writerFactory, **kwargs)
+ return writer.getbuffer(), contentType
+
+
+async def downloadToFile(file, **kwargs):
+ """
+ Convenience method to download data to a file object. The file must be
+ writeable, in binary mode, seekable (`f.seek`), and truncatable
+ (`f.truncate`) to support retries. Arguments are the same as `download`,
+ except that `writerFactory` should not be supplied. Returns the content-type.
+ """
+ async def writerFactory():
+ file.seek(0)
+ file.truncate()
+ return FileWriter(file)
+
+ return await download(writerFactory=writerFactory, **kwargs)
+
+
+async def download(*, name, maxRetries=5, objectService, writerFactory):
+ """
+ Download the named object from the object service, using a writer returned
+ from `writerFactory` to write the data. The `maxRetries` parameter has
+ the same meaning as for service clients. The `objectService` parameter is
+ an instance of the Object class, configured with credentials for the
+ download. Returns the content-type.
+ """
+ async with aiohttp.ClientSession() as session:
+ downloadResp = await ensureCoro(objectService.startDownload)(name, {
+ "acceptDownloadMethods": {
+ "simple": True,
+ },
+ })
+
+ method = downloadResp["method"]
+
+ if method == "simple":
+ async def tryDownload(retryFor):
+ with _maybeRetryHttpRequest(retryFor):
+ writer = await writerFactory()
+ url = downloadResp['url']
+ return await _doSimpleDownload(url, writer, session)
+
+ return await retry(maxRetries, tryDownload)
+ else:
+ raise RuntimeError(f'Unknown download method {method}')
+
+
+async def downloadArtifactToBuf(**kwargs):
+ """
+ Convenience method to download an artifact to an in-memory buffer and return the
+ downloaded data. Arguments are the same as `downloadArtifact`, except that
+ `writerFactory` should not be supplied. Returns a tuple (buffer, contentType).
+ """
+ writer = None
+
+ async def writerFactory():
+ nonlocal writer
+ writer = BufferWriter()
+ return writer
+
+ contentType = await downloadArtifact(writerFactory=writerFactory, **kwargs)
+ return writer.getbuffer(), contentType
+
+
+async def downloadArtifactToFile(file, **kwargs):
+ """
+ Convenience method to download an artifact to a file object. The file must be
+ writeable, in binary mode, seekable (`f.seek`), and truncatable
+    (`f.truncate`) to support retries. Arguments are the same as `downloadArtifact`,
+ except that `writerFactory` should not be supplied. Returns the content-type.
+ """
+ async def writerFactory():
+ file.seek(0)
+ file.truncate()
+ return FileWriter(file)
+
+ return await downloadArtifact(writerFactory=writerFactory, **kwargs)
+
+
+async def downloadArtifact(*, taskId, name, runId=None, maxRetries=5, queueService, writerFactory):
+ """
+ Download the named artifact with the appropriate storageType, using a writer returned
+ from `writerFactory` to write the data. The `maxRetries` parameter has
+ the same meaning as for service clients. The `queueService` parameter is
+ an instance of the Queue class, configured with credentials for the
+ download. Returns the content-type.
+ """
+ if runId is None:
+ artifact = await ensureCoro(queueService.latestArtifact)(taskId, name)
+ else:
+ artifact = await ensureCoro(queueService.artifact)(taskId, runId, name)
+
+ if artifact["storageType"] == 's3' or artifact["storageType"] == 'reference':
+ async with aiohttp.ClientSession() as session:
+
+ async def tryDownload(retryFor):
+ with _maybeRetryHttpRequest(retryFor):
+ writer = await writerFactory()
+ return await _doSimpleDownload(artifact["url"], writer, session)
+
+ return await retry(maxRetries, tryDownload)
+
+ elif artifact["storageType"] == 'object':
+ objectService = Object({
+ "rootUrl": queueService.options["rootUrl"],
+ "maxRetries": maxRetries,
+ "credentials": artifact["credentials"],
+ })
+ return await download(
+ name=artifact["name"],
+ maxRetries=maxRetries,
+ objectService=objectService,
+ writerFactory=writerFactory)
+
+ elif artifact["storageType"] == 'error':
+ raise TaskclusterArtifactError(artifact["message"], artifact["reason"])
+
+ else:
+ raise TaskclusterFailure(f"Unknown storageType f{artifact['storageType']}")
+
+
+@contextlib.contextmanager
+def _maybeRetryHttpRequest(retryFor):
+ "Catch errors from an aiohttp request and retry the retriable responses."
+ try:
+ yield
+ except aiohttp.ClientResponseError as exc:
+ # treat 4xx's as fatal, and retry others
+ if 400 <= exc.status < 500:
+ raise exc
+ return retryFor(exc)
+ except aiohttp.ClientError as exc:
+ # retry for all other aiohttp errors
+ return retryFor(exc)
+ # .. anything else is considered fatal
+
+
+async def _doSimpleDownload(url, writer, session):
+ async with session.get(url) as resp:
+ contentType = resp.content_type
+ resp.raise_for_status()
+ # note that `resp.content` is a StreamReader and satisfies the
+ # requirements of a reader in this case
+ await streamingCopy(resp.content, writer)
+
+ return contentType
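+
+# Editor's sketch (not part of upstream): downloading an artifact to memory.
+# The rootUrl, taskId and artifact name are placeholders; queueService must be
+# a configured Queue instance, as described in downloadArtifact's docstring.
+#
+#   from taskcluster.aio import Queue
+#
+#   async def fetchLog(taskId):
+#       queue = Queue({'rootUrl': 'https://tc.example.com'})
+#       buf, contentType = await downloadArtifactToBuf(
+#           taskId=taskId, name='public/logs/live.log', queueService=queue)
+#       return bytes(buf).decode('utf-8')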
diff --git a/third_party/python/taskcluster/taskcluster/aio/ec2manager.py b/third_party/python/taskcluster/taskcluster/aio/ec2manager.py
new file mode 100644
index 0000000000..8c167b2972
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/ec2manager.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.ec2manager import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/github.py b/third_party/python/taskcluster/taskcluster/aio/github.py
new file mode 100644
index 0000000000..8ef5f8015e
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/github.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.github import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/githubevents.py b/third_party/python/taskcluster/taskcluster/aio/githubevents.py
new file mode 100644
index 0000000000..34bed63dc1
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/githubevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.githubevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/hooks.py b/third_party/python/taskcluster/taskcluster/aio/hooks.py
new file mode 100644
index 0000000000..e24e4d4292
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/hooks.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.hooks import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/hooksevents.py b/third_party/python/taskcluster/taskcluster/aio/hooksevents.py
new file mode 100644
index 0000000000..7177399bc8
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/hooksevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.hooksevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/index.py b/third_party/python/taskcluster/taskcluster/aio/index.py
new file mode 100644
index 0000000000..5de09cc9bf
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/index.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.index import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/login.py b/third_party/python/taskcluster/taskcluster/aio/login.py
new file mode 100644
index 0000000000..f354e1490a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/login.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.login import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/notify.py b/third_party/python/taskcluster/taskcluster/aio/notify.py
new file mode 100644
index 0000000000..1fe99a6851
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/notify.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.notify import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/notifyevents.py b/third_party/python/taskcluster/taskcluster/aio/notifyevents.py
new file mode 100644
index 0000000000..583329d364
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/notifyevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.notifyevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/purgecache.py b/third_party/python/taskcluster/taskcluster/aio/purgecache.py
new file mode 100644
index 0000000000..42281a73bf
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/purgecache.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.purgecache import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/queue.py b/third_party/python/taskcluster/taskcluster/aio/queue.py
new file mode 100644
index 0000000000..58484ad5ad
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/queue.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.queue import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/queueevents.py b/third_party/python/taskcluster/taskcluster/aio/queueevents.py
new file mode 100644
index 0000000000..e4dec31c92
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/queueevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.queueevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/reader_writer.py b/third_party/python/taskcluster/taskcluster/aio/reader_writer.py
new file mode 100644
index 0000000000..2d9880b3a0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/reader_writer.py
@@ -0,0 +1,81 @@
+"""
+Utilities supporting the "reader" and "writer" definitions used in uploads and downloads.
+"""
+import asyncio
+import io
+
+
+class BufferWriter:
+ """A writer that writes to an in-memory buffer"""
+ def __init__(self):
+ self.buf = io.BytesIO()
+
+ async def write(self, chunk):
+ self.buf.write(chunk)
+
+ def getbuffer(self):
+ """Get the content of the in-memory buffer"""
+ return self.buf.getbuffer()
+
+
+class BufferReader:
+ """A reader that reads from an in-memory buffer"""
+ def __init__(self, data):
+ self.buf = io.BytesIO(data)
+
+ async def read(self, max_size):
+ return self.buf.read(max_size)
+
+
+class FileWriter:
+ """A writer that writes to a (sync) file. The file should be opened in binary mode
+ and empty."""
+ def __init__(self, file):
+ self.file = file
+
+ async def write(self, chunk):
+ self.file.write(chunk)
+
+
+class FileReader:
+ """A reader that reads from a (sync) file. The file should be opened in binary mode,
+ and positioned at its beginning."""
+ def __init__(self, file):
+ self.file = file
+
+ async def read(self, max_size):
+ return self.file.read(max_size)
+
+
+async def streamingCopy(reader, writer):
+ "Copy data from a reader to a writer, as those are defined in upload.py and download.py"
+ # we will read and write concurrently, but with limited buffering -- just enough
+ # that read and write operations are not forced to alternate
+ chunk_size = 64 * 1024
+ q = asyncio.Queue(maxsize=1)
+
+ async def read_loop():
+ while True:
+ chunk = await reader.read(chunk_size)
+ await q.put(chunk)
+ if not chunk:
+ break
+
+ async def write_loop():
+ while True:
+ chunk = await q.get()
+ if not chunk:
+ q.task_done()
+ break
+ await writer.write(chunk)
+ q.task_done()
+
+ read_task = asyncio.ensure_future(read_loop())
+ write_task = asyncio.ensure_future(write_loop())
+
+ try:
+ await asyncio.gather(read_task, write_task)
+ finally:
+        # cancel any still-running tasks, e.g. in case of an exception
+ read_task.cancel()
+ write_task.cancel()
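+
+# Editor's sketch (not part of upstream): streamingCopy with the in-memory
+# reader and writer defined above; the bounded queue lets reads and writes
+# overlap without unbounded buffering.
+#
+#   async def roundtrip(data):
+#       reader = BufferReader(data)
+#       writer = BufferWriter()
+#       await streamingCopy(reader, writer)
+#       assert writer.getbuffer() == data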
diff --git a/third_party/python/taskcluster/taskcluster/aio/retry.py b/third_party/python/taskcluster/taskcluster/aio/retry.py
new file mode 100644
index 0000000000..d4f743f2d5
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/retry.py
@@ -0,0 +1,41 @@
+import logging
+import asyncio
+
+from .. import utils
+
+log = logging.getLogger(__name__)
+
+
+async def retry(maxRetries, tryFn):
+ """
+    Retry async `tryFn` based on `maxRetries`. Each call to `tryFn` is passed a
+    callable which should be called with the exception object when an exception can be retried.
+ Exceptions raised from `tryFn` are treated as fatal.
+ """
+
+    retry = -1  # incremented at the top of the loop, so attempt 1 is retry 0
+ while True:
+ retry += 1
+
+ # if this isn't the first retry then we sleep
+ if retry > 0:
+            snooze = utils.calculateSleepTime(retry)
+            log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
+            await asyncio.sleep(snooze)
+
+ retriableException = None
+
+ def retryFor(exc):
+ nonlocal retriableException
+ retriableException = exc
+
+ res = await tryFn(retryFor)
+
+ if not retriableException:
+ return res
+
+ if retry < maxRetries:
+ log.warning(f'Retrying because of: {retriableException}')
+ continue
+
+ raise retriableException
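+
+# Editor's sketch (not part of upstream): the retryFor protocol in use. Here
+# doRequest is a hypothetical coroutine; connection errors are reported as
+# retriable, anything else propagates and is treated as fatal.
+#
+#   async def fetch():
+#       async def tryFetch(retryFor):
+#           try:
+#               return await doRequest()
+#           except ConnectionError as exc:
+#               return retryFor(exc)
+#       return await retry(5, tryFetch)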
diff --git a/third_party/python/taskcluster/taskcluster/aio/secrets.py b/third_party/python/taskcluster/taskcluster/aio/secrets.py
new file mode 100644
index 0000000000..b48680a29a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/secrets.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.secrets import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/upload.py b/third_party/python/taskcluster/taskcluster/aio/upload.py
new file mode 100644
index 0000000000..f072afaec9
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/upload.py
@@ -0,0 +1,177 @@
+"""
+Support for uploading objects to the object service, following best
+practices for that service.
+
+Data for upload is read from a "reader" provided by a "reader factory". A
+reader has an async `read(max_size)` method which reads and returns a chunk of
+1 .. `max_size` bytes, or an empty bytes object at EOF. A reader factory is an async
+callable which returns a fresh reader, ready to read the first byte of the
+object. When uploads are retried, the reader factory may be called more than
+once.
+
+Note that `aiofile.open` returns a value suitable for use as a reader, if async
+file IO is important to the application.
+
+This module provides several pre-defined readers and reader factories for
+common cases.
+"""
+import six
+
+if six.PY2:
+ raise ImportError("upload is only supported in Python 3")
+
+import base64
+import hashlib
+
+import aiohttp
+
+import taskcluster
+from .asyncutils import ensureCoro
+from .reader_writer import streamingCopy, BufferReader, BufferWriter, FileReader
+from .retry import retry
+
+DATA_INLINE_MAX_SIZE = 8192
+
+
+async def uploadFromBuf(*, data, **kwargs):
+ """
+ Convenience method to upload data from an in-memory buffer. Arguments are the same
+ as `upload` except that `readerFactory` should not be supplied.
+ """
+ async def readerFactory():
+ return BufferReader(data)
+
+ await upload(**kwargs, readerFactory=readerFactory)
+
+
+async def uploadFromFile(*, file, **kwargs):
+ """
+    Convenience method to upload data from a file. The file should be open
+ for reading, in binary mode, and be seekable (`f.seek`). Remaining
+ arguments are the same as `upload` except that `readerFactory` should not
+ be supplied.
+ """
+ async def readerFactory():
+ file.seek(0)
+ return FileReader(file)
+
+ await upload(**kwargs, readerFactory=readerFactory)
+
+
+async def upload(*, projectId, name, contentType, contentLength, expires,
+ readerFactory, maxRetries=5, uploadId=None, objectService):
+ """
+ Upload the given data to the object service with the given metadata.
+ The `maxRetries` parameter has the same meaning as for service clients.
+ The `objectService` parameter is an instance of the Object class,
+ configured with credentials for the upload.
+ """
+ # wrap the readerFactory with one that will also hash the data
+ hashingReader = None
+
+ async def hashingReaderFactory():
+ nonlocal hashingReader
+ hashingReader = HashingReader(await readerFactory())
+ return hashingReader
+
+ async with aiohttp.ClientSession() as session:
+ if not uploadId:
+ uploadId = taskcluster.slugid.nice()
+ proposedUploadMethods = {}
+
+ if contentLength < DATA_INLINE_MAX_SIZE:
+ reader = await hashingReaderFactory()
+ writer = BufferWriter()
+ await streamingCopy(reader, writer)
+ encoded = base64.b64encode(writer.getbuffer())
+ proposedUploadMethods['dataInline'] = {
+ "contentType": contentType,
+ "objectData": encoded,
+ }
+
+ proposedUploadMethods['putUrl'] = {
+ "contentType": contentType,
+ "contentLength": contentLength,
+ }
+
+ uploadResp = await ensureCoro(objectService.createUpload)(name, {
+ "expires": expires,
+ "projectId": projectId,
+ "uploadId": uploadId,
+ "proposedUploadMethods": proposedUploadMethods,
+ })
+
+ async def tryUpload(retryFor):
+ try:
+ uploadMethod = uploadResp["uploadMethod"]
+ if 'dataInline' in uploadMethod:
+ # data is already uploaded -- nothing to do
+ pass
+ elif 'putUrl' in uploadMethod:
+ reader = await hashingReaderFactory()
+ await _putUrlUpload(uploadMethod['putUrl'], reader, session)
+ else:
+ raise RuntimeError("Could not negotiate an upload method")
+ except aiohttp.ClientResponseError as exc:
+ # treat 4xx's as fatal, and retry others
+ if 400 <= exc.status < 500:
+ raise exc
+ return retryFor(exc)
+ except aiohttp.ClientError as exc:
+ # retry for all other aiohttp errors
+ return retryFor(exc)
+ # .. anything else is considered fatal
+
+ await retry(maxRetries, tryUpload)
+
+ hashes = hashingReader.hashes(contentLength)
+
+ await ensureCoro(objectService.finishUpload)(name, {
+ "projectId": projectId,
+ "uploadId": uploadId,
+ "hashes": hashes,
+ })
+
+
+async def _putUrlUpload(method, reader, session):
+ chunk_size = 64 * 1024
+
+ async def reader_gen():
+ while True:
+ chunk = await reader.read(chunk_size)
+ if not chunk:
+ break
+ yield chunk
+
+ resp = await session.put(method['url'], headers=method['headers'], data=reader_gen())
+ resp.raise_for_status()
+
+
+class HashingReader:
+ """A Reader implementation that hashes contents as they are read."""
+
+ def __init__(self, inner):
+ self.inner = inner
+ self.sha256 = hashlib.sha256()
+ self.sha512 = hashlib.sha512()
+ self.bytes = 0
+
+ async def read(self, max_size):
+ chunk = await self.inner.read(max_size)
+ self.update(chunk)
+ return chunk
+
+ def update(self, chunk):
+ self.sha256.update(chunk)
+ self.sha512.update(chunk)
+ self.bytes += len(chunk)
+
+ def hashes(self, contentLength):
+ """Return the hashes in a format suitable for finishUpload, first checking that all the bytes
+ in the content were hashed."""
+ if contentLength != self.bytes:
+ raise RuntimeError(f"hashed {self.bytes} bytes but content length is {contentLength}")
+ return {
+ "sha256": self.sha256.hexdigest(),
+ "sha512": self.sha512.hexdigest(),
+ }
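+
+# Editor's sketch (not part of upstream): uploading a small in-memory object.
+# The rootUrl, projectId and object name are placeholders; objectService must
+# be an Object client configured for the upload, as described above.
+#
+#   from taskcluster.aio import Object
+#
+#   async def publish(data):
+#       objectService = Object({'rootUrl': 'https://tc.example.com'})
+#       await uploadFromBuf(
+#           projectId='my-project', name='some/object',
+#           contentType='text/plain', contentLength=len(data),
+#           expires=taskcluster.fromNow('1 day'),
+#           data=data, objectService=objectService)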
diff --git a/third_party/python/taskcluster/taskcluster/aio/workermanager.py b/third_party/python/taskcluster/taskcluster/aio/workermanager.py
new file mode 100644
index 0000000000..f7d981cf94
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/workermanager.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.workermanager import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/workermanagerevents.py b/third_party/python/taskcluster/taskcluster/aio/workermanagerevents.py
new file mode 100644
index 0000000000..61f355ba6d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/workermanagerevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.workermanagerevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/auth.py b/third_party/python/taskcluster/taskcluster/auth.py
new file mode 100644
index 0000000000..74cf843de9
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/auth.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.auth import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/authevents.py b/third_party/python/taskcluster/taskcluster/authevents.py
new file mode 100644
index 0000000000..1d4af5ef6d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/authevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.authevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/awsprovisioner.py b/third_party/python/taskcluster/taskcluster/awsprovisioner.py
new file mode 100644
index 0000000000..e868f87244
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/awsprovisioner.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.awsprovisioner import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/client.py b/third_party/python/taskcluster/taskcluster/client.py
new file mode 100644
index 0000000000..516f957728
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/client.py
@@ -0,0 +1,711 @@
+"""This module is used to interact with taskcluster rest apis"""
+
+from __future__ import absolute_import, division, print_function
+
+import json
+import logging
+import copy
+import hashlib
+import hmac
+import datetime
+import calendar
+import requests
+import time
+import six
+import warnings
+from six.moves import urllib
+
+import mohawk
+import mohawk.bewit
+
+import taskcluster.exceptions as exceptions
+import taskcluster.utils as utils
+import taskcluster_urls as liburls
+
+log = logging.getLogger(__name__)
+
+
+# Default configuration
+_defaultConfig = config = {
+ 'credentials': {
+ 'clientId': None,
+ 'accessToken': None,
+ 'certificate': None,
+ },
+ 'rootUrl': None,
+ 'maxRetries': 5,
+ 'signedUrlExpiration': 15 * 60,
+}
+
+
+def createSession(*args, **kwargs):
+ """ Create a new requests session. This passes through all positional and
+ keyword arguments to the requests.Session() constructor
+ """
+ return requests.Session(*args, **kwargs)
+
+
+class BaseClient(object):
+ """ Base Class for API Client Classes. Each individual Client class
+ needs to set up its own methods for REST endpoints and Topic Exchange
+ routing key patterns. The _makeApiCall() and _topicExchange() methods
+ help with this.
+ """
+
+ def __init__(self, options=None, session=None):
+ if options and options.get('baseUrl'):
+ raise exceptions.TaskclusterFailure('baseUrl option is no longer allowed')
+ o = copy.deepcopy(self.classOptions)
+ o.update(_defaultConfig)
+ if options:
+ o.update(options)
+ if not o.get('rootUrl'):
+ raise exceptions.TaskclusterFailure('rootUrl option is required')
+
+ credentials = o.get('credentials')
+ if credentials:
+ for x in ('accessToken', 'clientId', 'certificate'):
+ value = credentials.get(x)
+ if value and not isinstance(value, six.binary_type):
+ try:
+ credentials[x] = credentials[x].encode('ascii')
+ except Exception:
+ s = '%s (%s) must be unicode encodable' % (x, credentials[x])
+ raise exceptions.TaskclusterAuthFailure(s)
+
+ self.options = o
+ if 'credentials' in o:
+ log.debug('credentials key scrubbed from logging output')
+ log.debug(dict((k, v) for k, v in o.items() if k != 'credentials'))
+
+ if session:
+ self.session = session
+ else:
+ self.session = self._createSession()
+
+ def _createSession(self):
+ """ Create a requests session.
+
+ Helper method which can be overridden by child classes.
+ """
+ return createSession()
+
+ def makeHawkExt(self):
+ """ Make an 'ext' for Hawk authentication """
+ o = self.options
+ c = o.get('credentials', {})
+ if c.get('clientId') and c.get('accessToken'):
+ ext = {}
+ cert = c.get('certificate')
+ if cert:
+ if six.PY3 and isinstance(cert, six.binary_type):
+ cert = cert.decode()
+ if isinstance(cert, six.string_types):
+ cert = json.loads(cert)
+ ext['certificate'] = cert
+
+ if 'authorizedScopes' in o:
+ ext['authorizedScopes'] = o['authorizedScopes']
+
+ # .encode('base64') inserts a newline, which hawk doesn't
+ # like but doesn't strip itself
+ return utils.makeB64UrlSafe(utils.encodeStringForB64Header(utils.dumpJson(ext)).strip())
+ else:
+ return {}
+
+ def _makeTopicExchange(self, entry, *args, **kwargs):
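+        # Editor's note (sketch, not upstream): builds the exchange name plus a
+        # routing-key pattern. E.g. for a routingKey reference of
+        # [{'name': 'primary'}, {'name': 'taskId'}], calling with taskId='abc'
+        # yields routingKeyPattern '*.abc': unspecified single-word keys become
+        # '*' and multipleWords keys become '#'.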
+ if len(args) == 0 and not kwargs:
+ routingKeyPattern = {}
+ elif len(args) >= 1:
+ if kwargs or len(args) != 1:
+ errStr = 'Pass either a string, single dictionary or only kwargs'
+ raise exceptions.TaskclusterTopicExchangeFailure(errStr)
+ routingKeyPattern = args[0]
+ else:
+ routingKeyPattern = kwargs
+
+ data = {
+ 'exchange': '%s/%s' % (self.options['exchangePrefix'].rstrip('/'),
+ entry['exchange'].lstrip('/'))
+ }
+
+ # If we are passed in a string, we can short-circuit this function
+ if isinstance(routingKeyPattern, six.string_types):
+ log.debug('Passing through string for topic exchange key')
+ data['routingKeyPattern'] = routingKeyPattern
+ return data
+
+        if not isinstance(routingKeyPattern, dict):
+ errStr = 'routingKeyPattern must eventually be a dict'
+ raise exceptions.TaskclusterTopicExchangeFailure(errStr)
+
+ if not routingKeyPattern:
+ routingKeyPattern = {}
+
+        # There is no canonical meaning for the maxSize and required
+        # reference entries in the JS client, so we don't try to define
+        # them here, even though they sound pretty obvious
+
+ routingKey = []
+ for key in entry['routingKey']:
+ if 'constant' in key:
+ value = key['constant']
+ elif key['name'] in routingKeyPattern:
+ log.debug('Found %s in routing key params', key['name'])
+ value = str(routingKeyPattern[key['name']])
+ if not key.get('multipleWords') and '.' in value:
+ raise exceptions.TaskclusterTopicExchangeFailure(
+ 'Cannot have periods in single word keys')
+ else:
+ value = '#' if key.get('multipleWords') else '*'
+ log.debug('Did not find %s in input params, using %s', key['name'], value)
+
+ routingKey.append(value)
+
+ data['routingKeyPattern'] = '.'.join([str(x) for x in routingKey])
+ return data
+
+ def buildUrl(self, methodName, *args, **kwargs):
+ entry = self.funcinfo.get(methodName)
+ if not entry:
+ raise exceptions.TaskclusterFailure(
+ 'Requested method "%s" not found in API Reference' % methodName)
+ routeParams, _, query, _, _ = self._processArgs(entry, *args, **kwargs)
+ route = self._subArgsInRoute(entry, routeParams)
+ if query:
+ route += '?' + urllib.parse.urlencode(query)
+ return liburls.api(self.options['rootUrl'], self.serviceName, self.apiVersion, route)
+
+ def buildSignedUrl(self, methodName, *args, **kwargs):
+ """ Build a signed URL. This URL contains the credentials needed to access
+ a resource."""
+
+ if 'expiration' in kwargs:
+ expiration = kwargs['expiration']
+ del kwargs['expiration']
+ else:
+ expiration = self.options['signedUrlExpiration']
+
+ expiration = int(time.time() + expiration) # Mainly so that we throw if it's not a number
+
+ requestUrl = self.buildUrl(methodName, *args, **kwargs)
+
+ if not self._hasCredentials():
+ raise exceptions.TaskclusterAuthFailure('Invalid Hawk Credentials')
+
+ clientId = utils.toStr(self.options['credentials']['clientId'])
+ accessToken = utils.toStr(self.options['credentials']['accessToken'])
+
+ def genBewit():
+ # We need to fix the output of get_bewit. It returns a url-safe base64
+ # encoded string, which contains a list of tokens separated by '\'.
+ # The first one is the clientId, the second is an int, the third is
+ # url-safe base64 encoded MAC, the fourth is the ext param.
+ # The problem is that the nested url-safe base64 encoded MAC must be
+ # base64 (i.e. not url safe) or server-side will complain.
+
+ # id + '\\' + exp + '\\' + mac + '\\' + options.ext;
+ resource = mohawk.base.Resource(
+ credentials={
+ 'id': clientId,
+ 'key': accessToken,
+ 'algorithm': 'sha256',
+ },
+ method='GET',
+ ext=utils.toStr(self.makeHawkExt()),
+ url=requestUrl,
+ timestamp=expiration,
+ nonce='',
+ # content='',
+ # content_type='',
+ )
+ bewit = mohawk.bewit.get_bewit(resource)
+ return bewit.rstrip('=')
+
+ bewit = genBewit()
+
+ if not bewit:
+ raise exceptions.TaskclusterFailure('Did not receive a bewit')
+
+ u = urllib.parse.urlparse(requestUrl)
+
+ qs = u.query
+ if qs:
+ qs += '&'
+ qs += 'bewit=%s' % bewit
+
+ return urllib.parse.urlunparse((
+ u.scheme,
+ u.netloc,
+ u.path,
+ u.params,
+ qs,
+ u.fragment,
+ ))
+
+ def _constructUrl(self, route):
+ """Construct a URL for the given route on this service, based on the
+ rootUrl"""
+ return liburls.api(
+ self.options['rootUrl'],
+ self.serviceName,
+ self.apiVersion,
+ route.rstrip('/'))
+
+ def _makeApiCall(self, entry, *args, **kwargs):
+ """ This function is used to dispatch calls to other functions
+ for a given API Reference entry"""
+
+ x = self._processArgs(entry, *args, **kwargs)
+ routeParams, payload, query, paginationHandler, paginationLimit = x
+ route = self._subArgsInRoute(entry, routeParams)
+
+ if paginationLimit and 'limit' in entry.get('query', []):
+ query['limit'] = paginationLimit
+
+ if query:
+ _route = route + '?' + urllib.parse.urlencode(query)
+ else:
+ _route = route
+ response = self._makeHttpRequest(entry['method'], _route, payload)
+
+ if paginationHandler:
+ paginationHandler(response)
+ while response.get('continuationToken'):
+ query['continuationToken'] = response['continuationToken']
+ _route = route + '?' + urllib.parse.urlencode(query)
+ response = self._makeHttpRequest(entry['method'], _route, payload)
+ paginationHandler(response)
+ else:
+ return response
+
+ def _processArgs(self, entry, *_args, **_kwargs):
+ """ Given an entry, positional and keyword arguments, figure out what
+ the query-string options, payload and api arguments are.
+ """
+
+ # We need the args to be a list so we can mutate them
+ args = list(_args)
+ kwargs = copy.deepcopy(_kwargs)
+
+ reqArgs = entry['args']
+ routeParams = {}
+
+ query = {}
+ payload = None
+ kwApiArgs = {}
+
+ paginationHandler = None
+ paginationLimit = None
+
+ # There are three formats for calling methods:
+        # 1. method(v1, v2, payload)
+ # 2. method(payload, k1=v1, k2=v2)
+ # 3. method(payload=payload, query=query, params={k1: v1, k2: v2})
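+        # Editor's note (sketch, not upstream): for a hypothetical
+        # createTask(taskId, payload) entry, these correspond to:
+        #   queue.createTask('abc123', payload)
+        #   queue.createTask(payload, taskId='abc123')
+        #   queue.createTask(payload=payload, params={'taskId': 'abc123'})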
+ if len(kwargs) == 0:
+ if 'input' in entry and len(args) == len(reqArgs) + 1:
+ payload = args.pop()
+ if len(args) != len(reqArgs):
+ log.debug(args)
+ log.debug(reqArgs)
+ raise exceptions.TaskclusterFailure('Incorrect number of positional arguments')
+ log.debug('Using method(v1, v2, payload) calling convention')
+ else:
+            # We consider kwargs that are api route parameters 'flat' because
+            # they're top-level keys. We special-case calls which have only
+            # api-arg kwargs (and possibly a payload value) and handle them
+            # directly.
+ isFlatKwargs = True
+ if len(kwargs) == len(reqArgs):
+ for arg in reqArgs:
+ if not kwargs.get(arg, False):
+ isFlatKwargs = False
+ break
+ if 'input' in entry and len(args) != 1:
+ isFlatKwargs = False
+ if 'input' not in entry and len(args) != 0:
+ isFlatKwargs = False
+ else:
+ pass # We're using payload=, query= and param=
+ else:
+ isFlatKwargs = False
+
+ # Now we're going to handle the two types of kwargs. The first is
+ # 'flat' ones, which are where the api params
+ if isFlatKwargs:
+ if 'input' in entry:
+ payload = args.pop()
+ kwApiArgs = kwargs
+ log.debug('Using method(payload, k1=v1, k2=v2) calling convention')
+ warnings.warn(
+ "The method(payload, k1=v1, k2=v2) calling convention will soon be deprecated",
+ PendingDeprecationWarning
+ )
+ else:
+ kwApiArgs = kwargs.get('params', {})
+ payload = kwargs.get('payload', None)
+ query = kwargs.get('query', {})
+ paginationHandler = kwargs.get('paginationHandler', None)
+ paginationLimit = kwargs.get('paginationLimit', None)
+ log.debug('Using method(payload=payload, query=query, params={k1: v1, k2: v2}) calling convention')
+
+        if 'input' in entry and payload is None:
+ raise exceptions.TaskclusterFailure('Payload is required')
+
+ # These all need to be rendered down to a string, let's just check that
+ # they are up front and fail fast
+ for arg in args:
+ if not isinstance(arg, six.string_types) and not isinstance(arg, int):
+ raise exceptions.TaskclusterFailure(
+ 'Positional arg "%s" to %s is not a string or int' % (arg, entry['name']))
+
+ for name, arg in six.iteritems(kwApiArgs):
+ if not isinstance(arg, six.string_types) and not isinstance(arg, int):
+ raise exceptions.TaskclusterFailure(
+ 'KW arg "%s: %s" to %s is not a string or int' % (name, arg, entry['name']))
+
+ if len(args) > 0 and len(kwApiArgs) > 0:
+ raise exceptions.TaskclusterFailure('Specify either positional or key word arguments')
+
+        # We know for sure that if we don't give enough arguments, the call
+        # should fail. We don't yet know whether we should fail because of too
+        # many arguments, since we might be overwriting positional args with kw ones
+ if len(reqArgs) > len(args) + len(kwApiArgs):
+ raise exceptions.TaskclusterFailure(
+ '%s takes %d args, only %d were given' % (
+ entry['name'], len(reqArgs), len(args) + len(kwApiArgs)))
+
+ # We also need to error out when we have more positional args than required
+ # because we'll need to go through the lists of provided and required args
+ # at the same time. Not disqualifying early means we'll get IndexErrors if
+ # there are more positional arguments than required
+ if len(args) > len(reqArgs):
+            raise exceptions.TaskclusterFailure(
+                '%s called with too many positional args' % entry['name'])
+
+        for i, arg in enumerate(args):
+            log.debug('Found a positional argument: %s', arg)
+            routeParams[reqArgs[i]] = arg
+
+ log.debug('After processing positional arguments, we have: %s', routeParams)
+
+ routeParams.update(kwApiArgs)
+
+ log.debug('After keyword arguments, we have: %s', routeParams)
+
+ if len(reqArgs) != len(routeParams):
+ errMsg = '%s takes %s args, %s given' % (
+ entry['name'],
+ ','.join(reqArgs),
+ routeParams.keys())
+ log.error(errMsg)
+ raise exceptions.TaskclusterFailure(errMsg)
+
+ for reqArg in reqArgs:
+ if reqArg not in routeParams:
+ errMsg = '%s requires a "%s" argument which was not provided' % (
+ entry['name'], reqArg)
+ log.error(errMsg)
+ raise exceptions.TaskclusterFailure(errMsg)
+
+ return routeParams, payload, query, paginationHandler, paginationLimit
+
+ def _subArgsInRoute(self, entry, args):
+ """ Given a route like "/task/<taskId>/artifacts" and a mapping like
+ {"taskId": "12345"}, return a string like "/task/12345/artifacts"
+ """
+
+ route = entry['route']
+
+ for arg, val in six.iteritems(args):
+ toReplace = "<%s>" % arg
+ if toReplace not in route:
+ raise exceptions.TaskclusterFailure(
+ 'Arg %s not found in route for %s' % (arg, entry['name']))
+ val = urllib.parse.quote(str(val).encode("utf-8"), '')
+ route = route.replace("<%s>" % arg, val)
+
+ return route.lstrip('/')
+
+ def _hasCredentials(self):
+ """ Return True, if credentials is given """
+ cred = self.options.get('credentials')
+ return (
+ cred and
+ 'clientId' in cred and
+ 'accessToken' in cred and
+ cred['clientId'] and
+ cred['accessToken']
+ )
+
+ def _makeHttpRequest(self, method, route, payload):
+ """ Make an HTTP Request for the API endpoint. This method wraps
+ the logic about doing failure retry and passes off the actual work
+ of doing an HTTP request to another method."""
+
+ url = self._constructUrl(route)
+ log.debug('Full URL used is: %s', url)
+
+ hawkExt = self.makeHawkExt()
+
+ # Serialize payload if given
+ if payload is not None:
+ payload = utils.dumpJson(payload)
+
+ # Do a loop of retries
+        retry = -1  # incremented at the top of the loop, so attempt 1 is retry 0
+ retries = self.options['maxRetries']
+ while retry < retries:
+ retry += 1
+ # if this isn't the first retry then we sleep
+ if retry > 0:
+ time.sleep(utils.calculateSleepTime(retry))
+ # Construct header
+ if self._hasCredentials():
+ sender = mohawk.Sender(
+ credentials={
+ 'id': self.options['credentials']['clientId'],
+ 'key': self.options['credentials']['accessToken'],
+ 'algorithm': 'sha256',
+ },
+ ext=hawkExt if hawkExt else {},
+ url=url,
+ content=payload if payload else '',
+ content_type='application/json' if payload else '',
+ method=method,
+ )
+
+ headers = {'Authorization': sender.request_header}
+ else:
+ log.debug('Not using hawk!')
+ headers = {}
+ if payload:
+ # Set header for JSON if payload is given, note that we serialize
+ # outside this loop.
+ headers['Content-Type'] = 'application/json'
+
+ log.debug('Making attempt %d', retry)
+ try:
+ response = utils.makeSingleHttpRequest(method, url, payload, headers)
+ except requests.exceptions.RequestException as rerr:
+ if retry < retries:
+ log.warning('Retrying because of: %s', rerr)
+ continue
+ # raise a connection exception
+ raise exceptions.TaskclusterConnectionError(
+ "Failed to establish connection",
+ superExc=rerr
+ )
+
+ # Handle non 2xx status code and retry if possible
+ status = response.status_code
+ if status == 204:
+ return None
+
+ # Catch retryable errors and go to the beginning of the loop
+ # to do the retry
+ if 500 <= status < 600 and retry < retries:
+ log.warning('Retrying because of a %s status code', status)
+ continue
+
+ # Throw errors for non-retryable errors
+ if status < 200 or status >= 300:
+ data = {}
+ try:
+ data = response.json()
+ except Exception:
+ pass # Ignore JSON errors in error messages
+ # Find error message
+ message = "Unknown Server Error"
+ if isinstance(data, dict):
+ message = data.get('message')
+ else:
+ if status == 401:
+ message = "Authentication Error"
+ elif status == 500:
+ message = "Internal Server Error"
+ # Raise TaskclusterAuthFailure if this is an auth issue
+ if status == 401:
+ raise exceptions.TaskclusterAuthFailure(
+ message,
+ status_code=status,
+ body=data,
+ superExc=None
+ )
+ # Raise TaskclusterRestFailure for all other issues
+ raise exceptions.TaskclusterRestFailure(
+ message,
+ status_code=status,
+ body=data,
+ superExc=None
+ )
+
+ # Try to load JSON
+ try:
+ return response.json()
+ except ValueError:
+ return {"response": response}
+
+ # This code-path should be unreachable
+ assert False, "Error from last retry should have been raised!"
+
+
+def createApiClient(name, api):
+ api = api['reference']
+
+ attributes = dict(
+ name=name,
+ __doc__=api.get('description'),
+ classOptions={},
+ funcinfo={},
+ )
+
+ # apply a default for apiVersion; this can be removed when all services
+ # have apiVersion
+ if 'apiVersion' not in api:
+ api['apiVersion'] = 'v1'
+
+ copiedOptions = ('exchangePrefix',)
+ for opt in copiedOptions:
+ if opt in api:
+ attributes['classOptions'][opt] = api[opt]
+
+ copiedProperties = ('serviceName', 'apiVersion')
+ for opt in copiedProperties:
+ if opt in api:
+ attributes[opt] = api[opt]
+
+ for entry in api['entries']:
+ if entry['type'] == 'function':
+ def addApiCall(e):
+ def apiCall(self, *args, **kwargs):
+ return self._makeApiCall(e, *args, **kwargs)
+ return apiCall
+ f = addApiCall(entry)
+
+ docStr = "Call the %s api's %s method. " % (name, entry['name'])
+
+ if entry['args'] and len(entry['args']) > 0:
+ docStr += "This method takes:\n\n"
+ docStr += '\n'.join(['- ``%s``' % x for x in entry['args']])
+ docStr += '\n\n'
+ else:
+ docStr += "This method takes no arguments. "
+
+ if 'input' in entry:
+ docStr += "This method takes input ``%s``. " % entry['input']
+
+ if 'output' in entry:
+ docStr += "This method gives output ``%s``" % entry['output']
+
+ docStr += '\n\nThis method does a ``%s`` to ``%s``.' % (
+ entry['method'].upper(), entry['route'])
+
+ f.__doc__ = docStr
+ attributes['funcinfo'][entry['name']] = entry
+
+ elif entry['type'] == 'topic-exchange':
+ def addTopicExchange(e):
+ def topicExchange(self, *args, **kwargs):
+ return self._makeTopicExchange(e, *args, **kwargs)
+ return topicExchange
+
+ f = addTopicExchange(entry)
+
+ docStr = 'Generate a routing key pattern for the %s exchange. ' % entry['exchange']
+ docStr += 'This method takes a given routing key as a string or a '
+ docStr += 'dictionary. For each given dictionary key, the corresponding '
+ docStr += 'routing key token takes its value. For routing key tokens '
+ docStr += 'which are not specified by the dictionary, the * or # character '
+ docStr += 'is used depending on whether or not the key allows multiple words.\n\n'
+ docStr += 'This exchange takes the following keys:\n\n'
+ docStr += '\n'.join(['- ``%s``' % x['name'] for x in entry['routingKey']])
+
+ f.__doc__ = docStr
+
+ # Add whichever function we created
+ f.__name__ = str(entry['name'])
+ attributes[entry['name']] = f
+
+ return type(utils.toStr(name), (BaseClient,), attributes)
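+
+# Illustrative sketch (the reference shape below is a minimal assumption,
+# for demonstration only): createApiClient builds a BaseClient subclass at
+# runtime from an API reference document.
+#
+#   Ping = createApiClient('Ping', {'reference': {
+#       'serviceName': 'ping', 'apiVersion': 'v1',
+#       'description': 'demo service', 'entries': [],
+#   }})
+#   client = Ping({'rootUrl': 'https://tc.example.com'})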
+
+
+def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
+ """ Create a set of temporary credentials
+
+ Callers should not apply any clock skew; clock drift is accounted for by
+ auth service.
+
+ clientId: the issuing clientId
+ accessToken: the issuer's accessToken
+ start: start time of credentials (datetime.datetime)
+ expiry: expiration time of credentials, (datetime.datetime)
+ scopes: list of scopes granted
+ name: credential name (optional)
+
+ Returns a dictionary in the form:
+ { 'clientId': str, 'accessToken': str, 'certificate': str}
+ """
+
+ for scope in scopes:
+ if not isinstance(scope, six.string_types):
+ raise exceptions.TaskclusterFailure('Scope must be string')
+
+ # Credentials can only be valid for 31 days. I hope that
+ # this is validated on the server somehow...
+
+ if expiry - start > datetime.timedelta(days=31):
+ raise exceptions.TaskclusterFailure('Only 31 days allowed')
+
+ # We multiply times by 1000 because the auth service is JS and as a result
+ # uses milliseconds instead of seconds
+ cert = dict(
+ version=1,
+ scopes=scopes,
+ start=calendar.timegm(start.utctimetuple()) * 1000,
+ expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
+ seed=utils.slugId().encode('ascii') + utils.slugId().encode('ascii'),
+ )
+
+ # if this is a named temporary credential, include the issuer in the certificate
+ if name:
+ cert['issuer'] = utils.toStr(clientId)
+
+ sig = ['version:' + utils.toStr(cert['version'])]
+ if name:
+ sig.extend([
+ 'clientId:' + utils.toStr(name),
+ 'issuer:' + utils.toStr(clientId),
+ ])
+ sig.extend([
+ 'seed:' + utils.toStr(cert['seed']),
+ 'start:' + utils.toStr(cert['start']),
+ 'expiry:' + utils.toStr(cert['expiry']),
+ 'scopes:'
+ ] + scopes)
+ sigStr = '\n'.join(sig).encode()
+
+ if isinstance(accessToken, six.text_type):
+ accessToken = accessToken.encode()
+ sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()
+
+ cert['signature'] = utils.encodeStringForB64Header(sig)
+
+ newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
+ newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')
+
+ return {
+ 'clientId': name or clientId,
+ 'accessToken': newToken,
+ 'certificate': utils.dumpJson(cert),
+ }
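+
+# Illustrative usage sketch (placeholder values, not real credentials):
+#
+#   import datetime
+#   now = datetime.datetime.utcnow()
+#   temp = createTemporaryCredentials(
+#       clientId='issuer-client-id',
+#       accessToken='<issuer access token>',
+#       start=now,
+#       expiry=now + datetime.timedelta(hours=1),
+#       scopes=['queue:create-task:proj/*'],
+#   )
+#   # temp has the keys 'clientId', 'accessToken' and 'certificate'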
+
+
+__all__ = [
+ 'createTemporaryCredentials',
+ 'config',
+ 'BaseClient',
+ 'createApiClient',
+]
diff --git a/third_party/python/taskcluster/taskcluster/download.py b/third_party/python/taskcluster/taskcluster/download.py
new file mode 100644
index 0000000000..5584398ea8
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/download.py
@@ -0,0 +1,94 @@
+"""
+Support for downloading objects from the object service, following best
+practices for that service.
+
+Downloaded data is written to a "writer" provided by a "writer factory". A
+writer has a `write` method which writes the entire passed buffer to storage.
+A writer factory is a callable which returns a fresh writer, ready to write the
+first byte of the object. When downloads are retried, the writer factory may
+be called more than once.
+
+This module provides several pre-defined writers and writer factories for
+common cases.
+"""
+import functools
+import six
+
+if six.PY2:
+ raise ImportError("download is only supported in Python 3")
+
+from .aio import download as aio_download
+from .aio.asyncutils import ensureCoro, runAsync
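+
+# Illustrative sketch (not part of this module): a writer factory backed by
+# an in-memory buffer. Each call returns a fresh writer, so a retried
+# download restarts from the first byte.
+#
+#   import io
+#
+#   def bufWriterFactory():
+#       return io.BytesIO()  # io.BytesIO already provides `write`
+#
+#   contentType = download(writerFactory=bufWriterFactory, ...)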
+
+
+def downloadToBuf(**kwargs):
+ """
+ Convenience method to download data to an in-memory buffer and return the
+ downloaded data. Arguments are the same as `download`, except that
+ `writerFactory` should not be supplied. Returns a tuple (buffer, contentType).
+ """
+ return runAsync(aio_download.downloadToBuf(**kwargs))
+
+
+def downloadToFile(file, **kwargs):
+ """
+ Convenience method to download data to a file object. The file must be
+ writeable, in binary mode, seekable (`f.seek`), and truncatable
+ (`f.truncate`) to support retries. Arguments are the same as `download`,
+ except that `writerFactory` should not be supplied. Returns the content-type.
+ """
+ return runAsync(aio_download.downloadToFile(file=file, **kwargs))
+
+
+def download(*, writerFactory, **kwargs):
+ """
+ Download the named object from the object service, using a writer returned
+ from `writerFactory` to write the data. The `maxRetries` parameter has
+ the same meaning as for service clients. The `objectService` parameter is
+ an instance of the Object class, configured with credentials for the
+ upload. Returns the content-type.
+ """
+ wrappedWriterFactory = _wrapSyncWriterFactory(writerFactory)
+ return runAsync(aio_download.download(writerFactory=wrappedWriterFactory, **kwargs))
+
+
+def downloadArtifactToBuf(**kwargs):
+ """
+ Convenience method to download an artifact to an in-memory buffer and return the
+ downloaded data. Arguments are the same as `downloadArtifact`, except that
+ `writerFactory` should not be supplied. Returns a tuple (buffer, contentType).
+ """
+ return runAsync(aio_download.downloadArtifactToBuf(**kwargs))
+
+
+def downloadArtifactToFile(file, **kwargs):
+ """
+ Convenience method to download an artifact to a file object. The file must be
+ writeable, in binary mode, seekable (`f.seek`), and truncatable
+ (`f.truncate`) to support retries. Arguments are the same as `downloadArtifact`,
+ except that `writerFactory` should not be supplied. Returns the content-type.
+ """
+ return runAsync(aio_download.downloadArtifactToFile(file=file, **kwargs))
+
+
+def downloadArtifact(*, writerFactory, **kwargs):
+ """
+ Download the named artifact with the appropriate storageType, using a writer returned
+ from `writerFactory` to write the data. The `maxRetries` parameter has
+ the same meaning as for service clients. The `queueService` parameter is
+ an instance of the Queue class, configured with credentials for the
+ download. Returns the content-type.
+ """
+ wrappedWriterFactory = _wrapSyncWriterFactory(writerFactory)
+ return runAsync(aio_download.downloadArtifact(writerFactory=wrappedWriterFactory, **kwargs))
+
+
+def _wrapSyncWriterFactory(writerFactory):
+ """Modify the reader returned by readerFactory to have an async read."""
+ @functools.wraps(writerFactory)
+ async def wrappedFactory():
+ writer = writerFactory()
+ writer.write = ensureCoro(writer.write)
+ return writer
+
+ return wrappedFactory
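+
+
+# Illustrative usage sketch (argument names other than `file` follow the
+# async implementation in taskcluster.aio.download and are assumptions here):
+#
+#   with open('artifact.bin', 'w+b') as f:
+#       contentType = downloadArtifactToFile(
+#           f, taskId='<taskId>', name='public/build/target.zip',
+#           queueService=queue)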
diff --git a/third_party/python/taskcluster/taskcluster/ec2manager.py b/third_party/python/taskcluster/taskcluster/ec2manager.py
new file mode 100644
index 0000000000..64ebd27aa0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/ec2manager.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.ec2manager import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/exceptions.py b/third_party/python/taskcluster/taskcluster/exceptions.py
new file mode 100644
index 0000000000..bcfc9b1b64
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/exceptions.py
@@ -0,0 +1,43 @@
+""" Taskcluster client exceptions """
+
+
+class TaskclusterFailure(Exception):
+ """ Base exception for all Taskcluster client errors"""
+ pass
+
+
+class TaskclusterRestFailure(TaskclusterFailure):
+ """ Failures in the HTTP Rest API """
+ def __init__(self, msg, superExc, status_code=500, body={}):
+ TaskclusterFailure.__init__(self, msg)
+ self.superExc = superExc
+ self.status_code = status_code
+ self.body = body
+
+
+class TaskclusterConnectionError(TaskclusterFailure):
+ """ Error connecting to resource """
+ def __init__(self, msg, superExc):
+ TaskclusterFailure.__init__(self, msg, superExc)
+ self.superExc = superExc
+
+
+class TaskclusterAuthFailure(TaskclusterFailure):
+ """ Invalid Credentials """
+ def __init__(self, msg, superExc=None, status_code=500, body={}):
+ TaskclusterFailure.__init__(self, msg)
+ self.superExc = superExc
+ self.status_code = status_code
+ self.body = body
+
+
+class TaskclusterTopicExchangeFailure(TaskclusterFailure):
+ """ Error while creating a Topic Exchange routing key """
+ pass
+
+
+class TaskclusterArtifactError(TaskclusterFailure):
+ """Download of an 'error' Artifact"""
+ def __init__(self, message, reason):
+ TaskclusterFailure.__init__(self, message)
+ self.reason = reason
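+
+
+# Illustrative usage sketch: API calls raise TaskclusterAuthFailure for 401
+# responses and TaskclusterRestFailure for other non-2xx responses, both
+# carrying the HTTP status code and parsed body.
+#
+#   try:
+#       result = client.someMethod()
+#   except TaskclusterAuthFailure as err:
+#       ...  # bad or missing credentials
+#   except TaskclusterRestFailure as err:
+#       print(err.status_code, err.body)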
diff --git a/third_party/python/taskcluster/taskcluster/generated/__init__.py b/third_party/python/taskcluster/taskcluster/generated/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/__init__.py
diff --git a/third_party/python/taskcluster/taskcluster/generated/_client_importer.py b/third_party/python/taskcluster/taskcluster/generated/_client_importer.py
new file mode 100644
index 0000000000..fd6edec960
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/_client_importer.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+from .auth import Auth # NOQA
+from .authevents import AuthEvents # NOQA
+from .github import Github # NOQA
+from .githubevents import GithubEvents # NOQA
+from .hooks import Hooks # NOQA
+from .hooksevents import HooksEvents # NOQA
+from .index import Index # NOQA
+from .notify import Notify # NOQA
+from .notifyevents import NotifyEvents # NOQA
+from .object import Object # NOQA
+from .purgecache import PurgeCache # NOQA
+from .queue import Queue # NOQA
+from .queueevents import QueueEvents # NOQA
+from .secrets import Secrets # NOQA
+from .workermanager import WorkerManager # NOQA
+from .workermanagerevents import WorkerManagerEvents # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/__init__.py b/third_party/python/taskcluster/taskcluster/generated/aio/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/__init__.py
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/_client_importer.py b/third_party/python/taskcluster/taskcluster/generated/aio/_client_importer.py
new file mode 100644
index 0000000000..fd6edec960
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/_client_importer.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+from .auth import Auth # NOQA
+from .authevents import AuthEvents # NOQA
+from .github import Github # NOQA
+from .githubevents import GithubEvents # NOQA
+from .hooks import Hooks # NOQA
+from .hooksevents import HooksEvents # NOQA
+from .index import Index # NOQA
+from .notify import Notify # NOQA
+from .notifyevents import NotifyEvents # NOQA
+from .object import Object # NOQA
+from .purgecache import PurgeCache # NOQA
+from .queue import Queue # NOQA
+from .queueevents import QueueEvents # NOQA
+from .secrets import Secrets # NOQA
+from .workermanager import WorkerManager # NOQA
+from .workermanagerevents import WorkerManagerEvents # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/auth.py b/third_party/python/taskcluster/taskcluster/generated/aio/auth.py
new file mode 100644
index 0000000000..39752f3a89
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/auth.py
@@ -0,0 +1,781 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Auth(AsyncBaseClient):
+ """
+ Authentication related API end-points for Taskcluster and related
+ services. These API end-points are of interest if you wish to:
+ * Authorize a request signed with Taskcluster credentials,
+ * Manage clients and roles,
+ * Inspect or audit clients and roles,
+ * Gain access to various services guarded by this API.
+
+ """
+
+ classOptions = {
+ }
+ serviceName = 'auth'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def listClients(self, *args, **kwargs):
+ """
+ List Clients
+
+ Get a list of all clients. With `prefix`, only clients for which
+ it is a prefix of the clientId are returned.
+
+ By default this end-point will try to return up to 1000 clients in one
+ request. But it **may return fewer, even none**.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listClients` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listClients"], *args, **kwargs)
+
+ async def client(self, *args, **kwargs):
+ """
+ Get Client
+
+ Get information about a single client.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["client"], *args, **kwargs)
+
+ async def createClient(self, *args, **kwargs):
+ """
+ Create Client
+
+ Create a new client and get the `accessToken` for this client.
+ You should store the `accessToken` from this API call as there is no
+ other way to retrieve it.
+
+ If you lose the `accessToken` you can call `resetAccessToken` to reset
+ it, and a new `accessToken` will be returned, but you cannot retrieve the
+ current `accessToken`.
+
+ If a client with the same `clientId` already exists this operation will
+ fail. Use `updateClient` if you wish to update an existing client.
+
+ The caller's scopes must satisfy `scopes`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createClient"], *args, **kwargs)
+
+ async def resetAccessToken(self, *args, **kwargs):
+ """
+ Reset `accessToken`
+
+ Reset a client's `accessToken`; this will revoke the existing
+ `accessToken`, generate a new `accessToken` and return it from this
+ call.
+
+ There is no way to retrieve an existing `accessToken`, so if you lose it
+ you must reset the accessToken to acquire a new one.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["resetAccessToken"], *args, **kwargs)
+
+ async def updateClient(self, *args, **kwargs):
+ """
+ Update Client
+
+ Update an existing client. The `clientId` and `accessToken` cannot be
+ updated, but `scopes` can be modified. The caller's scopes must
+ satisfy all scopes being added to the client in the update operation.
+ If no scopes are given in the request, the client's scopes remain
+ unchanged.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateClient"], *args, **kwargs)
+
+ async def enableClient(self, *args, **kwargs):
+ """
+ Enable Client
+
+ Enable a client that was disabled with `disableClient`. If the client
+ is already enabled, this does nothing.
+
+ This is typically used by identity providers to re-enable clients that
+ had been disabled when the corresponding identity's scopes changed.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["enableClient"], *args, **kwargs)
+
+ async def disableClient(self, *args, **kwargs):
+ """
+ Disable Client
+
+ Disable a client. If the client is already disabled, this does nothing.
+
+ This is typically used by identity providers to disable clients when the
+ corresponding identity's scopes no longer satisfy the client's scopes.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["disableClient"], *args, **kwargs)
+
+ async def deleteClient(self, *args, **kwargs):
+ """
+ Delete Client
+
+ Delete a client, please note that any roles related to this client must
+ be deleted independently.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteClient"], *args, **kwargs)
+
+ async def listRoles(self, *args, **kwargs):
+ """
+ List Roles (no pagination)
+
+ Get a list of all roles. Each role object also includes the list of
+ scopes it expands to. This always returns all roles in a single HTTP
+ request.
+
+ To get paginated results, use `listRoles2`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listRoles"], *args, **kwargs)
+
+ async def listRoles2(self, *args, **kwargs):
+ """
+ List Roles
+
+ Get a list of all roles. Each role object also includes the list of
+ scopes it expands to. This is similar to `listRoles` but differs in the
+ format of the response.
+
+ If no limit is given, all roles are returned. Since this
+ list may become long, callers can use the `limit` and `continuationToken`
+ query arguments to page through the responses.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listRoles2"], *args, **kwargs)
+
+ async def listRoleIds(self, *args, **kwargs):
+ """
+ List Role IDs
+
+ Get a list of all role IDs.
+
+ If no limit is given, the roleIds of all roles are returned. Since this
+ list may become long, callers can use the `limit` and `continuationToken`
+ query arguments to page through the responses.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs)
+
+ async def role(self, *args, **kwargs):
+ """
+ Get Role
+
+ Get information about a single role, including the set of scopes that the
+ role expands to.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["role"], *args, **kwargs)
+
+ async def createRole(self, *args, **kwargs):
+ """
+ Create Role
+
+ Create a new role.
+
+ The caller's scopes must satisfy the new role's scopes.
+
+ If there already exists a role with the same `roleId` this operation
+ will fail. Use `updateRole` to modify an existing role.
+
+ Creation of a role that will generate an infinite expansion will result
+ in an error response.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createRole"], *args, **kwargs)
+
+ async def updateRole(self, *args, **kwargs):
+ """
+ Update Role
+
+ Update an existing role.
+
+ The caller's scopes must satisfy all of the new scopes being added, but
+ need not satisfy all of the role's existing scopes.
+
+ An update of a role that will generate an infinite expansion will result
+ in an error response.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs)
+
+ async def deleteRole(self, *args, **kwargs):
+ """
+ Delete Role
+
+ Delete a role. This operation will succeed regardless of whether or not
+ the role exists.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs)
+
+ async def expandScopes(self, *args, **kwargs):
+ """
+ Expand Scopes
+
+ Return an expanded copy of the given scopeset, with scopes implied by any
+ roles included.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["expandScopes"], *args, **kwargs)
+
+ async def currentScopes(self, *args, **kwargs):
+ """
+ Get Current Scopes
+
+ Return the expanded scopes available in the request, taking into account all sources
+ of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
+ and roles).
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["currentScopes"], *args, **kwargs)
+
+ async def awsS3Credentials(self, *args, **kwargs):
+ """
+ Get Temporary Read/Write Credentials S3
+
+ Get temporary AWS credentials for `read-write` or `read-only` access to
+ a given `bucket` and `prefix` within that bucket.
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. Please note that the `level`
+ parameter is required in the scope guarding access. The bucket name must
+ not contain `.`, as recommended by Amazon.
+
+ This method can only allow access to a whitelisted set of buckets, as configured
+ in the Taskcluster deployment.
+
+ The credentials are set to expire after an hour, but this behavior is
+ subject to change. Hence, you should always read the `expires` property
+ from the response, if you intend to maintain active credentials in your
+ application.
+
+ Please note that your `prefix` may not start with slash `/`. Such a prefix
+ is allowed on S3, but we forbid it here to discourage bad behavior.
+
+ Also note that if your `prefix` doesn't end in a slash `/`, the STS
+ credentials may allow access to unexpected keys, as S3 does not treat
+ slashes specially. For example, a prefix of `my-folder` will allow
+ access to `my-folder/file.txt` as expected, but also to `my-folder.txt`,
+ which may not be intended.
+
+ Finally, note that the `PutObjectAcl` call is not allowed. Passing a canned
+ ACL other than `private` to `PutObject` is treated as a `PutObjectAcl` call, and
+ will result in an access-denied error from AWS. This limitation is due to a
+ security flaw in Amazon S3 which might otherwise allow indefinite access to
+ uploaded objects.
+
+ **EC2 metadata compatibility**, if the querystring parameter
+ `?format=iam-role-compat` is given, the response will be compatible
+ with the JSON exposed by the EC2 metadata service. This aims to ease
+ compatibility for libraries and tools built to auto-refresh credentials.
+ For details on the format returned by EC2 metadata service see:
+ [EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials).
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["awsS3Credentials"], *args, **kwargs)
+
+ async def azureAccounts(self, *args, **kwargs):
+ """
+ List Accounts Managed by Auth
+
+ Retrieve a list of all Azure accounts managed by Taskcluster Auth.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureAccounts"], *args, **kwargs)
+
+ async def azureTables(self, *args, **kwargs):
+ """
+ List Tables in an Account Managed by Auth
+
+ Retrieve a list of all tables in an account.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs)
+
+ async def azureTableSAS(self, *args, **kwargs):
+ """
+ Get Shared-Access-Signature for Azure Table
+
+ Get a shared access signature (SAS) string for use with a specific Azure
+ Table Storage table.
+
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. If level is read-write, it will create the
+ table if it doesn't already exist.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureTableSAS"], *args, **kwargs)
+
+ async def azureContainers(self, *args, **kwargs):
+ """
+ List containers in an Account Managed by Auth
+
+ Retrieve a list of all containers in an account.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs)
+
+ async def azureContainerSAS(self, *args, **kwargs):
+ """
+ Get Shared-Access-Signature for Azure Container
+
+ Get a shared access signature (SAS) string for use with a specific Azure
+ Blob Storage container.
+
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. If level is read-write, it will create the
+ container if it doesn't already exist.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs)
+
+ async def sentryDSN(self, *args, **kwargs):
+ """
+ Get DSN for Sentry Project
+
+ Get temporary DSN (access credentials) for a sentry project.
+ The credentials returned can be used with any Sentry client for up to
+ 24 hours, after which the credentials will be automatically disabled.
+
+ If the project doesn't exist it will be created, and assigned to the
+ initial team configured for this component. Contact a Sentry admin
+ to have the project transferred to a team you have access to, if needed.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["sentryDSN"], *args, **kwargs)
+
+ async def websocktunnelToken(self, *args, **kwargs):
+ """
+ Get a client token for the Websocktunnel service
+
+ Get a temporary token suitable for use connecting to a
+ [websocktunnel](https://github.com/taskcluster/taskcluster/tree/main/tools/websocktunnel) server.
+
+ The resulting token will only be accepted by servers with a matching audience
+ value. Reaching such a server is the caller's responsibility. In general,
+ a server URL or set of URLs should be provided to the caller as configuration
+ along with the audience value.
+
+ The token is valid for a limited time (on the scale of hours). Callers should
+ refresh it before expiration.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["websocktunnelToken"], *args, **kwargs)
+
+ async def gcpCredentials(self, *args, **kwargs):
+ """
+ Get Temporary GCP Credentials
+
+ Get temporary GCP credentials for the given serviceAccount in the given project.
+
+ Only preconfigured projects and serviceAccounts are allowed, as defined in the
+ deployment of the Taskcluster services.
+
+ The credentials are set to expire after an hour, but this behavior is
+ subject to change. Hence, you should always read the `expires` property
+ from the response, if you intend to maintain active credentials in your
+ application.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["gcpCredentials"], *args, **kwargs)
+
+ async def authenticateHawk(self, *args, **kwargs):
+ """
+ Authenticate Hawk Request
+
+ Validate the request signature given on input and return list of scopes
+ that the authenticating client has.
+
+ This method is used by other services that wish to rely on Taskcluster
+ credentials for authentication. This way we can use Hawk without having
+ the secret credentials leave this service.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["authenticateHawk"], *args, **kwargs)
+
+ async def testAuthenticate(self, *args, **kwargs):
+ """
+ Test Authentication
+
+ Utility method to test client implementations of Taskcluster
+ authentication.
+
+ Rather than using real credentials, this endpoint accepts requests with
+ clientId `tester` and accessToken `no-secret`. That client's scopes are
+ based on `clientScopes` in the request body.
+
+ The request is validated, with any certificate, authorizedScopes, etc.
+ applied, and the resulting scopes are checked against `requiredScopes`
+ from the request body. On success, the response contains the clientId
+ and scopes as seen by the API method.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["testAuthenticate"], *args, **kwargs)
+
+ async def testAuthenticateGet(self, *args, **kwargs):
+ """
+ Test Authentication (GET)
+
+ Utility method similar to `testAuthenticate`, but with the GET method,
+ so it can be used with signed URLs (bewits).
+
+ Rather than using real credentials, this endpoint accepts requests with
+ clientId `tester` and accessToken `no-secret`. That client's scopes are
+ `['test:*', 'auth:create-client:test:*']`. The call fails if the
+ `test:authenticate-get` scope is not available.
+
+ The request is validated, with any certificate, authorizedScopes, etc.
+ applied, and the resulting scopes are checked, just like any API call.
+ On success, the response contains the clientId and scopes as seen by
+ the API method.
+
+ This method may later be extended to allow specification of client and
+ required scopes via query arguments.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["testAuthenticateGet"], *args, **kwargs)
+
+ funcinfo = {
+ "authenticateHawk": {
+ 'args': [],
+ 'input': 'v1/authenticate-hawk-request.json#',
+ 'method': 'post',
+ 'name': 'authenticateHawk',
+ 'output': 'v1/authenticate-hawk-response.json#',
+ 'route': '/authenticate-hawk',
+ 'stability': 'stable',
+ },
+ "awsS3Credentials": {
+ 'args': ['level', 'bucket', 'prefix'],
+ 'method': 'get',
+ 'name': 'awsS3Credentials',
+ 'output': 'v1/aws-s3-credentials-response.json#',
+ 'query': ['format'],
+ 'route': '/aws/s3/<level>/<bucket>/<prefix>',
+ 'stability': 'stable',
+ },
+ "azureAccounts": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'azureAccounts',
+ 'output': 'v1/azure-account-list-response.json#',
+ 'route': '/azure/accounts',
+ 'stability': 'stable',
+ },
+ "azureContainerSAS": {
+ 'args': ['account', 'container', 'level'],
+ 'method': 'get',
+ 'name': 'azureContainerSAS',
+ 'output': 'v1/azure-container-response.json#',
+ 'route': '/azure/<account>/containers/<container>/<level>',
+ 'stability': 'stable',
+ },
+ "azureContainers": {
+ 'args': ['account'],
+ 'method': 'get',
+ 'name': 'azureContainers',
+ 'output': 'v1/azure-container-list-response.json#',
+ 'query': ['continuationToken'],
+ 'route': '/azure/<account>/containers',
+ 'stability': 'stable',
+ },
+ "azureTableSAS": {
+ 'args': ['account', 'table', 'level'],
+ 'method': 'get',
+ 'name': 'azureTableSAS',
+ 'output': 'v1/azure-table-access-response.json#',
+ 'route': '/azure/<account>/table/<table>/<level>',
+ 'stability': 'stable',
+ },
+ "azureTables": {
+ 'args': ['account'],
+ 'method': 'get',
+ 'name': 'azureTables',
+ 'output': 'v1/azure-table-list-response.json#',
+ 'query': ['continuationToken'],
+ 'route': '/azure/<account>/tables',
+ 'stability': 'stable',
+ },
+ "client": {
+ 'args': ['clientId'],
+ 'method': 'get',
+ 'name': 'client',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "createClient": {
+ 'args': ['clientId'],
+ 'input': 'v1/create-client-request.json#',
+ 'method': 'put',
+ 'name': 'createClient',
+ 'output': 'v1/create-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "createRole": {
+ 'args': ['roleId'],
+ 'input': 'v1/create-role-request.json#',
+ 'method': 'put',
+ 'name': 'createRole',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "currentScopes": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'currentScopes',
+ 'output': 'v1/scopeset.json#',
+ 'route': '/scopes/current',
+ 'stability': 'stable',
+ },
+ "deleteClient": {
+ 'args': ['clientId'],
+ 'method': 'delete',
+ 'name': 'deleteClient',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "deleteRole": {
+ 'args': ['roleId'],
+ 'method': 'delete',
+ 'name': 'deleteRole',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "disableClient": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'disableClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>/disable',
+ 'stability': 'stable',
+ },
+ "enableClient": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'enableClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>/enable',
+ 'stability': 'stable',
+ },
+ "expandScopes": {
+ 'args': [],
+ 'input': 'v1/scopeset.json#',
+ 'method': 'post',
+ 'name': 'expandScopes',
+ 'output': 'v1/scopeset.json#',
+ 'route': '/scopes/expand',
+ 'stability': 'stable',
+ },
+ "gcpCredentials": {
+ 'args': ['projectId', 'serviceAccount'],
+ 'method': 'get',
+ 'name': 'gcpCredentials',
+ 'output': 'v1/gcp-credentials-response.json#',
+ 'route': '/gcp/credentials/<projectId>/<serviceAccount>',
+ 'stability': 'stable',
+ },
+ "listClients": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listClients',
+ 'output': 'v1/list-clients-response.json#',
+ 'query': ['prefix', 'continuationToken', 'limit'],
+ 'route': '/clients/',
+ 'stability': 'stable',
+ },
+ "listRoleIds": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoleIds',
+ 'output': 'v1/list-role-ids-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/roleids/',
+ 'stability': 'stable',
+ },
+ "listRoles": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoles',
+ 'output': 'v1/list-roles-response.json#',
+ 'route': '/roles/',
+ 'stability': 'stable',
+ },
+ "listRoles2": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoles2',
+ 'output': 'v1/list-roles2-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/roles2/',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "resetAccessToken": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'resetAccessToken',
+ 'output': 'v1/create-client-response.json#',
+ 'route': '/clients/<clientId>/reset',
+ 'stability': 'stable',
+ },
+ "role": {
+ 'args': ['roleId'],
+ 'method': 'get',
+ 'name': 'role',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "sentryDSN": {
+ 'args': ['project'],
+ 'method': 'get',
+ 'name': 'sentryDSN',
+ 'output': 'v1/sentry-dsn-response.json#',
+ 'route': '/sentry/<project>/dsn',
+ 'stability': 'stable',
+ },
+ "testAuthenticate": {
+ 'args': [],
+ 'input': 'v1/test-authenticate-request.json#',
+ 'method': 'post',
+ 'name': 'testAuthenticate',
+ 'output': 'v1/test-authenticate-response.json#',
+ 'route': '/test-authenticate',
+ 'stability': 'stable',
+ },
+ "testAuthenticateGet": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'testAuthenticateGet',
+ 'output': 'v1/test-authenticate-response.json#',
+ 'route': '/test-authenticate-get/',
+ 'stability': 'stable',
+ },
+ "updateClient": {
+ 'args': ['clientId'],
+ 'input': 'v1/create-client-request.json#',
+ 'method': 'post',
+ 'name': 'updateClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "updateRole": {
+ 'args': ['roleId'],
+ 'input': 'v1/create-role-request.json#',
+ 'method': 'post',
+ 'name': 'updateRole',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "websocktunnelToken": {
+ 'args': ['wstAudience', 'wstClient'],
+ 'method': 'get',
+ 'name': 'websocktunnelToken',
+ 'output': 'v1/websocktunnel-token-response.json#',
+ 'route': '/websocktunnel/<wstAudience>/<wstClient>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Auth']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/authevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/authevents.py
new file mode 100644
index 0000000000..6bd552a147
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/authevents.py
@@ -0,0 +1,180 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class AuthEvents(AsyncBaseClient):
+ """
+ The auth service is responsible for storing credentials, managing
+ assignment of scopes, and validating request signatures from other
+ services.
+
+ These exchanges provide notifications when credentials or roles are
+ updated. This is mostly so that multiple instances of the auth service
+ can purge their caches and synchronize state. But you are of course
+ welcome to use these for other purposes, for example monitoring changes.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-auth/v1/",
+ }
+ serviceName = 'auth'
+ apiVersion = 'v1'
+
+ def clientCreated(self, *args, **kwargs):
+ """
+ Client Created Messages
+
+ Message that a new client has been created.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-created',
+ 'name': 'clientCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
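+
+ # Illustrative sketch: with only the multi-word `reserved` token and no
+ # arguments, the generated routing-key pattern is simply '#', which
+ # matches every message on this exchange:
+ #
+ #   binding = authEvents.clientCreated()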
+
+ def clientUpdated(self, *args, **kwargs):
+ """
+ Client Updated Messages
+
+ Message that a client has been updated.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-updated',
+ 'name': 'clientUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def clientDeleted(self, *args, **kwargs):
+ """
+ Client Deleted Messages
+
+ Message that a client has been deleted.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-deleted',
+ 'name': 'clientDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleCreated(self, *args, **kwargs):
+ """
+ Role Created Messages
+
+ Message that a new role has been created.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-created',
+ 'name': 'roleCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleUpdated(self, *args, **kwargs):
+ """
+ Role Updated Messages
+
+ Message that a role has been updated.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-updated',
+ 'name': 'roleUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleDeleted(self, *args, **kwargs):
+ """
+ Role Deleted Messages
+
+ Message that a role has been deleted.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-deleted',
+ 'name': 'roleDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AuthEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/github.py b/third_party/python/taskcluster/taskcluster/generated/aio/github.py
new file mode 100644
index 0000000000..94f19770e5
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/github.py
@@ -0,0 +1,197 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Github(AsyncBaseClient):
+ """
+ The github service is responsible for creating tasks in response
+ to GitHub events, and posting results to the GitHub UI.
+
+ This document describes the API end-point for consuming GitHub
+ web hooks, as well as some useful consumer APIs.
+
+ When Github forbids an action, this service returns an HTTP 403
+ with code ForbiddenByGithub.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'github'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def githubWebHookConsumer(self, *args, **kwargs):
+ """
+ Consume GitHub WebHook
+
+ Capture a GitHub event and publish it via pulse, if it's a push,
+ release or pull request.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs)
+
+ async def builds(self, *args, **kwargs):
+ """
+ List of Builds
+
+ A paginated list of builds that have been run in
+ Taskcluster. Can be filtered on various git-specific
+ fields.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["builds"], *args, **kwargs)
+
+ async def badge(self, *args, **kwargs):
+ """
+ Latest Build Status Badge
+
+ Checks the status of the latest build of a given branch
+ and returns corresponding badge svg.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["badge"], *args, **kwargs)
+
+ async def repository(self, *args, **kwargs):
+ """
+ Get Repository Info
+
+ Returns any repository metadata that is
+ useful within Taskcluster-related services.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["repository"], *args, **kwargs)
+
+ async def latest(self, *args, **kwargs):
+ """
+ Latest Status for Branch
+
+ For a given branch of a repository, this will always point
+ to a status page for the most recent task triggered by that
+ branch.
+
+ Note: This is a redirect rather than a direct link.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["latest"], *args, **kwargs)
+
+ async def createStatus(self, *args, **kwargs):
+ """
+ Post a status against a given changeset
+
+ For a given changeset (SHA) of a repository, this will attach a "commit status"
+ on GitHub. These statuses are links displayed next to each revision.
+ The status is either OK (green check) or FAILURE (red cross),
+ made of a custom title and link.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createStatus"], *args, **kwargs)
+
+ async def createComment(self, *args, **kwargs):
+ """
+ Post a comment on a given GitHub Issue or Pull Request
+
+ For a given Issue or Pull Request of a repository, this will write a new message.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs)
+
+ funcinfo = {
+ "badge": {
+ 'args': ['owner', 'repo', 'branch'],
+ 'method': 'get',
+ 'name': 'badge',
+ 'route': '/repository/<owner>/<repo>/<branch>/badge.svg',
+ 'stability': 'experimental',
+ },
+ "builds": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'builds',
+ 'output': 'v1/build-list.json#',
+ 'query': ['continuationToken', 'limit', 'organization', 'repository', 'sha'],
+ 'route': '/builds',
+ 'stability': 'stable',
+ },
+ "createComment": {
+ 'args': ['owner', 'repo', 'number'],
+ 'input': 'v1/create-comment.json#',
+ 'method': 'post',
+ 'name': 'createComment',
+ 'route': '/repository/<owner>/<repo>/issues/<number>/comments',
+ 'stability': 'stable',
+ },
+ "createStatus": {
+ 'args': ['owner', 'repo', 'sha'],
+ 'input': 'v1/create-status.json#',
+ 'method': 'post',
+ 'name': 'createStatus',
+ 'route': '/repository/<owner>/<repo>/statuses/<sha>',
+ 'stability': 'experimental',
+ },
+ "githubWebHookConsumer": {
+ 'args': [],
+ 'method': 'post',
+ 'name': 'githubWebHookConsumer',
+ 'route': '/github',
+ 'stability': 'stable',
+ },
+ "latest": {
+ 'args': ['owner', 'repo', 'branch'],
+ 'method': 'get',
+ 'name': 'latest',
+ 'route': '/repository/<owner>/<repo>/<branch>/latest',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "repository": {
+ 'args': ['owner', 'repo'],
+ 'method': 'get',
+ 'name': 'repository',
+ 'output': 'v1/repository.json#',
+ 'route': '/repository/<owner>/<repo>',
+ 'stability': 'experimental',
+ },
+ }
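+
+ # Illustrative sketch: query-string filters for `builds` are passed via
+ # the `query` keyword, per the funcinfo entry above:
+ #
+ #   result = await github.builds(query={'organization': 'my-org',
+ #                                       'repository': 'my-repo'})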
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Github']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/githubevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/githubevents.py
new file mode 100644
index 0000000000..a70180d78f
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/githubevents.py
@@ -0,0 +1,199 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class GithubEvents(AsyncBaseClient):
+ """
+ The github service publishes a pulse
+ message for supported GitHub events, translating GitHub webhook
+ events into pulse messages.
+
+ This document describes the exchange offered by the taskcluster
+ github service
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-github/v1/",
+ }
+ serviceName = 'github'
+ apiVersion = 'v1'
+
+ def pullRequest(self, *args, **kwargs):
+ """
+ GitHub Pull Request Event
+
+ When a GitHub pull request event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * action: The GitHub `action` which triggered an event. For possible values, see the payload actions property. (required)
+ """
+
+ ref = {
+ 'exchange': 'pull-request',
+ 'name': 'pullRequest',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'action',
+ },
+ ],
+ 'schema': 'v1/github-pull-request-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def push(self, *args, **kwargs):
+ """
+ GitHub push Event
+
+ When a GitHub push event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'push',
+ 'name': 'push',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/github-push-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
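+
+ # Illustrative sketch: topic-exchange methods return a binding holding the
+ # exchange name and a routing-key pattern; unspecified tokens become '*'
+ # (single word) or '#' (multiple words):
+ #
+ #   binding = githubEvents.push(organization='my-org')
+ #   # the pattern then matches 'primary.my-org.<any repository>'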
+
+ def release(self, *args, **kwargs):
+ """
+ GitHub release Event
+
+ When a GitHub release event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'release',
+ 'name': 'release',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/github-release-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskGroupCreationRequested(self, *args, **kwargs):
+ """
+ tc-gh requested the Queue service to create all the tasks in a group
+
+ Signals that the taskCreate API has been called for every task in the
+ task group for this particular repo and organization. It is currently
+ used for creating initial status indicators in the GitHub UI using the
+ Statuses API. This exchange can also be bound to RabbitMQ queues by
+ custom routes; for that, pass in the array of routes as a second
+ argument to the publish method. Currently, we use the statuses routes
+ to bind the handler that creates the initial status.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'task-group-creation-requested',
+ 'name': 'taskGroupCreationRequested',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/task-group-creation-requested.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'GithubEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/hooks.py b/third_party/python/taskcluster/taskcluster/generated/aio/hooks.py
new file mode 100644
index 0000000000..59abb7a938
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/hooks.py
@@ -0,0 +1,300 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Hooks(AsyncBaseClient):
+ """
+ The hooks service provides a mechanism for creating tasks in response to events.
+
+ """
+
+ classOptions = {
+ }
+ serviceName = 'hooks'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def listHookGroups(self, *args, **kwargs):
+ """
+ List hook groups
+
+ This endpoint will return a list of all hook groups with at least one hook.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listHookGroups"], *args, **kwargs)
+
+ async def listHooks(self, *args, **kwargs):
+ """
+ List hooks in a given group
+
+ This endpoint will return a list of all the hook definitions within a
+ given hook group.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
+
+ async def hook(self, *args, **kwargs):
+ """
+ Get hook definition
+
+ This endpoint will return the hook definition for the given `hookGroupId`
+ and `hookId`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["hook"], *args, **kwargs)
+
+ async def getHookStatus(self, *args, **kwargs):
+ """
+ Get hook status
+
+ This endpoint will return the current status of the hook. This represents a
+ snapshot in time and may vary from one call to the next.
+
+ This method is deprecated in favor of listLastFires.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs)
+
+ async def createHook(self, *args, **kwargs):
+ """
+ Create a hook
+
+ This endpoint will create a new hook.
+
+ The caller's credentials must include the role that will be used to
+ create the task. That role must satisfy task.scopes as well as the
+ necessary scopes to add the task to the queue.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createHook"], *args, **kwargs)
+
+ async def updateHook(self, *args, **kwargs):
+ """
+ Update a hook
+
+ This endpoint will update an existing hook. All fields except
+ `hookGroupId` and `hookId` can be modified.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateHook"], *args, **kwargs)
+
+ async def removeHook(self, *args, **kwargs):
+ """
+ Delete a hook
+
+ This endpoint will remove a hook definition.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["removeHook"], *args, **kwargs)
+
+ async def triggerHook(self, *args, **kwargs):
+ """
+ Trigger a hook
+
+ This endpoint will trigger the creation of a task from a hook definition.
+
+ The HTTP payload must match the hook's `triggerSchema`. If it does, it is
+ provided as the `payload` property of the JSON-e context used to render the
+ task template.
+
+ This method is ``stable``
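+
+ A minimal usage sketch (the root URL, credentials, hook identifiers, and
+ payload are illustrative; the payload must validate against the hook's
+ `triggerSchema`)::
+
+     import taskcluster.aio
+     hooks = taskcluster.aio.Hooks({
+         'rootUrl': 'https://tc.example.com',
+         'credentials': {'clientId': '...', 'accessToken': '...'},
+     })
+     resp = await hooks.triggerHook('my-group', 'my-hook', {'branch': 'main'})
+     # resp matches v1/trigger-hook-response.json# and describes the new task.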
+ """
+
+ return await self._makeApiCall(self.funcinfo["triggerHook"], *args, **kwargs)
+
+ async def getTriggerToken(self, *args, **kwargs):
+ """
+ Get a trigger token
+
+ Retrieve a unique secret token for triggering the specified hook. This
+ token can be deactivated with `resetTriggerToken`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getTriggerToken"], *args, **kwargs)
+
+ async def resetTriggerToken(self, *args, **kwargs):
+ """
+ Reset a trigger token
+
+ Reset the token for triggering a given hook, replacing it with a new token.
+ This invalidates any token that may have been issued via `getTriggerToken`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["resetTriggerToken"], *args, **kwargs)
+
+ async def triggerHookWithToken(self, *args, **kwargs):
+ """
+ Trigger a hook with a token
+
+ This endpoint triggers a defined hook with a valid token.
+
+ The HTTP payload must match the hook's `triggerSchema`. If it does, it is
+ provided as the `payload` property of the JSON-e context used to render the
+ task template.
+
+ This method is ``stable``
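+
+ A sketch of the token flow, assuming a `hooks` client constructed as in
+ the `triggerHook` sketch (hook identifiers and payload are illustrative)::
+
+     token = (await hooks.getTriggerToken('my-group', 'my-hook'))['token']
+     # The token alone authorizes the trigger; no other credentials needed.
+     await hooks.triggerHookWithToken('my-group', 'my-hook', token, {'branch': 'main'})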
+ """
+
+ return await self._makeApiCall(self.funcinfo["triggerHookWithToken"], *args, **kwargs)
+
+ async def listLastFires(self, *args, **kwargs):
+ """
+ Get information about recent hook fires
+
+ This endpoint will return information about the last few times this hook has
+ been fired, including whether the hook was fired successfully or not.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listLastFires"], *args, **kwargs)
+
+ funcinfo = {
+ "createHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/create-hook-request.json#',
+ 'method': 'put',
+ 'name': 'createHook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "getHookStatus": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'getHookStatus',
+ 'output': 'v1/hook-status.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/status',
+ 'stability': 'deprecated',
+ },
+ "getTriggerToken": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'getTriggerToken',
+ 'output': 'v1/trigger-token-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/token',
+ 'stability': 'stable',
+ },
+ "hook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'hook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "listHookGroups": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listHookGroups',
+ 'output': 'v1/list-hook-groups-response.json#',
+ 'route': '/hooks',
+ 'stability': 'stable',
+ },
+ "listHooks": {
+ 'args': ['hookGroupId'],
+ 'method': 'get',
+ 'name': 'listHooks',
+ 'output': 'v1/list-hooks-response.json#',
+ 'route': '/hooks/<hookGroupId>',
+ 'stability': 'stable',
+ },
+ "listLastFires": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'listLastFires',
+ 'output': 'v1/list-lastFires-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/last-fires',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "removeHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'delete',
+ 'name': 'removeHook',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "resetTriggerToken": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'post',
+ 'name': 'resetTriggerToken',
+ 'output': 'v1/trigger-token-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/token',
+ 'stability': 'stable',
+ },
+ "triggerHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/trigger-hook.json#',
+ 'method': 'post',
+ 'name': 'triggerHook',
+ 'output': 'v1/trigger-hook-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/trigger',
+ 'stability': 'stable',
+ },
+ "triggerHookWithToken": {
+ 'args': ['hookGroupId', 'hookId', 'token'],
+ 'input': 'v1/trigger-hook.json#',
+ 'method': 'post',
+ 'name': 'triggerHookWithToken',
+ 'output': 'v1/trigger-hook-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/trigger/<token>',
+ 'stability': 'stable',
+ },
+ "updateHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/create-hook-request.json#',
+ 'method': 'post',
+ 'name': 'updateHook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Hooks']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/hooksevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/hooksevents.py
new file mode 100644
index 0000000000..0e841a256d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/hooksevents.py
@@ -0,0 +1,101 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class HooksEvents(AsyncBaseClient):
+ """
+ The hooks service is responsible for creating tasks at specific times or in
+ response to webhooks and API calls. Using this exchange allows us to make
+ hooks which respond to particular pulse messages. These exchanges provide
+ notifications when a hook is created, updated or deleted. This is so that
+ the listener running in a different hooks process at the other end can
+ direct another listener specified by `hookGroupId` and `hookId` to
+ synchronize its bindings. But you are of course welcome to use these for
+ other purposes, for example monitoring changes.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-hooks/v1/",
+ }
+ serviceName = 'hooks'
+ apiVersion = 'v1'
+
+ def hookCreated(self, *args, **kwargs):
+ """
+ Hook Created Messages
+
+ Whenever the API receives a request to create a pulse-based hook, a message
+ is posted to this exchange and the receiver creates a listener with the
+ bindings, to create a task.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-created',
+ 'name': 'hookCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def hookUpdated(self, *args, **kwargs):
+ """
+ Hook Updated Messages
+
+ Whenever the API receives a request to update a pulse-based hook, a message
+ is posted to this exchange and the receiver updates the listener associated
+ with that hook.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-updated',
+ 'name': 'hookUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def hookDeleted(self, *args, **kwargs):
+ """
+ Hook Deleted Messages
+
+ Whenever the API receives a request to delete a pulse-based hook, a message
+ is posted to this exchange and the receiver deletes the listener associated
+ with that hook.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-deleted',
+ 'name': 'hookDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'HooksEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/index.py b/third_party/python/taskcluster/taskcluster/generated/aio/index.py
new file mode 100644
index 0000000000..e7aabd3c49
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/index.py
@@ -0,0 +1,204 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Index(AsyncBaseClient):
+ """
+ The index service is responsible for indexing tasks. The service ensures that
+ tasks can be located by user-defined names.
+
+ As described in the service documentation, tasks are typically indexed via Pulse
+ messages, so the most common use of API methods is to read from the index.
+
+ Slashes (`/`) aren't allowed in index paths.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'index'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def findTask(self, *args, **kwargs):
+ """
+ Find Indexed Task
+
+ Find a task by index path, returning the highest-rank task with that path. If no
+ task exists for the given path, this API end-point will respond with a 404 status.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs)
+
+ async def listNamespaces(self, *args, **kwargs):
+ """
+ List Namespaces
+
+ List the namespaces immediately under a given namespace.
+
+ This endpoint
+ lists up to 1000 namespaces. If more namespaces are present, a
+ `continuationToken` will be returned, which can be given in the next
+ request. For the initial request, the payload should be an empty JSON
+ object.
+
+ This method is ``stable``
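+
+ A pagination sketch using the continuation token (the root URL and
+ namespace are illustrative; query-string options are passed as keyword
+ arguments)::
+
+     import taskcluster.aio
+     index = taskcluster.aio.Index({'rootUrl': 'https://tc.example.com'})
+     opts = {}
+     while True:
+         result = await index.listNamespaces('project.example', **opts)
+         for ns in result['namespaces']:
+             print(ns['namespace'])
+         if 'continuationToken' not in result:
+             break
+         opts = {'continuationToken': result['continuationToken']}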
+ """
+
+ return await self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs)
+
+ async def listTasks(self, *args, **kwargs):
+ """
+ List Tasks
+
+ List the tasks immediately under a given namespace.
+
+ This endpoint
+ lists up to 1000 tasks. If more tasks are present, a
+ `continuationToken` will be returned, which can be given in the next
+ request. For the initial request, the payload should be an empty JSON
+ object.
+
+ **Remark**, this end-point is designed for humans browsing for tasks, not
+ services, as that makes little sense.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listTasks"], *args, **kwargs)
+
+ async def insertTask(self, *args, **kwargs):
+ """
+ Insert Task into Index
+
+ Insert a task into the index. If the new rank is less than the existing rank
+ at the given index path, the task is not indexed but the response is still 200 OK.
+
+ Please see the introduction above for information
+ about indexing successfully completed tasks automatically using custom routes.
+
+ This method is ``stable``
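+
+ A sketch of an insert, reusing the `index` client from the
+ `listNamespaces` sketch (the namespace and values are illustrative; see
+ `v1/insert-task-request.json#` for the authoritative schema)::
+
+     import taskcluster
+     await index.insertTask('project.example.builds.latest', {
+         'taskId': 'fN1SbArXTPSVFNUvaOlinQ',  # a 22-character slugid
+         'rank': 0,
+         'data': {},                          # arbitrary user-supplied data
+         'expires': taskcluster.fromNowJSON('1 year'),
+     })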
+ """
+
+ return await self._makeApiCall(self.funcinfo["insertTask"], *args, **kwargs)
+
+ async def deleteTask(self, *args, **kwargs):
+ """
+ Remove Task from Index
+
+ Remove a task from the index. This is intended for administrative use,
+ where an index entry is no longer appropriate. The parent namespace is
+ not automatically deleted. Index entries with lower rank that were
+ previously inserted will not re-appear, as they were never stored.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteTask"], *args, **kwargs)
+
+ async def findArtifactFromTask(self, *args, **kwargs):
+ """
+ Get Artifact From Indexed Task
+
+ Find a task by index path and redirect to the artifact on the most recent
+ run with the given `name`.
+
+ Note that multiple calls to this endpoint may return artifacts from different tasks
+ if a new task is inserted into the index between calls. Avoid using this method as
+ a stable link to multiple, connected files if the index path does not contain a
+ unique identifier. For example, the following two links may return unrelated files:
+ * https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/installer.exe
+ * https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip
+
+ This problem can be remedied by including the revision in the index path or by bundling both
+ installer and debug symbols into a single artifact.
+
+ If no task exists for the given index path, this API end-point responds with 404.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["findArtifactFromTask"], *args, **kwargs)
+
+ funcinfo = {
+ "deleteTask": {
+ 'args': ['namespace'],
+ 'method': 'delete',
+ 'name': 'deleteTask',
+ 'route': '/task/<namespace>',
+ 'stability': 'stable',
+ },
+ "findArtifactFromTask": {
+ 'args': ['indexPath', 'name'],
+ 'method': 'get',
+ 'name': 'findArtifactFromTask',
+ 'route': '/task/<indexPath>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "findTask": {
+ 'args': ['indexPath'],
+ 'method': 'get',
+ 'name': 'findTask',
+ 'output': 'v1/indexed-task-response.json#',
+ 'route': '/task/<indexPath>',
+ 'stability': 'stable',
+ },
+ "insertTask": {
+ 'args': ['namespace'],
+ 'input': 'v1/insert-task-request.json#',
+ 'method': 'put',
+ 'name': 'insertTask',
+ 'output': 'v1/indexed-task-response.json#',
+ 'route': '/task/<namespace>',
+ 'stability': 'stable',
+ },
+ "listNamespaces": {
+ 'args': ['namespace'],
+ 'method': 'get',
+ 'name': 'listNamespaces',
+ 'output': 'v1/list-namespaces-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/namespaces/<namespace>',
+ 'stability': 'stable',
+ },
+ "listTasks": {
+ 'args': ['namespace'],
+ 'method': 'get',
+ 'name': 'listTasks',
+ 'output': 'v1/list-tasks-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/tasks/<namespace>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Index']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/notify.py b/third_party/python/taskcluster/taskcluster/generated/aio/notify.py
new file mode 100644
index 0000000000..391e0516a7
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/notify.py
@@ -0,0 +1,207 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Notify(AsyncBaseClient):
+ """
+ The notification service listens for tasks with associated notifications
+ and handles requests to send emails and post pulse messages.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'notify'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def email(self, *args, **kwargs):
+ """
+ Send an Email
+
+ Send an email to `address`. The content is markdown and will be rendered
+ to HTML, but both the HTML and raw markdown text will be sent in the
+ email. If a link is included, it will be rendered to a nice button in the
+ HTML version of the email.
+
+ This method is ``experimental``
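+
+ A request sketch (the root URL, credentials, address, content, and link
+ are illustrative; the body follows `v1/email-request.json#`)::
+
+     import taskcluster.aio
+     notify = taskcluster.aio.Notify({
+         'rootUrl': 'https://tc.example.com',
+         'credentials': {'clientId': '...', 'accessToken': '...'},
+     })
+     await notify.email({
+         'address': 'someone@example.com',
+         'subject': 'Nightly build finished',
+         'content': 'The nightly **build** is done.',
+         'link': {'text': 'Inspect Task', 'href': 'https://tc.example.com/tasks/...'},
+     })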
+ """
+
+ return await self._makeApiCall(self.funcinfo["email"], *args, **kwargs)
+
+ async def pulse(self, *args, **kwargs):
+ """
+ Publish a Pulse Message
+
+ Publish a message on pulse with the given `routingKey`.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
+
+ async def matrix(self, *args, **kwargs):
+ """
+ Post Matrix Message
+
+ Post a message to a room in Matrix. Optionally includes a formatted message.
+
+ The `roomId` in the scopes is a fully formed `roomId` with leading `!` such
+ as `!foo:bar.com`.
+
+ Note that the matrix client used by taskcluster must be invited to a room before
+ it can post there!
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["matrix"], *args, **kwargs)
+
+ async def slack(self, *args, **kwargs):
+ """
+ Post Slack Message
+
+ Post a message to a Slack channel.
+
+ The `channelId` in the scopes is a Slack channel ID, starting with a capital C.
+
+ The Slack app can post into public channels by default but will need to be added
+ to private channels before it can post messages there.
+
+ This method is ``experimental``
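+
+ A request sketch, reusing the `notify` client from the `email` sketch
+ (assuming the `channelId` and `text` fields of `v1/slack-request.json#`;
+ the channel ID is illustrative)::
+
+     await notify.slack({
+         'channelId': 'C0123456789',
+         'text': 'Deployment complete',
+     })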
+ """
+
+ return await self._makeApiCall(self.funcinfo["slack"], *args, **kwargs)
+
+ async def addDenylistAddress(self, *args, **kwargs):
+ """
+ Denylist Given Address
+
+ Add the given address to the notification denylist. Addresses in the denylist will be ignored
+ by the notification service.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["addDenylistAddress"], *args, **kwargs)
+
+ async def deleteDenylistAddress(self, *args, **kwargs):
+ """
+ Delete Denylisted Address
+
+ Delete the specified address from the notification denylist.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteDenylistAddress"], *args, **kwargs)
+
+ async def listDenylist(self, *args, **kwargs):
+ """
+ List Denylisted Notifications
+
+ Lists all the denylisted addresses.
+
+ By default this end-point will try to return up to 1000 addresses in one
+ request. But it **may return less**, even if more addresses are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listDenylist` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the addresses at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listDenylist"], *args, **kwargs)
+
+ funcinfo = {
+ "addDenylistAddress": {
+ 'args': [],
+ 'input': 'v1/notification-address.json#',
+ 'method': 'post',
+ 'name': 'addDenylistAddress',
+ 'route': '/denylist/add',
+ 'stability': 'experimental',
+ },
+ "deleteDenylistAddress": {
+ 'args': [],
+ 'input': 'v1/notification-address.json#',
+ 'method': 'delete',
+ 'name': 'deleteDenylistAddress',
+ 'route': '/denylist/delete',
+ 'stability': 'experimental',
+ },
+ "email": {
+ 'args': [],
+ 'input': 'v1/email-request.json#',
+ 'method': 'post',
+ 'name': 'email',
+ 'route': '/email',
+ 'stability': 'experimental',
+ },
+ "listDenylist": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listDenylist',
+ 'output': 'v1/notification-address-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/denylist/list',
+ 'stability': 'experimental',
+ },
+ "matrix": {
+ 'args': [],
+ 'input': 'v1/matrix-request.json#',
+ 'method': 'post',
+ 'name': 'matrix',
+ 'route': '/matrix',
+ 'stability': 'experimental',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "pulse": {
+ 'args': [],
+ 'input': 'v1/pulse-request.json#',
+ 'method': 'post',
+ 'name': 'pulse',
+ 'route': '/pulse',
+ 'stability': 'experimental',
+ },
+ "slack": {
+ 'args': [],
+ 'input': 'v1/slack-request.json#',
+ 'method': 'post',
+ 'name': 'slack',
+ 'route': '/slack',
+ 'stability': 'experimental',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Notify']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/notifyevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/notifyevents.py
new file mode 100644
index 0000000000..614adc548d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/notifyevents.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class NotifyEvents(AsyncBaseClient):
+ """
+ This exchange pretty much only contains the simple free-form
+ message that can be published from this service at the request
+ of anybody with the proper scopes.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-notify/v1/",
+ }
+ serviceName = 'notify'
+ apiVersion = 'v1'
+
+ def notify(self, *args, **kwargs):
+ """
+ Notification Messages
+
+ An arbitrary message that a taskcluster user
+ can trigger if they like.
+
+ The standard one that is published by us watching
+ for the completion of tasks is just the task status
+ data that we pull from the queue `status()` endpoint
+ when we notice a task is complete.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'notification',
+ 'name': 'notify',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/notification-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'NotifyEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/object.py b/third_party/python/taskcluster/taskcluster/generated/aio/object.py
new file mode 100644
index 0000000000..b575e11f80
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/object.py
@@ -0,0 +1,187 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Object(AsyncBaseClient):
+ """
+ The object service provides HTTP-accessible storage for large blobs of data.
+
+ Objects can be uploaded and downloaded, with the object data flowing directly
+ from the storage "backend" to the caller, and not directly via this service.
+ Once uploaded, objects are immutable until their expiration time.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'object'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def createUpload(self, *args, **kwargs):
+ """
+ Begin upload of a new object
+
+ Create a new object by initiating upload of its data.
+
+ This endpoint implements negotiation of upload methods. It can be called
+ multiple times if necessary, either to propose new upload methods or to
+ renew credentials for an already-agreed upload.
+
+ The `name` parameter can contain any printable ASCII character (0x20 - 0x7e).
+ The `uploadId` must be supplied by the caller, and any attempts to upload
+ an object with the same name but a different `uploadId` will fail.
+ Thus the first call to this method establishes the `uploadId` for the
+ object, and as long as that value is kept secret, no other caller can
+ upload an object of that name, regardless of scopes. Object expiration
+ cannot be changed after the initial call, either. It is possible to call
+ this method with no proposed upload methods, which has the effect of "locking
+ in" the `expiration`, `projectId`, and `uploadId` properties and any
+ supplied hashes.
+
+ Unfinished uploads expire after 1 day.
+
+ This method is ``stable``
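+
+ A sketch of a small inline upload, followed by `finishUpload` (the root
+ URL, object name, project, and the `dataInline` method fields are
+ illustrative; consult the upload schemas for the authoritative shape)::
+
+     import base64
+     import taskcluster
+     import taskcluster.aio
+     objectService = taskcluster.aio.Object({'rootUrl': 'https://tc.example.com'})
+     uploadId = taskcluster.slugId()
+     await objectService.createUpload('some/object/name', {
+         'expires': taskcluster.fromNowJSON('1 month'),
+         'projectId': 'example',
+         'uploadId': uploadId,
+         'proposedUploadMethods': {
+             'dataInline': {
+                 'contentType': 'text/plain',
+                 'objectData': base64.b64encode(b'hello world').decode(),
+             },
+         },
+     })
+     await objectService.finishUpload('some/object/name', {
+         'projectId': 'example',
+         'uploadId': uploadId,
+     })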
+ """
+
+ return await self._makeApiCall(self.funcinfo["createUpload"], *args, **kwargs)
+
+ async def finishUpload(self, *args, **kwargs):
+ """
+ Mark an upload as complete.
+
+ This endpoint marks an upload as complete. This indicates that all data has been
+ transmitted to the backend. After this call, no further calls to `uploadObject` are
+ allowed, and downloads of the object may begin. This method is idempotent, but will
+ fail if given an incorrect uploadId for an unfinished upload.
+
+ Note that, once `finishUpload` is complete, the object is considered immutable.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["finishUpload"], *args, **kwargs)
+
+ async def startDownload(self, *args, **kwargs):
+ """
+ Download object data
+
+ Start the process of downloading an object's data. Call this endpoint with a list of acceptable
+ download methods, and the server will select a method and return the corresponding payload.
+
+ Returns a 406 error if none of the given download methods are available.
+
+ See [Download Methods](https://docs.taskcluster.net/docs/reference/platform/object/download-methods) for more detail.
+
+ This method is ``stable``
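+
+ A negotiation sketch, reusing the `objectService` client from the
+ `createUpload` sketch (assuming the `simple` method described in the
+ linked download-methods documentation)::
+
+     result = await objectService.startDownload('some/object/name', {
+         'acceptDownloadMethods': {'simple': True},
+     })
+     # For the 'simple' method, result['url'] is a plain fetchable URL.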
+ """
+
+ return await self._makeApiCall(self.funcinfo["startDownload"], *args, **kwargs)
+
+ async def object(self, *args, **kwargs):
+ """
+ Get an object's metadata
+
+ Get the metadata for the named object. This metadata is not sufficient to
+ get the object's content; for that use `startDownload`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["object"], *args, **kwargs)
+
+ async def download(self, *args, **kwargs):
+ """
+ Get an object's data
+
+ Get the data in an object directly. This method does not return a JSON body, but
+ redirects to a location that will serve the object content directly.
+
+ URLs for this endpoint, perhaps with attached authentication (`?bewit=..`),
+ are typically used for downloads of objects by simple HTTP clients such as
+ web browsers, curl, or wget.
+
+ This method is limited by the common capabilities of HTTP, so it may not be
+ the most efficient, resilient, or featureful way to retrieve an artifact.
+ Situations where such functionality is required should ues the
+ `startDownload` API endpoint.
+
+ See [Simple Downloads](https://docs.taskcluster.net/docs/reference/platform/object/simple-downloads) for more detail.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["download"], *args, **kwargs)
+
+ funcinfo = {
+ "createUpload": {
+ 'args': ['name'],
+ 'input': 'v1/create-upload-request.json#',
+ 'method': 'put',
+ 'name': 'createUpload',
+ 'output': 'v1/create-upload-response.json#',
+ 'route': '/upload/<name>',
+ 'stability': 'stable',
+ },
+ "download": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'download',
+ 'route': '/download/<name>',
+ 'stability': 'stable',
+ },
+ "finishUpload": {
+ 'args': ['name'],
+ 'input': 'v1/finish-upload-request.json#',
+ 'method': 'post',
+ 'name': 'finishUpload',
+ 'route': '/finish-upload/<name>',
+ 'stability': 'stable',
+ },
+ "object": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'object',
+ 'output': 'v1/get-object-response.json#',
+ 'route': '/metadata/<name>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "startDownload": {
+ 'args': ['name'],
+ 'input': 'v1/download-object-request.json#',
+ 'method': 'put',
+ 'name': 'startDownload',
+ 'output': 'v1/download-object-response.json#',
+ 'route': '/start-download/<name>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Object']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/purgecache.py b/third_party/python/taskcluster/taskcluster/generated/aio/purgecache.py
new file mode 100644
index 0000000000..d7f5c3881d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/purgecache.py
@@ -0,0 +1,123 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class PurgeCache(AsyncBaseClient):
+ """
+ The purge-cache service is responsible for tracking cache-purge requests.
+
+ Users create purge requests for specific caches on specific workers, and
+ these requests are timestamped. Workers consult the service before
+ starting a new task, and purge any caches older than the timestamp.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'purge-cache'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def purgeCache(self, *args, **kwargs):
+ """
+ Purge Worker Cache
+
+ Publish a request to purge caches named `cacheName` with
+ on `workerPoolId` workers.
+
+ If such a request already exists, its `before` timestamp is updated to
+ the current time.
+
+ This method is ``stable``
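+
+ A request sketch (the root URL, credentials, worker pool, and cache name
+ are illustrative)::
+
+     import taskcluster.aio
+     purgeCache = taskcluster.aio.PurgeCache({
+         'rootUrl': 'https://tc.example.com',
+         'credentials': {'clientId': '...', 'accessToken': '...'},
+     })
+     await purgeCache.purgeCache('proj-example/ci', {'cacheName': 'checkouts'})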
+ """
+
+ return await self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs)
+
+ async def allPurgeRequests(self, *args, **kwargs):
+ """
+ All Open Purge Requests
+
+ View all active purge requests.
+
+ This is useful mostly for administrators to view
+ the set of open purge requests. It should not
+ be used by workers. They should use the purgeRequests
+ endpoint that is specific to their workerType and
+ provisionerId.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs)
+
+ async def purgeRequests(self, *args, **kwargs):
+ """
+ Open Purge Requests for a worker pool
+
+ List the caches for this `workerPoolId` that should be
+ purged if they are from before the time given in the response.
+
+ This is intended to be used by workers to determine which caches to purge.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs)
+
+ funcinfo = {
+ "allPurgeRequests": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'allPurgeRequests',
+ 'output': 'v1/all-purge-cache-request-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/purge-cache/list',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "purgeCache": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/purge-cache-request.json#',
+ 'method': 'post',
+ 'name': 'purgeCache',
+ 'route': '/purge-cache/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "purgeRequests": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'purgeRequests',
+ 'output': 'v1/purge-cache-request-list.json#',
+ 'query': ['since'],
+ 'route': '/purge-cache/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'PurgeCache']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/queue.py b/third_party/python/taskcluster/taskcluster/generated/aio/queue.py
new file mode 100644
index 0000000000..ec07ac2cf8
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/queue.py
@@ -0,0 +1,1120 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Queue(AsyncBaseClient):
+ """
+ The queue service is responsible for accepting tasks and tracking their state
+ as they are executed by workers, in order to ensure they are eventually
+ resolved.
+
+ ## Artifact Storage Types
+
+ * **Object artifacts** contain arbitrary data, stored via the object service.
+ * **Redirect artifacts**, will redirect the caller to a URL when fetched,
+ with a 303 (See Other) response. Clients will not apply any kind of
+ authentication to that URL.
+ * **Link artifacts**, will be treated as if the caller requested the linked
+ artifact on the same task. Links may be chained, but cycles are forbidden.
+ The caller must have scopes for the linked artifact, or a 403 response will
+ be returned.
+ * **Error artifacts**, only consist of meta-data which the queue will
+ store for you. These artifacts are only meant to indicate that the
+ worker or the task failed to generate a specific artifact that it
+ would otherwise have uploaded. For example docker-worker will upload an
+ error artifact if the file it was supposed to upload doesn't exist or
+ turns out to be a directory. Clients requesting an error artifact will
+ get a `424` (Failed Dependency) response. This is mainly designed to
+ ensure that dependent tasks can distinguish between artifacts that were
+ supposed to be generated and artifacts for which the name is misspelled.
+ * **S3 artifacts** are used for static files which will be
+ stored on S3. When creating an S3 artifact the queue will return a
+ pre-signed URL to which you can do a `PUT` request to upload your
+ artifact. Note that `PUT` request **must** specify the `content-length`
+ header and **must** give the `content-type` header the same value as in
+ the request to `createArtifact`. S3 artifacts will be deprecated soon,
+ and users should prefer object artifacts instead.
+
+ ## Artifact immutability
+
+ Generally speaking, you cannot overwrite an artifact once it has been created.
+ But if you repeat the request with the same properties the request will
+ succeed, as the operation is idempotent.
+ This is useful if you need to refresh a signed URL while uploading.
+ Do not abuse this to overwrite artifacts created by another entity,
+ such as a worker-host overwriting an artifact created by worker-code.
+
+ The queue defines the following *immutability special cases*:
+
+ * A `reference` artifact can replace an existing `reference` artifact.
+ * A `link` artifact can replace an existing `reference` artifact.
+ * Any artifact's `expires` can be extended (made later, but not earlier).
+ """
+
+ classOptions = {
+ }
+ serviceName = 'queue'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def task(self, *args, **kwargs):
+ """
+ Get Task Definition
+
+ This end-point will return the task definition. Notice that the task
+ definition may have been modified by the queue; if an optional property is
+ not specified, the queue may provide a default value.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["task"], *args, **kwargs)
+
+ async def status(self, *args, **kwargs):
+ """
+ Get task status
+
+ Get task status structure from `taskId`
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["status"], *args, **kwargs)
+
+ async def listTaskGroup(self, *args, **kwargs):
+ """
+ List Task Group
+
+ List tasks sharing the same `taskGroupId`.
+
+ As a task-group may contain an unbounded number of tasks, this end-point
+ may return a `continuationToken`. To continue listing tasks you must call
+ the `listTaskGroup` again with the `continuationToken` as the
+ query-string option `continuationToken`.
+
+ By default this end-point will try to return up to 1000 members in one
+ request. But it **may return less**, even if more tasks are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listTaskGroup` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the members at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listTaskGroup"], *args, **kwargs)
+
+ async def listDependentTasks(self, *args, **kwargs):
+ """
+ List Dependent Tasks
+
+ List tasks that depend on the given `taskId`.
+
+ As many tasks from different task-groups may depend on a single task,
+ this end-point may return a `continuationToken`. To continue listing
+ tasks you must call `listDependentTasks` again with the
+ `continuationToken` as the query-string option `continuationToken`.
+
+ By default this end-point will try to return up to 1000 tasks in one
+ request. But it **may return less**, even if more tasks are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listDependentTasks` with the last `continuationToken` until
+ you get a result without a `continuationToken`.
+
+ If you are not interested in listing all the tasks at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listDependentTasks"], *args, **kwargs)
+
+ async def createTask(self, *args, **kwargs):
+ """
+ Create New Task
+
+ Create a new task, this is an **idempotent** operation, so repeat it if
+ you get an internal server error or network connection is dropped.
+
+ **Task `deadline`**: the deadline property can be no more than 5 days
+ into the future. This is to limit the number of pending tasks not being
+ taken care of. Ideally, you should use a much shorter deadline.
+
+ **Task expiration**: the `expires` property must be greater than the
+ task `deadline`. If not provided it will default to `deadline` + one
+ year. Notice that artifacts created by a task must expire before the
+ task's expiration.
+
+ **Task specific routing-keys**: using the `task.routes` property you may
+ define task specific routing-keys. If a task has a task specific
+ routing-key: `<route>`, then when the AMQP message about the task is
+ published, the message will be CC'ed with the routing-key:
+ `route.<route>`. This is useful if you want another component to listen
+ for completed tasks you have posted. The caller must have scope
+ `queue:route:<route>` for each route.
+
+ **Dependencies**: any tasks referenced in `task.dependencies` must have
+ already been created at the time of this call.
+
+ **Scopes**: Note that the scopes required to complete this API call depend
+ on the content of the `scopes`, `routes`, `schedulerId`, `priority`,
+ `provisionerId`, and `workerType` properties of the task definition.
+
+ This method is ``stable``
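+
+ A minimal sketch of creating a task (the root URL, credentials, task
+ queue, and metadata are illustrative, and many optional properties are
+ omitted; see the task-definition schema for the authoritative shape)::
+
+     import taskcluster
+     import taskcluster.aio
+     queue = taskcluster.aio.Queue({
+         'rootUrl': 'https://tc.example.com',
+         'credentials': {'clientId': '...', 'accessToken': '...'},
+     })
+     taskId = taskcluster.slugId()
+     await queue.createTask(taskId, {
+         'taskQueueId': 'proj-example/tutorial',
+         'created': taskcluster.fromNowJSON(''),
+         'deadline': taskcluster.fromNowJSON('1 day'),
+         'payload': {},
+         'metadata': {
+             'name': 'Example task',
+             'description': 'A minimal createTask sketch',
+             'owner': 'someone@example.com',
+             'source': 'https://tc.example.com/docs',
+         },
+     })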
+ """
+
+ return await self._makeApiCall(self.funcinfo["createTask"], *args, **kwargs)
+
+ async def scheduleTask(self, *args, **kwargs):
+ """
+ Schedule Defined Task
+
+ scheduleTask will schedule a task to be executed, even if it has
+ unresolved dependencies. A task would otherwise only be scheduled if
+ its dependencies were resolved.
+
+ This is useful if you have defined a task that depends on itself or on
+ some other task that has not been resolved, but you wish the task to be
+ scheduled immediately.
+
+ This will announce the task as pending and workers will be allowed to
+ claim it and resolve the task.
+
+ **Note** this operation is **idempotent** and will not fail or complain
+ if called with a `taskId` that is already scheduled, or even resolved.
+ To reschedule a task previously resolved, use `rerunTask`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs)
+
+ async def rerunTask(self, *args, **kwargs):
+ """
+ Rerun a Resolved Task
+
+ This method _reruns_ a previously resolved task, even if it was
+ _completed_. This is useful if your task completes unsuccessfully, and
+ you just want to run it from scratch again. This will also reset the
+ number of `retries` allowed. It will schedule a task that is _unscheduled_
+ regardless of the state of its dependencies.
+
+ This method is deprecated in favour of creating a new task with the same
+ task definition (but with a new taskId).
+
+ Remember that `retries` in the task status counts the number of runs that
+ the queue has started because the worker stopped responding, for example
+ because a spot node died.
+
+ **Remark** this operation is idempotent: if it is invoked for a task that
+ is `pending` or `running`, it will just return the current task status.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs)
+
+ async def cancelTask(self, *args, **kwargs):
+ """
+ Cancel Task
+
+ This method will cancel a task that is either `unscheduled`, `pending` or
+ `running`. It will resolve the current run as `exception` with
+ `reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e.
+ it doesn't have any runs, an initial run will be added and resolved as
+ described above. Hence, after canceling a task, it cannot be scheduled
+ with `queue.scheduleTask`, but a new run can be created with
+ `queue.rerun`. These semantics are equivalent to calling
+ `queue.scheduleTask` immediately followed by `queue.cancelTask`.
+
+ **Remark** this operation is idempotent, if you try to cancel a task that
+ isn't `unscheduled`, `pending` or `running`, this operation will just
+ return the current task status.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs)
+
+ async def claimWork(self, *args, **kwargs):
+ """
+ Claim Work
+
+ Claim pending task(s) for the given task queue.
+
+ If any work is available (even if fewer than the requested number of
+ tasks), this will return immediately. Otherwise, it will block for tens of
+ seconds waiting for work. If no work appears, it will return an empty
+ list of tasks. Callers should sleep a short while (to avoid denial of
+ service in an error condition) and call the endpoint again. This is a
+ simple implementation of "long polling".
+
+ This method is ``stable``
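+
+ A long-polling worker loop sketch, reusing the `queue` client from the
+ `createTask` sketch (the task queue and worker identifiers are
+ illustrative)::
+
+     import asyncio
+     while True:
+         claims = await queue.claimWork('proj-example/tutorial', {
+             'tasks': 4,
+             'workerGroup': 'example-group',
+             'workerId': 'worker-1',
+         })
+         for claim in claims['tasks']:
+             ...  # run the task, then report the result
+         if not claims['tasks']:
+             await asyncio.sleep(5)  # back off briefly when no work was returned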
+ """
+
+ return await self._makeApiCall(self.funcinfo["claimWork"], *args, **kwargs)
+
+ async def claimTask(self, *args, **kwargs):
+ """
+ Claim Task
+
+ claim a task - never documented
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["claimTask"], *args, **kwargs)
+
+ async def reclaimTask(self, *args, **kwargs):
+ """
+ Reclaim task
+
+ Refresh the claim for a specific `runId` for given `taskId`. This updates
+ the `takenUntil` property and returns a new set of temporary credentials
+ for performing requests on behalf of the task. These credentials should
+ be used in-place of the credentials returned by `claimWork`.
+
+ The `reclaimTask` request serves to:
+ * Postpone `takenUntil` preventing the queue from resolving
+ `claim-expired`,
+ * Refresh temporary credentials used for processing the task, and
+ * Abort execution if the task/run have been resolved.
+
+ If the `takenUntil` timestamp is exceeded the queue will resolve the run
+ as _exception_ with reason `claim-expired`, and proceed to retry the
+ task. This ensures that tasks are retried, even if workers disappear
+ without warning.
+
+ If the task is resolved, this end-point will return `409` reporting
+ `RequestConflict`. This typically happens if the task has been canceled
+ or the `task.deadline` has been exceeded. If reclaiming fails, workers
+ should abort the task and forget about the given `runId`. There is no
+ need to resolve the run or upload artifacts.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reclaimTask"], *args, **kwargs)
+
+ async def reportCompleted(self, *args, **kwargs):
+ """
+ Report Run Completed
+
+ Report a task completed, resolving the run as `completed`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reportCompleted"], *args, **kwargs)
+
+ async def reportFailed(self, *args, **kwargs):
+ """
+ Report Run Failed
+
+ Report a run failed, resolving the run as `failed`. Use this to resolve
+ a run that failed because the task specific code behaved unexpectedly.
+ For example the task exited non-zero, or didn't produce expected output.
+
+ Do not use this if the task couldn't be run because of a malformed
+ payload, or some other unexpected condition. In these cases we have a task
+ exception, which should be reported with `reportException`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reportFailed"], *args, **kwargs)
+
+ async def reportException(self, *args, **kwargs):
+ """
+ Report Task Exception
+
+ Resolve a run as _exception_. Generally, you will want to report tasks as
+ failed instead of exception. You should `reportException` if,
+
+ * The `task.payload` is invalid,
+ * Non-existent resources are referenced,
+ * Declared actions cannot be executed due to unavailable resources,
+ * The worker had to shutdown prematurely,
+ * The worker experienced an unknown error, or,
+ * The task explicitly requested a retry.
+
+ Do not use this to signal that some user-specified code crashed for any
+ reason specific to this code. If user-specific code hits a resource that
+ is temporarily unavailable, the worker should report the task as _failed_.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reportException"], *args, **kwargs)
+
+ async def createArtifact(self, *args, **kwargs):
+ """
+ Create Artifact
+
+ This API end-point creates an artifact for a specific run of a task. This
+ should **only** be used by a worker currently operating on this task, or
+ from a process running within the task (ie. on the worker).
+
+ All artifacts must specify when they expire. The queue will
+ automatically take care of deleting artifacts past their
+ expiration point. This feature makes it feasible to upload large
+ intermediate artifacts from data processing applications, as the
+ artifacts can be set to expire a few days later.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createArtifact"], *args, **kwargs)
+
+ async def finishArtifact(self, *args, **kwargs):
+ """
+ Finish Artifact
+
+ This endpoint marks an artifact as present for the given task, and
+ should be called when the artifact data is fully uploaded.
+
+ The storage types `reference`, `link`, and `error` do not need to
+ be finished, as they are finished immediately by `createArtifact`.
+ The storage type `s3` does not support this functionality and cannot
+ be finished. In all such cases, calling this method is an input error
+ (400).
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["finishArtifact"], *args, **kwargs)
+
+ async def getArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Data from Run
+
+ Get artifact by `<name>` from a specific run.
+
+ **Artifact Access**, in order to get an artifact you need the scope
+ `queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
+ To allow access to fetch artifacts with a client like `curl` or a web
+ browser, without using Taskcluster credentials, include a scope in the
+ `anonymous` role. The convention is to include
+ `queue:get-artifact:public/*`.
+
+ **Response**: the HTTP response to this method is a 303 redirect to the
+ URL from which the artifact can be downloaded. The body of that response
+ contains the data described in the output schema, containing the same URL.
+ Callers are encouraged to use whichever method of gathering the URL is
+ most convenient. Standard HTTP clients will follow the redirect, while
+ API client libraries will return the JSON body.
+
+ In order to download an artifact the following must be done:
+
+ 1. Obtain the queue URL. Building a signed URL with a Taskcluster client is
+ recommended
+ 1. Make a GET request which does not follow redirects
+ 1. In all cases, if specified, the
+ x-taskcluster-location-{content,transfer}-{sha256,length} values must be
+ validated to be equal to the Content-Length and SHA-256 checksum of the
+ final artifact downloaded, as well as any intermediate redirects
+ 1. If this response is a 500-series error, retry using an exponential
+ backoff. No more than 5 retries should be attempted
+ 1. If this response is a 400-series error, treat it appropriately for
+ your context. This might be an error in responding to this request or
+ an Error storage type body. This request should not be retried.
+ 1. If this response is a 200-series response, the response body is the artifact.
+ If the x-taskcluster-location-{content,transfer}-{sha256,length} and
+ x-taskcluster-location-content-encoding are specified, they should match
+ this response body
+ 1. If the response type is a 300-series redirect, the artifact will be at the
+ location specified by the `Location` header. There are multiple artifact storage
+ types which use a 300-series redirect.
+ 1. For all redirects followed, the user must verify that the content-sha256, content-length,
+ transfer-sha256, transfer-length and content-encoding match every further request. The final
+ artifact must also be validated against the values specified in the original queue response
+ 1. Caching of requests with an x-taskcluster-artifact-storage-type value of `reference`
+ must not occur
+
+ **Headers**
+ The following important headers are set on the response to this method:
+
+ * location: the url of the artifact if a redirect is to be performed
+ * x-taskcluster-artifact-storage-type: the storage type. Example: s3
+
+ This method is ``stable``
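+
+ A sketch of fetching a protected artifact with a signed URL, using the
+ client's `buildSignedUrl` helper (assuming `queue`, `taskId`, and `runId`
+ from the surrounding context; the artifact name is illustrative)::
+
+     url = queue.buildSignedUrl('getArtifact', taskId, runId, 'public/logs/live_backing.log')
+     # The bewit query string in `url` carries the authentication, so any
+     # plain HTTP client can download it.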
+ """
+
+ return await self._makeApiCall(self.funcinfo["getArtifact"], *args, **kwargs)
+
+ async def getLatestArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Data from Latest Run
+
+ Get artifact by `<name>` from the last run of a task.
+
+ **Artifact Access**, in order to get an artifact you need the scope
+ `queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
+ To allow access to fetch artifacts with a client like `curl` or a web
+ browser, without using Taskcluster credentials, include a scope in the
+ `anonymous` role. The convention is to include
+ `queue:get-artifact:public/*`.
+
+ **API Clients**, this method will redirect you to the artifact, if it is
+ stored externally. Either way, the response may not be JSON. So API
+ client users might want to generate a signed URL for this end-point and
+ use that URL with a normal HTTP client.
+
+ **Remark**, this end-point is slightly slower than
+ `queue.getArtifact`, so consider that if you already know the `runId` of
+ the latest run. Otherwise, just use the most convenient API end-point.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getLatestArtifact"], *args, **kwargs)
+
+ async def listArtifacts(self, *args, **kwargs):
+ """
+ Get Artifacts from Run
+
+ Returns a list of artifacts and associated meta-data for a given run.
+
+ As a task may have many artifacts paging may be necessary. If this
+ end-point returns a `continuationToken`, you should call the end-point
+ again with the `continuationToken` as the query-string option:
+ `continuationToken`.
+
+ By default this end-point will list up to 1000 artifacts in a single page;
+ you may limit this with the query-string parameter `limit`.
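+
+ A sketch of the paging loop (the `queue` client instance and the `taskId`
+ and `runId` values are illustrative assumptions):
+
+     token = None
+     while True:
+         kwargs = {'continuationToken': token} if token else {}
+         result = await queue.listArtifacts(taskId, runId, **kwargs)
+         for artifact in result['artifacts']:
+             print(artifact['name'])
+         token = result.get('continuationToken')
+         if not token:
+             break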
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listArtifacts"], *args, **kwargs)
+
+ async def listLatestArtifacts(self, *args, **kwargs):
+ """
+ Get Artifacts from Latest Run
+
+ Returns a list of artifacts and associated meta-data for the latest run
+ from the given task.
+
+ As a task may have many artifacts paging may be necessary. If this
+ end-point returns a `continuationToken`, you should call the end-point
+ again with the `continuationToken` as the query-string option:
+ `continuationToken`.
+
+ By default this end-point will list up to 1000 artifacts in a single page;
+ you may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listLatestArtifacts"], *args, **kwargs)
+
+ async def artifactInfo(self, *args, **kwargs):
+ """
+ Get Artifact Information From Run
+
+ Returns associated metadata for a given artifact, in the given task run.
+ The metadata is the same as that returned from `listArtifacts`, and does
+ not grant access to the artifact data.
+
+ Note that this method does *not* automatically follow link artifacts.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["artifactInfo"], *args, **kwargs)
+
+ async def latestArtifactInfo(self, *args, **kwargs):
+ """
+ Get Artifact Information From Latest Run
+
+ Returns associated metadata for a given artifact, in the latest run of the
+ task. The metadata is the same as that returned from `listArtifacts`,
+ and does not grant access to the artifact data.
+
+ Note that this method does *not* automatically follow link artifacts.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["latestArtifactInfo"], *args, **kwargs)
+
+ async def artifact(self, *args, **kwargs):
+ """
+ Get Artifact Content From Run
+
+ Returns information about the content of the artifact, in the given task run.
+
+ Depending on the storage type, the endpoint returns the content of the artifact
+ or enough information to access that content.
+
+ This method follows link artifacts, so it will not return content
+ for a link artifact.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["artifact"], *args, **kwargs)
+
+ async def latestArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Content From Latest Run
+
+ Returns information about the content of the artifact, in the latest task run.
+
+ Depending on the storage type, the endpoint returns the content of the artifact
+ or enough information to access that content.
+
+ This method follows link artifacts, so it will not return content
+ for a link artifact.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["latestArtifact"], *args, **kwargs)
+
+ async def listProvisioners(self, *args, **kwargs):
+ """
+ Get a list of all active provisioners
+
+ Get all active provisioners.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 provisioners in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listProvisioners"], *args, **kwargs)
+
+ async def getProvisioner(self, *args, **kwargs):
+ """
+ Get an active provisioner
+
+ Get an active provisioner.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getProvisioner"], *args, **kwargs)
+
+ async def declareProvisioner(self, *args, **kwargs):
+ """
+ Update a provisioner
+
+ Declare a provisioner, supplying some details about it.
+
+ `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
+ possessed. For example, a request to update the `my-provisioner`
+ provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
+ `queue:declare-provisioner:my-provisioner#description`.
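+
+ A sketch of such an update (the `queue` client instance is an assumption):
+
+     await queue.declareProvisioner('my-provisioner',
+                                    {'description': 'This provisioner is great'})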
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs)
+
+ async def pendingTasks(self, *args, **kwargs):
+ """
+ Get Number of Pending Tasks
+
+ Get an approximate number of pending tasks for the given `taskQueueId`.
+
+ The underlying Azure Storage Queues only promises to give us an estimate.
+ Furthermore, we cache the result in memory for 20 seconds. So consumers
+ should by no means expect this to be an accurate number.
+ It is, however, a solid estimate of the number of pending tasks.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["pendingTasks"], *args, **kwargs)
+
+ async def listWorkerTypes(self, *args, **kwargs):
+ """
+ Get a list of all active worker-types
+
+ Get all active worker-types for the given provisioner.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 worker-types in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
+
+ async def getWorkerType(self, *args, **kwargs):
+ """
+ Get a worker-type
+
+ Get a worker-type from a provisioner.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getWorkerType"], *args, **kwargs)
+
+ async def declareWorkerType(self, *args, **kwargs):
+ """
+ Update a worker-type
+
+ Declare a workerType, supplying some details about it.
+
+ `declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are
+ possessed. For example, a request to update the `highmem` worker-type within the `my-provisioner`
+ provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope
+ `queue:declare-worker-type:my-provisioner/highmem#description`.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs)
+
+ async def listTaskQueues(self, *args, **kwargs):
+ """
+ Get a list of all active task queues
+
+ Get all active task queues.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 task queues in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listTaskQueues"], *args, **kwargs)
+
+ async def getTaskQueue(self, *args, **kwargs):
+ """
+ Get a task queue
+
+ Get a task queue.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getTaskQueue"], *args, **kwargs)
+
+ async def listWorkers(self, *args, **kwargs):
+ """
+ Get a list of all active workers of a workerType
+
+ Get a list of all active workers of a workerType.
+
+ `listWorkers` allows a response to be filtered by quarantined and non-quarantined workers.
+ To filter the query, you should call the end-point with `quarantined` as a query-string option with a
+ true or false value.
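+
+ A sketch of a filtered query (the `queue` client instance and the
+ identifiers are illustrative assumptions):
+
+     quarantined = await queue.listWorkers('my-provisioner', 'highmem',
+                                           quarantined='true')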
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 workers in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkers"], *args, **kwargs)
+
+ async def getWorker(self, *args, **kwargs):
+ """
+ Get a worker
+
+ Get a worker from a worker-type.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getWorker"], *args, **kwargs)
+
+ async def quarantineWorker(self, *args, **kwargs):
+ """
+ Quarantine a worker
+
+ Quarantine a worker.
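+
+ The request payload sets how long the quarantine lasts. A sketch (the
+ `queue` client instance and the identifiers are illustrative assumptions):
+
+     await queue.quarantineWorker('my-provisioner', 'highmem',
+                                  'my-group', 'my-worker',
+                                  {'quarantineUntil': '2030-01-01T00:00:00.000Z'})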
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs)
+
+ async def declareWorker(self, *args, **kwargs):
+ """
+ Declare a worker
+
+ Declare a worker, supplying some details about it.
+
+ `declareWorker` allows updating one or more properties of a worker as long as the required scopes are
+ possessed.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["declareWorker"], *args, **kwargs)
+
+ funcinfo = {
+ "artifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'artifact',
+ 'output': 'v1/artifact-content-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifact-content/<name>',
+ 'stability': 'stable',
+ },
+ "artifactInfo": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'artifactInfo',
+ 'output': 'v1/artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifact-info/<name>',
+ 'stability': 'stable',
+ },
+ "cancelTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'cancelTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/cancel',
+ 'stability': 'stable',
+ },
+ "claimTask": {
+ 'args': ['taskId', 'runId'],
+ 'input': 'v1/task-claim-request.json#',
+ 'method': 'post',
+ 'name': 'claimTask',
+ 'output': 'v1/task-claim-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/claim',
+ 'stability': 'deprecated',
+ },
+ "claimWork": {
+ 'args': ['taskQueueId'],
+ 'input': 'v1/claim-work-request.json#',
+ 'method': 'post',
+ 'name': 'claimWork',
+ 'output': 'v1/claim-work-response.json#',
+ 'route': '/claim-work/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "createArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'input': 'v1/post-artifact-request.json#',
+ 'method': 'post',
+ 'name': 'createArtifact',
+ 'output': 'v1/post-artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "createTask": {
+ 'args': ['taskId'],
+ 'input': 'v1/create-task-request.json#',
+ 'method': 'put',
+ 'name': 'createTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>',
+ 'stability': 'stable',
+ },
+ "declareProvisioner": {
+ 'args': ['provisionerId'],
+ 'input': 'v1/update-provisioner-request.json#',
+ 'method': 'put',
+ 'name': 'declareProvisioner',
+ 'output': 'v1/provisioner-response.json#',
+ 'route': '/provisioners/<provisionerId>',
+ 'stability': 'deprecated',
+ },
+ "declareWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'input': 'v1/update-worker-request.json#',
+ 'method': 'put',
+ 'name': 'declareWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "declareWorkerType": {
+ 'args': ['provisionerId', 'workerType'],
+ 'input': 'v1/update-workertype-request.json#',
+ 'method': 'put',
+ 'name': 'declareWorkerType',
+ 'output': 'v1/workertype-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
+ 'stability': 'deprecated',
+ },
+ "finishArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'input': 'v1/finish-artifact-request.json#',
+ 'method': 'put',
+ 'name': 'finishArtifact',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'getArtifact',
+ 'output': 'v1/get-artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getLatestArtifact": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'getLatestArtifact',
+ 'output': 'v1/get-artifact-response.json#',
+ 'route': '/task/<taskId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getProvisioner": {
+ 'args': ['provisionerId'],
+ 'method': 'get',
+ 'name': 'getProvisioner',
+ 'output': 'v1/provisioner-response.json#',
+ 'route': '/provisioners/<provisionerId>',
+ 'stability': 'deprecated',
+ },
+ "getTaskQueue": {
+ 'args': ['taskQueueId'],
+ 'method': 'get',
+ 'name': 'getTaskQueue',
+ 'output': 'v1/taskqueue-response.json#',
+ 'route': '/task-queues/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "getWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'method': 'get',
+ 'name': 'getWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "getWorkerType": {
+ 'args': ['provisionerId', 'workerType'],
+ 'method': 'get',
+ 'name': 'getWorkerType',
+ 'output': 'v1/workertype-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
+ 'stability': 'deprecated',
+ },
+ "latestArtifact": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'latestArtifact',
+ 'output': 'v1/artifact-content-response.json#',
+ 'route': '/task/<taskId>/artifact-content/<name>',
+ 'stability': 'stable',
+ },
+ "latestArtifactInfo": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'latestArtifactInfo',
+ 'output': 'v1/artifact-response.json#',
+ 'route': '/task/<taskId>/artifact-info/<name>',
+ 'stability': 'stable',
+ },
+ "listArtifacts": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'get',
+ 'name': 'listArtifacts',
+ 'output': 'v1/list-artifacts-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/runs/<runId>/artifacts',
+ 'stability': 'stable',
+ },
+ "listDependentTasks": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'listDependentTasks',
+ 'output': 'v1/list-dependent-tasks-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/dependents',
+ 'stability': 'stable',
+ },
+ "listLatestArtifacts": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'listLatestArtifacts',
+ 'output': 'v1/list-artifacts-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/artifacts',
+ 'stability': 'stable',
+ },
+ "listProvisioners": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listProvisioners',
+ 'output': 'v1/list-provisioners-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/provisioners',
+ 'stability': 'deprecated',
+ },
+ "listTaskGroup": {
+ 'args': ['taskGroupId'],
+ 'method': 'get',
+ 'name': 'listTaskGroup',
+ 'output': 'v1/list-task-group-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task-group/<taskGroupId>/list',
+ 'stability': 'stable',
+ },
+ "listTaskQueues": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listTaskQueues',
+ 'output': 'v1/list-taskqueues-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task-queues',
+ 'stability': 'stable',
+ },
+ "listWorkerTypes": {
+ 'args': ['provisionerId'],
+ 'method': 'get',
+ 'name': 'listWorkerTypes',
+ 'output': 'v1/list-workertypes-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/provisioners/<provisionerId>/worker-types',
+ 'stability': 'deprecated',
+ },
+ "listWorkers": {
+ 'args': ['provisionerId', 'workerType'],
+ 'method': 'get',
+ 'name': 'listWorkers',
+ 'output': 'v1/list-workers-response.json#',
+ 'query': ['continuationToken', 'limit', 'quarantined'],
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers',
+ 'stability': 'experimental',
+ },
+ "pendingTasks": {
+ 'args': ['taskQueueId'],
+ 'method': 'get',
+ 'name': 'pendingTasks',
+ 'output': 'v1/pending-tasks-response.json#',
+ 'route': '/pending/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "quarantineWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'input': 'v1/quarantine-worker-request.json#',
+ 'method': 'put',
+ 'name': 'quarantineWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "reclaimTask": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reclaimTask',
+ 'output': 'v1/task-reclaim-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/reclaim',
+ 'stability': 'stable',
+ },
+ "reportCompleted": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reportCompleted',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/completed',
+ 'stability': 'stable',
+ },
+ "reportException": {
+ 'args': ['taskId', 'runId'],
+ 'input': 'v1/task-exception-request.json#',
+ 'method': 'post',
+ 'name': 'reportException',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/exception',
+ 'stability': 'stable',
+ },
+ "reportFailed": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reportFailed',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/failed',
+ 'stability': 'stable',
+ },
+ "rerunTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'rerunTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/rerun',
+ 'stability': 'stable',
+ },
+ "scheduleTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'scheduleTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/schedule',
+ 'stability': 'stable',
+ },
+ "status": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'status',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/status',
+ 'stability': 'stable',
+ },
+ "task": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'task',
+ 'output': 'v1/task.json#',
+ 'route': '/task/<taskId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Queue']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/queueevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/queueevents.py
new file mode 100644
index 0000000000..be916b2ca3
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/queueevents.py
@@ -0,0 +1,719 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class QueueEvents(AsyncBaseClient):
+ """
+ The queue service is responsible for accepting tasks and tracking their
+ state as they are executed by workers, in order to ensure they are
+ eventually resolved.
+
+ This document describes AMQP exchanges offered by the queue, which allows
+ third-party listeners to monitor tasks as they progress to resolution.
+ These exchanges target the following audiences:
+ * Schedulers, who take action after tasks are completed,
+ * Workers, who want to listen for new or canceled tasks (optional),
+ * Tools that want to update their view as tasks progress.
+
+ You'll notice that all the exchanges in the document share the same
+ routing key pattern. This makes it very easy to bind to all messages
+ about a certain kind of task.
+
+ **Task specific routes**, a task can define a task specific route using
+ the `task.routes` property. See task creation documentation for details
+ on permissions required to provide task specific routes. If a task has
+ the entry `'notify.by-email'` as a task specific route defined in
+ `task.routes`, all messages about this task will be CC'ed with the
+ routing-key `'route.notify.by-email'`.
+
+ These routes will always be prefixed `route.`, so that they cannot interfere
+ with the _primary_ routing key as documented here. Notice that the
+ _primary_ routing key is always prefixed `primary.`. This is ensured
+ in the routing key reference, so API clients will do this automatically.
+
+ Please note that, due to the way RabbitMQ works, the message will only
+ arrive in your queue once, even though you may have bound to the exchange
+ with multiple routing key patterns that match more of the CC'ed
+ routing keys.
+
+ **Delivery guarantees**, most operations on the queue are idempotent,
+ which means that if repeated with the same arguments then the requests
+ will ensure completion of the operation and return the same response.
+ This is useful if the server crashes or the TCP connection breaks, but
+ when re-executing an idempotent operation, the queue will also resend
+ any related AMQP messages. Hence, messages may be repeated.
+
+ This shouldn't be much of a problem, as the best you can achieve using
+ confirm messages with AMQP is at-least-once delivery semantics. Hence,
+ this only prevents you from obtaining at-most-once delivery semantics.
+
+ **Remark**, some messages generated by timeouts may be dropped if the
+ server crashes at the wrong time. Ideally, we'll address this in the
+ future. For now we suggest you ignore this corner case, and notify us
+ if this corner case is of concern to you.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-queue/v1/",
+ }
+ serviceName = 'queue'
+ apiVersion = 'v1'
+
+ def taskDefined(self, *args, **kwargs):
+ """
+ Task Defined Messages
+
+ When a task is created or just defined, a message is posted to this
+ exchange.
+
+ This message exchange is mainly useful when tasks are created with dependencies
+ on incomplete tasks, as this does not make the task
+ `pending`. Thus, no `taskPending` message is published.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. This is done automatically by our tooling if not specified.
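+
+ A sketch of building a binding for all messages about one task (the
+ `taskId` value is an illustrative assumption):
+
+     events = QueueEvents({'rootUrl': 'https://tc.example.com'})
+     binding = events.taskDefined(taskId='abc123')
+     # binding['exchange'] and binding['routingKeyPattern'] can then be
+     # used to bind an AMQP queue to this exchange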
+ """
+
+ ref = {
+ 'exchange': 'task-defined',
+ 'name': 'taskDefined',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-defined-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskPending(self, *args, **kwargs):
+ """
+ Task Pending Messages
+
+ When a task becomes `pending` a message is posted to this exchange.
+
+ This is useful for workers who don't want to constantly poll the queue
+ for new tasks. The queue will also be the authority for task states and
+ claims. But using this exchange, workers should be able to distribute work
+ efficiently, and they would be able to reduce their polling interval
+ significantly without affecting general responsiveness.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. This is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-pending',
+ 'name': 'taskPending',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-pending-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskRunning(self, *args, **kwargs):
+ """
+ Task Running Messages
+
+ Whenever a task is claimed by a worker, a run is started on the worker,
+ and a message is posted on this exchange.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. This is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-running',
+ 'name': 'taskRunning',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-running-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def artifactCreated(self, *args, **kwargs):
+ """
+ Artifact Creation Messages
+
+ Whenever the `createArtifact` end-point is called, the queue will create
+ a record of the artifact and post a message on this exchange. All of this
+ happens before the queue returns a signed URL for the caller to upload
+ the actual artifact with (depending on `storageType`).
+
+ This means that the actual artifact is rarely available when this message
+ is posted. But it is not unreasonable to assume that the artifact will
+ become available at some point later. Most signatures will expire in
+ 30 minutes or so, forcing the uploader to call `createArtifact` with
+ the same payload again in order to continue uploading the artifact.
+
+ However, in most cases (especially for small artifacts) it's very
+ reasonable to assume the artifact will be available within a few minutes.
+ This property means that this exchange is mostly useful for tools
+ monitoring task evaluation. One could also use it to count the number of
+ artifacts per task, or to _index_ artifacts, though in most cases it'll be
+ smarter to index artifacts after the task in question has completed
+ successfully.
+
+ *NOTE*: this message is currently only sent for reference and error
+ artifacts. This will be remedied in a future version of Taskcluster.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. This is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'artifact-created',
+ 'name': 'artifactCreated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/artifact-created-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskCompleted(self, *args, **kwargs):
+ """
+ Task Completed Messages
+
+ When a task is successfully completed by a worker, a message is posted
+ to this exchange.
+ This message is routed using the `runId`, `workerGroup` and `workerId`
+ that completed the task. But information about additional runs is also
+ available from the task status structure.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. This is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-completed',
+ 'name': 'taskCompleted',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-completed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskFailed(self, *args, **kwargs):
+ """
+ Task Failed Messages
+
+ When a task ran but failed to complete successfully, a message is posted
+ to this exchange. This means the worker ran the task-specific code, but
+ that code exited non-zero.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. This is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-failed',
+ 'name': 'taskFailed',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-failed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskException(self, *args, **kwargs):
+ """
+ Task Exception Messages
+
+ Whenever Taskcluster fails to run a task, a message is posted to this
+ exchange. This happens if the task isn't completed before its `deadline`,
+ all retries failed (i.e. workers stopped responding), the task was
+ canceled by another entity, or the task carried a malformed payload.
+
+ The specific _reason_ is evident from the task status structure; refer
+ to the `reasonResolved` property for the last run.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. This is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-exception',
+ 'name': 'taskException',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-exception-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskGroupResolved(self, *args, **kwargs):
+ """
+ Task Group Resolved Messages
+
+ A message is published on task-group-resolved whenever all submitted
+ tasks (whether scheduled or unscheduled) for a given task group have
+ been resolved, regardless of whether they resolved as successful or
+ not. A task group may be resolved multiple times, since new tasks may
+ be submitted against an already resolved task group.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskGroupId: `taskGroupId` for the task-group this message concerns (required)
+
+ * schedulerId: `schedulerId` for the task-group this message concerns (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. This is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-group-resolved',
+ 'name': 'taskGroupResolved',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-group-resolved.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'QueueEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/secrets.py b/third_party/python/taskcluster/taskcluster/generated/aio/secrets.py
new file mode 100644
index 0000000000..f8ccdc2366
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/secrets.py
@@ -0,0 +1,143 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Secrets(AsyncBaseClient):
+ """
+ The secrets service provides a simple key/value store for small bits of secret
+ data. Access is limited by scopes, so values can be considered secret from
+ those who do not have the relevant scopes.
+
+ Secrets also have an expiration date, and once a secret has expired it can no
+ longer be read. This is useful for short-term secrets such as a temporary
+ service credential or a one-time signing key.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'secrets'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def set(self, *args, **kwargs):
+ """
+ Set Secret
+
+ Set the secret associated with some key. If the secret already exists, it is
+ updated instead.
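+
+ A sketch of writing and reading back a secret (the `secrets` client
+ instance and the secret name are illustrative assumptions; the payload
+ fields follow `v1/secret.json#`):
+
+     await secrets.set('project/example/db-password', {
+         'secret': {'password': 'hunter2'},
+         'expires': '2030-01-01T00:00:00.000Z',
+     })
+     result = await secrets.get('project/example/db-password')
+     # result['secret'] holds the stored value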
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["set"], *args, **kwargs)
+
+ async def remove(self, *args, **kwargs):
+ """
+ Delete Secret
+
+ Delete the secret associated with some key. It will succeed whether or not the secret exists.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["remove"], *args, **kwargs)
+
+ async def get(self, *args, **kwargs):
+ """
+ Read Secret
+
+ Read the secret associated with some key. If the secret has recently
+ expired, the response code 410 is returned. If the caller lacks the
+ scope necessary to get the secret, the call will fail with a 403 code
+ regardless of whether the secret exists.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["get"], *args, **kwargs)
+
+ async def list(self, *args, **kwargs):
+ """
+ List Secrets
+
+ List the names of all secrets.
+
+ By default this end-point will try to return up to 1000 secret names in one
+ request. But it **may return fewer**, even if more secrets are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `list` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the secrets at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["list"], *args, **kwargs)
+
+ funcinfo = {
+ "get": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'get',
+ 'output': 'v1/secret.json#',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ "list": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'list',
+ 'output': 'v1/secret-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/secrets',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "remove": {
+ 'args': ['name'],
+ 'method': 'delete',
+ 'name': 'remove',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ "set": {
+ 'args': ['name'],
+ 'input': 'v1/secret.json#',
+ 'method': 'put',
+ 'name': 'set',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Secrets']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/workermanager.py b/third_party/python/taskcluster/taskcluster/generated/aio/workermanager.py
new file mode 100644
index 0000000000..3d4e1197fd
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/workermanager.py
@@ -0,0 +1,406 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class WorkerManager(AsyncBaseClient):
+ """
+ This service manages workers, including provisioning for dynamic worker pools.
+
+ Methods interacting with a provider may return a 503 response if that provider has
+ not been able to start up, such as if the service to which it interfaces has an
+ outage. Such requests can be retried as for any other 5xx response.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'worker-manager'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def listProviders(self, *args, **kwargs):
+ """
+ List Providers
+
+ Retrieve a list of providers that are available for worker pools.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listProviders"], *args, **kwargs)
+
+ async def createWorkerPool(self, *args, **kwargs):
+ """
+ Create Worker Pool
+
+ Create a new worker pool. If the worker pool already exists, this will throw an error.
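+
+ A sketch of creating a pool (the `wm` client instance, the pool name and
+ the provider are illustrative assumptions; `config` is provider-specific):
+
+     await wm.createWorkerPool('proj-example/my-pool', {
+         'providerId': 'static',
+         'description': 'An example pool',
+         'owner': 'user@example.com',
+         'emailOnError': False,
+         'config': {},
+     })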
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createWorkerPool"], *args, **kwargs)
+
+ async def updateWorkerPool(self, *args, **kwargs):
+ """
+ Update Worker Pool
+
+ Given an existing worker pool definition, this will modify it and return
+ the new definition.
+
+ To delete a worker pool, set its `providerId` to `"null-provider"`.
+ After any existing workers have exited, a cleanup job will remove the
+ worker pool. During that time, the worker pool can be updated again, such
+ as to set its `providerId` to a real provider.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateWorkerPool"], *args, **kwargs)
+
+ async def deleteWorkerPool(self, *args, **kwargs):
+ """
+ Delete Worker Pool
+
+ Mark a worker pool for deletion. This is the same as updating the pool to
+ set its providerId to `"null-provider"`, but does not require scope
+ `worker-manager:provider:null-provider`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteWorkerPool"], *args, **kwargs)
+
+ async def workerPool(self, *args, **kwargs):
+ """
+ Get Worker Pool
+
+ Fetch an existing worker pool definition.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["workerPool"], *args, **kwargs)
+
+ async def listWorkerPools(self, *args, **kwargs):
+ """
+ List All Worker Pools
+
+ Get the list of all the existing worker pools.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkerPools"], *args, **kwargs)
+
+ async def reportWorkerError(self, *args, **kwargs):
+ """
+ Report an error from a worker
+
+ Report an error that occurred on a worker. This error will be included
+ with the other errors in `listWorkerPoolErrors(workerPoolId)`.
+
+ Workers can use this endpoint to report startup or configuration errors
+ that might be associated with the worker pool configuration and thus of
+ interest to a worker-pool administrator.
+
+ NOTE: errors are publicly visible. Ensure that none of the content
+ contains secrets or other sensitive information.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reportWorkerError"], *args, **kwargs)
+
+ async def listWorkerPoolErrors(self, *args, **kwargs):
+ """
+ List Worker Pool Errors
+
+ Get the list of worker pool errors.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkerPoolErrors"], *args, **kwargs)
+
+ async def listWorkersForWorkerGroup(self, *args, **kwargs):
+ """
+ Workers in a specific Worker Group in a Worker Pool
+
+ Get the list of all the existing workers in a given group in a given worker pool.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkersForWorkerGroup"], *args, **kwargs)
+
+ async def worker(self, *args, **kwargs):
+ """
+ Get a Worker
+
+ Get a single worker.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["worker"], *args, **kwargs)
+
+ async def createWorker(self, *args, **kwargs):
+ """
+ Create a Worker
+
+ Create a new worker. This is only useful for worker pools where the provider
+ does not create workers automatically, such as those with a `static` provider
+ type. Providers that do not support creating workers will return a 400 error.
+ See the documentation for the individual providers, and in particular the
+ [static provider](https://docs.taskcluster.net/docs/reference/core/worker-manager/)
+ for more information.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createWorker"], *args, **kwargs)
+
+ async def updateWorker(self, *args, **kwargs):
+ """
+ Update an existing Worker
+
+ Update an existing worker in-place. Like `createWorker`, this is only useful for
+ worker pools where the provider does not create workers automatically.
+ This method allows updating all fields in the schema unless otherwise indicated
+ in the provider documentation.
+ See the documentation for the individual providers, and in particular the
+ [static provider](https://docs.taskcluster.net/docs/reference/core/worker-manager/)
+ for more information.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateWorker"], *args, **kwargs)
+
+ async def removeWorker(self, *args, **kwargs):
+ """
+ Remove a Worker
+
+ Remove an existing worker. The precise behavior of this method depends
+ on the provider implementing the given worker. Some providers
+ do not support removing workers at all, and will return a 400 error.
+ Others may begin removing the worker, but it may remain available via
+ the API (perhaps even in state RUNNING) afterward.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["removeWorker"], *args, **kwargs)
+
+ async def listWorkersForWorkerPool(self, *args, **kwargs):
+ """
+ Workers in a Worker Pool
+
+ Get the list of all the existing workers in a given worker pool.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkersForWorkerPool"], *args, **kwargs)
+
+ async def registerWorker(self, *args, **kwargs):
+ """
+ Register a running worker
+
+ Register a running worker. Workers call this method on worker start-up.
+
+ This call both marks the worker as running and returns the credentials
+ the worker will require to perform its work. The worker must provide
+ some proof of its identity, and that proof varies by provider type.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["registerWorker"], *args, **kwargs)
+
+ async def reregisterWorker(self, *args, **kwargs):
+ """
+ Reregister a Worker
+
+ Reregister a running worker.
+
+ This will generate and return new Taskcluster credentials for the worker
+ on that instance to use. The credentials will not live longer than the
+ `registrationTimeout` for that worker. The endpoint will update `terminateAfter`
+ for the worker so that worker-manager does not terminate the instance.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reregisterWorker"], *args, **kwargs)
+
+ funcinfo = {
+ "createWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'input': 'v1/create-worker-request.json#',
+ 'method': 'put',
+ 'name': 'createWorker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "createWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/create-worker-pool-request.json#',
+ 'method': 'put',
+ 'name': 'createWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "deleteWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'delete',
+ 'name': 'deleteWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "listProviders": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listProviders',
+ 'output': 'v1/provider-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/providers',
+ 'stability': 'stable',
+ },
+ "listWorkerPoolErrors": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'listWorkerPoolErrors',
+ 'output': 'v1/worker-pool-error-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/worker-pool-errors/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "listWorkerPools": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listWorkerPools',
+ 'output': 'v1/worker-pool-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/worker-pools',
+ 'stability': 'stable',
+ },
+ "listWorkersForWorkerGroup": {
+ 'args': ['workerPoolId', 'workerGroup'],
+ 'method': 'get',
+ 'name': 'listWorkersForWorkerGroup',
+ 'output': 'v1/worker-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/workers/<workerPoolId>:/<workerGroup>',
+ 'stability': 'stable',
+ },
+ "listWorkersForWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'listWorkersForWorkerPool',
+ 'output': 'v1/worker-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/workers/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "registerWorker": {
+ 'args': [],
+ 'input': 'v1/register-worker-request.json#',
+ 'method': 'post',
+ 'name': 'registerWorker',
+ 'output': 'v1/register-worker-response.json#',
+ 'route': '/worker/register',
+ 'stability': 'stable',
+ },
+ "removeWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'method': 'delete',
+ 'name': 'removeWorker',
+ 'route': '/workers/<workerPoolId>/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "reportWorkerError": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/report-worker-error-request.json#',
+ 'method': 'post',
+ 'name': 'reportWorkerError',
+ 'output': 'v1/worker-pool-error.json#',
+ 'route': '/worker-pool-errors/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "reregisterWorker": {
+ 'args': [],
+ 'input': 'v1/reregister-worker-request.json#',
+ 'method': 'post',
+ 'name': 'reregisterWorker',
+ 'output': 'v1/reregister-worker-response.json#',
+ 'route': '/worker/reregister',
+ 'stability': 'experimental',
+ },
+ "updateWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'input': 'v1/create-worker-request.json#',
+ 'method': 'post',
+ 'name': 'updateWorker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "updateWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/update-worker-pool-request.json#',
+ 'method': 'post',
+ 'name': 'updateWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'experimental',
+ },
+ "worker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'method': 'get',
+ 'name': 'worker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "workerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'workerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'WorkerManager']
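
As an editorial aside, here is a minimal sketch of how the pagination contract encoded in the `funcinfo` entries above (the `continuationToken` and `limit` query arguments) is typically consumed. The root URL is a placeholder, and the `workerPools` response property is an assumption read off the `v1/worker-pool-list.json#` schema name, not something this diff guarantees:

```python
import taskcluster

# Hypothetical deployment; substitute your own rootUrl.
wm = taskcluster.WorkerManager({'rootUrl': 'https://tc.example.com'})

# Keep following continuationToken until the service stops returning one.
query = {'limit': 100}
while True:
    result = wm.listWorkerPools(query=query)
    for pool in result.get('workerPools', []):  # property name assumed
        print(pool['workerPoolId'])
    token = result.get('continuationToken')
    if not token:
        break
    query['continuationToken'] = token
```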
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/workermanagerevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/workermanagerevents.py
new file mode 100644
index 0000000000..80bd60729a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/workermanagerevents.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class WorkerManagerEvents(AsyncBaseClient):
+ """
+    These exchanges provide notifications when a worker pool is created or updated. This is so that the provisioner running in a different process at the other end can synchronize to the changes. But you are of course welcome to use these for other purposes, monitoring changes for example.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-worker-manager/v1/",
+ }
+ serviceName = 'worker-manager'
+ apiVersion = 'v1'
+
+ def workerPoolCreated(self, *args, **kwargs):
+ """
+ Worker Pool Created Messages
+
+        Whenever the API receives a request to create a worker pool, a message is posted to this exchange and a provider can act upon it.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'worker-pool-created',
+ 'name': 'workerPoolCreated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-worker-pool-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def workerPoolUpdated(self, *args, **kwargs):
+ """
+ Worker Pool Updated Messages
+
+        Whenever the API receives a request to update a worker pool, a message is posted to this exchange and a provider can act upon it.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'worker-pool-updated',
+ 'name': 'workerPoolUpdated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-worker-pool-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'WorkerManagerEvents']
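
Since `funcinfo` is empty here, this class performs no HTTP calls; each method only assembles a binding descriptor via `_makeTopicExchange`. A sketch, assuming the descriptor exposes `exchange` and `routingKeyPattern` keys (the usual shape in this client):

```python
import taskcluster

events = taskcluster.WorkerManagerEvents({'rootUrl': 'https://tc.example.com'})

# No request is made; this only builds exchange metadata. Leaving the
# routing key unspecified lets the tooling match `reserved` with '#'.
binding = events.workerPoolCreated()
print(binding['exchange'])           # e.g. exchange/taskcluster-worker-manager/v1/worker-pool-created
print(binding['routingKeyPattern'])  # e.g. primary.#
```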
diff --git a/third_party/python/taskcluster/taskcluster/generated/auth.py b/third_party/python/taskcluster/taskcluster/generated/auth.py
new file mode 100644
index 0000000000..a4738ee143
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/auth.py
@@ -0,0 +1,781 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Auth(BaseClient):
+ """
+ Authentication related API end-points for Taskcluster and related
+ services. These API end-points are of interest if you wish to:
+ * Authorize a request signed with Taskcluster credentials,
+ * Manage clients and roles,
+ * Inspect or audit clients and roles,
+ * Gain access to various services guarded by this API.
+
+ """
+
+ classOptions = {
+ }
+ serviceName = 'auth'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def listClients(self, *args, **kwargs):
+ """
+ List Clients
+
+        Get a list of all clients. With the `prefix` query argument, only
+        clients whose `clientId` starts with that prefix are returned.
+
+        By default this end-point will try to return up to 1000 clients in one
+        request. But it **may return fewer, even none**.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listClients` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listClients"], *args, **kwargs)
+
+ def client(self, *args, **kwargs):
+ """
+ Get Client
+
+ Get information about a single client.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["client"], *args, **kwargs)
+
+ def createClient(self, *args, **kwargs):
+ """
+ Create Client
+
+ Create a new client and get the `accessToken` for this client.
+ You should store the `accessToken` from this API call as there is no
+ other way to retrieve it.
+
+        If you lose the `accessToken` you can call `resetAccessToken` to reset
+ it, and a new `accessToken` will be returned, but you cannot retrieve the
+ current `accessToken`.
+
+ If a client with the same `clientId` already exists this operation will
+ fail. Use `updateClient` if you wish to update an existing client.
+
+ The caller's scopes must satisfy `scopes`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createClient"], *args, **kwargs)
+
+ def resetAccessToken(self, *args, **kwargs):
+ """
+ Reset `accessToken`
+
+        Reset a client's `accessToken`. This will revoke the existing
+ `accessToken`, generate a new `accessToken` and return it from this
+ call.
+
+        There is no way to retrieve an existing `accessToken`, so if you lose it
+        you must reset the `accessToken` to acquire it again.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["resetAccessToken"], *args, **kwargs)
+
+ def updateClient(self, *args, **kwargs):
+ """
+ Update Client
+
+        Update an existing client. The `clientId` and `accessToken` cannot be
+ updated, but `scopes` can be modified. The caller's scopes must
+ satisfy all scopes being added to the client in the update operation.
+ If no scopes are given in the request, the client's scopes remain
+        unchanged.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateClient"], *args, **kwargs)
+
+ def enableClient(self, *args, **kwargs):
+ """
+ Enable Client
+
+ Enable a client that was disabled with `disableClient`. If the client
+ is already enabled, this does nothing.
+
+ This is typically used by identity providers to re-enable clients that
+ had been disabled when the corresponding identity's scopes changed.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["enableClient"], *args, **kwargs)
+
+ def disableClient(self, *args, **kwargs):
+ """
+ Disable Client
+
+ Disable a client. If the client is already disabled, this does nothing.
+
+ This is typically used by identity providers to disable clients when the
+ corresponding identity's scopes no longer satisfy the client's scopes.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["disableClient"], *args, **kwargs)
+
+ def deleteClient(self, *args, **kwargs):
+ """
+ Delete Client
+
+        Delete a client. Please note that any roles related to this client must
+ be deleted independently.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteClient"], *args, **kwargs)
+
+ def listRoles(self, *args, **kwargs):
+ """
+ List Roles (no pagination)
+
+ Get a list of all roles. Each role object also includes the list of
+ scopes it expands to. This always returns all roles in a single HTTP
+ request.
+
+ To get paginated results, use `listRoles2`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listRoles"], *args, **kwargs)
+
+ def listRoles2(self, *args, **kwargs):
+ """
+ List Roles
+
+ Get a list of all roles. Each role object also includes the list of
+ scopes it expands to. This is similar to `listRoles` but differs in the
+ format of the response.
+
+ If no limit is given, all roles are returned. Since this
+ list may become long, callers can use the `limit` and `continuationToken`
+ query arguments to page through the responses.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listRoles2"], *args, **kwargs)
+
+ def listRoleIds(self, *args, **kwargs):
+ """
+ List Role IDs
+
+ Get a list of all role IDs.
+
+ If no limit is given, the roleIds of all roles are returned. Since this
+ list may become long, callers can use the `limit` and `continuationToken`
+ query arguments to page through the responses.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs)
+
+ def role(self, *args, **kwargs):
+ """
+ Get Role
+
+ Get information about a single role, including the set of scopes that the
+ role expands to.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["role"], *args, **kwargs)
+
+ def createRole(self, *args, **kwargs):
+ """
+ Create Role
+
+ Create a new role.
+
+ The caller's scopes must satisfy the new role's scopes.
+
+ If there already exists a role with the same `roleId` this operation
+ will fail. Use `updateRole` to modify an existing role.
+
+ Creation of a role that will generate an infinite expansion will result
+ in an error response.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createRole"], *args, **kwargs)
+
+ def updateRole(self, *args, **kwargs):
+ """
+ Update Role
+
+ Update an existing role.
+
+ The caller's scopes must satisfy all of the new scopes being added, but
+ need not satisfy all of the role's existing scopes.
+
+ An update of a role that will generate an infinite expansion will result
+ in an error response.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs)
+
+ def deleteRole(self, *args, **kwargs):
+ """
+ Delete Role
+
+ Delete a role. This operation will succeed regardless of whether or not
+ the role exists.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs)
+
+ def expandScopes(self, *args, **kwargs):
+ """
+ Expand Scopes
+
+ Return an expanded copy of the given scopeset, with scopes implied by any
+ roles included.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["expandScopes"], *args, **kwargs)
+
+ def currentScopes(self, *args, **kwargs):
+ """
+ Get Current Scopes
+
+ Return the expanded scopes available in the request, taking into account all sources
+ of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
+ and roles).
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["currentScopes"], *args, **kwargs)
+
+ def awsS3Credentials(self, *args, **kwargs):
+ """
+ Get Temporary Read/Write Credentials S3
+
+ Get temporary AWS credentials for `read-write` or `read-only` access to
+ a given `bucket` and `prefix` within that bucket.
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. Please note that the `level`
+ parameter is required in the scope guarding access. The bucket name must
+ not contain `.`, as recommended by Amazon.
+
+ This method can only allow access to a whitelisted set of buckets, as configured
+        in the Taskcluster deployment.
+
+ The credentials are set to expire after an hour, but this behavior is
+ subject to change. Hence, you should always read the `expires` property
+ from the response, if you intend to maintain active credentials in your
+ application.
+
+ Please note that your `prefix` may not start with slash `/`. Such a prefix
+ is allowed on S3, but we forbid it here to discourage bad behavior.
+
+ Also note that if your `prefix` doesn't end in a slash `/`, the STS
+ credentials may allow access to unexpected keys, as S3 does not treat
+ slashes specially. For example, a prefix of `my-folder` will allow
+ access to `my-folder/file.txt` as expected, but also to `my-folder.txt`,
+ which may not be intended.
+
+ Finally, note that the `PutObjectAcl` call is not allowed. Passing a canned
+ ACL other than `private` to `PutObject` is treated as a `PutObjectAcl` call, and
+ will result in an access-denied error from AWS. This limitation is due to a
+ security flaw in Amazon S3 which might otherwise allow indefinite access to
+ uploaded objects.
+
+        **EC2 metadata compatibility**: if the query-string parameter
+ `?format=iam-role-compat` is given, the response will be compatible
+ with the JSON exposed by the EC2 metadata service. This aims to ease
+ compatibility for libraries and tools built to auto-refresh credentials.
+ For details on the format returned by EC2 metadata service see:
+ [EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials).
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["awsS3Credentials"], *args, **kwargs)
+
+ def azureAccounts(self, *args, **kwargs):
+ """
+ List Accounts Managed by Auth
+
+ Retrieve a list of all Azure accounts managed by Taskcluster Auth.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureAccounts"], *args, **kwargs)
+
+ def azureTables(self, *args, **kwargs):
+ """
+ List Tables in an Account Managed by Auth
+
+ Retrieve a list of all tables in an account.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs)
+
+ def azureTableSAS(self, *args, **kwargs):
+ """
+ Get Shared-Access-Signature for Azure Table
+
+ Get a shared access signature (SAS) string for use with a specific Azure
+ Table Storage table.
+
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. If level is read-write, it will create the
+ table if it doesn't already exist.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureTableSAS"], *args, **kwargs)
+
+ def azureContainers(self, *args, **kwargs):
+ """
+ List containers in an Account Managed by Auth
+
+ Retrieve a list of all containers in an account.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs)
+
+ def azureContainerSAS(self, *args, **kwargs):
+ """
+ Get Shared-Access-Signature for Azure Container
+
+ Get a shared access signature (SAS) string for use with a specific Azure
+ Blob Storage container.
+
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. If level is read-write, it will create the
+ container if it doesn't already exist.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs)
+
+ def sentryDSN(self, *args, **kwargs):
+ """
+ Get DSN for Sentry Project
+
+ Get temporary DSN (access credentials) for a sentry project.
+ The credentials returned can be used with any Sentry client for up to
+ 24 hours, after which the credentials will be automatically disabled.
+
+ If the project doesn't exist it will be created, and assigned to the
+ initial team configured for this component. Contact a Sentry admin
+        to have the project transferred to a team you have access to, if needed.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["sentryDSN"], *args, **kwargs)
+
+ def websocktunnelToken(self, *args, **kwargs):
+ """
+ Get a client token for the Websocktunnel service
+
+ Get a temporary token suitable for use connecting to a
+ [websocktunnel](https://github.com/taskcluster/taskcluster/tree/main/tools/websocktunnel) server.
+
+ The resulting token will only be accepted by servers with a matching audience
+        value. Reaching such a server is the caller's responsibility. In general,
+ a server URL or set of URLs should be provided to the caller as configuration
+ along with the audience value.
+
+ The token is valid for a limited time (on the scale of hours). Callers should
+ refresh it before expiration.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["websocktunnelToken"], *args, **kwargs)
+
+ def gcpCredentials(self, *args, **kwargs):
+ """
+ Get Temporary GCP Credentials
+
+ Get temporary GCP credentials for the given serviceAccount in the given project.
+
+ Only preconfigured projects and serviceAccounts are allowed, as defined in the
+ deployment of the Taskcluster services.
+
+ The credentials are set to expire after an hour, but this behavior is
+ subject to change. Hence, you should always read the `expires` property
+ from the response, if you intend to maintain active credentials in your
+ application.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["gcpCredentials"], *args, **kwargs)
+
+ def authenticateHawk(self, *args, **kwargs):
+ """
+ Authenticate Hawk Request
+
+ Validate the request signature given on input and return list of scopes
+ that the authenticating client has.
+
+        This method is used by other services that wish to rely on Taskcluster
+ credentials for authentication. This way we can use Hawk without having
+ the secret credentials leave this service.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["authenticateHawk"], *args, **kwargs)
+
+ def testAuthenticate(self, *args, **kwargs):
+ """
+ Test Authentication
+
+ Utility method to test client implementations of Taskcluster
+ authentication.
+
+ Rather than using real credentials, this endpoint accepts requests with
+ clientId `tester` and accessToken `no-secret`. That client's scopes are
+ based on `clientScopes` in the request body.
+
+ The request is validated, with any certificate, authorizedScopes, etc.
+ applied, and the resulting scopes are checked against `requiredScopes`
+ from the request body. On success, the response contains the clientId
+ and scopes as seen by the API method.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["testAuthenticate"], *args, **kwargs)
+
+ def testAuthenticateGet(self, *args, **kwargs):
+ """
+ Test Authentication (GET)
+
+ Utility method similar to `testAuthenticate`, but with the GET method,
+ so it can be used with signed URLs (bewits).
+
+ Rather than using real credentials, this endpoint accepts requests with
+ clientId `tester` and accessToken `no-secret`. That client's scopes are
+ `['test:*', 'auth:create-client:test:*']`. The call fails if the
+ `test:authenticate-get` scope is not available.
+
+ The request is validated, with any certificate, authorizedScopes, etc.
+ applied, and the resulting scopes are checked, just like any API call.
+ On success, the response contains the clientId and scopes as seen by
+ the API method.
+
+ This method may later be extended to allow specification of client and
+ required scopes via query arguments.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["testAuthenticateGet"], *args, **kwargs)
+
+ funcinfo = {
+ "authenticateHawk": {
+ 'args': [],
+ 'input': 'v1/authenticate-hawk-request.json#',
+ 'method': 'post',
+ 'name': 'authenticateHawk',
+ 'output': 'v1/authenticate-hawk-response.json#',
+ 'route': '/authenticate-hawk',
+ 'stability': 'stable',
+ },
+ "awsS3Credentials": {
+ 'args': ['level', 'bucket', 'prefix'],
+ 'method': 'get',
+ 'name': 'awsS3Credentials',
+ 'output': 'v1/aws-s3-credentials-response.json#',
+ 'query': ['format'],
+ 'route': '/aws/s3/<level>/<bucket>/<prefix>',
+ 'stability': 'stable',
+ },
+ "azureAccounts": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'azureAccounts',
+ 'output': 'v1/azure-account-list-response.json#',
+ 'route': '/azure/accounts',
+ 'stability': 'stable',
+ },
+ "azureContainerSAS": {
+ 'args': ['account', 'container', 'level'],
+ 'method': 'get',
+ 'name': 'azureContainerSAS',
+ 'output': 'v1/azure-container-response.json#',
+ 'route': '/azure/<account>/containers/<container>/<level>',
+ 'stability': 'stable',
+ },
+ "azureContainers": {
+ 'args': ['account'],
+ 'method': 'get',
+ 'name': 'azureContainers',
+ 'output': 'v1/azure-container-list-response.json#',
+ 'query': ['continuationToken'],
+ 'route': '/azure/<account>/containers',
+ 'stability': 'stable',
+ },
+ "azureTableSAS": {
+ 'args': ['account', 'table', 'level'],
+ 'method': 'get',
+ 'name': 'azureTableSAS',
+ 'output': 'v1/azure-table-access-response.json#',
+ 'route': '/azure/<account>/table/<table>/<level>',
+ 'stability': 'stable',
+ },
+ "azureTables": {
+ 'args': ['account'],
+ 'method': 'get',
+ 'name': 'azureTables',
+ 'output': 'v1/azure-table-list-response.json#',
+ 'query': ['continuationToken'],
+ 'route': '/azure/<account>/tables',
+ 'stability': 'stable',
+ },
+ "client": {
+ 'args': ['clientId'],
+ 'method': 'get',
+ 'name': 'client',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "createClient": {
+ 'args': ['clientId'],
+ 'input': 'v1/create-client-request.json#',
+ 'method': 'put',
+ 'name': 'createClient',
+ 'output': 'v1/create-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "createRole": {
+ 'args': ['roleId'],
+ 'input': 'v1/create-role-request.json#',
+ 'method': 'put',
+ 'name': 'createRole',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "currentScopes": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'currentScopes',
+ 'output': 'v1/scopeset.json#',
+ 'route': '/scopes/current',
+ 'stability': 'stable',
+ },
+ "deleteClient": {
+ 'args': ['clientId'],
+ 'method': 'delete',
+ 'name': 'deleteClient',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "deleteRole": {
+ 'args': ['roleId'],
+ 'method': 'delete',
+ 'name': 'deleteRole',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "disableClient": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'disableClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>/disable',
+ 'stability': 'stable',
+ },
+ "enableClient": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'enableClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>/enable',
+ 'stability': 'stable',
+ },
+ "expandScopes": {
+ 'args': [],
+ 'input': 'v1/scopeset.json#',
+ 'method': 'post',
+ 'name': 'expandScopes',
+ 'output': 'v1/scopeset.json#',
+ 'route': '/scopes/expand',
+ 'stability': 'stable',
+ },
+ "gcpCredentials": {
+ 'args': ['projectId', 'serviceAccount'],
+ 'method': 'get',
+ 'name': 'gcpCredentials',
+ 'output': 'v1/gcp-credentials-response.json#',
+ 'route': '/gcp/credentials/<projectId>/<serviceAccount>',
+ 'stability': 'stable',
+ },
+ "listClients": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listClients',
+ 'output': 'v1/list-clients-response.json#',
+ 'query': ['prefix', 'continuationToken', 'limit'],
+ 'route': '/clients/',
+ 'stability': 'stable',
+ },
+ "listRoleIds": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoleIds',
+ 'output': 'v1/list-role-ids-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/roleids/',
+ 'stability': 'stable',
+ },
+ "listRoles": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoles',
+ 'output': 'v1/list-roles-response.json#',
+ 'route': '/roles/',
+ 'stability': 'stable',
+ },
+ "listRoles2": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoles2',
+ 'output': 'v1/list-roles2-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/roles2/',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "resetAccessToken": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'resetAccessToken',
+ 'output': 'v1/create-client-response.json#',
+ 'route': '/clients/<clientId>/reset',
+ 'stability': 'stable',
+ },
+ "role": {
+ 'args': ['roleId'],
+ 'method': 'get',
+ 'name': 'role',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "sentryDSN": {
+ 'args': ['project'],
+ 'method': 'get',
+ 'name': 'sentryDSN',
+ 'output': 'v1/sentry-dsn-response.json#',
+ 'route': '/sentry/<project>/dsn',
+ 'stability': 'stable',
+ },
+ "testAuthenticate": {
+ 'args': [],
+ 'input': 'v1/test-authenticate-request.json#',
+ 'method': 'post',
+ 'name': 'testAuthenticate',
+ 'output': 'v1/test-authenticate-response.json#',
+ 'route': '/test-authenticate',
+ 'stability': 'stable',
+ },
+ "testAuthenticateGet": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'testAuthenticateGet',
+ 'output': 'v1/test-authenticate-response.json#',
+ 'route': '/test-authenticate-get/',
+ 'stability': 'stable',
+ },
+ "updateClient": {
+ 'args': ['clientId'],
+ 'input': 'v1/create-client-request.json#',
+ 'method': 'post',
+ 'name': 'updateClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "updateRole": {
+ 'args': ['roleId'],
+ 'input': 'v1/create-role-request.json#',
+ 'method': 'post',
+ 'name': 'updateRole',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "websocktunnelToken": {
+ 'args': ['wstAudience', 'wstClient'],
+ 'method': 'get',
+ 'name': 'websocktunnelToken',
+ 'output': 'v1/websocktunnel-token-response.json#',
+ 'route': '/websocktunnel/<wstAudience>/<wstClient>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Auth']
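
A sketch of the paging loop that the `listClients` docstring above prescribes: keep following `continuationToken` until none is returned, since a page may be short or even empty. The credentials, the prefix, and the `clients` response property are illustrative assumptions:

```python
import taskcluster

auth = taskcluster.Auth({
    'rootUrl': 'https://tc.example.com',  # placeholder
    'credentials': {'clientId': 'my-client', 'accessToken': 'my-token'},  # placeholders
})

clients, token = [], None
while True:
    query = {'prefix': 'project/', 'limit': 1000}
    if token:
        query['continuationToken'] = token
    page = auth.listClients(query=query)
    clients.extend(page.get('clients', []))  # property name assumed
    token = page.get('continuationToken')
    if not token:
        break
```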
diff --git a/third_party/python/taskcluster/taskcluster/generated/authevents.py b/third_party/python/taskcluster/taskcluster/generated/authevents.py
new file mode 100644
index 0000000000..23d7b1f5c7
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/authevents.py
@@ -0,0 +1,180 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class AuthEvents(BaseClient):
+ """
+ The auth service is responsible for storing credentials, managing
+ assignment of scopes, and validation of request signatures from other
+ services.
+
+    These exchanges provide notifications when credentials or roles are
+ updated. This is mostly so that multiple instances of the auth service
+ can purge their caches and synchronize state. But you are of course
+ welcome to use these for other purposes, monitoring changes for example.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-auth/v1/",
+ }
+ serviceName = 'auth'
+ apiVersion = 'v1'
+
+ def clientCreated(self, *args, **kwargs):
+ """
+ Client Created Messages
+
+ Message that a new client has been created.
+
+ This exchange takes the following keys:
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-created',
+ 'name': 'clientCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def clientUpdated(self, *args, **kwargs):
+ """
+ Client Updated Messages
+
+        Message that a client has been updated.
+
+ This exchange takes the following keys:
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-updated',
+ 'name': 'clientUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def clientDeleted(self, *args, **kwargs):
+ """
+ Client Deleted Messages
+
+        Message that a client has been deleted.
+
+ This exchange takes the following keys:
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-deleted',
+ 'name': 'clientDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleCreated(self, *args, **kwargs):
+ """
+ Role Created Messages
+
+ Message that a new role has been created.
+
+ This exchange takes the following keys:
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-created',
+ 'name': 'roleCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleUpdated(self, *args, **kwargs):
+ """
+ Role Updated Messages
+
+        Message that a role has been updated.
+
+ This exchange takes the following keys:
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-updated',
+ 'name': 'roleUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleDeleted(self, *args, **kwargs):
+ """
+ Role Deleted Messages
+
+        Message that a role has been deleted.
+
+ This exchange takes the following keys:
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-deleted',
+ 'name': 'roleDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AuthEvents']
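
For contrast with the keyword form, a routing-key pattern can also be given explicitly as a string; a sketch, assuming the client accepts a pattern string as the first positional argument (its usual convention for topic-exchange methods):

```python
import taskcluster

auth_events = taskcluster.AuthEvents({'rootUrl': 'https://tc.example.com'})

# '#' matches everything, which suits these reserved-only routing keys.
binding = auth_events.clientCreated('#')
print(binding['exchange'])           # e.g. exchange/taskcluster-auth/v1/client-created
print(binding['routingKeyPattern'])  # '#'
```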
diff --git a/third_party/python/taskcluster/taskcluster/generated/github.py b/third_party/python/taskcluster/taskcluster/generated/github.py
new file mode 100644
index 0000000000..88507d34a0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/github.py
@@ -0,0 +1,197 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Github(BaseClient):
+ """
+ The github service is responsible for creating tasks in response
+ to GitHub events, and posting results to the GitHub UI.
+
+ This document describes the API end-point for consuming GitHub
+ web hooks, as well as some useful consumer APIs.
+
+ When Github forbids an action, this service returns an HTTP 403
+ with code ForbiddenByGithub.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'github'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def githubWebHookConsumer(self, *args, **kwargs):
+ """
+ Consume GitHub WebHook
+
+ Capture a GitHub event and publish it via pulse, if it's a push,
+ release or pull request.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs)
+
+ def builds(self, *args, **kwargs):
+ """
+ List of Builds
+
+ A paginated list of builds that have been run in
+ Taskcluster. Can be filtered on various git-specific
+ fields.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["builds"], *args, **kwargs)
+
+ def badge(self, *args, **kwargs):
+ """
+ Latest Build Status Badge
+
+ Checks the status of the latest build of a given branch
+ and returns corresponding badge svg.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["badge"], *args, **kwargs)
+
+ def repository(self, *args, **kwargs):
+ """
+ Get Repository Info
+
+ Returns any repository metadata that is
+        useful within Taskcluster-related services.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["repository"], *args, **kwargs)
+
+ def latest(self, *args, **kwargs):
+ """
+ Latest Status for Branch
+
+ For a given branch of a repository, this will always point
+ to a status page for the most recent task triggered by that
+ branch.
+
+ Note: This is a redirect rather than a direct link.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["latest"], *args, **kwargs)
+
+ def createStatus(self, *args, **kwargs):
+ """
+ Post a status against a given changeset
+
+ For a given changeset (SHA) of a repository, this will attach a "commit status"
+        on GitHub. These statuses are links displayed next to each revision.
+ The status is either OK (green check) or FAILURE (red cross),
+ made of a custom title and link.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["createStatus"], *args, **kwargs)
+
+ def createComment(self, *args, **kwargs):
+ """
+ Post a comment on a given GitHub Issue or Pull Request
+
+ For a given Issue or Pull Request of a repository, this will write a new message.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs)
+
+ funcinfo = {
+ "badge": {
+ 'args': ['owner', 'repo', 'branch'],
+ 'method': 'get',
+ 'name': 'badge',
+ 'route': '/repository/<owner>/<repo>/<branch>/badge.svg',
+ 'stability': 'experimental',
+ },
+ "builds": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'builds',
+ 'output': 'v1/build-list.json#',
+ 'query': ['continuationToken', 'limit', 'organization', 'repository', 'sha'],
+ 'route': '/builds',
+ 'stability': 'stable',
+ },
+ "createComment": {
+ 'args': ['owner', 'repo', 'number'],
+ 'input': 'v1/create-comment.json#',
+ 'method': 'post',
+ 'name': 'createComment',
+ 'route': '/repository/<owner>/<repo>/issues/<number>/comments',
+ 'stability': 'stable',
+ },
+ "createStatus": {
+ 'args': ['owner', 'repo', 'sha'],
+ 'input': 'v1/create-status.json#',
+ 'method': 'post',
+ 'name': 'createStatus',
+ 'route': '/repository/<owner>/<repo>/statuses/<sha>',
+ 'stability': 'experimental',
+ },
+ "githubWebHookConsumer": {
+ 'args': [],
+ 'method': 'post',
+ 'name': 'githubWebHookConsumer',
+ 'route': '/github',
+ 'stability': 'stable',
+ },
+ "latest": {
+ 'args': ['owner', 'repo', 'branch'],
+ 'method': 'get',
+ 'name': 'latest',
+ 'route': '/repository/<owner>/<repo>/<branch>/latest',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "repository": {
+ 'args': ['owner', 'repo'],
+ 'method': 'get',
+ 'name': 'repository',
+ 'output': 'v1/repository.json#',
+ 'route': '/repository/<owner>/<repo>',
+ 'stability': 'experimental',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Github']
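
A sketch of the filtered listing that `builds` describes above, using the query arguments declared in `funcinfo["builds"]`. The organization/repository names and the `builds`/`taskGroupId`/`state` response fields are illustrative assumptions, not guarantees from this file:

```python
import taskcluster

github = taskcluster.Github({'rootUrl': 'https://tc.example.com'})

# Filter the paginated build list down to one repository.
result = github.builds(query={
    'organization': 'my-org',   # hypothetical
    'repository': 'my-repo',    # hypothetical
    'limit': 25,
})
for build in result.get('builds', []):  # response fields assumed
    print(build.get('taskGroupId'), build.get('state'))
```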
diff --git a/third_party/python/taskcluster/taskcluster/generated/githubevents.py b/third_party/python/taskcluster/taskcluster/generated/githubevents.py
new file mode 100644
index 0000000000..2bdfff2314
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/githubevents.py
@@ -0,0 +1,199 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class GithubEvents(BaseClient):
+ """
+    The github service publishes a pulse
+    message for supported GitHub events, translating GitHub webhook
+    events into pulse messages.
+
+    This document describes the exchanges offered by the taskcluster
+    github service.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-github/v1/",
+ }
+ serviceName = 'github'
+ apiVersion = 'v1'
+
+ def pullRequest(self, *args, **kwargs):
+ """
+ GitHub Pull Request Event
+
+ When a GitHub pull request event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+        * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+        * action: The GitHub `action` which triggered the event. For possible values, see the payload's `action` property. (required)
+ """
+
+ ref = {
+ 'exchange': 'pull-request',
+ 'name': 'pullRequest',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'action',
+ },
+ ],
+ 'schema': 'v1/github-pull-request-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def push(self, *args, **kwargs):
+ """
+ GitHub push Event
+
+ When a GitHub push event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+        * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'push',
+ 'name': 'push',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/github-push-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def release(self, *args, **kwargs):
+ """
+ GitHub release Event
+
+ When a GitHub release event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+        * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'release',
+ 'name': 'release',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/github-release-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskGroupCreationRequested(self, *args, **kwargs):
+ """
+ tc-gh requested the Queue service to create all the tasks in a group
+
+        This message is supposed to signal that the `createTask` API has been called for every
+        task in the task group for this particular repo and this particular organization. It is
+        currently used for creating initial status indicators in the GitHub UI using the Statuses API.
+        This particular exchange can also be bound to RabbitMQ queues by custom routes - for that,
+        pass in the array of routes as a second argument to the publish method. Currently, we do
+        use the statuses routes to bind the handler that creates the initial status.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+        * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'task-group-creation-requested',
+ 'name': 'taskGroupCreationRequested',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/task-group-creation-requested.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'GithubEvents']
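
A sketch of binding to the push exchange with routing-key components supplied as a dict, assuming the client's usual defaulting rules (unspecified single-word keys match `*`); note the `%`-escaping of periods described in the docstrings above:

```python
import taskcluster

gh_events = taskcluster.GithubEvents({'rootUrl': 'https://tc.example.com'})

# 'my%repo' stands for a repository named 'my.repo' after %-escaping.
binding = gh_events.push({'organization': 'my-org', 'repository': 'my%repo'})
print(binding['exchange'])           # e.g. exchange/taskcluster-github/v1/push
print(binding['routingKeyPattern'])  # e.g. primary.my-org.my%repo
```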
diff --git a/third_party/python/taskcluster/taskcluster/generated/hooks.py b/third_party/python/taskcluster/taskcluster/generated/hooks.py
new file mode 100644
index 0000000000..efaaf4b831
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/hooks.py
@@ -0,0 +1,300 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Hooks(BaseClient):
+ """
+ The hooks service provides a mechanism for creating tasks in response to events.
+
+ """
+
+ classOptions = {
+ }
+ serviceName = 'hooks'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def listHookGroups(self, *args, **kwargs):
+ """
+ List hook groups
+
+ This endpoint will return a list of all hook groups with at least one hook.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listHookGroups"], *args, **kwargs)
+
+ def listHooks(self, *args, **kwargs):
+ """
+ List hooks in a given group
+
+ This endpoint will return a list of all the hook definitions within a
+ given hook group.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
+
+ def hook(self, *args, **kwargs):
+ """
+ Get hook definition
+
+ This endpoint will return the hook definition for the given `hookGroupId`
+        and `hookId`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["hook"], *args, **kwargs)
+
+ def getHookStatus(self, *args, **kwargs):
+ """
+ Get hook status
+
+ This endpoint will return the current status of the hook. This represents a
+ snapshot in time and may vary from one call to the next.
+
+ This method is deprecated in favor of listLastFires.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs)
+
+ def createHook(self, *args, **kwargs):
+ """
+ Create a hook
+
+ This endpoint will create a new hook.
+
+ The caller's credentials must include the role that will be used to
+ create the task. That role must satisfy task.scopes as well as the
+ necessary scopes to add the task to the queue.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createHook"], *args, **kwargs)
+
+ def updateHook(self, *args, **kwargs):
+ """
+ Update a hook
+
+ This endpoint will update an existing hook. All fields except
+ `hookGroupId` and `hookId` can be modified.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateHook"], *args, **kwargs)
+
+ def removeHook(self, *args, **kwargs):
+ """
+ Delete a hook
+
+ This endpoint will remove a hook definition.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["removeHook"], *args, **kwargs)
+
+ def triggerHook(self, *args, **kwargs):
+ """
+ Trigger a hook
+
+ This endpoint will trigger the creation of a task from a hook definition.
+
+        The HTTP payload must match the hook's `triggerSchema`. If it does, it is
+ provided as the `payload` property of the JSON-e context used to render the
+ task template.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["triggerHook"], *args, **kwargs)
+
+ def getTriggerToken(self, *args, **kwargs):
+ """
+ Get a trigger token
+
+ Retrieve a unique secret token for triggering the specified hook. This
+ token can be deactivated with `resetTriggerToken`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["getTriggerToken"], *args, **kwargs)
+
+ def resetTriggerToken(self, *args, **kwargs):
+ """
+ Reset a trigger token
+
+        Reset the token for triggering a given hook. This invalidates any token that
+        may have been issued via `getTriggerToken`, replacing it with a new token.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["resetTriggerToken"], *args, **kwargs)
+
+ def triggerHookWithToken(self, *args, **kwargs):
+ """
+ Trigger a hook with a token
+
+ This endpoint triggers a defined hook with a valid token.
+
+        The HTTP payload must match the hook's `triggerSchema`. If it does, it is
+ provided as the `payload` property of the JSON-e context used to render the
+ task template.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["triggerHookWithToken"], *args, **kwargs)
+
+ def listLastFires(self, *args, **kwargs):
+ """
+ Get information about recent hook fires
+
+        This endpoint will return information about the last few times this hook has been
+        fired, including whether the hook was fired successfully or not.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listLastFires"], *args, **kwargs)
+
+ funcinfo = {
+ "createHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/create-hook-request.json#',
+ 'method': 'put',
+ 'name': 'createHook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "getHookStatus": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'getHookStatus',
+ 'output': 'v1/hook-status.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/status',
+ 'stability': 'deprecated',
+ },
+ "getTriggerToken": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'getTriggerToken',
+ 'output': 'v1/trigger-token-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/token',
+ 'stability': 'stable',
+ },
+ "hook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'hook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "listHookGroups": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listHookGroups',
+ 'output': 'v1/list-hook-groups-response.json#',
+ 'route': '/hooks',
+ 'stability': 'stable',
+ },
+ "listHooks": {
+ 'args': ['hookGroupId'],
+ 'method': 'get',
+ 'name': 'listHooks',
+ 'output': 'v1/list-hooks-response.json#',
+ 'route': '/hooks/<hookGroupId>',
+ 'stability': 'stable',
+ },
+ "listLastFires": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'listLastFires',
+ 'output': 'v1/list-lastFires-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/last-fires',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "removeHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'delete',
+ 'name': 'removeHook',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "resetTriggerToken": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'post',
+ 'name': 'resetTriggerToken',
+ 'output': 'v1/trigger-token-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/token',
+ 'stability': 'stable',
+ },
+ "triggerHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/trigger-hook.json#',
+ 'method': 'post',
+ 'name': 'triggerHook',
+ 'output': 'v1/trigger-hook-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/trigger',
+ 'stability': 'stable',
+ },
+ "triggerHookWithToken": {
+ 'args': ['hookGroupId', 'hookId', 'token'],
+ 'input': 'v1/trigger-hook.json#',
+ 'method': 'post',
+ 'name': 'triggerHookWithToken',
+ 'output': 'v1/trigger-hook-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/trigger/<token>',
+ 'stability': 'stable',
+ },
+ "updateHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/create-hook-request.json#',
+ 'method': 'post',
+ 'name': 'updateHook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Hooks']
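
A sketch of `triggerHook` as described above: route parameters first, then a JSON body that must validate against the hook's `triggerSchema`. The hook identifiers, the payload, and the `taskId` response field are illustrative assumptions:

```python
import taskcluster

hooks = taskcluster.Hooks({
    'rootUrl': 'https://tc.example.com',  # placeholder
    'credentials': {'clientId': 'my-client', 'accessToken': 'my-token'},
})

# The body becomes the `payload` property of the JSON-e context used to
# render the hook's task template.
response = hooks.triggerHook('my-group', 'my-hook', {'branch': 'main'})
print(response.get('taskId'))  # field assumed from the response schema
```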
diff --git a/third_party/python/taskcluster/taskcluster/generated/hooksevents.py b/third_party/python/taskcluster/taskcluster/generated/hooksevents.py
new file mode 100644
index 0000000000..73e4a08c69
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/hooksevents.py
@@ -0,0 +1,101 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class HooksEvents(BaseClient):
+ """
+    The hooks service is responsible for creating tasks at specific times or in response to webhooks and API calls. Using this exchange allows us to make hooks which respond to particular pulse messages. These exchanges provide notifications when a hook is created, updated or deleted. This is so that the listener running in a different hooks process at the other end can direct another listener specified by `hookGroupId` and `hookId` to synchronize its bindings. But you are of course welcome to use these for other purposes, monitoring changes for example.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-hooks/v1/",
+ }
+ serviceName = 'hooks'
+ apiVersion = 'v1'
+
+ def hookCreated(self, *args, **kwargs):
+ """
+ Hook Created Messages
+
+        Whenever the API receives a request to create a pulse-based hook, a message is posted to this exchange and the receiver creates a listener with the bindings, to create a task.
+
+ This exchange takes the following keys:
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-created',
+ 'name': 'hookCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def hookUpdated(self, *args, **kwargs):
+ """
+ Hook Updated Messages
+
+        Whenever the API receives a request to update a pulse-based hook, a message is posted to this exchange and the receiver updates the listener associated with that hook.
+
+ This exchange takes the following keys:
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-updated',
+ 'name': 'hookUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def hookDeleted(self, *args, **kwargs):
+ """
+ Hook Deleted Messages
+
+        Whenever the API receives a request to delete a pulse-based hook, a message is posted to this exchange and the receiver deletes the listener associated with that hook.
+
+ This exchange takes the following keys:
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-deleted',
+ 'name': 'hookDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'HooksEvents']
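
Mirroring the lifecycle described in the class docstring, a sketch that enumerates the three hook exchanges; as with the other events classes, these calls only produce binding descriptors (note the empty `funcinfo`), with key names assumed from the client's usual return shape:

```python
import taskcluster

hooks_events = taskcluster.HooksEvents({'rootUrl': 'https://tc.example.com'})

# One binding per lifecycle exchange; a consumer would use these to keep
# its pulse listeners in sync with hook definitions, as described above.
for make_binding in (hooks_events.hookCreated,
                     hooks_events.hookUpdated,
                     hooks_events.hookDeleted):
    binding = make_binding()
    print(binding['exchange'], binding['routingKeyPattern'])
```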
diff --git a/third_party/python/taskcluster/taskcluster/generated/index.py b/third_party/python/taskcluster/taskcluster/generated/index.py
new file mode 100644
index 0000000000..627d16a150
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/index.py
@@ -0,0 +1,204 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Index(BaseClient):
+ """
+ The index service is responsible for indexing tasks. The service ensures that
+ tasks can be located by user-defined names.
+
+ As described in the service documentation, tasks are typically indexed via Pulse
+ messages, so the most common use of API methods is to read from the index.
+
+ Slashes (`/`) aren't allowed in index paths.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'index'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def findTask(self, *args, **kwargs):
+ """
+ Find Indexed Task
+
+ Find a task by index path, returning the highest-rank task with that path. If no
+ task exists for the given path, this API end-point will respond with a 404 status.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs)
+
+ def listNamespaces(self, *args, **kwargs):
+ """
+ List Namespaces
+
+ List the namespaces immediately under a given namespace.
+
+ This endpoint
+ lists up to 1000 namespaces. If more namespaces are present, a
+ `continuationToken` will be returned, which can be given in the next
+ request. For the initial request, the payload should be an empty JSON
+ object.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs)
+
+ def listTasks(self, *args, **kwargs):
+ """
+ List Tasks
+
+ List the tasks immediately under a given namespace.
+
+ This endpoint
+ lists up to 1000 tasks. If more tasks are present, a
+ `continuationToken` will be returned, which can be given in the next
+ request. For the initial request, the payload should be an empty JSON
+ object.
+
+ **Remark**, this end-point is designed for humans browsing for tasks, not
+ for services, for which it makes little sense.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listTasks"], *args, **kwargs)
+
+ def insertTask(self, *args, **kwargs):
+ """
+ Insert Task into Index
+
+ Insert a task into the index. If the new rank is less than the existing rank
+ at the given index path, the task is not indexed but the response is still 200 OK.
+
+ Please see the introduction above for information
+ about indexing successfully completed tasks automatically using custom routes.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["insertTask"], *args, **kwargs)
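+
+ # Editorial sketch (not part of the generated module): a hypothetical
+ # insertTask call, assuming `index` is a configured Index client. The
+ # payload shape follows v1/insert-task-request.json#; values are
+ # illustrative only.
+ #
+ #   index.insertTask('project.example.v2_0', {
+ #       'taskId': 'fN1SbArXTPSVFNUvaOlinQ',  # hypothetical slugid
+ #       'rank': 20000,       # ignored if lower than the existing rank
+ #       'data': {},          # free-form metadata stored with the entry
+ #       'expires': taskcluster.fromNowJSON('1 year'),
+ #   })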
+
+ def deleteTask(self, *args, **kwargs):
+ """
+ Remove Task from Index
+
+ Remove a task from the index. This is intended for administrative use,
+ where an index entry is no longer appropriate. The parent namespace is
+ not automatically deleted. Index entries with lower rank that were
+ previously inserted will not re-appear, as they were never stored.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteTask"], *args, **kwargs)
+
+ def findArtifactFromTask(self, *args, **kwargs):
+ """
+ Get Artifact From Indexed Task
+
+ Find a task by index path and redirect to the artifact on the most recent
+ run with the given `name`.
+
+ Note that multiple calls to this endpoint may return artifacts from different tasks
+ if a new task is inserted into the index between calls. Avoid using this method as
+ a stable link to multiple, connected files if the index path does not contain a
+ unique identifier. For example, the following two links may return unrelated files:
+ * `https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/installer.exe`
+ * `https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip`
+
+ This problem can be remedied by including the revision in the index path or by bundling both
+ installer and debug symbols into a single artifact.
+
+ If no task exists for the given index path, this API end-point responds with 404.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["findArtifactFromTask"], *args, **kwargs)
+
+ funcinfo = {
+ "deleteTask": {
+ 'args': ['namespace'],
+ 'method': 'delete',
+ 'name': 'deleteTask',
+ 'route': '/task/<namespace>',
+ 'stability': 'stable',
+ },
+ "findArtifactFromTask": {
+ 'args': ['indexPath', 'name'],
+ 'method': 'get',
+ 'name': 'findArtifactFromTask',
+ 'route': '/task/<indexPath>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "findTask": {
+ 'args': ['indexPath'],
+ 'method': 'get',
+ 'name': 'findTask',
+ 'output': 'v1/indexed-task-response.json#',
+ 'route': '/task/<indexPath>',
+ 'stability': 'stable',
+ },
+ "insertTask": {
+ 'args': ['namespace'],
+ 'input': 'v1/insert-task-request.json#',
+ 'method': 'put',
+ 'name': 'insertTask',
+ 'output': 'v1/indexed-task-response.json#',
+ 'route': '/task/<namespace>',
+ 'stability': 'stable',
+ },
+ "listNamespaces": {
+ 'args': ['namespace'],
+ 'method': 'get',
+ 'name': 'listNamespaces',
+ 'output': 'v1/list-namespaces-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/namespaces/<namespace>',
+ 'stability': 'stable',
+ },
+ "listTasks": {
+ 'args': ['namespace'],
+ 'method': 'get',
+ 'name': 'listTasks',
+ 'output': 'v1/list-tasks-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/tasks/<namespace>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Index']
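+
+# Editorial usage sketch (not part of the generated module): reading from the
+# index. The rootUrl and index paths are hypothetical.
+#
+#   import taskcluster
+#   index = taskcluster.Index({'rootUrl': 'https://tc.example.com'})
+#   task = index.findTask('project.example.latest')  # raises on 404
+#   namespaces = []
+#   outcome = index.listNamespaces('project.example')
+#   while True:
+#       namespaces.extend(outcome['namespaces'])
+#       token = outcome.get('continuationToken')
+#       if not token:
+#           break
+#       outcome = index.listNamespaces('project.example',
+#                                      continuationToken=token)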
diff --git a/third_party/python/taskcluster/taskcluster/generated/notify.py b/third_party/python/taskcluster/taskcluster/generated/notify.py
new file mode 100644
index 0000000000..c249782d2d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/notify.py
@@ -0,0 +1,207 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Notify(BaseClient):
+ """
+ The notification service listens for tasks with associated notifications
+ and handles requests to send emails and post pulse messages.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'notify'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def email(self, *args, **kwargs):
+ """
+ Send an Email
+
+ Send an email to `address`. The content is markdown and will be rendered
+ to HTML, but both the HTML and raw markdown text will be sent in the
+ email. If a link is included, it will be rendered to a nice button in the
+ HTML version of the email.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["email"], *args, **kwargs)
+
+ def pulse(self, *args, **kwargs):
+ """
+ Publish a Pulse Message
+
+ Publish a message on pulse with the given `routingKey`.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
+
+ def matrix(self, *args, **kwargs):
+ """
+ Post Matrix Message
+
+ Post a message to a room in Matrix. Optionally includes formatted message.
+
+ The `roomId` in the scopes is a fully formed `roomId` with leading `!` such
+ as `!foo:bar.com`.
+
+ Note that the matrix client used by taskcluster must be invited to a room before
+ it can post there!
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["matrix"], *args, **kwargs)
+
+ def slack(self, *args, **kwargs):
+ """
+ Post Slack Message
+
+ Post a message to a Slack channel.
+
+ The `channelId` in the scopes is a Slack channel ID, starting with a capital C.
+
+ The Slack app can post into public channels by default but will need to be added
+ to private channels before it can post messages there.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["slack"], *args, **kwargs)
+
+ def addDenylistAddress(self, *args, **kwargs):
+ """
+ Denylist Given Address
+
+ Add the given address to the notification denylist. Addresses in the denylist will be ignored
+ by the notification service.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["addDenylistAddress"], *args, **kwargs)
+
+ def deleteDenylistAddress(self, *args, **kwargs):
+ """
+ Delete Denylisted Address
+
+ Delete the specified address from the notification denylist.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteDenylistAddress"], *args, **kwargs)
+
+ def listDenylist(self, *args, **kwargs):
+ """
+ List Denylisted Notifications
+
+ Lists all the denylisted addresses.
+
+ By default this end-point will try to return up to 1000 addresses in one
+ request. But it **may return less**, even if more addresses are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `list` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the members at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["listDenylist"], *args, **kwargs)
+
+ funcinfo = {
+ "addDenylistAddress": {
+ 'args': [],
+ 'input': 'v1/notification-address.json#',
+ 'method': 'post',
+ 'name': 'addDenylistAddress',
+ 'route': '/denylist/add',
+ 'stability': 'experimental',
+ },
+ "deleteDenylistAddress": {
+ 'args': [],
+ 'input': 'v1/notification-address.json#',
+ 'method': 'delete',
+ 'name': 'deleteDenylistAddress',
+ 'route': '/denylist/delete',
+ 'stability': 'experimental',
+ },
+ "email": {
+ 'args': [],
+ 'input': 'v1/email-request.json#',
+ 'method': 'post',
+ 'name': 'email',
+ 'route': '/email',
+ 'stability': 'experimental',
+ },
+ "listDenylist": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listDenylist',
+ 'output': 'v1/notification-address-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/denylist/list',
+ 'stability': 'experimental',
+ },
+ "matrix": {
+ 'args': [],
+ 'input': 'v1/matrix-request.json#',
+ 'method': 'post',
+ 'name': 'matrix',
+ 'route': '/matrix',
+ 'stability': 'experimental',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "pulse": {
+ 'args': [],
+ 'input': 'v1/pulse-request.json#',
+ 'method': 'post',
+ 'name': 'pulse',
+ 'route': '/pulse',
+ 'stability': 'experimental',
+ },
+ "slack": {
+ 'args': [],
+ 'input': 'v1/slack-request.json#',
+ 'method': 'post',
+ 'name': 'slack',
+ 'route': '/slack',
+ 'stability': 'experimental',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Notify']
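+
+# Editorial usage sketch (not part of the generated module): sending a
+# markdown email via `email`. The address, link, and credentials are
+# hypothetical; the payload shape follows v1/email-request.json#.
+#
+#   import taskcluster
+#   notify = taskcluster.Notify({
+#       'rootUrl': 'https://tc.example.com',
+#       'credentials': {'clientId': 'example', 'accessToken': 'example'},
+#   })
+#   notify.email({
+#       'address': 'dev@example.com',
+#       'subject': 'Nightly build finished',
+#       'content': 'The **nightly** build finished successfully.',
+#       'link': {'text': 'Inspect Task', 'href': 'https://tc.example.com/tasks'},
+#   })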
diff --git a/third_party/python/taskcluster/taskcluster/generated/notifyevents.py b/third_party/python/taskcluster/taskcluster/generated/notifyevents.py
new file mode 100644
index 0000000000..33a54e9b70
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/notifyevents.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class NotifyEvents(BaseClient):
+ """
+ This pretty much only contains the simple free-form
+ message that can be published from this service by a request
+ from anybody with the proper scopes.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-notify/v1/",
+ }
+ serviceName = 'notify'
+ apiVersion = 'v1'
+
+ def notify(self, *args, **kwargs):
+ """
+ Notification Messages
+
+ An arbitrary message that a taskcluster user
+ can trigger if they like.
+
+ The standard one that is published by us watching
+ for the completion of tasks is just the task status
+ data that we pull from the queue `status()` endpoint
+ when we notice a task is complete.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'notification',
+ 'name': 'notify',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/notification-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'NotifyEvents']
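+
+# Editorial usage sketch (not part of the generated module): constraining the
+# routing key when binding. Names are hypothetical.
+#
+#   import taskcluster
+#   notify_events = taskcluster.NotifyEvents({'rootUrl': 'https://tc.example.com'})
+#   binding = notify_events.notify({'routingKeyKind': 'primary'})
+#   # binding['exchange'] and binding['routingKeyPattern'] can then be used
+#   # to bind a queue on pulse.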
diff --git a/third_party/python/taskcluster/taskcluster/generated/object.py b/third_party/python/taskcluster/taskcluster/generated/object.py
new file mode 100644
index 0000000000..d2baaa0ff4
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/object.py
@@ -0,0 +1,187 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Object(BaseClient):
+ """
+ The object service provides HTTP-accessible storage for large blobs of data.
+
+ Objects can be uploaded and downloaded, with the object data flowing directly
+ from the storage "backend" to the caller, rather than through this service.
+ Once uploaded, objects are immutable until their expiration time.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'object'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def createUpload(self, *args, **kwargs):
+ """
+ Begin upload of a new object
+
+ Create a new object by initiating upload of its data.
+
+ This endpoint implements negotiation of upload methods. It can be called
+ multiple times if necessary, either to propose new upload methods or to
+ renew credentials for an already-agreed upload.
+
+ The `name` parameter can contain any printable ASCII character (0x20 - 0x7e).
+ The `uploadId` must be supplied by the caller, and any attempts to upload
+ an object with the same name but a different `uploadId` will fail.
+ Thus the first call to this method establishes the `uploadId` for the
+ object, and as long as that value is kept secret, no other caller can
+ upload an object of that name, regardless of scopes. Object expiration
+ cannot be changed after the initial call, either. It is possible to call
+ this method with no proposed upload methods, which has the effect of "locking
+ in" the `expiration`, `projectId`, and `uploadId` properties and any
+ supplied hashes.
+
+ Unfinished uploads expire after 1 day.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createUpload"], *args, **kwargs)
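+
+ # Editorial sketch (not part of the generated module): "locking in" the
+ # upload metadata by proposing no upload methods, as described above. The
+ # uploadId is a slugid chosen (and kept secret) by the caller; all values
+ # are hypothetical and assume the v1/create-upload-request.json# shape,
+ # with `object_service` a configured Object client.
+ #
+ #   object_service.createUpload('some/object/name', {
+ #       'projectId': 'example',
+ #       'uploadId': taskcluster.slugId(),
+ #       'expires': taskcluster.fromNowJSON('1 month'),
+ #       'proposedUploadMethods': {},
+ #   })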
+
+ def finishUpload(self, *args, **kwargs):
+ """
+ Mark an upload as complete.
+
+ This endpoint marks an upload as complete. This indicates that all data has been
+ transmitted to the backend. After this call, no further calls to `uploadObject` are
+ allowed, and downloads of the object may begin. This method is idempotent, but will
+ fail if given an incorrect uploadId for an unfinished upload.
+
+ Note that, once `finishUpload` is complete, the object is considered immutable.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["finishUpload"], *args, **kwargs)
+
+ def startDownload(self, *args, **kwargs):
+ """
+ Download object data
+
+ Start the process of downloading an object's data. Call this endpoint with a list of acceptable
+ download methods, and the server will select a method and return the corresponding payload.
+
+ Returns a 406 error if none of the given download methods are available.
+
+ See [Download Methods](https://docs.taskcluster.net/docs/reference/platform/object/download-methods) for more detail.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["startDownload"], *args, **kwargs)
+
+ def object(self, *args, **kwargs):
+ """
+ Get an object's metadata
+
+ Get the metadata for the named object. This metadata is not sufficient to
+ get the object's content; for that use `startDownload`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["object"], *args, **kwargs)
+
+ def download(self, *args, **kwargs):
+ """
+ Get an object's data
+
+ Get the data in an object directly. This method does not return a JSON body, but
+ redirects to a location that will serve the object content directly.
+
+ URLs for this endpoint, perhaps with attached authentication (`?bewit=..`),
+ are typically used for downloads of objects by simple HTTP clients such as
+ web browsers, curl, or wget.
+
+ This method is limited by the common capabilities of HTTP, so it may not be
+ the most efficient, resilient, or featureful way to retrieve an artifact.
+ Situations where such functionality is required should use the
+ `startDownload` API endpoint.
+
+ See [Simple Downloads](https://docs.taskcluster.net/docs/reference/platform/object/simple-downloads) for more detail.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["download"], *args, **kwargs)
+
+ funcinfo = {
+ "createUpload": {
+ 'args': ['name'],
+ 'input': 'v1/create-upload-request.json#',
+ 'method': 'put',
+ 'name': 'createUpload',
+ 'output': 'v1/create-upload-response.json#',
+ 'route': '/upload/<name>',
+ 'stability': 'stable',
+ },
+ "download": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'download',
+ 'route': '/download/<name>',
+ 'stability': 'stable',
+ },
+ "finishUpload": {
+ 'args': ['name'],
+ 'input': 'v1/finish-upload-request.json#',
+ 'method': 'post',
+ 'name': 'finishUpload',
+ 'route': '/finish-upload/<name>',
+ 'stability': 'stable',
+ },
+ "object": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'object',
+ 'output': 'v1/get-object-response.json#',
+ 'route': '/metadata/<name>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "startDownload": {
+ 'args': ['name'],
+ 'input': 'v1/download-object-request.json#',
+ 'method': 'put',
+ 'name': 'startDownload',
+ 'output': 'v1/download-object-response.json#',
+ 'route': '/start-download/<name>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Object']
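+
+# Editorial usage sketch (not part of the generated module): negotiating a
+# simple download. The object name and rootUrl are hypothetical; the payload
+# assumes the v1/download-object-request.json# shape.
+#
+#   import taskcluster
+#   object_service = taskcluster.Object({'rootUrl': 'https://tc.example.com'})
+#   meta = object_service.object('some/object/name')  # metadata only
+#   dl = object_service.startDownload('some/object/name', {
+#       'acceptDownloadMethods': {'simple': True},
+#   })
+#   # For the 'simple' method the response carries a URL that can be
+#   # fetched with any plain HTTP client.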
diff --git a/third_party/python/taskcluster/taskcluster/generated/purgecache.py b/third_party/python/taskcluster/taskcluster/generated/purgecache.py
new file mode 100644
index 0000000000..659a087c4e
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/purgecache.py
@@ -0,0 +1,123 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class PurgeCache(BaseClient):
+ """
+ The purge-cache service is responsible for tracking cache-purge requests.
+
+ Users create purge requests for specific caches on specific workers, and
+ these requests are timestamped. Workers consult the service before
+ starting a new task, and purge any caches older than the timestamp.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'purge-cache'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def purgeCache(self, *args, **kwargs):
+ """
+ Purge Worker Cache
+
+ Publish a request to purge caches named `cacheName`
+ on `workerPoolId` workers.
+
+ If such a request already exists, its `before` timestamp is updated to
+ the current time.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs)
+
+ def allPurgeRequests(self, *args, **kwargs):
+ """
+ All Open Purge Requests
+
+ View all active purge requests.
+
+ This is useful mostly for administrators to view
+ the set of open purge requests. It should not
+ be used by workers. They should use the purgeRequests
+ endpoint that is specific to their workerType and
+ provisionerId.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs)
+
+ def purgeRequests(self, *args, **kwargs):
+ """
+ Open Purge Requests for a worker pool
+
+ List the caches for this `workerPoolId` that should be
+ purged if they are from before the time given in the response.
+
+ This is intended to be used by workers to determine which caches to purge.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs)
+
+ funcinfo = {
+ "allPurgeRequests": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'allPurgeRequests',
+ 'output': 'v1/all-purge-cache-request-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/purge-cache/list',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "purgeCache": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/purge-cache-request.json#',
+ 'method': 'post',
+ 'name': 'purgeCache',
+ 'route': '/purge-cache/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "purgeRequests": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'purgeRequests',
+ 'output': 'v1/purge-cache-request-list.json#',
+ 'query': ['since'],
+ 'route': '/purge-cache/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'PurgeCache']
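+
+# Editorial usage sketch (not part of the generated module): requesting a
+# purge, then checking open requests from the worker side. The workerPoolId,
+# cache name, and drop_cache_if_older_than helper are hypothetical.
+#
+#   import taskcluster
+#   purge = taskcluster.PurgeCache({'rootUrl': 'https://tc.example.com'})
+#   purge.purgeCache('proj-example/gecko-t-linux', {'cacheName': 'checkouts'})
+#   # Workers consult (optionally passing `since` to bound the results):
+#   for req in purge.purgeRequests('proj-example/gecko-t-linux')['requests']:
+#       drop_cache_if_older_than(req['cacheName'], req['before'])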
diff --git a/third_party/python/taskcluster/taskcluster/generated/queue.py b/third_party/python/taskcluster/taskcluster/generated/queue.py
new file mode 100644
index 0000000000..990d00aeec
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/queue.py
@@ -0,0 +1,1120 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Queue(BaseClient):
+ """
+ The queue service is responsible for accepting tasks and tracking their state
+ as they are executed by workers, in order to ensure they are eventually
+ resolved.
+
+ ## Artifact Storage Types
+
+ * **Object artifacts** contain arbitrary data, stored via the object service.
+ * **Redirect artifacts** will redirect the caller to a URL when fetched,
+ with a 303 (See Other) response. Clients will not apply any kind of
+ authentication to that URL.
+ * **Link artifacts** will be treated as if the caller requested the linked
+ artifact on the same task. Links may be chained, but cycles are forbidden.
+ The caller must have scopes for the linked artifact, or a 403 response will
+ be returned.
+ * **Error artifacts** consist only of meta-data which the queue will
+ store for you. These artifacts are meant to indicate that the
+ worker or the task failed to generate a specific artifact that it
+ would otherwise have uploaded. For example, docker-worker will upload an
+ error artifact if the file it was supposed to upload doesn't exist or
+ turns out to be a directory. Clients requesting an error artifact will
+ get a `424` (Failed Dependency) response. This is mainly designed to
+ ensure that dependent tasks can distinguish between artifacts that were
+ supposed to be generated and artifacts for which the name is misspelled.
+ * **S3 artifacts** are used for static files which will be
+ stored on S3. When creating an S3 artifact the queue will return a
+ pre-signed URL to which you can do a `PUT` request to upload your
+ artifact. Note that the `PUT` request **must** specify the `content-length`
+ header and **must** give the `content-type` header the same value as in
+ the request to `createArtifact`. S3 artifacts will be deprecated soon,
+ and users should prefer object artifacts instead.
+
+ ## Artifact immutability
+
+ Generally speaking, you cannot overwrite an artifact once it has been
+ created. But if you repeat the request with the same properties, the
+ request will succeed, as the operation is idempotent.
+ This is useful if you need to refresh a signed URL while uploading.
+ Do not abuse this to overwrite artifacts created by another entity,
+ such as a worker-host overwriting an artifact created by worker-code!
+
+ The queue defines the following *immutability special cases*:
+
+ * A `reference` artifact can replace an existing `reference` artifact.
+ * A `link` artifact can replace an existing `reference` artifact.
+ * Any artifact's `expires` can be extended (made later, but not earlier).
+ """
+
+ classOptions = {
+ }
+ serviceName = 'queue'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def task(self, *args, **kwargs):
+ """
+ Get Task Definition
+
+ This end-point will return the task definition. Notice that the task
+ definition may have been modified by the queue; if an optional property is
+ not specified, the queue may provide a default value.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["task"], *args, **kwargs)
+
+ def status(self, *args, **kwargs):
+ """
+ Get task status
+
+ Get task status structure from `taskId`
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["status"], *args, **kwargs)
+
+ def listTaskGroup(self, *args, **kwargs):
+ """
+ List Task Group
+
+ List tasks sharing the same `taskGroupId`.
+
+ As a task-group may contain an unbounded number of tasks, this end-point
+ may return a `continuationToken`. To continue listing tasks you must call
+ the `listTaskGroup` again with the `continuationToken` as the
+ query-string option `continuationToken`.
+
+ By default this end-point will try to return up to 1000 members in one
+ request. But it **may return less**, even if more tasks are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listTaskGroup` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the members at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listTaskGroup"], *args, **kwargs)
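+
+ # Editorial sketch (not part of the generated module): draining the
+ # continuation token, as described above. The taskGroupId is hypothetical
+ # and `queue` is assumed to be a configured Queue client.
+ #
+ #   tasks = []
+ #   outcome = queue.listTaskGroup('fN1SbArXTPSVFNUvaOlinQ')
+ #   while True:
+ #       tasks.extend(outcome['tasks'])
+ #       token = outcome.get('continuationToken')
+ #       if not token:
+ #           break
+ #       outcome = queue.listTaskGroup('fN1SbArXTPSVFNUvaOlinQ',
+ #                                     continuationToken=token)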
+
+ def listDependentTasks(self, *args, **kwargs):
+ """
+ List Dependent Tasks
+
+ List tasks that depend on the given `taskId`.
+
+ As many tasks from different task-groups may depend on a single task,
+ this end-point may return a `continuationToken`. To continue listing
+ tasks you must call `listDependentTasks` again with the
+ `continuationToken` as the query-string option `continuationToken`.
+
+ By default this end-point will try to return up to 1000 tasks in one
+ request. But it **may return less**, even if more tasks are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listDependentTasks` with the last `continuationToken` until
+ you get a result without a `continuationToken`.
+
+ If you are not interested in listing all the tasks at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listDependentTasks"], *args, **kwargs)
+
+ def createTask(self, *args, **kwargs):
+ """
+ Create New Task
+
+ Create a new task, this is an **idempotent** operation, so repeat it if
+ you get an internal server error or network connection is dropped.
+
+ **Task `deadline`**: the deadline property can be no more than 5 days
+ into the future. This is to limit the number of pending tasks not being
+ taken care of. Ideally, you should use a much shorter deadline.
+
+ **Task expiration**: the `expires` property must be greater than the
+ task `deadline`. If not provided it will default to `deadline` + one
+ year. Notice that artifacts created by a task must expire before the
+ task's expiration.
+
+ **Task specific routing-keys**: using the `task.routes` property you may
+ define task specific routing-keys. If a task has a task specific
+ routing-key: `<route>`, then when the AMQP message about the task is
+ published, the message will be CC'ed with the routing-key:
+ `route.<route>`. This is useful if you want another component to listen
+ for completed tasks you have posted. The caller must have scope
+ `queue:route:<route>` for each route.
+
+ **Dependencies**: any tasks referenced in `task.dependencies` must have
+ already been created at the time of this call.
+
+ **Scopes**: Note that the scopes required to complete this API call depend
+ on the content of the `scopes`, `routes`, `schedulerId`, `priority`,
+ `provisionerId`, and `workerType` properties of the task definition.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createTask"], *args, **kwargs)
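+
+ # Editorial sketch (not part of the generated module): a minimal task
+ # definition. All values are hypothetical; `slugId` and `fromNowJSON` are
+ # helpers exported by this library, and `creds` stands in for real
+ # credentials.
+ #
+ #   import taskcluster
+ #   queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com',
+ #                              'credentials': creds})
+ #   task_id = taskcluster.slugId()
+ #   queue.createTask(task_id, {
+ #       'taskQueueId': 'proj-example/tutorial',
+ #       'created': taskcluster.fromNowJSON('0 seconds'),
+ #       'deadline': taskcluster.fromNowJSON('2 hours'),
+ #       'payload': {'image': 'ubuntu:22.04', 'command': ['true'],
+ #                   'maxRunTime': 600},
+ #       'metadata': {'name': 'example-task',
+ #                    'description': 'A hypothetical example',
+ #                    'owner': 'dev@example.com',
+ #                    'source': 'https://example.com/tasks'},
+ #   })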
+
+ def scheduleTask(self, *args, **kwargs):
+ """
+ Schedule Defined Task
+
+ scheduleTask will schedule a task to be executed, even if it has
+ unresolved dependencies. A task would otherwise only be scheduled if
+ its dependencies were resolved.
+
+ This is useful if you have defined a task that depends on itself or on
+ some other task that has not been resolved, but you wish the task to be
+ scheduled immediately.
+
+ This will announce the task as pending and workers will be allowed to
+ claim it and resolve the task.
+
+ **Note** this operation is **idempotent** and will not fail or complain
+ if called with a `taskId` that is already scheduled, or even resolved.
+ To reschedule a task previously resolved, use `rerunTask`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs)
+
+ def rerunTask(self, *args, **kwargs):
+ """
+ Rerun a Resolved Task
+
+ This method _reruns_ a previously resolved task, even if it was
+ _completed_. This is useful if your task completes unsuccessfully, and
+ you just want to run it from scratch again. This will also reset the
+ number of `retries` allowed. It will schedule a task that is _unscheduled_
+ regardless of the state of its dependencies.
+
+ This method is deprecated in favour of creating a new task with the same
+ task definition (but with a new taskId).
+
+ Remember that `retries` in the task status counts the number of runs that
+ the queue has started because the worker stopped responding, for example
+ because a spot node died.
+
+ **Remark** this operation is idempotent: if it is invoked for a task that
+ is `pending` or `running`, it will just return the current task status.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs)
+
+ def cancelTask(self, *args, **kwargs):
+ """
+ Cancel Task
+
+ This method will cancel a task that is either `unscheduled`, `pending` or
+ `running`. It will resolve the current run as `exception` with
+ `reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e.
+ it doesn't have any runs, an initial run will be added and resolved as
+ described above. Hence, after canceling a task, it cannot be scheduled
+ with `queue.scheduleTask`, but a new run can be created with
+ `queue.rerun`. These semantics are equivalent to calling
+ `queue.scheduleTask` immediately followed by `queue.cancelTask`.
+
+ **Remark** this operation is idempotent, if you try to cancel a task that
+ isn't `unscheduled`, `pending` or `running`, this operation will just
+ return the current task status.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs)
+
+ def claimWork(self, *args, **kwargs):
+ """
+ Claim Work
+
+ Claim pending task(s) for the given task queue.
+
+ If any work is available (even if fewer than the requested number of
+ tasks), this will return immediately. Otherwise, it will block for tens of
+ seconds waiting for work. If no work appears, it will return an empty
+ list of tasks. Callers should sleep a short while (to avoid denial of
+ service in an error condition) and call the endpoint again. This is a
+ simple implementation of "long polling".
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["claimWork"], *args, **kwargs)
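+
+ # Editorial sketch (not part of the generated module): a minimal
+ # long-polling loop. The taskQueueId, workerGroup, and workerId are
+ # hypothetical; run_task_somehow stands in for real worker logic, and
+ # `queue` is assumed to be a configured Queue client.
+ #
+ #   import time
+ #   while True:
+ #       claims = queue.claimWork('proj-example/tutorial', {
+ #           'workerGroup': 'example-group',
+ #           'workerId': 'worker-1',
+ #           'tasks': 1,
+ #       })
+ #       if not claims['tasks']:
+ #           time.sleep(5)  # back off briefly, as advised above
+ #           continue
+ #       run_task_somehow(claims['tasks'][0])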
+
+ def claimTask(self, *args, **kwargs):
+ """
+ Claim Task
+
+ claim a task - never documented
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["claimTask"], *args, **kwargs)
+
+ def reclaimTask(self, *args, **kwargs):
+ """
+ Reclaim task
+
+ Refresh the claim for a specific `runId` for the given `taskId`. This updates
+ the `takenUntil` property and returns a new set of temporary credentials
+ for performing requests on behalf of the task. These credentials should
+ be used in-place of the credentials returned by `claimWork`.
+
+ The `reclaimTask` request serves to:
+ * Postpone `takenUntil` preventing the queue from resolving
+ `claim-expired`,
+ * Refresh temporary credentials used for processing the task, and
+ * Abort execution if the task/run have been resolved.
+
+ If the `takenUntil` timestamp is exceeded, the queue will resolve the run
+ as _exception_ with reason `claim-expired`, and proceed to retry the
+ task. This ensures that tasks are retried, even if workers disappear
+ without warning.
+
+ If the task is resolved, this end-point will return `409` reporting
+ `RequestConflict`. This typically happens if the task has been canceled
+ or the `task.deadline` has been exceeded. If reclaiming fails, workers
+ should abort the task and forget about the given `runId`. There is no
+ need to resolve the run or upload artifacts.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reclaimTask"], *args, **kwargs)
+
+ def reportCompleted(self, *args, **kwargs):
+ """
+ Report Run Completed
+
+ Report a task completed, resolving the run as `completed`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reportCompleted"], *args, **kwargs)
+
+ def reportFailed(self, *args, **kwargs):
+ """
+ Report Run Failed
+
+ Report a run failed, resolving the run as `failed`. Use this to resolve
+ a run that failed because the task specific code behaved unexpectedly.
+ For example the task exited non-zero, or didn't produce expected output.
+
+ Do not use this if the task couldn't be run because of a malformed
+ payload, or some other unexpected condition. In these cases we have a task
+ exception, which should be reported with `reportException`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reportFailed"], *args, **kwargs)
+
+ def reportException(self, *args, **kwargs):
+ """
+ Report Task Exception
+
+ Resolve a run as _exception_. Generally, you will want to report tasks as
+ failed instead of exception. You should `reportException` if,
+
+ * The `task.payload` is invalid,
+ * Non-existent resources are referenced,
+ * Declared actions cannot be executed due to unavailable resources,
+ * The worker had to shutdown prematurely,
+ * The worker experienced an unknown error, or,
+ * The task explicitly requested a retry.
+
+ Do not use this to signal that some user-specified code crashed for any
+ reason specific to this code. If user-specific code hits a resource that
+ is temporarily unavailable, the worker should report the task as _failed_.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reportException"], *args, **kwargs)
+
+ def createArtifact(self, *args, **kwargs):
+ """
+ Create Artifact
+
+ This API end-point creates an artifact for a specific run of a task. This
+ should **only** be used by a worker currently operating on this task, or
+ from a process running within the task (ie. on the worker).
+
+ All artifacts must specify when they expire. The queue will
+ automatically take care of deleting artifacts past their
+ expiration point. This feature makes it feasible to upload large
+ intermediate artifacts from data processing applications, as the
+ artifacts can be set to expire a few days later.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createArtifact"], *args, **kwargs)
+
+ def finishArtifact(self, *args, **kwargs):
+ """
+ Finish Artifact
+
+ This endpoint marks an artifact as present for the given task, and
+ should be called when the artifact data is fully uploaded.
+
+ The storage types `reference`, `link`, and `error` do not need to
+ be finished, as they are finished immediately by `createArtifact`.
+ The storage type `s3` does not support this functionality and cannot
+ be finished. In all such cases, calling this method is an input error
+ (400).
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["finishArtifact"], *args, **kwargs)
+
+ def getArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Data from Run
+
+ Get artifact by `<name>` from a specific run.
+
+ **Artifact Access**, in order to get an artifact you need the scope
+ `queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
+ To allow access to fetch artifacts with a client like `curl` or a web
+ browser, without using Taskcluster credentials, include a scope in the
+ `anonymous` role. The convention is to include
+ `queue:get-artifact:public/*`.
+
+ **Response**: the HTTP response to this method is a 303 redirect to the
+ URL from which the artifact can be downloaded. The body of that response
+ contains the data described in the output schema, containing the same URL.
+ Callers are encouraged to use whichever method of gathering the URL is
+ most convenient. Standard HTTP clients will follow the redirect, while
+ API client libraries will return the JSON body.
+
+ In order to download an artifact the following must be done:
+
+ 1. Obtain queue url. Building a signed url with a taskcluster client is
+ recommended
+ 1. Make a GET request which does not follow redirects
+ 1. In all cases, if specified, the
+ x-taskcluster-location-{content,transfer}-{sha256,length} values must be
+ validated to be equal to the Content-Length and Sha256 checksum of the
+ final artifact downloaded, as well as of any intermediate redirects
+ 1. If this response is a 500-series error, retry using an exponential
+ backoff. No more than 5 retries should be attempted
+ 1. If this response is a 400-series error, treat it appropriately for
+ your context. This might be an error in responding to this request or
+ an Error storage type body. This request should not be retried.
+ 1. If this response is a 200-series response, the response body is the artifact.
+ If the x-taskcluster-location-{content,transfer}-{sha256,length} and
+ x-taskcluster-location-content-encoding are specified, they should match
+ this response body
+ 1. If the response type is a 300-series redirect, the artifact will be at the
+ location specified by the `Location` header. There are multiple artifact storage
+ types which use a 300-series redirect.
+ 1. For all redirects followed, the user must verify that the content-sha256, content-length,
+ transfer-sha256, transfer-length and content-encoding match every further request. The final
+ artifact must also be validated against the values specified in the original queue response
+ 1. Caching of requests with an x-taskcluster-artifact-storage-type value of `reference`
+ must not occur
+
+ **Headers**
+ The following important headers are set on the response to this method:
+
+ * location: the url of the artifact if a redirect is to be performed
+ * x-taskcluster-artifact-storage-type: the storage type. Example: s3
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["getArtifact"], *args, **kwargs)
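+
+ # Editorial sketch (not part of the generated module): step 1 above, done
+ # with the client's `buildSignedUrl` helper. The taskId, runId, and
+ # artifact name are hypothetical.
+ #
+ #   url = queue.buildSignedUrl('getArtifact', task_id, 0,
+ #                              'private/build/target.tar.gz')
+ #   # GET this URL with redirects disabled, then validate the
+ #   # x-taskcluster-location-* headers as described above.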
+
+ def getLatestArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Data from Latest Run
+
+ Get artifact by `<name>` from the last run of a task.
+
+ **Artifact Access**, in order to get an artifact you need the scope
+ `queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
+ To allow access to fetch artifacts with a client like `curl` or a web
+ browser, without using Taskcluster credentials, include a scope in the
+ `anonymous` role. The convention is to include
+ `queue:get-artifact:public/*`.
+
+ **API Clients**, this method will redirect you to the artifact, if it is
+ stored externally. Either way, the response may not be JSON. So API
+ client users might want to generate a signed URL for this end-point and
+ use that URL with a normal HTTP client.
+
+ **Remark**, this end-point is slightly slower than
+ `queue.getArtifact`, so consider that if you already know the `runId` of
+ the latest run. Otherwise, just use the most convenient API end-point.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["getLatestArtifact"], *args, **kwargs)
+
+ def listArtifacts(self, *args, **kwargs):
+ """
+ Get Artifacts from Run
+
+ Returns a list of artifacts and associated meta-data for a given run.
+
+ As a task may have many artifacts, paging may be necessary. If this
+ end-point returns a `continuationToken`, you should call the end-point
+ again with the `continuationToken` as the query-string option:
+ `continuationToken`.
+
+ By default this end-point will list up to 1000 artifacts in a single page;
+ you may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listArtifacts"], *args, **kwargs)
+
+ def listLatestArtifacts(self, *args, **kwargs):
+ """
+ Get Artifacts from Latest Run
+
+ Returns a list of artifacts and associated meta-data for the latest run
+ from the given task.
+
+ As a task may have many artifacts, paging may be necessary. If this
+ end-point returns a `continuationToken`, you should call the end-point
+ again with the `continuationToken` as the query-string option:
+ `continuationToken`.
+
+ By default this end-point will list up to 1000 artifacts in a single page;
+ you may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listLatestArtifacts"], *args, **kwargs)
+
+ def artifactInfo(self, *args, **kwargs):
+ """
+ Get Artifact Information From Run
+
+ Returns associated metadata for a given artifact, in the given task run.
+ The metadata is the same as that returned from `listArtifacts`, and does
+ not grant access to the artifact data.
+
+ Note that this method does *not* automatically follow link artifacts.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["artifactInfo"], *args, **kwargs)
+
+ def latestArtifactInfo(self, *args, **kwargs):
+ """
+ Get Artifact Information From Latest Run
+
+ Returns associated metadata for a given artifact, in the latest run of the
+ task. The metadata is the same as that returned from `listArtifacts`,
+ and does not grant access to the artifact data.
+
+ Note that this method does *not* automatically follow link artifacts.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["latestArtifactInfo"], *args, **kwargs)
+
+ def artifact(self, *args, **kwargs):
+ """
+ Get Artifact Content From Run
+
+ Returns information about the content of the artifact, in the given task run.
+
+ Depending on the storage type, the endpoint returns the content of the artifact
+ or enough information to access that content.
+
+ This method follows link artifacts, so it will not return content
+ for a link artifact.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["artifact"], *args, **kwargs)
+
+ def latestArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Content From Latest Run
+
+ Returns information about the content of the artifact, in the latest task run.
+
+ Depending on the storage type, the endpoint returns the content of the artifact
+ or enough information to access that content.
+
+ This method follows link artifacts, so it will not return content
+ for a link artifact.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["latestArtifact"], *args, **kwargs)
+
+ def listProvisioners(self, *args, **kwargs):
+ """
+ Get a list of all active provisioners
+
+ Get all active provisioners.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 provisioners in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["listProvisioners"], *args, **kwargs)
+
+ def getProvisioner(self, *args, **kwargs):
+ """
+ Get an active provisioner
+
+ Get an active provisioner.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["getProvisioner"], *args, **kwargs)
+
+ def declareProvisioner(self, *args, **kwargs):
+ """
+ Update a provisioner
+
+ Declare a provisioner, supplying some details about it.
+
+ `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
+ possessed. For example, a request to update the `my-provisioner`
+ provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
+ `queue:declare-provisioner:my-provisioner#description`.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs)
+
+ def pendingTasks(self, *args, **kwargs):
+ """
+ Get Number of Pending Tasks
+
+ Get an approximate number of pending tasks for the given `taskQueueId`.
+
+ The underlying Azure Storage Queues only promise to give us an estimate.
+ Furthermore, we cache the result in memory for 20 seconds. So consumers
+ should by no means expect this to be an accurate number.
+ It is, however, a solid estimate of the number of pending tasks.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["pendingTasks"], *args, **kwargs)
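+
+ # Editorial sketch (not part of the generated module), with a hypothetical
+ # taskQueueId and a configured Queue client:
+ #
+ #   count = queue.pendingTasks('proj-example/tutorial')['pendingTasks']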
+
+ def listWorkerTypes(self, *args, **kwargs):
+ """
+ Get a list of all active worker-types
+
+ Get all active worker-types for the given provisioner.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 worker-types in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
+
+ def getWorkerType(self, *args, **kwargs):
+ """
+ Get a worker-type
+
+ Get a worker-type from a provisioner.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["getWorkerType"], *args, **kwargs)
+
+ def declareWorkerType(self, *args, **kwargs):
+ """
+ Update a worker-type
+
+ Declare a workerType, supplying some details about it.
+
+ `declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are
+ possessed. For example, a request to update the `highmem` worker-type within the `my-provisioner`
+ provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope
+ `queue:declare-worker-type:my-provisioner/highmem#description`.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs)
+
+ def listTaskQueues(self, *args, **kwargs):
+ """
+ Get a list of all active task queues
+
+ Get all active task queues.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 task queues in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listTaskQueues"], *args, **kwargs)
+
+ def getTaskQueue(self, *args, **kwargs):
+ """
+ Get a task queue
+
+ Get a task queue.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["getTaskQueue"], *args, **kwargs)
+
+ def listWorkers(self, *args, **kwargs):
+ """
+ Get a list of all active workers of a workerType
+
+ Get a list of all active workers of a workerType.
+
+ `listWorkers` allows a response to be filtered by quarantined and non-quarantined workers.
+ To filter the query, you should call the end-point with `quarantined` as a query-string option with a
+ true or false value.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 workers in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkers"], *args, **kwargs)
+
+ def getWorker(self, *args, **kwargs):
+ """
+ Get a worker
+
+ Get a worker from a worker-type.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["getWorker"], *args, **kwargs)
+
+ def quarantineWorker(self, *args, **kwargs):
+ """
+ Quarantine a worker
+
+ Quarantine a worker
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs)
+
+ def declareWorker(self, *args, **kwargs):
+ """
+ Declare a worker
+
+ Declare a worker, supplying some details about it.
+
+ `declareWorker` allows updating one or more properties of a worker as long as the required scopes are
+ possessed.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["declareWorker"], *args, **kwargs)
+
+ funcinfo = {
+ "artifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'artifact',
+ 'output': 'v1/artifact-content-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifact-content/<name>',
+ 'stability': 'stable',
+ },
+ "artifactInfo": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'artifactInfo',
+ 'output': 'v1/artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifact-info/<name>',
+ 'stability': 'stable',
+ },
+ "cancelTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'cancelTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/cancel',
+ 'stability': 'stable',
+ },
+ "claimTask": {
+ 'args': ['taskId', 'runId'],
+ 'input': 'v1/task-claim-request.json#',
+ 'method': 'post',
+ 'name': 'claimTask',
+ 'output': 'v1/task-claim-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/claim',
+ 'stability': 'deprecated',
+ },
+ "claimWork": {
+ 'args': ['taskQueueId'],
+ 'input': 'v1/claim-work-request.json#',
+ 'method': 'post',
+ 'name': 'claimWork',
+ 'output': 'v1/claim-work-response.json#',
+ 'route': '/claim-work/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "createArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'input': 'v1/post-artifact-request.json#',
+ 'method': 'post',
+ 'name': 'createArtifact',
+ 'output': 'v1/post-artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "createTask": {
+ 'args': ['taskId'],
+ 'input': 'v1/create-task-request.json#',
+ 'method': 'put',
+ 'name': 'createTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>',
+ 'stability': 'stable',
+ },
+ "declareProvisioner": {
+ 'args': ['provisionerId'],
+ 'input': 'v1/update-provisioner-request.json#',
+ 'method': 'put',
+ 'name': 'declareProvisioner',
+ 'output': 'v1/provisioner-response.json#',
+ 'route': '/provisioners/<provisionerId>',
+ 'stability': 'deprecated',
+ },
+ "declareWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'input': 'v1/update-worker-request.json#',
+ 'method': 'put',
+ 'name': 'declareWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "declareWorkerType": {
+ 'args': ['provisionerId', 'workerType'],
+ 'input': 'v1/update-workertype-request.json#',
+ 'method': 'put',
+ 'name': 'declareWorkerType',
+ 'output': 'v1/workertype-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
+ 'stability': 'deprecated',
+ },
+ "finishArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'input': 'v1/finish-artifact-request.json#',
+ 'method': 'put',
+ 'name': 'finishArtifact',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'getArtifact',
+ 'output': 'v1/get-artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getLatestArtifact": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'getLatestArtifact',
+ 'output': 'v1/get-artifact-response.json#',
+ 'route': '/task/<taskId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getProvisioner": {
+ 'args': ['provisionerId'],
+ 'method': 'get',
+ 'name': 'getProvisioner',
+ 'output': 'v1/provisioner-response.json#',
+ 'route': '/provisioners/<provisionerId>',
+ 'stability': 'deprecated',
+ },
+ "getTaskQueue": {
+ 'args': ['taskQueueId'],
+ 'method': 'get',
+ 'name': 'getTaskQueue',
+ 'output': 'v1/taskqueue-response.json#',
+ 'route': '/task-queues/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "getWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'method': 'get',
+ 'name': 'getWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "getWorkerType": {
+ 'args': ['provisionerId', 'workerType'],
+ 'method': 'get',
+ 'name': 'getWorkerType',
+ 'output': 'v1/workertype-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
+ 'stability': 'deprecated',
+ },
+ "latestArtifact": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'latestArtifact',
+ 'output': 'v1/artifact-content-response.json#',
+ 'route': '/task/<taskId>/artifact-content/<name>',
+ 'stability': 'stable',
+ },
+ "latestArtifactInfo": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'latestArtifactInfo',
+ 'output': 'v1/artifact-response.json#',
+ 'route': '/task/<taskId>/artifact-info/<name>',
+ 'stability': 'stable',
+ },
+ "listArtifacts": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'get',
+ 'name': 'listArtifacts',
+ 'output': 'v1/list-artifacts-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/runs/<runId>/artifacts',
+ 'stability': 'stable',
+ },
+ "listDependentTasks": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'listDependentTasks',
+ 'output': 'v1/list-dependent-tasks-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/dependents',
+ 'stability': 'stable',
+ },
+ "listLatestArtifacts": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'listLatestArtifacts',
+ 'output': 'v1/list-artifacts-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/artifacts',
+ 'stability': 'stable',
+ },
+ "listProvisioners": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listProvisioners',
+ 'output': 'v1/list-provisioners-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/provisioners',
+ 'stability': 'deprecated',
+ },
+ "listTaskGroup": {
+ 'args': ['taskGroupId'],
+ 'method': 'get',
+ 'name': 'listTaskGroup',
+ 'output': 'v1/list-task-group-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task-group/<taskGroupId>/list',
+ 'stability': 'stable',
+ },
+ "listTaskQueues": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listTaskQueues',
+ 'output': 'v1/list-taskqueues-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task-queues',
+ 'stability': 'stable',
+ },
+ "listWorkerTypes": {
+ 'args': ['provisionerId'],
+ 'method': 'get',
+ 'name': 'listWorkerTypes',
+ 'output': 'v1/list-workertypes-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/provisioners/<provisionerId>/worker-types',
+ 'stability': 'deprecated',
+ },
+ "listWorkers": {
+ 'args': ['provisionerId', 'workerType'],
+ 'method': 'get',
+ 'name': 'listWorkers',
+ 'output': 'v1/list-workers-response.json#',
+ 'query': ['continuationToken', 'limit', 'quarantined'],
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers',
+ 'stability': 'experimental',
+ },
+ "pendingTasks": {
+ 'args': ['taskQueueId'],
+ 'method': 'get',
+ 'name': 'pendingTasks',
+ 'output': 'v1/pending-tasks-response.json#',
+ 'route': '/pending/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "quarantineWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'input': 'v1/quarantine-worker-request.json#',
+ 'method': 'put',
+ 'name': 'quarantineWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "reclaimTask": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reclaimTask',
+ 'output': 'v1/task-reclaim-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/reclaim',
+ 'stability': 'stable',
+ },
+ "reportCompleted": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reportCompleted',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/completed',
+ 'stability': 'stable',
+ },
+ "reportException": {
+ 'args': ['taskId', 'runId'],
+ 'input': 'v1/task-exception-request.json#',
+ 'method': 'post',
+ 'name': 'reportException',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/exception',
+ 'stability': 'stable',
+ },
+ "reportFailed": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reportFailed',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/failed',
+ 'stability': 'stable',
+ },
+ "rerunTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'rerunTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/rerun',
+ 'stability': 'stable',
+ },
+ "scheduleTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'scheduleTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/schedule',
+ 'stability': 'stable',
+ },
+ "status": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'status',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/status',
+ 'stability': 'stable',
+ },
+ "task": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'task',
+ 'output': 'v1/task.json#',
+ 'route': '/task/<taskId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Queue']
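+
+
+# Editor's note: a minimal usage sketch, not part of the generated file. The
+# funcinfo entries above map one-to-one onto Queue methods; the rootUrl and
+# taskId values below are hypothetical placeholders.
+def _example_queue_usage():  # pragma: no cover
+    queue = Queue({'rootUrl': 'https://tc.example.com'})
+    # GET /task/<taskId>/status
+    status = queue.status('fDrm-qqFRnWmreA0k8sZ1w')
+    # Query-string options (listed under 'query' above) go in the query kwarg.
+    artifacts = queue.listLatestArtifacts('fDrm-qqFRnWmreA0k8sZ1w',
+                                          query={'limit': 10})
+    return status, artifacts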
diff --git a/third_party/python/taskcluster/taskcluster/generated/queueevents.py b/third_party/python/taskcluster/taskcluster/generated/queueevents.py
new file mode 100644
index 0000000000..fec36671f0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/queueevents.py
@@ -0,0 +1,719 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class QueueEvents(BaseClient):
+ """
+    The queue service is responsible for accepting tasks and tracking their
+    state as they are executed by workers, in order to ensure they are
+    eventually resolved.
+
+    This document describes AMQP exchanges offered by the queue, which allow
+    third-party listeners to monitor tasks as they progress to resolution.
+    These exchanges target the following audiences:
+     * Schedulers, which take action after tasks are completed,
+     * Workers, which want to listen for new or canceled tasks (optional),
+     * Tools, which want to update their view as tasks progress.
+
+    You'll notice that all the exchanges in this document share the same
+    routing key pattern. This makes it very easy to bind to all messages
+    about a certain kind of task.
+
+    **Task specific routes**, a task can define task-specific routes using
+    the `task.routes` property. See the task creation documentation for
+    details on the permissions required to provide task-specific routes. If
+    a task has the entry `'notify.by-email'` as a task-specific route in
+    `task.routes`, all messages about this task will be CC'ed with the
+    routing-key `'route.notify.by-email'`.
+
+    These routes will always be prefixed `route.`, so they cannot interfere
+    with the _primary_ routing key as documented here. Notice that the
+    _primary_ routing key is always prefixed `primary.`. This is ensured
+    in the routing key reference, so API clients will do this automatically.
+
+    Please note that the way RabbitMQ works, the message will only arrive
+    in your queue once, even though you may have bound to the exchange with
+    multiple routing key patterns that match more than one of the CC'ed
+    routing keys.
+
+    **Delivery guarantees**, most operations on the queue are idempotent,
+    which means that if repeated with the same arguments they will ensure
+    completion of the operation and return the same response. This is useful
+    if the server crashes or the TCP connection breaks, but when re-executing
+    an idempotent operation, the queue will also resend any related AMQP
+    messages. Hence, messages may be repeated.
+
+    This shouldn't be much of a problem, as the best you can achieve using
+    confirm messages with AMQP is at-least-once delivery semantics. Hence,
+    this only prevents you from obtaining at-most-once delivery semantics.
+
+    **Remark**, some messages generated by timeouts may be dropped if the
+    server crashes at the wrong time. Ideally, we'll address this in the
+    future. For now we suggest you ignore this corner case, and notify us
+    if it is of concern to you.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-queue/v1/",
+ }
+ serviceName = 'queue'
+ apiVersion = 'v1'
+
+ def taskDefined(self, *args, **kwargs):
+ """
+ Task Defined Messages
+
+        When a task is created or just defined, a message is posted to this
+        exchange.
+
+ This message exchange is mainly useful when tasks are created with dependencies
+ on incomplete tasks, as this does not make the task
+ `pending`. Thus, no `taskPending` message is published.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+        * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+        * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+        * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-defined',
+ 'name': 'taskDefined',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-defined-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
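+
+    # Editor's note: a minimal usage sketch, not part of the generated file.
+    # Exchange methods such as `taskDefined` make no API call; they return
+    # binding info (assumed keys: 'exchange' and 'routingKeyPattern') for use
+    # with an AMQP/Pulse consumer. The rootUrl and taskId are hypothetical.
+    #
+    #   qe = QueueEvents({'rootUrl': 'https://tc.example.com'})
+    #   binding = qe.taskDefined(taskId='fDrm-qqFRnWmreA0k8sZ1w')
+    #   channel.queue_bind(queue='my-queue',  # hypothetical pika-style channel
+    #                      exchange=binding['exchange'],
+    #                      routing_key=binding['routingKeyPattern'])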
+
+ def taskPending(self, *args, **kwargs):
+ """
+ Task Pending Messages
+
+ When a task becomes `pending` a message is posted to this exchange.
+
+        This is useful for workers that don't want to constantly poll the queue
+        for new tasks. The queue remains the authority on task states and
+        claims, but using this exchange workers can distribute work
+        efficiently and reduce their polling interval significantly without
+        affecting general responsiveness.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+        * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+        * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+        * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-pending',
+ 'name': 'taskPending',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-pending-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskRunning(self, *args, **kwargs):
+ """
+ Task Running Messages
+
+ Whenever a task is claimed by a worker, a run is started on the worker,
+ and a message is posted on this exchange.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+        * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+        * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+        * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-running',
+ 'name': 'taskRunning',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-running-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def artifactCreated(self, *args, **kwargs):
+ """
+ Artifact Creation Messages
+
+ Whenever the `createArtifact` end-point is called, the queue will create
+ a record of the artifact and post a message on this exchange. All of this
+ happens before the queue returns a signed URL for the caller to upload
+        the actual artifact with (depending on `storageType`).
+
+        This means that the actual artifact is rarely available when this message
+        is posted. But it is not unreasonable to assume that the artifact will
+        become available at some point later. Most signatures will expire in
+        30 minutes or so, forcing the uploader to call `createArtifact` with
+        the same payload again in order to continue uploading the artifact.
+
+        However, in most cases (especially for small artifacts) it's very
+        reasonable to assume the artifact will be available within a few minutes.
+        This property means that this exchange is mostly useful for tools
+        monitoring task evaluation. One could also use it to count the number
+        of artifacts per task, or to _index_ artifacts, though in most cases
+        it'll be smarter to index artifacts after the task in question has
+        completed successfully.
+
+ *NOTE*: this message is currently only sent for reference and error
+ artifacts. This will be remedied in a future version of Taskcluster.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+        * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+        * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+        * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'artifact-created',
+ 'name': 'artifactCreated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/artifact-created-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskCompleted(self, *args, **kwargs):
+ """
+ Task Completed Messages
+
+        When a task is successfully completed by a worker, a message is posted
+        to this exchange.
+ This message is routed using the `runId`, `workerGroup` and `workerId`
+ that completed the task. But information about additional runs is also
+ available from the task status structure.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+        * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+        * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+        * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-completed',
+ 'name': 'taskCompleted',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-completed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskFailed(self, *args, **kwargs):
+ """
+ Task Failed Messages
+
+        When a task ran but failed to complete successfully, a message is posted
+        to this exchange. This means the worker ran the task-specific code, but
+        that code exited non-zero.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+        * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+        * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+        * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-failed',
+ 'name': 'taskFailed',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-failed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskException(self, *args, **kwargs):
+ """
+ Task Exception Messages
+
+        Whenever Taskcluster fails to run a task, a message is posted to this
+        exchange. This happens if the task isn't completed before its `deadline`,
+        all retries failed (i.e. workers stopped responding), the task was
+        canceled by another entity, or the task carried a malformed payload.
+
+        The specific _reason_ is evident from the task status structure; refer
+        to the `reasonResolved` property for the last run.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+        * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+        * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+        * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-exception',
+ 'name': 'taskException',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-exception-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskGroupResolved(self, *args, **kwargs):
+ """
+ Task Group Resolved Messages
+
+ A message is published on task-group-resolved whenever all submitted
+ tasks (whether scheduled or unscheduled) for a given task group have
+ been resolved, regardless of whether they resolved as successful or
+ not. A task group may be resolved multiple times, since new tasks may
+ be submitted against an already resolved task group.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskGroupId: `taskGroupId` for the task-group this message concerns (required)
+
+ * schedulerId: `schedulerId` for the task-group this message concerns (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-group-resolved',
+ 'name': 'taskGroupResolved',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-group-resolved.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'QueueEvents']
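+
+
+# Editor's note: a minimal sketch, not part of the generated file, showing the
+# CC'ed task-specific routes described in the class docstring. The rootUrl and
+# ids are hypothetical, and the returned binding keys ('exchange',
+# 'routingKeyPattern') are assumed from the client's topic-exchange helper.
+def _example_task_group_binding():  # pragma: no cover
+    qe = QueueEvents({'rootUrl': 'https://tc.example.com'})
+    # Binding info for all completed tasks in one task group; unspecified
+    # fields (and the trailing 'reserved' entry) match as wildcards.
+    binding = qe.taskCompleted(taskGroupId='Vno6086SSqKBCPYFdEkbtg')
+    # A task with 'notify.by-email' in task.routes is also CC'ed to the
+    # routing key 'route.notify.by-email', which can be bound separately.
+    return binding['exchange'], binding['routingKeyPattern']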
diff --git a/third_party/python/taskcluster/taskcluster/generated/secrets.py b/third_party/python/taskcluster/taskcluster/generated/secrets.py
new file mode 100644
index 0000000000..8482a678ba
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/secrets.py
@@ -0,0 +1,143 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Secrets(BaseClient):
+ """
+ The secrets service provides a simple key/value store for small bits of secret
+ data. Access is limited by scopes, so values can be considered secret from
+ those who do not have the relevant scopes.
+
+ Secrets also have an expiration date, and once a secret has expired it can no
+ longer be read. This is useful for short-term secrets such as a temporary
+ service credential or a one-time signing key.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'secrets'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def set(self, *args, **kwargs):
+ """
+ Set Secret
+
+ Set the secret associated with some key. If the secret already exists, it is
+ updated instead.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["set"], *args, **kwargs)
+
+ def remove(self, *args, **kwargs):
+ """
+ Delete Secret
+
+        Delete the secret associated with some key. It will succeed whether or not the secret exists.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["remove"], *args, **kwargs)
+
+ def get(self, *args, **kwargs):
+ """
+ Read Secret
+
+ Read the secret associated with some key. If the secret has recently
+ expired, the response code 410 is returned. If the caller lacks the
+ scope necessary to get the secret, the call will fail with a 403 code
+ regardless of whether the secret exists.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["get"], *args, **kwargs)
+
+ def list(self, *args, **kwargs):
+ """
+ List Secrets
+
+ List the names of all secrets.
+
+        By default this end-point will try to return up to 1000 secret names in one
+        request. But it **may return fewer**, even if more secrets are available.
+        It may also return a `continuationToken` even though there are no more
+        results. However, you can only be sure to have seen all results if you
+        keep calling `list` with the last `continuationToken` until you
+        get a result without a `continuationToken`.
+
+        If you are not interested in listing all the secrets at once, you may
+        use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["list"], *args, **kwargs)
+
+ funcinfo = {
+ "get": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'get',
+ 'output': 'v1/secret.json#',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ "list": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'list',
+ 'output': 'v1/secret-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/secrets',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "remove": {
+ 'args': ['name'],
+ 'method': 'delete',
+ 'name': 'remove',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ "set": {
+ 'args': ['name'],
+ 'input': 'v1/secret.json#',
+ 'method': 'put',
+ 'name': 'set',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Secrets']
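+
+
+# Editor's note: a minimal sketch, not part of the generated file, showing
+# typical set/get usage and the pagination loop described in `list`. The
+# rootUrl and secret name are hypothetical.
+def _example_secrets_usage():  # pragma: no cover
+    secrets = Secrets({'rootUrl': 'https://tc.example.com'})
+    secrets.set('project/example/token', {
+        'secret': {'token': 'hunter2'},
+        'expires': '2030-01-01T00:00:00.000Z',
+    })
+    value = secrets.get('project/example/token')['secret']
+    # Keep calling `list` with the last continuationToken to see every name.
+    names, query = [], {}
+    while True:
+        page = secrets.list(query=query)
+        names.extend(page['secrets'])
+        if 'continuationToken' not in page:
+            return value, names
+        query = {'continuationToken': page['continuationToken']}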
diff --git a/third_party/python/taskcluster/taskcluster/generated/workermanager.py b/third_party/python/taskcluster/taskcluster/generated/workermanager.py
new file mode 100644
index 0000000000..4d8dd7a010
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/workermanager.py
@@ -0,0 +1,406 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class WorkerManager(BaseClient):
+ """
+ This service manages workers, including provisioning for dynamic worker pools.
+
+ Methods interacting with a provider may return a 503 response if that provider has
+ not been able to start up, such as if the service to which it interfaces has an
+ outage. Such requests can be retried as for any other 5xx response.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'worker-manager'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def listProviders(self, *args, **kwargs):
+ """
+ List Providers
+
+ Retrieve a list of providers that are available for worker pools.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listProviders"], *args, **kwargs)
+
+ def createWorkerPool(self, *args, **kwargs):
+ """
+ Create Worker Pool
+
+ Create a new worker pool. If the worker pool already exists, this will throw an error.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createWorkerPool"], *args, **kwargs)
+
+ def updateWorkerPool(self, *args, **kwargs):
+ """
+ Update Worker Pool
+
+ Given an existing worker pool definition, this will modify it and return
+ the new definition.
+
+ To delete a worker pool, set its `providerId` to `"null-provider"`.
+ After any existing workers have exited, a cleanup job will remove the
+ worker pool. During that time, the worker pool can be updated again, such
+ as to set its `providerId` to a real provider.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateWorkerPool"], *args, **kwargs)
+
+ def deleteWorkerPool(self, *args, **kwargs):
+ """
+ Delete Worker Pool
+
+ Mark a worker pool for deletion. This is the same as updating the pool to
+ set its providerId to `"null-provider"`, but does not require scope
+ `worker-manager:provider:null-provider`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteWorkerPool"], *args, **kwargs)
+
+ def workerPool(self, *args, **kwargs):
+ """
+ Get Worker Pool
+
+        Fetch an existing worker pool definition.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["workerPool"], *args, **kwargs)
+
+ def listWorkerPools(self, *args, **kwargs):
+ """
+ List All Worker Pools
+
+ Get the list of all the existing worker pools.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkerPools"], *args, **kwargs)
+
+ def reportWorkerError(self, *args, **kwargs):
+ """
+ Report an error from a worker
+
+ Report an error that occurred on a worker. This error will be included
+ with the other errors in `listWorkerPoolErrors(workerPoolId)`.
+
+ Workers can use this endpoint to report startup or configuration errors
+ that might be associated with the worker pool configuration and thus of
+ interest to a worker-pool administrator.
+
+ NOTE: errors are publicly visible. Ensure that none of the content
+ contains secrets or other sensitive information.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reportWorkerError"], *args, **kwargs)
+
+ def listWorkerPoolErrors(self, *args, **kwargs):
+ """
+ List Worker Pool Errors
+
+ Get the list of worker pool errors.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkerPoolErrors"], *args, **kwargs)
+
+ def listWorkersForWorkerGroup(self, *args, **kwargs):
+ """
+ Workers in a specific Worker Group in a Worker Pool
+
+ Get the list of all the existing workers in a given group in a given worker pool.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkersForWorkerGroup"], *args, **kwargs)
+
+ def worker(self, *args, **kwargs):
+ """
+ Get a Worker
+
+ Get a single worker.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["worker"], *args, **kwargs)
+
+ def createWorker(self, *args, **kwargs):
+ """
+ Create a Worker
+
+ Create a new worker. This is only useful for worker pools where the provider
+ does not create workers automatically, such as those with a `static` provider
+ type. Providers that do not support creating workers will return a 400 error.
+ See the documentation for the individual providers, and in particular the
+ [static provider](https://docs.taskcluster.net/docs/reference/core/worker-manager/)
+ for more information.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createWorker"], *args, **kwargs)
+
+ def updateWorker(self, *args, **kwargs):
+ """
+ Update an existing Worker
+
+ Update an existing worker in-place. Like `createWorker`, this is only useful for
+ worker pools where the provider does not create workers automatically.
+ This method allows updating all fields in the schema unless otherwise indicated
+ in the provider documentation.
+ See the documentation for the individual providers, and in particular the
+ [static provider](https://docs.taskcluster.net/docs/reference/core/worker-manager/)
+ for more information.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateWorker"], *args, **kwargs)
+
+ def removeWorker(self, *args, **kwargs):
+ """
+ Remove a Worker
+
+ Remove an existing worker. The precise behavior of this method depends
+ on the provider implementing the given worker. Some providers
+ do not support removing workers at all, and will return a 400 error.
+ Others may begin removing the worker, but it may remain available via
+ the API (perhaps even in state RUNNING) afterward.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["removeWorker"], *args, **kwargs)
+
+ def listWorkersForWorkerPool(self, *args, **kwargs):
+ """
+ Workers in a Worker Pool
+
+ Get the list of all the existing workers in a given worker pool.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkersForWorkerPool"], *args, **kwargs)
+
+ def registerWorker(self, *args, **kwargs):
+ """
+ Register a running worker
+
+ Register a running worker. Workers call this method on worker start-up.
+
+ This call both marks the worker as running and returns the credentials
+ the worker will require to perform its work. The worker must provide
+ some proof of its identity, and that proof varies by provider type.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["registerWorker"], *args, **kwargs)
+
+ def reregisterWorker(self, *args, **kwargs):
+ """
+ Reregister a Worker
+
+ Reregister a running worker.
+
+ This will generate and return new Taskcluster credentials for the worker
+        on that instance to use. The credentials will not live longer than the
+ `registrationTimeout` for that worker. The endpoint will update `terminateAfter`
+ for the worker so that worker-manager does not terminate the instance.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["reregisterWorker"], *args, **kwargs)
+
+ funcinfo = {
+ "createWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'input': 'v1/create-worker-request.json#',
+ 'method': 'put',
+ 'name': 'createWorker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "createWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/create-worker-pool-request.json#',
+ 'method': 'put',
+ 'name': 'createWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "deleteWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'delete',
+ 'name': 'deleteWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "listProviders": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listProviders',
+ 'output': 'v1/provider-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/providers',
+ 'stability': 'stable',
+ },
+ "listWorkerPoolErrors": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'listWorkerPoolErrors',
+ 'output': 'v1/worker-pool-error-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/worker-pool-errors/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "listWorkerPools": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listWorkerPools',
+ 'output': 'v1/worker-pool-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/worker-pools',
+ 'stability': 'stable',
+ },
+ "listWorkersForWorkerGroup": {
+ 'args': ['workerPoolId', 'workerGroup'],
+ 'method': 'get',
+ 'name': 'listWorkersForWorkerGroup',
+ 'output': 'v1/worker-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/workers/<workerPoolId>:/<workerGroup>',
+ 'stability': 'stable',
+ },
+ "listWorkersForWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'listWorkersForWorkerPool',
+ 'output': 'v1/worker-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/workers/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "registerWorker": {
+ 'args': [],
+ 'input': 'v1/register-worker-request.json#',
+ 'method': 'post',
+ 'name': 'registerWorker',
+ 'output': 'v1/register-worker-response.json#',
+ 'route': '/worker/register',
+ 'stability': 'stable',
+ },
+ "removeWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'method': 'delete',
+ 'name': 'removeWorker',
+ 'route': '/workers/<workerPoolId>/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "reportWorkerError": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/report-worker-error-request.json#',
+ 'method': 'post',
+ 'name': 'reportWorkerError',
+ 'output': 'v1/worker-pool-error.json#',
+ 'route': '/worker-pool-errors/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "reregisterWorker": {
+ 'args': [],
+ 'input': 'v1/reregister-worker-request.json#',
+ 'method': 'post',
+ 'name': 'reregisterWorker',
+ 'output': 'v1/reregister-worker-response.json#',
+ 'route': '/worker/reregister',
+ 'stability': 'experimental',
+ },
+ "updateWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'input': 'v1/create-worker-request.json#',
+ 'method': 'post',
+ 'name': 'updateWorker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "updateWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/update-worker-pool-request.json#',
+ 'method': 'post',
+ 'name': 'updateWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'experimental',
+ },
+ "worker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'method': 'get',
+ 'name': 'worker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "workerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'workerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'WorkerManager']
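+
+
+# Editor's note: a minimal sketch, not part of the generated file, tying
+# together createWorkerPool and deleteWorkerPool as described above. The pool
+# id, provider id and config payload are hypothetical.
+def _example_worker_pool_lifecycle():  # pragma: no cover
+    wm = WorkerManager({'rootUrl': 'https://tc.example.com'})
+    wm.createWorkerPool('proj-example/ci', {
+        'providerId': 'static-example',
+        'description': 'example pool',
+        'owner': 'nobody@example.com',
+        'emailOnError': False,
+        'config': {},
+    })
+    # Marks the pool for deletion (equivalent to setting providerId to
+    # "null-provider") without requiring the null-provider scope.
+    wm.deleteWorkerPool('proj-example/ci')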
diff --git a/third_party/python/taskcluster/taskcluster/generated/workermanagerevents.py b/third_party/python/taskcluster/taskcluster/generated/workermanagerevents.py
new file mode 100644
index 0000000000..b9a7ce2062
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/workermanagerevents.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class WorkerManagerEvents(BaseClient):
+ """
+    These exchanges provide notifications when a worker pool is created or
+    updated. This is so that the provisioner running in a different process
+    at the other end can synchronize to the changes. But you are of course
+    welcome to use these for other purposes, for example monitoring changes.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-worker-manager/v1/",
+ }
+ serviceName = 'worker-manager'
+ apiVersion = 'v1'
+
+ def workerPoolCreated(self, *args, **kwargs):
+ """
+ Worker Pool Created Messages
+
+        Whenever the API receives a request to create a worker pool, a message
+        is posted to this exchange and a provider can act upon it.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'worker-pool-created',
+ 'name': 'workerPoolCreated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-worker-pool-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def workerPoolUpdated(self, *args, **kwargs):
+ """
+ Worker Pool Updated Messages
+
+        Whenever the API receives a request to update a worker pool, a message
+        is posted to this exchange and a provider can act upon it.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+        * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'worker-pool-updated',
+ 'name': 'workerPoolUpdated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-worker-pool-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'WorkerManagerEvents']
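+
+
+# Editor's note: a minimal sketch, not part of the generated file; the same
+# topic-exchange pattern as QueueEvents, with a hypothetical rootUrl and
+# assumed binding keys ('exchange', 'routingKeyPattern').
+def _example_pool_binding():  # pragma: no cover
+    wme = WorkerManagerEvents({'rootUrl': 'https://tc.example.com'})
+    # No routing-key fields besides 'reserved', so no arguments are needed.
+    return wme.workerPoolCreated()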
diff --git a/third_party/python/taskcluster/taskcluster/github.py b/third_party/python/taskcluster/taskcluster/github.py
new file mode 100644
index 0000000000..2d47274ee4
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/github.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.github import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/githubevents.py b/third_party/python/taskcluster/taskcluster/githubevents.py
new file mode 100644
index 0000000000..e53249bd3b
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/githubevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.githubevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/helper.py b/third_party/python/taskcluster/taskcluster/helper.py
new file mode 100644
index 0000000000..7fec5d5acc
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/helper.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import datetime
+import logging
+import requests
+from taskcluster.generated import _client_importer
+from taskcluster.generated.aio import _client_importer as _async_client_importer
+from taskcluster.utils import stringDate
+import urllib.parse
+
+logger = logging.getLogger(__name__)
+
+
+class TaskclusterConfig(object):
+ """
+ Local configuration used to access Taskcluster service and objects
+ """
+
+ def __init__(self, url=None):
+ self.options = None
+ self.secrets = None
+ self.default_url = url if url is not None else os.environ.get("TASKCLUSTER_ROOT_URL")
+ assert self.default_url is not None, "You must specify a Taskcluster deployment url"
+
+ def auth(self, client_id=None, access_token=None, max_retries=12):
+ """
+ Build Taskcluster credentials options
+        Supports, in order of preference:
+ * directly provided credentials
+ * credentials from environment variables
+ * taskclusterProxy
+ * no authentication
+ """
+ self.options = {"maxRetries": max_retries}
+
+ if client_id is None and access_token is None:
+ # Credentials preference: Use env. variables
+ client_id = os.environ.get("TASKCLUSTER_CLIENT_ID")
+ access_token = os.environ.get("TASKCLUSTER_ACCESS_TOKEN")
+ logger.info("Using taskcluster credentials from environment")
+ else:
+ logger.info("Using taskcluster credentials from cli")
+
+ if client_id is not None and access_token is not None:
+ # Use provided credentials
+ self.options["credentials"] = {
+ "clientId": client_id,
+ "accessToken": access_token,
+ }
+ self.options["rootUrl"] = self.default_url
+
+ elif "TASK_ID" in os.environ:
+ # Use Taskcluster Proxy when running in a task
+ logger.info("Taskcluster Proxy enabled")
+ self.options["rootUrl"] = os.environ.get("TASKCLUSTER_PROXY_URL", "http://taskcluster")
+
+ else:
+ logger.info("No Taskcluster authentication.")
+ self.options["rootUrl"] = self.default_url
+
+ def get_service(self, service_name, use_async=False):
+ """
+ Build a Taskcluster service instance using current authentication
+ """
+ if self.options is None:
+ self.auth()
+
+ client_importer = _async_client_importer if use_async else _client_importer
+ service = getattr(client_importer, service_name.capitalize(), None)
+ assert service is not None, "Invalid Taskcluster service {}".format(
+ service_name
+ )
+ return service(self.options)
+
+ def load_secrets(
+ self, secret_name, prefixes=[], required=[], existing={}, local_secrets=None
+ ):
+ """Shortcut to use load_secrets helper with current authentication"""
+ self.secrets = load_secrets(
+ self.get_service('secrets'),
+ secret_name,
+ prefixes,
+ required,
+ existing,
+ local_secrets,
+ )
+ return self.secrets
+
+ def upload_artifact(self, artifact_path, content, content_type, ttl):
+ """Shortcut to use upload_artifact helper with current authentication"""
+ path = upload_artifact(
+ self.get_service('queue'),
+ artifact_path,
+ content,
+ content_type,
+ ttl,
+ )
+
+ return urllib.parse.urljoin(self.default_url, path)
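+
+
+# Editor's note: a minimal sketch, not part of the original file, showing the
+# intended TaskclusterConfig flow. The deployment URL and secret name are
+# hypothetical.
+def _example_taskcluster_config():  # pragma: no cover
+    tc = TaskclusterConfig(url='https://tc.example.com')
+    tc.auth()  # env credentials, taskcluster proxy, or no authentication
+    queue = tc.get_service('queue')
+    secrets = tc.load_secrets('project/example/config', required=['token'])
+    return queue, secrets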
+
+
+def load_secrets(
+ secrets_service, secret_name, prefixes=[], required=[], existing={}, local_secrets=None
+):
+ """
+ Fetch a specific set of secrets by name and verify that the required
+ secrets exist.
+    Also supports providing local secrets, to avoid using the remote Taskcluster
+    service during local development (or contributor onboarding).
+    A user can specify prefixes to limit which parts of the secret are used
+    (useful when a secret is shared amongst several services).
+ """
+ secrets = {}
+ if existing:
+ secrets.update(existing)
+
+ if isinstance(local_secrets, dict):
+ # Use local secrets file to avoid using Taskcluster secrets
+ logger.info("Using provided local secrets")
+ all_secrets = local_secrets
+ else:
+ # Use Taskcluster secret service
+ assert secret_name is not None, "Missing Taskcluster secret secret_name"
+ all_secrets = secrets_service.get(secret_name).get("secret", dict())
+ logger.info("Loaded Taskcluster secret {}".format(secret_name))
+
+ if prefixes:
+ # Use secrets behind supported prefixes
+ for prefix in prefixes:
+ secrets.update(all_secrets.get(prefix, dict()))
+
+ else:
+ # Use all secrets available
+ secrets.update(all_secrets)
+
+ # Check required secrets
+ for required_secret in required:
+ if required_secret not in secrets:
+ raise Exception("Missing value {} in secrets.".format(required_secret))
+
+ return secrets
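+
+
+# Editor's note: a minimal sketch, not part of the original file, using
+# local_secrets to bypass the remote service during development. The key
+# names are hypothetical.
+def _example_local_secrets():  # pragma: no cover
+    return load_secrets(
+        None,                    # secrets service unused with local_secrets
+        None,                    # remote secret name likewise unused
+        prefixes=['myservice'],  # keep only the 'myservice' sub-dict
+        required=['token'],
+        local_secrets={'common': {'log': 'info'},
+                       'myservice': {'token': 'x'}},
+    )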
+
+
+def upload_artifact(queue_service, artifact_path, content, content_type, ttl):
+ """
+ DEPRECATED. Do not use.
+ """
+ task_id = os.environ.get("TASK_ID")
+ run_id = os.environ.get("RUN_ID")
+ proxy = os.environ.get("TASKCLUSTER_PROXY_URL")
+ assert task_id and run_id and proxy, "Can only run in Taskcluster tasks with proxy"
+ assert isinstance(content, str)
+ assert isinstance(ttl, datetime.timedelta)
+
+ # Create S3 artifact on Taskcluster
+ resp = queue_service.createArtifact(
+ task_id,
+ run_id,
+ artifact_path,
+ {
+ "storageType": "s3",
+ "expires": stringDate(datetime.datetime.utcnow() + ttl),
+ "contentType": content_type,
+ },
+ )
+ assert resp["storageType"] == "s3", "Not an s3 storage"
+ assert "putUrl" in resp, "Missing putUrl"
+ assert "contentType" in resp, "Missing contentType"
+
+ # Push the artifact on storage service
+ headers = {"Content-Type": resp["contentType"]}
+ push = requests.put(url=resp["putUrl"], headers=headers, data=content)
+ push.raise_for_status()
+
+ # Build the absolute url
+ return "/api/queue/v1/task/{task_id}/runs/{run_id}/artifacts/{path}".format(
+ task_id=task_id,
+ run_id=run_id,
+ path=artifact_path,
+ )
diff --git a/third_party/python/taskcluster/taskcluster/hooks.py b/third_party/python/taskcluster/taskcluster/hooks.py
new file mode 100644
index 0000000000..178fae04f1
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/hooks.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.hooks import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/hooksevents.py b/third_party/python/taskcluster/taskcluster/hooksevents.py
new file mode 100644
index 0000000000..93ede272c0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/hooksevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.hooksevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/index.py b/third_party/python/taskcluster/taskcluster/index.py
new file mode 100644
index 0000000000..21238fd6f9
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/index.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.index import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/login.py b/third_party/python/taskcluster/taskcluster/login.py
new file mode 100644
index 0000000000..4741c2397d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/login.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.login import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/notify.py b/third_party/python/taskcluster/taskcluster/notify.py
new file mode 100644
index 0000000000..4edf44541b
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/notify.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.notify import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/notifyevents.py b/third_party/python/taskcluster/taskcluster/notifyevents.py
new file mode 100644
index 0000000000..5a329a8290
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/notifyevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.notifyevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/purgecache.py b/third_party/python/taskcluster/taskcluster/purgecache.py
new file mode 100644
index 0000000000..a4dfac897a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/purgecache.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.purgecache import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/queue.py b/third_party/python/taskcluster/taskcluster/queue.py
new file mode 100644
index 0000000000..782195cefa
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/queue.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.queue import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/queueevents.py b/third_party/python/taskcluster/taskcluster/queueevents.py
new file mode 100644
index 0000000000..aa32aa35ca
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/queueevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.queueevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/retry.py b/third_party/python/taskcluster/taskcluster/retry.py
new file mode 100644
index 0000000000..59cf581e48
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/retry.py
@@ -0,0 +1,41 @@
+import logging
+import time
+
+from . import utils
+
+log = logging.getLogger(__name__)
+
+
+def retry(maxRetries, tryFn):
+ """
+    Retry `tryFn` up to `maxRetries` times. Each call to `tryFn` is passed a
+    callable which should be invoked with the exception object when an
+    exception can be retried. Exceptions raised from `tryFn` are treated as fatal.
+ """
+
+    retry = -1  # we increment first in the loop, so attempt 1 is retry 0
+ while True:
+ retry += 1
+
+        # if this isn't the first attempt then we sleep
+        if retry > 0:
+            # log the same duration that we actually sleep for
+            snooze = utils.calculateSleepTime(retry)
+            log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
+            time.sleep(snooze)
+
+ retriableException = None
+
+ def retryFor(exc):
+ nonlocal retriableException
+ retriableException = exc
+
+ res = tryFn(retryFor)
+
+ if not retriableException:
+ return res
+
+ if retry < maxRetries:
+ log.warning(f'Retrying because of: {retriableException}')
+ continue
+
+ raise retriableException
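+
+
+# Editor's note: a minimal sketch, not part of the original file. The HTTP
+# call and exception type are hypothetical stand-ins for any retriable work.
+def _example_retry():  # pragma: no cover
+    import requests
+
+    def tryFn(retryFor):
+        try:
+            return requests.get('https://tc.example.com/ping', timeout=5)
+        except requests.exceptions.ConnectionError as e:
+            retryFor(e)  # mark as retriable; retry() sleeps and tries again
+
+    return retry(5, tryFn)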
diff --git a/third_party/python/taskcluster/taskcluster/secrets.py b/third_party/python/taskcluster/taskcluster/secrets.py
new file mode 100644
index 0000000000..3177e08e46
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/secrets.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.secrets import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/upload.py b/third_party/python/taskcluster/taskcluster/upload.py
new file mode 100644
index 0000000000..ed47ba8dcf
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/upload.py
@@ -0,0 +1,80 @@
+"""
+Support for uploading objects to the object service, following best
+practices for that service.
+
+Data for upload is read from a "reader" provided by a "reader factory". A
+reader has a `read(max_size)` method which reads and returns a chunk of 1 ..
+`max_size` bytes, or an empty bytestring at EOF. A reader factory is a
+callable which returns a fresh reader, ready to read the first byte of the
+object. When uploads are retried, the reader factory may be called more than
+once.
+
+This module provides several pre-defined readers and reader factories for
+common cases.
+"""
+import functools
+import six
+
+if six.PY2:
+ raise ImportError("upload is only supported in Python 3")
+
+from .aio import upload as aio_upload
+from .aio.asyncutils import ensureCoro, runAsync
+
+
+DATA_INLINE_MAX_SIZE = 8192
+
+
+def uploadFromBuf(*, data, **kwargs):
+ """
+ Convenience method to upload data from an in-memory buffer. Arguments are the same
+ as `upload` except that `readerFactory` should not be supplied.
+ """
+ return runAsync(aio_upload.uploadFromBuf(data=data, **kwargs))
+
+
+def uploadFromFile(*, file, **kwargs):
+ """
+    Convenience method to upload data from a file. The file should be open
+ for reading, in binary mode, and be seekable (`f.seek`). Remaining
+ arguments are the same as `upload` except that `readerFactory` should not
+ be supplied.
+ """
+ return runAsync(aio_upload.uploadFromFile(file=file, **kwargs))
+
+
+def upload(*, readerFactory, **kwargs):
+ """
+ Upload the given data to the object service with the given metadata.
+ The `maxRetries` parameter has the same meaning as for service clients.
+ The `objectService` parameter is an instance of the Object class,
+ configured with credentials for the upload.
+ """
+ wrappedReaderFactory = _wrapSyncReaderFactory(readerFactory)
+ return runAsync(aio_upload.upload(readerFactory=wrappedReaderFactory, **kwargs))
+
+
+def _wrapSyncReaderFactory(readerFactory):
+ """Modify the reader returned by readerFactory to have an async read."""
+ @functools.wraps(readerFactory)
+ async def wrappedFactory():
+ reader = readerFactory()
+ reader.read = ensureCoro(reader.read)
+ return reader
+
+ return wrappedFactory
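+
+
+# A minimal usage sketch; the root URL, object name, and credential setup are
+# placeholders, and `Object` is the generated object-service client:
+#
+#     import io
+#     import taskcluster
+#
+#     objectService = taskcluster.Object({'rootUrl': 'https://tc.example.com'})
+#     upload(
+#         projectId='taskcluster', name='some/object',
+#         contentType='text/plain', contentLength=11,
+#         expires=taskcluster.fromNow('1 hour'),
+#         readerFactory=lambda: io.BytesIO(b'hello world'),
+#         objectService=objectService)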
diff --git a/third_party/python/taskcluster/taskcluster/utils.py b/third_party/python/taskcluster/taskcluster/utils.py
new file mode 100644
index 0000000000..9e005a36f3
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/utils.py
@@ -0,0 +1,385 @@
+# -*- coding: UTF-8 -*-
+from __future__ import absolute_import, division, print_function
+import re
+import json
+import datetime
+import base64
+import logging
+import os
+import requests
+import requests.exceptions
+import slugid
+import time
+import six
+import random
+
+import taskcluster_urls as liburls
+
+from . import exceptions
+
+MAX_RETRIES = 5
+
+DELAY_FACTOR = 0.1
+RANDOMIZATION_FACTOR = 0.25
+MAX_DELAY = 30
+
+
+log = logging.getLogger(__name__)
+
+# Regular expression matching offsets like: X years Y months Z weeks
+# N days H hours M minutes S seconds (todo: support hr, wk, yr)
+r = re.compile(''.join([
+ r'^(\s*(?P<years>\d+)\s*y(ears?)?)?',
+ r'(\s*(?P<months>\d+)\s*mo(nths?)?)?',
+ r'(\s*(?P<weeks>\d+)\s*w(eeks?)?)?',
+ r'(\s*(?P<days>\d+)\s*d(ays?)?)?',
+ r'(\s*(?P<hours>\d+)\s*h(ours?)?)?',
+ r'(\s*(?P<minutes>\d+)\s*m(in(utes?)?)?)?\s*',
+ r'(\s*(?P<seconds>\d+)\s*s(ec(onds?)?)?)?\s*$',
+]))
+
+
+def calculateSleepTime(attempt):
+ """ From the go client
+ https://github.com/taskcluster/go-got/blob/031f55c/backoff.go#L24-L29
+ """
+ if attempt <= 0:
+ return 0
+
+    # Subtract one so attempts 1, 2, 3, ... give exponents 0, 1, 2, ...
+ delay = float(2 ** (attempt - 1)) * float(DELAY_FACTOR)
+ # Apply randomization factor
+ delay = delay * (RANDOMIZATION_FACTOR * (random.random() * 2 - 1) + 1)
+ # Always limit with a maximum delay
+ return min(delay, MAX_DELAY)
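+
+# For example, ignoring the random jitter term, successive attempts sleep
+# roughly 0.1s, 0.2s, 0.4s, 0.8s, 1.6s, ..., capped at MAX_DELAY seconds.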
+
+
+def toStr(obj, encoding='utf-8'):
+ if six.PY3 and isinstance(obj, six.binary_type):
+ obj = obj.decode(encoding)
+ else:
+ obj = str(obj)
+ return obj
+
+
+def fromNow(offset, dateObj=None):
+ """
+ Generate a `datetime.datetime` instance which is offset using a string.
+    See the README.md for the full grammar; for example, an offset of
+    '1 day' yields a datetime object one day in the future.
+ """
+
+ # We want to handle past dates as well as future
+ future = True
+ offset = offset.lstrip()
+ if offset.startswith('-'):
+ future = False
+ offset = offset[1:].lstrip()
+ if offset.startswith('+'):
+ offset = offset[1:].lstrip()
+
+ # Parse offset
+ m = r.match(offset)
+ if m is None:
+ raise ValueError("offset string: '%s' does not parse" % offset)
+
+    # Years and months are approximated as 365 and 30 days respectively,
+    # since timedelta only goes as high as weeks
+ days = 0
+ hours = 0
+ minutes = 0
+ seconds = 0
+ if m.group('years'):
+ years = int(m.group('years'))
+ days += 365 * years
+ if m.group('months'):
+ months = int(m.group('months'))
+ days += 30 * months
+ days += int(m.group('days') or 0)
+ hours += int(m.group('hours') or 0)
+ minutes += int(m.group('minutes') or 0)
+ seconds += int(m.group('seconds') or 0)
+
+ # Offset datetime from utc
+ delta = datetime.timedelta(
+ weeks=int(m.group('weeks') or 0),
+ days=days,
+ hours=hours,
+ minutes=minutes,
+ seconds=seconds,
+ )
+
+ if not dateObj:
+ dateObj = datetime.datetime.utcnow()
+
+ return dateObj + delta if future else dateObj - delta
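+
+# A usage sketch (results depend on the current time):
+#
+#     >>> fromNow('2 days 3 hours')    # ~51 hours from now
+#     >>> fromNow('-1 hour')           # one hour in the past
+#     >>> fromNow('1 day', datetime.datetime(2024, 1, 1))
+    datetime.datetime(2024, 1, 2, 0, 0)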
+
+
+def fromNowJSON(offset):
+ """
+ Like fromNow() but returns in a taskcluster-json compatible way
+ """
+ return stringDate(fromNow(offset))
+
+
+def dumpJson(obj, **kwargs):
+    """ Match JS's JSON.stringify. When using the default separators,
+    base64 encoding JSON results in \n sequences in the output. Hawk
+    rejects text containing them."""
+ def handleDateAndBinaryForJs(x):
+ if six.PY3 and isinstance(x, six.binary_type):
+ x = x.decode()
+ if isinstance(x, datetime.datetime) or isinstance(x, datetime.date):
+ return stringDate(x)
+ else:
+ return x
+ d = json.dumps(obj, separators=(',', ':'), default=handleDateAndBinaryForJs, **kwargs)
+ assert '\n' not in d
+ return d
+
+
+def stringDate(date):
+ # Convert to isoFormat
+ string = date.isoformat()
+
+ # If there is no timezone and no Z added, we'll add one at the end.
+ # This is just to be fully compliant with:
+ # https://tools.ietf.org/html/rfc3339#section-5.6
+ if string.endswith('+00:00'):
+ return string[:-6] + 'Z'
+ if date.utcoffset() is None and string[-1] != 'Z':
+ return string + 'Z'
+ return string
+
+
+def makeB64UrlSafe(b64str):
+ """ Make a base64 string URL Safe """
+ if isinstance(b64str, six.text_type):
+ b64str = b64str.encode()
+ # see RFC 4648, sec. 5
+ return b64str.replace(b'+', b'-').replace(b'/', b'_')
+
+
+def makeB64UrlUnsafe(b64str):
+ """ Make a base64 string URL Unsafe """
+ if isinstance(b64str, six.text_type):
+ b64str = b64str.encode()
+ # see RFC 4648, sec. 5
+ return b64str.replace(b'-', b'+').replace(b'_', b'/')
+
+
+def encodeStringForB64Header(s):
+    """ HTTP headers can't contain newlines, so strip them from the base64 encoding """
+ if isinstance(s, six.text_type):
+ s = s.encode()
+ if six.PY3:
+ b64str = base64.encodebytes(s)
+ else:
+ b64str = base64.encodestring(s)
+ return b64str.strip().replace(b'\n', b'')
+
+
+def slugId():
+ """ Generate a taskcluster slugid. This is a V4 UUID encoded into
+ URL-Safe Base64 (RFC 4648, sec 5) with '=' padding removed """
+ return slugid.nice()
+
+
+def stableSlugId():
+ """Returns a closure which can be used to generate stable slugIds.
+ Stable slugIds can be used in a graph to specify task IDs in multiple
+ places without regenerating them, e.g. taskId, requires, etc.
+ """
+ _cache = {}
+
+ def closure(name):
+ if name not in _cache:
+ _cache[name] = slugId()
+ return _cache[name]
+
+ return closure
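+
+# A usage sketch:
+#
+#     >>> mkid = stableSlugId()
+#     >>> mkid('build') == mkid('build')   # same name, same slugId
+#     True
+#     >>> mkid('build') == mkid('test')
+#     False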
+
+
+def scopeMatch(assumedScopes, requiredScopeSets):
+ """
+    Take a list of assumed scopes, and a list of required scope sets in
+    disjunctive normal form, and check if any of the required scope sets is
+    satisfied.
+
+ Example:
+
+ requiredScopeSets = [
+ ["scopeA", "scopeB"],
+ ["scopeC"]
+ ]
+
+    In this case assumedScopes must contain either
+    "scopeA" AND "scopeB", or just "scopeC".
+ """
+ for scopeSet in requiredScopeSets:
+ for requiredScope in scopeSet:
+ for scope in assumedScopes:
+ if scope == requiredScope:
+                    # requiredScope satisfied, no need to check more scopes
+                    break
+                if scope.endswith("*") and requiredScope.startswith(scope[:-1]):
+                    # requiredScope satisfied, no need to check more scopes
+ break
+ else:
+ # requiredScope not satisfied, stop checking scopeSet
+ break
+ else:
+ # scopeSet satisfied, so we're happy
+ return True
+ # none of the requiredScopeSets were satisfied
+ return False
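+
+# A usage sketch:
+#
+#     >>> scopeMatch(['queue:*'], [['queue:create-task:low']])
+#     True
+#     >>> scopeMatch(['scopeA'], [['scopeA', 'scopeB'], ['scopeC']])
+#     False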
+
+
+def scope_match(assumed_scopes, required_scope_sets):
+ """ This is a deprecated form of def scopeMatch(assumedScopes, requiredScopeSets).
+ That form should be used.
+ """
+ import warnings
+ warnings.warn('NOTE: scope_match is deprecated. Use scopeMatch')
+ return scopeMatch(assumed_scopes, required_scope_sets)
+
+
+def makeHttpRequest(method, url, payload, headers, retries=MAX_RETRIES, session=None):
+    """ Make an HTTP request, retrying failures up to `retries` times; return the response """
+ retry = -1
+ response = None
+ while retry < retries:
+ retry += 1
+ # if this isn't the first retry then we sleep
+ if retry > 0:
+ snooze = float(retry * retry) / 10.0
+ log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
+ time.sleep(snooze)
+
+ # Seek payload to start, if it is a file
+ if hasattr(payload, 'seek'):
+ payload.seek(0)
+
+ log.debug('Making attempt %d', retry)
+ try:
+ response = makeSingleHttpRequest(method, url, payload, headers, session)
+ except requests.exceptions.RequestException as rerr:
+ if retry < retries:
+                log.warning('Retrying because of: %s' % rerr)
+ continue
+ # raise a connection exception
+ raise rerr
+        # Retry 5xx status codes if retries remain; otherwise raise
+        status = response.status_code
+        if 500 <= status < 600:
+            if retry < retries:
+                log.warning('Retrying because of: %d status' % status)
+                continue
+            raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
+ return response
+
+ # This code-path should be unreachable
+ assert False, "Error from last retry should have been raised!"
+
+
+def makeSingleHttpRequest(method, url, payload, headers, session=None):
+ method = method.upper()
+ log.debug('Making a %s request to %s', method, url)
+ log.debug('HTTP Headers: %s' % str(headers))
+ log.debug('HTTP Payload: %s (limit 100 char)' % str(payload)[:100])
+ obj = session if session else requests
+    response = obj.request(method, url, data=payload, headers=headers, allow_redirects=False)
+ log.debug('Received HTTP Status: %s' % response.status_code)
+ log.debug('Received HTTP Headers: %s' % str(response.headers))
+
+ return response
+
+
+def putFile(filename, url, contentType):
+ with open(filename, 'rb') as f:
+ contentLength = os.fstat(f.fileno()).st_size
+ return makeHttpRequest('put', url, f, headers={
+ 'Content-Length': str(contentLength),
+ 'Content-Type': contentType,
+ })
+
+
+def encryptEnvVar(taskId, startTime, endTime, name, value, keyFile):
+ raise Exception("Encrypted environment variables are no longer supported")
+
+
+def decryptMessage(message, privateKey):
+ raise Exception("Decryption is no longer supported")
+
+
+def isExpired(certificate):
+    """ Check whether the certificate is expired, or expires within 20 minutes """
+ if isinstance(certificate, six.string_types):
+ certificate = json.loads(certificate)
+ expiry = certificate.get('expiry', 0)
+    # expiry is in milliseconds since epoch; allow a 20-minute safety margin
+    return expiry < int(time.time() * 1000) + 20 * 60 * 1000
+
+
+def optionsFromEnvironment(defaults=None):
+ """Fetch root URL and credentials from the standard TASKCLUSTER_…
+ environment variables and return them in a format suitable for passing to a
+ client constructor."""
+ options = defaults or {}
+ credentials = options.get('credentials', {})
+
+ rootUrl = os.environ.get('TASKCLUSTER_ROOT_URL')
+ if rootUrl:
+ options['rootUrl'] = liburls.normalize_root_url(rootUrl)
+
+ clientId = os.environ.get('TASKCLUSTER_CLIENT_ID')
+ if clientId:
+ credentials['clientId'] = clientId
+
+ accessToken = os.environ.get('TASKCLUSTER_ACCESS_TOKEN')
+ if accessToken:
+ credentials['accessToken'] = accessToken
+
+ certificate = os.environ.get('TASKCLUSTER_CERTIFICATE')
+ if certificate:
+ credentials['certificate'] = certificate
+
+ if credentials:
+ options['credentials'] = credentials
+
+ return options
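+
+
+# A usage sketch; the environment values are illustrative:
+#
+#     >>> os.environ['TASKCLUSTER_ROOT_URL'] = 'https://tc.example.com'
+#     >>> os.environ['TASKCLUSTER_CLIENT_ID'] = 'me'
+#     >>> os.environ['TASKCLUSTER_ACCESS_TOKEN'] = 'xxx'
+#     >>> optionsFromEnvironment()
+#     {'rootUrl': 'https://tc.example.com',
+#      'credentials': {'clientId': 'me', 'accessToken': 'xxx'}}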
diff --git a/third_party/python/taskcluster/taskcluster/workermanager.py b/third_party/python/taskcluster/taskcluster/workermanager.py
new file mode 100644
index 0000000000..57ee384b18
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/workermanager.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.workermanager import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/workermanagerevents.py b/third_party/python/taskcluster/taskcluster/workermanagerevents.py
new file mode 100644
index 0000000000..e879f0d0a3
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/workermanagerevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.workermanagerevents import * # NOQA