author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit     43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree       620249daf56c0258faa40cbdcf9cfba06de2a846 /testing/mozharness/external_tools
parent     Initial commit. (diff)
Adding upstream version 110.0.1. (upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/mozharness/external_tools')
-rw-r--r--  testing/mozharness/external_tools/__init__.py                          0
-rwxr-xr-x  testing/mozharness/external_tools/gittool.py                         139
-rw-r--r--  testing/mozharness/external_tools/machine-configuration.json          32
-rwxr-xr-x  testing/mozharness/external_tools/mouse_and_screen_resolution.py     191
-rw-r--r--  testing/mozharness/external_tools/packagesymbols.py                   81
-rw-r--r--  testing/mozharness/external_tools/performance-artifact-schema.json   233
-rw-r--r--  testing/mozharness/external_tools/robustcheckout.py                  832
-rwxr-xr-x  testing/mozharness/external_tools/tooltool.py                       1679
8 files changed, 3187 insertions, 0 deletions
diff --git a/testing/mozharness/external_tools/__init__.py b/testing/mozharness/external_tools/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/external_tools/__init__.py
diff --git a/testing/mozharness/external_tools/gittool.py b/testing/mozharness/external_tools/gittool.py
new file mode 100755
index 0000000000..e7cf524ea9
--- /dev/null
+++ b/testing/mozharness/external_tools/gittool.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+### Compressed module sources ###
+module_sources = [
+ (
+ "util",
+ "eJxlkMEKgzAQRO/5isWTQhFaSg8Ff6LnQknM2ixoItmov1+T2FLb3DY7mZkXGkbnAxjJpiclKI+K\nrOSWSAihsQM28sjBk32WXF0FrKe4YZi8hWAwrZMDuC5fJC1wkaQ+K7eIOqpXm1rTEzmU1ZahLuc/\ncwYlGS9nQNs6jfoACwUDQVIf/RdDAXmULYK0Gpo1aXAz6l3sG6VWJ/nIdjHdx45jWTR3W3xVSKTT\n8NuEE9a+DMzomZz9QOencdyDJ7LvH6zEC9SEeBQ=\n",
+ ),
+ (
+ "util.file",
+ "eJzNVk2P2zYQvftXTF0sVl64am20lwA+FNsCKVCkRZJbEHhpkbKYlUiBpNb2v+8MP0RZ3uTQU3SwJXLmcWbem5GWy+Vb0fbCQD2oykmtLDgNDVO8FVBL/NG4y/zOcrlcyK7XxkGrj0epjulR23Rnm8HJNj01zDatPKRHJ7qeMBe10R1YeS47/SJsWWlVy2PPjMVAou17dnr0y//65QWeCLt0bnkU7m+8FabY7xXrxH6/WiwWXNRQ6Q6BREHnbNY+he3qzQLwwvjjLibZCDRVTihnQdfgTtrb2jX0zFrBQSoQEs0MMOvd8cJaqFCVUCF0xe2qEtbKQypYz1xjS/hD4zbDLLseM44AiskXAdYZTCKGKq1Wa7AauAalHQwWa66gZeZItFBMVHjyljVIK5V1TFVjhgdmRQCMadLl97BeFHAyvDf3a/hoBrH6Ctj2G2DbKdj2BswfsR8LugsLpRGMF9liO7fYTi2McINRN1C7mWvkmUsjKqfNBVXiGOZRjCtYzaGu5TnTDu8DtsOAKXFi/4hMqAzj1UA4frM3+kVyVMFJtrxihnukALsmTiXyQ53y1Fqotf754cDMEySiGukwQ6Yuxae6FIrbEyqpiFGhPfJK+tK2bKV1GEMOfvV5pIfUgEiZCFR/KYzRplg+6qHl3qKWisPDnSXAO7uEOyhSnBn0qsKIOTZLf6HqFtZUaG7d2i91moudJ3es4CMuA1pRzt6uxy4S5oV0jAOik9gBNDywLcDJDqUv6xELhea1ksoThkR5c/qYeXLMqU9caGPmMrNATbuJRcjVNmxjh66oc1JJFUj4h7e//7Tx88pPg9l08H39VD+tqciNOBOFHXMj3Uh2HHUlHZMk349NQw1zuA/Lp4bQqB45vUOrq2dqij50xG+bLTzA5pftrznBqAhvmj29N/o8jytNOfScOVF4y2vmSwyeyyP2eDHWhdViP6hWqmff3DROc4nCBqQNkEeljSLWvRBt6iZfIY4jT907EGdUdSqOM5c30xxQ9DQhS2lJ97MTx5GDLWK0dl7DNoxxG1vmxNoc6RoF2XN9UlO9zpF8s3mI2/1MVIzri5aqCGfXq1fNryrW39ogkukoOULJ26K14vr8STV8yezXyhFR5yx5G3GuRO/gnw9/EiH4soLJKT/CX0SYgOU7jeOrauI73eTZsJySI2i+KE1Td3sdQlDQN5IxTFox1dSsvVFzWVZk0F58n49TBQ3w5UfSYv5LQRuGY1liF5pOcImKDtOlwbFtqAD0AUJ40TkJruYoiq73ct2N3xxl92zpnibtLlUd78ms8MGtXptN+vCl8C3sk/BNvCYqah4aG8+6P+HijXOeQTGWYEHa8OQVcTlWJhau1Yzvw+fQXAthFTOafRkVk6lJq2GAfEren1fwww7yYyYtzoR3WonpjAgoMR78zkrhhD98+Qn/nYhV6MM/2rGhTeTmOHAi7qNxEf9XnsHJfsAoZpirmyCjC4ZzYzuNPYZyE/weVfS9JECh/K8cDlq330sSFItgty6vJfIfMB3quQ==",
+ ),
+ (
+ "util.commands",
+ "eJzdWW1v2zgS/u5fwXPQs9x1laDFvSBA9pDdJnfBtkkucS9XtIUgS+OYG4n0kVQc76+/GZKSKPkl2T3slzOQF4nkcF6eeWZID4fD80pkhkuh2VwqpiohuLhnmSzLVOR6OBwOeLmUyjBdzZZKZqB1/UY2/xleQv3/skgNiirr50Le36PIRgx/GuArdlIPxPdgPuC/oKIkEWkJSTIeDIxaHw8YfvyyFRfv3s55ARsv0yW3764/311cvnuLgqeqggE8ZbA07MLOPFNKquPetPO00DAYDHKYky5JVuYR/kzY69cPq1Td67FbccCyVc64ZnoJGU8LxgUzixS3B5YWq3St2SoVhnG0XXFhAEXAIwjG5/hupJmQxguCp2XBM26KNcsWUoOw791uqJH7J87kch2NnaFzNsLdRySD9nUznF7t0i92zjeUIDW5E5/8erQr5mIuo6GP6DG7nZ7eTIc7h1/pIXsVBDsuuDZv0S8FF0D+GbulhJYHWE/YY1pUQLphZGNuQOFPqaOOC3fuZfebOEHaqMgKwx1cVEpQ95CAeIzwx4sSsKI3zlb8hyspQo/58bha5qkBu9C+V2AqJephvwHCfWfYEfo3lWA4xKKUkReYnDOcUJUgjB7HjN2kXEPoqx/TooD82j1Z0GEErTSzgDqpEAXcIGa4WWBYxZtfQEkUdjHvTHL6IuiqjITNq6JYT0JLjmJKTu/ZLVZ4yN0Bc65A6YjhdI4RqlOPzSBLKzQBkZxLMTIOyGgEmUracLGwAc1rAKPnH1OlWVnlhIh7FG4nysosK9NgFueN9uGVxgmvbYQ7I075hgPsA84mmonpl4/1Dlg5XY6H7STvsiBS2QKyhyRDU7e5zZPH3shuQfaoUeHs5ubqZjQhQQkNnRAnBfoQcFwacYGSAzuhSJca8q617A0z+yw+u3zPolfx27mu14+/Csyr+iGA+38qDmYn6HN4FIg0yq4liAhTzL+ZsNFq1MmkPc7UJscYnDRL8RmUap9bZ7d6KSilgVYxDeoR1IQhPBVVhZNLKQBf68VPsG4fTkb4azRpnBN8eqah1GSpYM6f0LovuOpbjdZ6izYI7dwYngyIPPoyelOMWm2+NfTsFHpmKcel6Mtlahb4dokxI0GRWzv2woKV6XJJK50Pmq245kKbVGTgXDRLNSBpYhaOO/uTffjnWxitgOlqL3zHesjvcGKyxMSXOc+SpazjivOoO0ioyCmk6pN3R0cTRuPtq6P47Z+2hsN/ahmEllmaPfhQ/kryJW5JAwgyokwLQ0bciomxLIDaGiutNoUy7cW0vUnZVtZvoe2WsgfbXMDmvgdD4Zh7M5RmFXJ7kfQ5uliuiG4bF3gNHXLc5thqwBNklRVl0gfQyBUCGyuaKDai12IGQaVMQlTTUAaGFd1Rx+L/usw8W0HIEygkAMy1Zcctjmg9uaVcFalG5rUgPQlmDhwoFtjc2ta1NUPRtrRnbHNwPGhG0Hgc5La3ZJRBx52UI5NymFX30dCj2kMQQWUbLpWNOwucvJMTdnS8kbu7KlJgQX/JbylRfRl1znQGAFv2TQ3V8wkdqWxCdBL4UMjVjp4CvUGDb8KIfb8nf+hzgAprMOESI1n6KHmOB4e0LO2xymd8N1ghLHDfnsG1851GO5yOGvdZZdNNASrIQaTQTiqixjx6pcd4GNhD5lF//SRQcLwZ005HF376cqKdlLQp1DdrZ/YPqb51BzIeLBKs8dZYi5sca4eXgYsnjN8LSTU1juPhvkLmt97W3tmhrUglLbytqEdTXGrSxoRG3qbANMSdiuFeJYZ9WGKW5eNn1kzqPbdA4JmltxQYcpohiDTBwQfKbTo7Rn2NnvPi858ADf+zrJpwAiawHKALgGXUaWbqlgiP1IkrCY7zuciKKofE97X2ImFCpc0kGOD6+de2NG37EJQ612F0N3TlD3sMMHVv3XQOfmbuLilqKdh94oNaEU1y00zWuDGyVHtAJdpU2EZ4odhcQlrGzlF3Lqru6mMtK6YXsipyKudU223ZbxoZpL4sXaItgG1agUbi31JWgmy20pxeLPoex+ewwuWy0uSB2dqAHk8aNXNI80JmD81pl/fdEfYxVu1Oqb6dvr/6NHWnuk5KNpOpfA6e63B+n0Ot98IJG7ZZt+ugu7cL8SeucPDi+qw5ebk/z+dO1+Z6V2QsbgJNGq3toNs6RqTk3RJqRx2gM5kD+8NGd/GSut2T4ot4sA11QHXqbXZBz14NdCa64Y17g6DrfMnNAEuxx2lVOWAfsRfHJQpc8nj/uQxm0iVqU77am4C4cXSgQOu33+UCYf/9AZ7RHyGhY2xEv1pmm1L2U9+Pc5ZFmgFxmr0j1AssM0WsSuQSb/9KqgeN/GPojFAJhKY/WYlcrnTMplj0cAJyyA+QojeNddDV7em5leiOWzm4w1T0g0Sfr9DguZIlm1W8yGfSIJ2njxDX99jj4BBT90A3ZA0xmu2IrT2DOtcTrhOvT7T1DvOTppWJM6ueyqhnaNHTG3Uu62Gr2ZFg3F4RYHXQoYsdjqyX0Tl5gUe7mZIPeKrDNu5Bh/lQC+GahvpC6IMzXCT3a7Rf0EuEHLB/4bFuvrYhy7mCzEi1JpwQWxyu6Mh36DoRaENbKUXgoRuSgd8pW5QytxtN2JH8y9GRjxStoesYYmXprqpxm76qdAi33yy4S2Nr1M+SCy+QRsaBlzEh71zEJravrg+tCwQUszrThULJtbZHdiqbNOy8EcixB3c81B22lgOnMoy0LJmv80QHohc+p+oJGwkz6kauxUlqrxeixjR7v3SXXP003mw6D9gFlWfx4Iq0PTWuUN8ccw8P4hNMGoNTAiuxI76cbpFTphnKAUpNrNKkurXlYlSye2mbA8kKIAncfl1hqWCLnDfUIvrw9pDpQh3YdST/3MR7A5kU7WZuz/SQrdo5AU/2G/QD9t66AEu0bWBIk15+dYMQpscuLXabZWHcm+ZTqqcuDZQNsJGMD0KCso1jn4no+kfgPEuKpfxlQQ7X2vH4FnbTa22gpJruv76L3Zv2eyg/A6MaDX1+IG0OP/JMSS3nZhgY7uum/SKuszy2nbymiy08/3z++93F5TPrduZDONebtY1vmxJVJ/Ub+zXenGcsrEk1q9IahLC4NwsdHsLYx9N/J9en03/EjQLX0h6ErIezQs5moEDFy3XcuTVLNfKY6brc3SrRTi0hISMWbWF17EYo8NNGX/Hzt69fR+w7u3JP2WiWbTg2XOS/Bz0O86oL6y1MGvi9vlAe5VCww3N2+E82xEI6wgNgtyBs5llfgsU2O7zdLiNsnzo1hKg4nflvhJvvh+NbMOf499QYxZHYQN9FfedNgunnFx/OktPp9Obih0/Ts+Ty6ubj6Yd651s8aYCjeDyKOLxgM2XvZVNL7sR/JVCTpbeFxHmxv/8m4lt93kMBBsiCzWVt0ZvP0RGUi/VX4PE5gotW6Y1l+AdfvY5fj8KaiFOTFH2E2LLCvhx9a2/GXK107//6rQOROhfjEauLr32Me6Uqw5aci6pl/xdV4XCrVsU/7o7X+4ubsx+nVzef
u7v3qeBF/P9iDAU0/iIgbW6wI8o9Ndv5tleF9zX8t0Djvwh1IB4=",
+ ),
+ (
+ "util.retry",
+ "eJytVk2P2zYQvetXDFwsLDuC4C2wORhxsUHQFgWKnHqXaYmyiUqkQ1LxGkX/e2dIivpy0h6qw1oa\nDh9nHt/MjmivSluwouVJrVULdSdLq1RjQPilm2ZX49dKJS1/s4049YvB0jLJzlwnwdqo81nIc4K/\ncOi/8jO3v+Mr12lRSNbyotgkSVLxGjS3+p6y0golM2DW8vZqzeElA9NwfqXgDu93GbTsrRgsL7AF\ntCYQH4dT8LeSPJQ0h/Tn/j3bZFA2nMnuevisJMdj9Bkd0Pznzb3+9fdm77BWq9Un1jRw9AGtgdHB\nou1aUDVaQ3hrR5qBTlrRgLBgurLkvDJDRJgb6xqLyYNV8JLDMUa/BmHAXjjIrj1xTciGI5uVIdcb\nEzainLi9cS4jL9kM9/0OmKygUt2pIRNn5cVT0W/J0C3CTbOZULrOAY5zEl2kDGx3bThuiTiRWsqD\nYfoX1TUVRgsl684Xm8NvNQwwoDBbTa4S/yjDI1AjjOUVCPnobKY5aCYMOjgJ9peSEXl3uAm8qNOA\nFVxF2/JKMMubuwvjGK7e5XLV6quo0ItYK/Gm2QkzwwsksBHrbm0KBqy2mASmELMnxD7hz4pU1bVc\nWhOBQohwZYZCwwsTnpu76nSvSV92BKf5l05o1NUSCUPEwzTKBCOSlIEjHnFckbp1ScH1WxtuTETO\nI86R9L526R+9+D3P/SU7NYnSkkBiFBQ4pQBY8YOY0HjsKVxj4bgFSpR6Q7CHwt6M16SyMXWlB9dg\n876inlY8fBj6wX6QjzrnFT9153Q19X6qwBHgJDc2r+AJ0lHbgOkxo66z8YFI7GLP7u12EUiQhA+H\nWI5DJKjd/QSWQhOyVunKCXsP1FeoRJ8MysJeXA/a41ffhPz7agISn1U4EX4IKfQN01id0u6Nf/VQ\n+CFD+LE4uO00qsNtS7fklcF2G/yjqy+/RTNdphZYj7lREQwVv4dVRl8FMXD4Q3d8Gg3ebrjt/SLf\nsJAuduBNPGL+m4T/Kr4S36QyidwSbWM1Ttih1jE/b5DNT7D7D+f9wlAfVVCQu+kq9vUTrxV1M/LE\nJYzl8T3TMyhw4UPW3K2n3/EaAj+M3rfw48JzluWkFJYZz7En7hNvGg2E7AZjLSTKf1YiEt5RbQ1z\ngHB9YOvV10vUfwWheoD1eg0f8T9hqTSz2EKQ2zBHbHLszqylTtYZHEu8/+sA7tmiA2ulRhrL8zyZ\n+8Zh5Hm3G48jz7sB5cR0utlPYEKESfQpImRRowIVxkmNebTt1Q1a3jqeIMZbyeWKA9S8dveP6tyz\nQXhh2PGbwrjjfxBjxPS39Ti7gmR21DLE5PFqyB3v+3U2OsY5EEsjBP3vIlhwFlEKYb/D0v/M0CN2\n7oLjNNTHkvwDPQB6iA==\n",
+ ),
+ (
+ "util.git",
+ "eJzNW+uT27YR/66/ApF7IymWeEk/Xuam4/jReJrGntiZdMZ2JEoEJcQUIRPgyddM/vfuAyDAh+S75tFqxpZIAovdxe5vH8SNx+NndbmxSpdG5LoSqrSySuFGuRVHZXditx2PxyO1P+jKCm38L1OvD5XeSNPcqauiUGt/VcnRKK/0XtRWFclG7/dpmRnhn9blcrPP5jBsr2/k8pDa3ZzufqiVtPgsmp2rQvqZJs3lsi4LVb4f+bUKvd0CvyP4Ftf+KtlK+y38lNV0uSzTvVwuZ6PRaFOkxognMk/rwr7apZX8OjXyaiTgc4BHo+4joNi9NUVCmczFcp++l8t0bXRRWzmt5EHPmJTKBV4lxqaVNajI6RjFuLq8HLsh+Hkg/gkUhHuCKjTCk2sGoXKAC6T3ppBlTOhdMwifwiD/7MKMxQVsV4KTEyCJ31P8b0ZTZAEcjpGIKLWFXScCV11yXQIk4YgH2LriWU4ZoO8lXpKyY12slTVAi+0D6FVGJijpoVA2orjTxoZhH2oNGsWpSSltoTdzMR7PRpE++gOJ1cLYSh2mY1BPmOK49eL8rFU5xfXmglXCEuxSAxLcKAPeMM0kPvaXThRwhe+JlBGvq1ryNvMIIT8qA4KCKnEqOg3OsNVtpNXYwKdvJltlJ3MxAYvFr8VCl7AvpaSL8stJWP7dXGyO2TUSnkV7REIhI7ynHzfyEHtm8jgtCpm95KunVaWrq+7sZ2lhZEv+vBE9x508JzkN6AieOIXzs01airUUqVhXabnZCUAYm24FPkvuoyKz08cFECXVZJOGuz9HMaoES2WtrAEDrulZUMxzeKzSQv1bgveCIewP9pY8wyirq1uRWtaMeJ7TD68xQEShTDmxPGUulAXMLQrUGANjxtqEeStceAXDSay5sDtZolrhLjurn7ipZGphotcubBR6uDd9ZTJVkSwRDIEBJqrM9XRMyyL2A4DMRVutEfROwxNZePjED4YQ1BeuV4CQA4vRsAhbyBlZufmsNeoUW51hIGFburyzGu9qE2imnfltCYakhQn0/IH4u9+iHNT8Xuja4vJwE0AHdt9qp3+Oo3uZKbwApuQGTAGHgIYcLZYZjYA2E/67ZctIxI/sOTskvIM9P+KAQw2ABzoA7e4hHKlDwVtvHD24D+CKll0gIwbDVcbGgWIDdzuwxI0ubQpDMSRrmiFLG3PoqH0r7cR4aYAo8tcWRFXG9h0YtgKjIow0sZWwC754Rc4mUiMilYMGZSKrqoSQcy3++kV7Mx6I78B+02ZtUBLSSJIE4FLonBhjMQxekoexW6UdQg2J1v0qVejpjhP0qMAAgBCYagNDCAQM1TiMfn2YsJ+8G/CGU7PDnFHbzsWY3QEmzlzkauEh3veYZJZAliJu7GEDOO0UAiq8AZDKBEwjq7gPQrRBsbfpiKsgaYSwL/UBUpU3Y1gMPBYc+GZBkR8vFgu4u4BVxhF8zwWgBfjTdUzk+cunc1GX6kZWJi2WpTxifDTXKNosth+ckRxTgOmZ+OxadEyoxX1jqLU91Jhx0FxePAFzz6azhHOIsAJqGviNUKvU1Z7AZADGmPKsFZBA9TB72hC65m0BX4huDdAaJ6jBWS+4McnfJ7xh5ua4OGFK7GKQdgGu4G80oeCPCPoc4J7BryYkGcYuxYjCVmiNLPI5zajkti7Sqj2Uhl2SzM46ISeXle2b+2jAwQDbcuVyqbXWBf8C8fhhJRNyW3bVUzZLdclvtMrIHNnFzJLU8kljU3kYi9k87tI4bF42HDedkQwgEE7AMbEqPQHOQqZZK0XOnE0YsAmKIJShzzmaOMNIC4htJcQF02UntiTK7EHvev0z2IlhAMw1fgcCE16cqC/dyHNEmY2IqovMVKrS9gXaMOo4mWGsyaMEJTlWCgqG8YV5W2KN1Frag+umkGkZ1zAPxOujBmPKISbu4ZkRYxqD5rPXWV1ITEyOOwU5LsVriLRHKY4pxFajRZ5WSQPnrcSWqJCd5tH/2cQho1P+oA16ZtMsW2J6Y/1e8QXVYc1FJAt41qMsw3jAzygbz8Qqmrbyjg6FFwCXjxiYYqzwx4qtHHaHGgUweBWts/K+25OWB5EJZJzFD3MaST+LDNKNgMrvzpK+kpbhCyYR7Hip78AjLLmAaffjc9vw2edxGFg9ZwysN7J0U+YYHb7DJEdxkuO2K9MSsxuH/SrUmh00DIIsbu6Keefs7Z6YhyLhGMpLO6hHc6dRfrGnBgdkjnazm1bonObh9O2rhzPxdppLuPl29hf0VhgjzSY9oHKDWue0UCsh2A9mAPtkW+n6MP2yb1X3s6jznjPoDJjlZXJdbxF7rtBOyHd4KKJHaIXsbx1ToJWz9uRjRjThOua6G/ZH/QmAVpj9oZ0NFYTIqmmxCjUYygj/T4+ArRdm5ng/rbx5WC8qJ+/l0AOxrc0kAEqbxwvzSb4CM3fHUO/mrnc1F61uyTXqMfRt3GV9yCAgLXEkOcw8KMH3FK+7TUbQmcLMzTgaFCiYRKf9gC1EAz4NOQNVeCscRK2CHezPyvOyQjhZOT5XYrOTGyjTqITFVitRY3BP2EyeaI5jWITSErDrWNrxKrv0AMgxB+/eFDWpHtI4o9bFLbCq12tZYUuWchDqCjWNDCcWMQg2MOd62mKfBErh2kjCOjdKrCXgiGwYcyI3dtzoD6kNd3VbaubcQpY3qtIltoin478/f7189c2j758uv3706unyyfPvwW5Q57PhVfre0nQ9G3dtdUHbXHTqCMpwTNj0Zu6QxccEkIU2g/QgYpD0Sd3dKHduRg62f6jcx70MfSVyozCrmdSq/4gNbJI1A12nrF2o4QcEr60C3URDWdt+hKtnntIXGG57GWT2mFYlcAn86rrIsNJ3fYq453FhvoIaxaRraprgE/iOZZnjSksUfIDN2GJI1b2HvX2gCqffM+dqpVPoP0bvQ75oUqfZ5gryExW+G9HN4vtNtpamABBd3iDQcaHuAm+mzmxoCjQV3VeRC/fagGzxoZsWyXfOGrtNndA0dbkNbBkaLEAC4CXdIzp9G+uzE4oVV7P1zbXf5HsgXsAi1VHBHoO4lKiATppqyX/o2SDWu++A1O67DfiM1r6BSO97CKt1XTGjqKU4lSZ1qO3OikORbuRZH4+dePN+SW/KPlU+kWKgaFRlJj8mOG88O2NZDeEB8xrqzobx7T2K+qkDQ85sodMcxI6jhKQWtHeAUANuzhZMunaqS5vIO+cAh/IcpaOwS29c8Gpe2qQFpqO31Jo3et50Yis5QSWLUtI6jgBlomClt3aHbxjZ6/WSb19H/aX4vRDsVnunHmBlmGl0ReIoMC3MQW5UriRk41A1YsMXs2cJSTcvGer6sGp4zYHv9LrvcFqvr3j1mLN0Y+u0gJiNTOSCzRmWBg6d6FggseARCZ8JkCEf3R54R84/zSWa76fftg0pi/pEYZNNs0Vuc5Q9ubrfGX//ahCzuiDxjEigs4JkhjGicd3W2F5EZAKvXzx5cSW+AWwD36SFOU8l7DBQubwnpZlLm27N5b/g0yMSvWzrWVP8Iepxw4bD
XAeeZr25w29A7k6xB4mdFyz3alAGzb3Se0lmDzZQWnGElG372cC4xxysIoTEQFLJBXf2uxMCcNGLhLAntLcQKBft8DcQTfwnjoPnhoU3DP4zaCz32+doP9zrM7ztypjrsYYYosrx/Xf7bnT/kD2PMtFmB05moPfchPtuez/N6C52YkR7s/vaHtz62NFY6/+Vj51Imv9H8oMLQzbH2InGw73e0NBgQSdsURPfAfE4HSVRQ7XKDz7uc49yow+3XkVXlPz7KHHlWwG5r+ZdlImCDEGNQjyOHBDPA0FsgQicmihKx2GjWaOdpn46whFEtYO108PlhZlQp6sXCwd0YNhBkEEfv4FfYyGpwXehnsh4SOqwpbkrXnsShiEuZYjrHfcAj+LQTk1PZMdNM6TPgsqjvkZbUG7BDx7sADqNkQxNj6f6fv6pHJ6bK+1WC1eRnLj3mzeh8fIYqZrQHcctpZTdYM+k6cSQXcNtCmSQfWI6R7DgX41RvpkI8RrSp6OuqC4kc/ZnOuh4yJ1bKFzM0PG5fieFG8UwAhehMa2eD1Jn8btnTVDKqLw2DXM1+EiYHGmLKDglEgnIpwfFc0dWmuaO29rzFe6JEhR7myzwEGY8shZ16aqFSFFuThQtsX3Nd+nARZdmm+7jWKGENjyh7WyDwN+8G2Yb5XkdK20bYfR7yJm7r4f9x0WIf8jbtU6r7DkeUKnqgx1gqZc23S26SP94OoYwvy7kvm1pTp0n9YMfPJ6iyjos34+fQe0v3YmYeAtFnkJhmX0F3/xwnUIZaHXoBztL6b47RvW70xnv4jbMYBjyaf0P4MV8GORysXDyoTf/LYDRPkuoVZpNJ4tFqRdUl+raToLF+rDVmiQ/Wpz0Bt9l0+G56NRbpzWZXZ1Yjp9OgsQN1YCF78JBk+aIiSM+KLqzMG50RsHNY+1gMBhsi0c99pCuNuhK8R2gtAUZrrfN71hcJ5vb2q7JnRAMlVIhqHqAxZ6Sr+yHIYiZzprXnrhy1CbQNJWVWFd0YsotD+t9y/nHLi23MsBipswG3Cw6iedaGN2iH+LNUWGNSWun1o39kZC1GfXTF2LKVQ0tC5drWejjjCpfXW24+OaSkvoijkwGxpjDv3JDmQyint7vActVZvwL6z119xHguWcClPZ8tK10R6iQjk3haSaMBd0kEKv4vIb8UCtAGtQI0J40RHD6YsGzJnMPIMqfLqMeToUaX98KXQCXAt8s0jF0TW9Xhvoo/a5X14m9b/lTVvxCvaHxUEx++mISnZoORt89SNrGHYcFPUdlh/D50+TCXO5TyL4qzuAa8z6DZu3ZnPtNO2+fOvn+XYVmwl7muwj9Kd/uVyldB+97dKd+HEq4eAD2PzoZFjVfwDa9cxHQn8i38GwCocOdEyXuDvUSJTrYGKdLLkOK3zv18oE7pAunwj9qnBU7GPzP1nt/bjRvSsb/w3D+QDzW5Y2snG1gM7hs/paCD6/RSd0JlnJcMoYj2s746W8ehv6oYyhhIGU0PkenQeK0Idh0nDU8oU4mGx4+C/1EZol6gNxphNJuI5t4gblzeutb0UwMylO9Udx41RAr6BCUa5DSW3CqBlJc8VBXW4legpHNnUVyMrWzE2RgMgvg28E7lbe8uVOyBWLjh9TSdIcbLn+JJ/16+Yuj++tVe1QEH9GgcQLOtU/xfXuvsgxw04LMFuLEF9FByr4l9vnfQR1t/mh2ZycCzu+g6M/Pcfx5xOmfosffxs3MOfqJ8BWfvnKRi/62p3fkinOeJiUIx1hXOH7lE7zilo5PRMckmkTu5GnncPLKnXbGi2+ePnrij/Px37n9LserXOA4c570P8E3N0c=",
+ ),
+]
+
+### Load the compressed module sources ###
+import sys, imp, base64, zlib
+
+for name, source in module_sources:
+ source = zlib.decompress(base64.b64decode(source))
+ mod = imp.new_module(name)
+ exec(source, mod.__dict__)
+ sys.modules[name] = mod
+
+### Original script follows ###
+#!/usr/bin/python
+"""%prog [-p|--props-file] [-r|--rev revision] [-b|--branch branch]
+ [-s|--shared-dir shared_dir] repo [dest]
+
+Tool to do safe operations with git.
+
+revision/branch on commandline will override those in props-file"""
+
+# Import snippet to find tools lib
+import os
+import site
+import logging
+
+site.addsitedir(
+ os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../lib/python")
+)
+
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+from util.git import git
+
+
+if __name__ == "__main__":
+ from optparse import OptionParser
+
+ parser = OptionParser(__doc__)
+ parser.set_defaults(
+ revision=os.environ.get("GIT_REV"),
+ branch=os.environ.get("GIT_BRANCH", None),
+ loglevel=logging.INFO,
+ shared_dir=os.environ.get("GIT_SHARE_BASE_DIR"),
+ mirrors=None,
+ clean=False,
+ )
+ parser.add_option(
+ "-r", "--rev", dest="revision", help="which revision to update to"
+ )
+ parser.add_option("-b", "--branch", dest="branch", help="which branch to update to")
+ parser.add_option(
+ "-p",
+ "--props-file",
+ dest="propsfile",
+ help="build json file containing revision information",
+ )
+ parser.add_option(
+ "-s", "--shared-dir", dest="shared_dir", help="clone to a shared directory"
+ )
+ parser.add_option(
+ "--mirror",
+ dest="mirrors",
+ action="append",
+ help="add a mirror to try cloning/pulling from before repo",
+ )
+ parser.add_option(
+ "--clean",
+ dest="clean",
+ action="store_true",
+ default=False,
+ help="run 'git clean' after updating the local repository",
+ )
+ parser.add_option(
+ "-v", "--verbose", dest="loglevel", action="store_const", const=logging.DEBUG
+ )
+
+ options, args = parser.parse_args()
+
+ logging.basicConfig(level=options.loglevel, format="%(asctime)s %(message)s")
+
+ if len(args) not in (1, 2):
+ parser.error("Invalid number of arguments")
+
+ repo = args[0]
+ if len(args) == 2:
+ dest = args[1]
+ else:
+ dest = os.path.basename(repo)
+
+ # Parse propsfile
+ if options.propsfile:
+ js = json.load(open(options.propsfile))
+ if options.revision is None:
+ options.revision = js["sourcestamp"]["revision"]
+ if options.branch is None:
+ options.branch = js["sourcestamp"]["branch"]
+
+ got_revision = git(
+ repo,
+ dest,
+ options.branch,
+ options.revision,
+ shareBase=options.shared_dir,
+ mirrors=options.mirrors,
+ clean_dest=options.clean,
+ )
+
+ print("Got revision %s" % got_revision)
diff --git a/testing/mozharness/external_tools/machine-configuration.json b/testing/mozharness/external_tools/machine-configuration.json
new file mode 100644
index 0000000000..74a0baf5a2
--- /dev/null
+++ b/testing/mozharness/external_tools/machine-configuration.json
@@ -0,0 +1,32 @@
+{
+ "win7": {
+ "screen_resolution": {
+ "x": 1280,
+ "y": 1024
+ },
+ "mouse_position": {
+ "x": 1010,
+ "y": 10
+ }
+ },
+ "win10-hw": {
+ "screen_resolution": {
+ "x": 1920,
+ "y": 1080
+ },
+ "mouse_position": {
+ "x": 1010,
+ "y": 10
+ }
+ },
+ "win10-vm": {
+ "screen_resolution": {
+ "x": 2560,
+ "y": 1440
+ },
+ "mouse_position": {
+ "x": 1010,
+ "y": 10
+ }
+ }
+}
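Each top-level key above names a platform; mouse_and_screen_resolution.py (next file) picks one entry via its --platform option and applies the nested screen_resolution and mouse_position settings. A minimal sketch of reading the file the same way:

import json

with open("machine-configuration.json") as f:
    conf = json.load(f)

platform = "win10-hw"  # any of the keys defined above
print(conf[platform]["screen_resolution"])  # {'x': 1920, 'y': 1080}
print(conf[platform]["mouse_position"])     # {'x': 1010, 'y': 10}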
diff --git a/testing/mozharness/external_tools/mouse_and_screen_resolution.py b/testing/mozharness/external_tools/mouse_and_screen_resolution.py
new file mode 100755
index 0000000000..ad1f8d9e4b
--- /dev/null
+++ b/testing/mozharness/external_tools/mouse_and_screen_resolution.py
@@ -0,0 +1,191 @@
+#! /usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+# Script name: mouse_and_screen_resolution.py
+# Purpose: Sets mouse position and screen resolution for Windows 7 32-bit slaves
+# Author(s): Zambrano Gasparnian, Armen <armenzg@mozilla.com>
+# Target: Python 2.7 or newer
+#
+
+import os
+import platform
+import socket
+import sys
+import time
+from ctypes import Structure, byref, c_ulong, windll
+from optparse import OptionParser
+
+try:
+ from urllib2 import urlopen, URLError, HTTPError
+except ImportError:
+ from urllib.request import urlopen
+ from urllib.error import URLError, HTTPError
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+default_screen_resolution = {"x": 1024, "y": 768}
+default_mouse_position = {"x": 1010, "y": 10}
+
+
+def wfetch(url, retries=5):
+ while True:
+ try:
+ return urlopen(url, timeout=30).read()
+ except HTTPError as e:
+ print("Failed to fetch '%s': %s" % (url, str(e)))
+ except URLError as e:
+ print("Failed to fetch '%s': %s" % (url, str(e)))
+ except socket.timeout as e:
+ print("Time out accessing %s: %s" % (url, str(e)))
+ except socket.error as e:
+ print("Socket error when accessing %s: %s" % (url, str(e)))
+ if retries < 0:
+ raise Exception("Could not fetch url '%s'" % url)
+ retries -= 1
+ print("Retrying")
+ time.sleep(60)
+
+
+def main():
+
+ # NOTE: this script was written for windows 7, but works well with windows 10
+ parser = OptionParser()
+ parser.add_option(
+ "--configuration-url",
+ dest="configuration_url",
+ type="string",
+ help="Specifies the url of the configuration file.",
+ )
+ parser.add_option(
+ "--configuration-file",
+ dest="configuration_file",
+ type="string",
+ help="Specifies the path to the configuration file.",
+ )
+ parser.add_option(
+ "--platform",
+ dest="platform",
+ type="string",
+ default="win7",
+ help="Specifies the platform to coose inside the configuratoin-file.",
+ )
+ (options, args) = parser.parse_args()
+
+ if options.configuration_url == None and options.configuration_file == None:
+ print("You must specify --configuration-url or --configuration-file.")
+ return 1
+
+ if options.configuration_file:
+ with open(options.configuration_file) as f:
+ conf_dict = json.load(f)
+ new_screen_resolution = conf_dict[options.platform]["screen_resolution"]
+ new_mouse_position = conf_dict[options.platform]["mouse_position"]
+ else:
+ try:
+ conf_dict = json.loads(wfetch(options.configuration_url))
+ new_screen_resolution = conf_dict[options.platform]["screen_resolution"]
+ new_mouse_position = conf_dict[options.platform]["mouse_position"]
+ except HTTPError as e:
+ print(
+ "This branch does not seem to have the configuration file %s" % str(e)
+ )
+ print("Let's fail over to 1024x768.")
+ new_screen_resolution = default_screen_resolution
+ new_mouse_position = default_mouse_position
+ except URLError as e:
+ print("INFRA-ERROR: We couldn't reach hg.mozilla.org: %s" % str(e))
+ return 1
+ except Exception as e:
+ print("ERROR: We were not expecting any more exceptions: %s" % str(e))
+ return 1
+
+ current_screen_resolution = queryScreenResolution()
+ print("Screen resolution (current): (%(x)s, %(y)s)" % (current_screen_resolution))
+
+ if current_screen_resolution == new_screen_resolution:
+ print("No need to change the screen resolution.")
+ else:
+ print("Changing the screen resolution...")
+ try:
+ changeScreenResolution(
+ new_screen_resolution["x"], new_screen_resolution["y"]
+ )
+ except Exception as e:
+ print(
+ "INFRA-ERROR: We have attempted to change the screen resolution but ",
+ "something went wrong: %s" % str(e),
+ )
+ return 1
+ time.sleep(3) # just in case
+ current_screen_resolution = queryScreenResolution()
+ print("Screen resolution (new): (%(x)s, %(y)s)" % current_screen_resolution)
+
+ print("Mouse position (current): (%(x)s, %(y)s)" % (queryMousePosition()))
+ setCursorPos(new_mouse_position["x"], new_mouse_position["y"])
+ current_mouse_position = queryMousePosition()
+ print("Mouse position (new): (%(x)s, %(y)s)" % (current_mouse_position))
+
+ if (
+ current_screen_resolution != new_screen_resolution
+ or current_mouse_position != new_mouse_position
+ ):
+ print(
+ "INFRA-ERROR: The new screen resolution or mouse positions are not what we expected"
+ )
+ return 1
+ else:
+ return 0
+
+
+class POINT(Structure):
+ _fields_ = [("x", c_ulong), ("y", c_ulong)]
+
+
+def queryMousePosition():
+ pt = POINT()
+ windll.user32.GetCursorPos(byref(pt))
+ return {"x": pt.x, "y": pt.y}
+
+
+def setCursorPos(x, y):
+ windll.user32.SetCursorPos(x, y)
+
+
+def queryScreenResolution():
+ return {
+ "x": windll.user32.GetSystemMetrics(0),
+ "y": windll.user32.GetSystemMetrics(1),
+ }
+
+
+def changeScreenResolution(xres=None, yres=None, BitsPerPixel=None):
+ import struct
+
+ DM_BITSPERPEL = 0x00040000
+ DM_PELSWIDTH = 0x00080000
+ DM_PELSHEIGHT = 0x00100000
+ CDS_FULLSCREEN = 0x00000004
+ SIZEOF_DEVMODE = 148
+
+ DevModeData = struct.calcsize("32BHH") * b"\x00"
+ DevModeData += struct.pack("H", SIZEOF_DEVMODE)
+ DevModeData += struct.calcsize("H") * b"\x00"
+ dwFields = (
+ (xres and DM_PELSWIDTH or 0)
+ | (yres and DM_PELSHEIGHT or 0)
+ | (BitsPerPixel and DM_BITSPERPEL or 0)
+ )
+ DevModeData += struct.pack("L", dwFields)
+ DevModeData += struct.calcsize("l9h32BHL") * b"\x00"
+ DevModeData += struct.pack("LLL", BitsPerPixel or 0, xres or 0, yres or 0)
+ DevModeData += struct.calcsize("8L") * b"\x00"
+
+ return windll.user32.ChangeDisplaySettingsA(DevModeData, 0)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
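queryScreenResolution() above passes the raw indices 0 and 1 to GetSystemMetrics; these are the SM_CXSCREEN and SM_CYSCREEN constants, i.e. the primary display's width and height in pixels. A small Windows-only sketch with the constants named:

from ctypes import windll

SM_CXSCREEN = 0  # width of the primary display, in pixels
SM_CYSCREEN = 1  # height of the primary display, in pixels

width = windll.user32.GetSystemMetrics(SM_CXSCREEN)
height = windll.user32.GetSystemMetrics(SM_CYSCREEN)
print("%dx%d" % (width, height))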
diff --git a/testing/mozharness/external_tools/packagesymbols.py b/testing/mozharness/external_tools/packagesymbols.py
new file mode 100644
index 0000000000..0cbe23fcb8
--- /dev/null
+++ b/testing/mozharness/external_tools/packagesymbols.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import argparse
+import os
+import subprocess
+import sys
+import zipfile
+
+
+class ProcError(Exception):
+ def __init__(self, returncode, stderr):
+ self.returncode = returncode
+ self.stderr = stderr
+
+
+def check_output(command):
+ proc = subprocess.Popen(
+ command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
+ )
+ stdout, stderr = proc.communicate()
+ if proc.returncode != 0:
+ raise ProcError(proc.returncode, stderr)
+ return stdout
+
+
+def process_file(dump_syms, path):
+ try:
+ stdout = check_output([dump_syms, path])
+ except ProcError as e:
+ print('Error: running "%s %s": %s' % (dump_syms, path, e.stderr))
+ return None, None, None
+ bits = stdout.splitlines()[0].split(" ", 4)
+ if len(bits) != 5:
+ return None, None, None
+ _, platform, cpu_arch, debug_id, debug_file = bits
+ if debug_file.lower().endswith(".pdb"):
+ sym_file = debug_file[:-4] + ".sym"
+ else:
+ sym_file = debug_file + ".sym"
+ filename = os.path.join(debug_file, debug_id, sym_file)
+ debug_filename = os.path.join(debug_file, debug_id, debug_file)
+ return filename, stdout, debug_filename
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("dump_syms", help="Path to dump_syms binary")
+ parser.add_argument("files", nargs="+", help="Path to files to dump symbols from")
+ parser.add_argument(
+ "--symbol-zip",
+ default="symbols.zip",
+ help="Name of zip file to put dumped symbols in",
+ )
+ parser.add_argument(
+ "--no-binaries",
+ action="store_true",
+ default=False,
+ help="Don't store binaries in zip file",
+ )
+ args = parser.parse_args()
+ count = 0
+ with zipfile.ZipFile(args.symbol_zip, "w", zipfile.ZIP_DEFLATED) as zf:
+ for f in args.files:
+ filename, contents, debug_filename = process_file(args.dump_syms, f)
+ if not (filename and contents):
+ print("Error dumping symbols")
+ sys.exit(1)
+ zf.writestr(filename, contents)
+ count += 1
+ if not args.no_binaries:
+ zf.write(f, debug_filename)
+ count += 1
+ print("Added %d files to %s" % (count, args.symbol_zip))
+
+
+if __name__ == "__main__":
+ main()
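process_file() above relies on the first line of dump_syms output being a Breakpad MODULE record of the form "MODULE <os> <cpu> <debug id> <debug file>". A short sketch with a hypothetical record, showing the zip entry path the script derives from it:

# Hypothetical record; real debug ids come from dump_syms.
line = "MODULE windows x86_64 4C4C4445000000000000000000000000A xul.pdb"
_, platform, cpu_arch, debug_id, debug_file = line.split(" ", 4)
sym_file = debug_file[:-4] + ".sym" if debug_file.lower().endswith(".pdb") else debug_file + ".sym"
print("/".join([debug_file, debug_id, sym_file]))  # xul.pdb/4C4C.../xul.sym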
diff --git a/testing/mozharness/external_tools/performance-artifact-schema.json b/testing/mozharness/external_tools/performance-artifact-schema.json
new file mode 100644
index 0000000000..a5df29b26c
--- /dev/null
+++ b/testing/mozharness/external_tools/performance-artifact-schema.json
@@ -0,0 +1,233 @@
+{
+ "definitions": {
+ "application_schema": {
+ "properties": {
+ "name": {
+ "title": "Application under performance test",
+ "enum": [
+ "firefox",
+ "chrome",
+ "chrome-m",
+ "chromium",
+ "fennec",
+ "geckoview",
+ "refbrow",
+ "fenix",
+ "safari"
+ ],
+ "maxLength": 10,
+ "type": "string"
+ },
+ "version": {
+ "title": "Application's version",
+ "maxLength": 40,
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ],
+ "type": "object"
+ },
+ "framework_schema": {
+ "properties": {
+ "name": {
+ "title": "Framework name",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "subtest_schema": {
+ "properties": {
+ "name": {
+ "title": "Subtest name",
+ "type": "string"
+ },
+ "publicName": {
+ "title": "Public subtest name",
+ "description": "Allows renaming test's name, without breaking existing performance data series",
+ "maxLength": 30,
+ "type": "string"
+ },
+ "value": {
+ "description": "Summary value for subtest",
+ "title": "Subtest value",
+ "type": "number",
+ "minimum": -1000000000000.0,
+ "maximum": 1000000000000.0
+ },
+ "unit": {
+ "title": "Measurement unit",
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 20
+ },
+ "lowerIsBetter": {
+ "description": "Whether lower values are better for subtest",
+ "title": "Lower is better",
+ "type": "boolean"
+ },
+ "shouldAlert": {
+ "description": "Whether we should alert",
+ "title": "Should alert",
+ "type": "boolean"
+ },
+ "alertThreshold": {
+ "description": "% change threshold before alerting",
+ "title": "Alert threshold",
+ "type": "number",
+ "minimum": 0.0,
+ "maximum": 1000.0
+ },
+ "minBackWindow": {
+ "description": "Minimum back window to use for alerting",
+ "title": "Minimum back window",
+ "type": "number",
+ "minimum": 1,
+ "maximum": 255
+ },
+ "maxBackWindow": {
+ "description": "Maximum back window to use for alerting",
+ "title": "Maximum back window",
+ "type": "number",
+ "minimum": 1,
+ "maximum": 255
+ },
+ "foreWindow": {
+ "description": "Fore window to use for alerting",
+ "title": "Fore window",
+ "type": "number",
+ "minimum": 1,
+ "maximum": 255
+ }
+ },
+ "required": [
+ "name",
+ "value"
+ ],
+ "type": "object"
+ },
+ "suite_schema": {
+ "properties": {
+ "name": {
+ "title": "Suite name",
+ "type": "string"
+ },
+ "publicName": {
+ "title": "Public suite name",
+ "description": "Allows renaming suite's name, without breaking existing performance data series",
+ "maxLength": 30,
+ "type": "string"
+ },
+ "tags": {
+ "type": "array",
+ "title": "Free form tags, which ease the grouping & searching of performance tests",
+ "description": "Similar to extraOptions, except it does not break existing performance data series",
+ "items": {
+ "type": "string",
+ "pattern": "^[a-zA-Z0-9-]{1,24}$"
+ },
+ "uniqueItems": true,
+ "maxItems": 14
+ },
+ "extraOptions": {
+ "type": "array",
+ "title": "Extra options used in running suite",
+ "items": {
+ "type": "string",
+ "maxLength": 100
+ },
+ "uniqueItems": true,
+ "maxItems": 8
+ },
+ "subtests": {
+ "items": {
+ "$ref": "#/definitions/subtest_schema"
+ },
+ "title": "Subtests",
+ "type": "array"
+ },
+ "value": {
+ "title": "Suite value",
+ "type": "number",
+ "minimum": -1000000000000.0,
+ "maximum": 1000000000000.0
+ },
+ "unit": {
+ "title": "Measurement unit",
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 20
+ },
+ "lowerIsBetter": {
+ "description": "Whether lower values are better for suite",
+ "title": "Lower is better",
+ "type": "boolean"
+ },
+ "shouldAlert": {
+ "description": "Whether we should alert on this suite (overrides default behaviour)",
+ "title": "Should alert",
+ "type": "boolean"
+ },
+ "alertThreshold": {
+ "description": "% change threshold before alerting",
+ "title": "Alert threshold",
+ "type": "number",
+ "minimum": 0.0,
+ "maximum": 1000.0
+ },
+ "minBackWindow": {
+ "description": "Minimum back window to use for alerting",
+ "title": "Minimum back window",
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 255
+ },
+ "maxBackWindow": {
+ "description": "Maximum back window to use for alerting",
+ "title": "Maximum back window",
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 255
+ },
+ "foreWindow": {
+ "description": "Fore window to use for alerting",
+ "title": "Fore window",
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 255
+ }
+ },
+ "required": [
+ "name",
+ "subtests"
+ ],
+ "type": "object"
+ }
+ },
+ "description": "Structure for submitting performance data as part of a job",
+ "id": "https://treeherder.mozilla.org/schemas/v1/performance-artifact.json#",
+ "properties": {
+ "application": {
+ "$ref": "#/definitions/application_schema"
+ },
+ "framework": {
+ "$ref": "#/definitions/framework_schema"
+ },
+ "suites": {
+ "description": "List of suite-level data submitted as part of this structure",
+ "items": {
+ "$ref": "#/definitions/suite_schema"
+ },
+ "title": "Performance suites",
+ "type": "array"
+ }
+ },
+ "required": [
+ "framework",
+ "suites"
+ ],
+ "title": "Perfherder Schema",
+ "type": "object"
+}
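Only "framework" and "suites" are required at the top level of this schema, and each suite needs at least a "name" and "subtests". A minimal payload sketch with hypothetical numbers; robustcheckout.py (next file) emits data of this shape on its PERFHERDER_DATA line:

perfherder = {
    "framework": {"name": "vcs"},
    "suites": [
        {
            "name": "overall",
            "value": 12.3,  # hypothetical duration, in seconds
            "lowerIsBetter": True,
            "subtests": [],
        }
    ],
}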
diff --git a/testing/mozharness/external_tools/robustcheckout.py b/testing/mozharness/external_tools/robustcheckout.py
new file mode 100644
index 0000000000..661bcfcab1
--- /dev/null
+++ b/testing/mozharness/external_tools/robustcheckout.py
@@ -0,0 +1,832 @@
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""Robustly perform a checkout.
+
+This extension provides the ``hg robustcheckout`` command for
+ensuring a working directory is updated to the specified revision
+from a source repo using best practices to ensure optimal clone
+times and storage efficiency.
+"""
+
+import contextlib
+import json
+import os
+import random
+import re
+import socket
+import ssl
+import time
+
+from mercurial.i18n import _
+from mercurial.node import hex, nullid
+from mercurial import (
+ commands,
+ configitems,
+ error,
+ exchange,
+ extensions,
+ hg,
+ match as matchmod,
+ pycompat,
+ registrar,
+ scmutil,
+ urllibcompat,
+ util,
+ vfs,
+)
+
+# Causes worker to purge caches on process exit and for task to retry.
+EXIT_PURGE_CACHE = 72
+
+testedwith = b"4.5 4.6 4.7 4.8 4.9 5.0 5.1 5.2 5.3 5.4 5.5 5.6 5.7 5.8"
+minimumhgversion = b"4.5"
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(b"robustcheckout", b"retryjittermin", default=configitems.dynamicdefault)
+configitem(b"robustcheckout", b"retryjittermax", default=configitems.dynamicdefault)
+
+
+def getsparse():
+ from mercurial import sparse
+
+ return sparse
+
+
+def peerlookup(remote, v):
+ # TRACKING hg46 4.6 added commandexecutor API.
+ if util.safehasattr(remote, "commandexecutor"):
+ with remote.commandexecutor() as e:
+ return e.callcommand(b"lookup", {b"key": v}).result()
+ else:
+ return remote.lookup(v)
+
+
+@command(
+ b"robustcheckout",
+ [
+ (b"", b"upstream", b"", b"URL of upstream repo to clone from"),
+ (b"r", b"revision", b"", b"Revision to check out"),
+ (b"b", b"branch", b"", b"Branch to check out"),
+ (b"", b"purge", False, b"Whether to purge the working directory"),
+ (b"", b"sharebase", b"", b"Directory where shared repos should be placed"),
+ (
+ b"",
+ b"networkattempts",
+ 3,
+ b"Maximum number of attempts for network " b"operations",
+ ),
+ (b"", b"sparseprofile", b"", b"Sparse checkout profile to use (path in repo)"),
+ (
+ b"U",
+ b"noupdate",
+ False,
+ b"the clone will include an empty working directory\n"
+ b"(only a repository)",
+ ),
+ ],
+ b"[OPTION]... URL DEST",
+ norepo=True,
+)
+def robustcheckout(
+ ui,
+ url,
+ dest,
+ upstream=None,
+ revision=None,
+ branch=None,
+ purge=False,
+ sharebase=None,
+ networkattempts=None,
+ sparseprofile=None,
+ noupdate=False,
+):
+ """Ensure a working copy has the specified revision checked out.
+
+ Repository data is automatically pooled into the common directory
+ specified by ``--sharebase``, which is a required argument. It is required
+ because pooling storage prevents excessive cloning, which makes operations
+ complete faster.
+
+ One of ``--revision`` or ``--branch`` must be specified. ``--revision``
+ is preferred, as it is deterministic and there is no ambiguity as to which
+ revision will actually be checked out.
+
+ If ``--upstream`` is used, the repo at that URL is used to perform the
+ initial clone instead of cloning from the repo where the desired revision
+ is located.
+
+ ``--purge`` controls whether to remove untracked and ignored files from
+ the working directory. If used, the end state of the working directory
+ should only contain files explicitly under version control for the requested
+ revision.
+
+ ``--sparseprofile`` can be used to specify a sparse checkout profile to use.
+ The sparse checkout profile corresponds to a file in the revision to be
+ checked out. If a previous sparse profile or config is present, it will be
+ replaced by this sparse profile. We choose not to "widen" the sparse config
+ so operations are as deterministic as possible. If an existing checkout
+ is present and it isn't using a sparse checkout, we error. This is to
+ prevent accidentally enabling sparse on a repository that may have
+ clients that aren't sparse aware. Sparse checkout support requires Mercurial
+ 4.3 or newer and the ``sparse`` extension must be enabled.
+ """
+ if not revision and not branch:
+ raise error.Abort(b"must specify one of --revision or --branch")
+
+ if revision and branch:
+ raise error.Abort(b"cannot specify both --revision and --branch")
+
+ # Require revision to look like a SHA-1.
+ if revision:
+ if (
+ len(revision) < 12
+ or len(revision) > 40
+ or not re.match(b"^[a-f0-9]+$", revision)
+ ):
+ raise error.Abort(
+ b"--revision must be a SHA-1 fragment 12-40 " b"characters long"
+ )
+
+ sharebase = sharebase or ui.config(b"share", b"pool")
+ if not sharebase:
+ raise error.Abort(
+ b"share base directory not defined; refusing to operate",
+ hint=b"define share.pool config option or pass --sharebase",
+ )
+
+ # Sparse profile support was added in Mercurial 4.3, where it was highly
+ # experimental. Because of the fragility of it, we only support sparse
+ # profiles on 4.3. When 4.4 is released, we'll need to opt in to sparse
+ # support. We /could/ silently fall back to non-sparse when not supported.
+ # However, given that sparse has performance implications, we want to fail
+ # fast if we can't satisfy the desired checkout request.
+ if sparseprofile:
+ try:
+ extensions.find(b"sparse")
+ except KeyError:
+ raise error.Abort(
+ b"sparse extension must be enabled to use " b"--sparseprofile"
+ )
+
+ ui.warn(b"(using Mercurial %s)\n" % util.version())
+
+ # worker.backgroundclose only makes things faster if running anti-virus,
+ # which our automation doesn't. Disable it.
+ ui.setconfig(b"worker", b"backgroundclose", False)
+
+ # By default the progress bar starts after 3s and updates every 0.1s. We
+ # change this so it shows and updates every 1.0s.
+ # We also tell progress to assume a TTY is present so updates are printed
+ # even if there is no known TTY.
+ # We make the config change here instead of in a config file because
+ # otherwise we're at the whim of whatever configs are used in automation.
+ ui.setconfig(b"progress", b"delay", 1.0)
+ ui.setconfig(b"progress", b"refresh", 1.0)
+ ui.setconfig(b"progress", b"assume-tty", True)
+
+ sharebase = os.path.realpath(sharebase)
+
+ optimes = []
+ behaviors = set()
+ start = time.time()
+
+ try:
+ return _docheckout(
+ ui,
+ url,
+ dest,
+ upstream,
+ revision,
+ branch,
+ purge,
+ sharebase,
+ optimes,
+ behaviors,
+ networkattempts,
+ sparse_profile=sparseprofile,
+ noupdate=noupdate,
+ )
+ finally:
+ overall = time.time() - start
+
+ # We store the overall time multiple ways in order to help differentiate
+ # the various "flavors" of operations.
+
+ # ``overall`` is always the total operation time.
+ optimes.append(("overall", overall))
+
+ def record_op(name):
+ # If special behaviors due to "corrupt" storage occur, we vary the
+ # name to convey that.
+ if "remove-store" in behaviors:
+ name += "_rmstore"
+ if "remove-wdir" in behaviors:
+ name += "_rmwdir"
+
+ optimes.append((name, overall))
+
+ # We break out overall operations primarily by their network interaction
+ # We have variants within for working directory operations.
+ if "clone" in behaviors and "create-store" in behaviors:
+ record_op("overall_clone")
+
+ if "sparse-update" in behaviors:
+ record_op("overall_clone_sparsecheckout")
+ else:
+ record_op("overall_clone_fullcheckout")
+
+ elif "pull" in behaviors or "clone" in behaviors:
+ record_op("overall_pull")
+
+ if "sparse-update" in behaviors:
+ record_op("overall_pull_sparsecheckout")
+ else:
+ record_op("overall_pull_fullcheckout")
+
+ if "empty-wdir" in behaviors:
+ record_op("overall_pull_emptywdir")
+ else:
+ record_op("overall_pull_populatedwdir")
+
+ else:
+ record_op("overall_nopull")
+
+ if "sparse-update" in behaviors:
+ record_op("overall_nopull_sparsecheckout")
+ else:
+ record_op("overall_nopull_fullcheckout")
+
+ if "empty-wdir" in behaviors:
+ record_op("overall_nopull_emptywdir")
+ else:
+ record_op("overall_nopull_populatedwdir")
+
+ server_url = urllibcompat.urlreq.urlparse(url).netloc
+
+ if "TASKCLUSTER_INSTANCE_TYPE" in os.environ:
+ perfherder = {
+ "framework": {
+ "name": "vcs",
+ },
+ "suites": [],
+ }
+ for op, duration in optimes:
+ perfherder["suites"].append(
+ {
+ "name": op,
+ "value": duration,
+ "lowerIsBetter": True,
+ "shouldAlert": False,
+ "serverUrl": server_url.decode("utf-8"),
+ "hgVersion": util.version().decode("utf-8"),
+ "extraOptions": [os.environ["TASKCLUSTER_INSTANCE_TYPE"]],
+ "subtests": [],
+ }
+ )
+ ui.write(
+ b"PERFHERDER_DATA: %s\n"
+ % pycompat.bytestr(json.dumps(perfherder, sort_keys=True))
+ )
+
+
+def _docheckout(
+ ui,
+ url,
+ dest,
+ upstream,
+ revision,
+ branch,
+ purge,
+ sharebase,
+ optimes,
+ behaviors,
+ networkattemptlimit,
+ networkattempts=None,
+ sparse_profile=None,
+ noupdate=False,
+):
+ if not networkattempts:
+ networkattempts = [1]
+
+ def callself():
+ return _docheckout(
+ ui,
+ url,
+ dest,
+ upstream,
+ revision,
+ branch,
+ purge,
+ sharebase,
+ optimes,
+ behaviors,
+ networkattemptlimit,
+ networkattempts=networkattempts,
+ sparse_profile=sparse_profile,
+ noupdate=noupdate,
+ )
+
+ @contextlib.contextmanager
+ def timeit(op, behavior):
+ behaviors.add(behavior)
+ errored = False
+ try:
+ start = time.time()
+ yield
+ except Exception:
+ errored = True
+ raise
+ finally:
+ elapsed = time.time() - start
+
+ if errored:
+ op += "_errored"
+
+ optimes.append((op, elapsed))
+
+ ui.write(b"ensuring %s@%s is available at %s\n" % (url, revision or branch, dest))
+
+ # We assume that we're the only process on the machine touching the
+ # repository paths that we were told to use. This means our recovery
+ # scenario when things aren't "right" is to just nuke things and start
+ # from scratch. This is easier to implement than verifying the state
+ # of the data and attempting recovery. And in some scenarios (such as
+ # potential repo corruption), it is probably faster, since verifying
+ # repos can take a while.
+
+ destvfs = vfs.vfs(dest, audit=False, realpath=True)
+
+ def deletesharedstore(path=None):
+ storepath = path or destvfs.read(b".hg/sharedpath").strip()
+ if storepath.endswith(b".hg"):
+ storepath = os.path.dirname(storepath)
+
+ storevfs = vfs.vfs(storepath, audit=False)
+ storevfs.rmtree(forcibly=True)
+
+ if destvfs.exists() and not destvfs.exists(b".hg"):
+ raise error.Abort(b"destination exists but no .hg directory")
+
+ # Refuse to enable sparse checkouts on existing checkouts. The reasoning
+ # here is that another consumer of this repo may not be sparse aware. If we
+ # enabled sparse, we would lock them out.
+ if destvfs.exists() and sparse_profile and not destvfs.exists(b".hg/sparse"):
+ raise error.Abort(
+ b"cannot enable sparse profile on existing " b"non-sparse checkout",
+ hint=b"use a separate working directory to use sparse",
+ )
+
+ # And the other direction for symmetry.
+ if not sparse_profile and destvfs.exists(b".hg/sparse"):
+ raise error.Abort(
+ b"cannot use non-sparse checkout on existing sparse " b"checkout",
+ hint=b"use a separate working directory to use sparse",
+ )
+
+ # Require checkouts to be tied to shared storage because efficiency.
+ if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
+ ui.warn(b"(destination is not shared; deleting)\n")
+ with timeit("remove_unshared_dest", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ # Verify the shared path exists and is using modern pooled storage.
+ if destvfs.exists(b".hg/sharedpath"):
+ storepath = destvfs.read(b".hg/sharedpath").strip()
+
+ ui.write(b"(existing repository shared store: %s)\n" % storepath)
+
+ if not os.path.exists(storepath):
+ ui.warn(b"(shared store does not exist; deleting destination)\n")
+ with timeit("removed_missing_shared_store", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+ elif not re.search(b"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
+ ui.warn(
+ b"(shared store does not belong to pooled storage; "
+ b"deleting destination to improve efficiency)\n"
+ )
+ with timeit("remove_unpooled_store", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ if destvfs.isfileorlink(b".hg/wlock"):
+ ui.warn(
+ b"(dest has an active working directory lock; assuming it is "
+ b"left over from a previous process and that the destination "
+ b"is corrupt; deleting it just to be sure)\n"
+ )
+ with timeit("remove_locked_wdir", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ def handlerepoerror(e):
+ if pycompat.bytestr(e) == _(b"abandoned transaction found"):
+ ui.warn(b"(abandoned transaction found; trying to recover)\n")
+ repo = hg.repository(ui, dest)
+ if not repo.recover():
+ ui.warn(b"(could not recover repo state; " b"deleting shared store)\n")
+ with timeit("remove_unrecovered_shared_store", "remove-store"):
+ deletesharedstore()
+
+ ui.warn(b"(attempting checkout from beginning)\n")
+ return callself()
+
+ raise
+
+ # At this point we either have an existing working directory using
+ # shared, pooled storage or we have nothing.
+
+ def handlenetworkfailure():
+ if networkattempts[0] >= networkattemptlimit:
+ raise error.Abort(
+ b"reached maximum number of network attempts; " b"giving up\n"
+ )
+
+ ui.warn(
+ b"(retrying after network failure on attempt %d of %d)\n"
+ % (networkattempts[0], networkattemptlimit)
+ )
+
+ # Do a backoff on retries to mitigate the thundering herd
+ # problem. This is an exponential backoff with a multiplier
+ # plus random jitter thrown in for good measure.
+ # With the default settings, backoffs will be:
+ # 1) 2.5 - 6.5
+ # 2) 5.5 - 9.5
+ # 3) 11.5 - 15.5
+ backoff = (2 ** networkattempts[0] - 1) * 1.5
+ jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
+ jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
+ backoff += float(random.randint(jittermin, jittermax)) / 1000.0
+ ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
+ time.sleep(backoff)
+
+ networkattempts[0] += 1
+
+ def handlepullerror(e):
+ """Handle an exception raised during a pull.
+
+ Returns True if caller should call ``callself()`` to retry.
+ """
+ if isinstance(e, error.Abort):
+ if e.args[0] == _(b"repository is unrelated"):
+ ui.warn(b"(repository is unrelated; deleting)\n")
+ destvfs.rmtree(forcibly=True)
+ return True
+ elif e.args[0].startswith(_(b"stream ended unexpectedly")):
+ ui.warn(b"%s\n" % e.args[0])
+ # Will raise if failure limit reached.
+ handlenetworkfailure()
+ return True
+ # TODO test this branch
+ elif isinstance(e, error.ResponseError):
+ if e.args[0].startswith(_(b"unexpected response from remote server:")):
+ ui.warn(b"(unexpected response from remote server; retrying)\n")
+ destvfs.rmtree(forcibly=True)
+ # Will raise if failure limit reached.
+ handlenetworkfailure()
+ return True
+ elif isinstance(e, ssl.SSLError):
+ # Assume all SSL errors are due to the network, as Mercurial
+ # should convert non-transport errors like cert validation failures
+ # to error.Abort.
+ ui.warn(b"ssl error: %s\n" % pycompat.bytestr(str(e)))
+ handlenetworkfailure()
+ return True
+ elif isinstance(e, urllibcompat.urlerr.urlerror):
+ if isinstance(e.reason, socket.error):
+ ui.warn(b"socket error: %s\n" % pycompat.bytestr(str(e.reason)))
+ handlenetworkfailure()
+ return True
+ else:
+ ui.warn(
+ b"unhandled URLError; reason type: %s; value: %s\n"
+ % (
+ pycompat.bytestr(e.reason.__class__.__name__),
+ pycompat.bytestr(str(e.reason)),
+ )
+ )
+ else:
+ ui.warn(
+ b"unhandled exception during network operation; type: %s; "
+ b"value: %s\n"
+ % (pycompat.bytestr(e.__class__.__name__), pycompat.bytestr(str(e)))
+ )
+
+ return False
+
+ # Perform sanity checking of store. We may or may not know the path to the
+ # local store. It depends if we have an existing destvfs pointing to a
+ # share. To ensure we always find a local store, perform the same logic
+ # that Mercurial's pooled storage does to resolve the local store path.
+ cloneurl = upstream or url
+
+ try:
+ clonepeer = hg.peer(ui, {}, cloneurl)
+ rootnode = peerlookup(clonepeer, b"0")
+ except error.RepoLookupError:
+ raise error.Abort(b"unable to resolve root revision from clone " b"source")
+ except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
+ if handlepullerror(e):
+ return callself()
+ raise
+
+ if rootnode == nullid:
+ raise error.Abort(b"source repo appears to be empty")
+
+ storepath = os.path.join(sharebase, hex(rootnode))
+ storevfs = vfs.vfs(storepath, audit=False)
+
+ if storevfs.isfileorlink(b".hg/store/lock"):
+ ui.warn(
+ b"(shared store has an active lock; assuming it is left "
+ b"over from a previous process and that the store is "
+ b"corrupt; deleting store and destination just to be "
+ b"sure)\n"
+ )
+ if destvfs.exists():
+ with timeit("remove_dest_active_lock", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit("remove_shared_store_active_lock", "remove-store"):
+ storevfs.rmtree(forcibly=True)
+
+ if storevfs.exists() and not storevfs.exists(b".hg/requires"):
+ ui.warn(
+ b"(shared store missing requires file; this is a really "
+ b"odd failure; deleting store and destination)\n"
+ )
+ if destvfs.exists():
+ with timeit("remove_dest_no_requires", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit("remove_shared_store_no_requires", "remove-store"):
+ storevfs.rmtree(forcibly=True)
+
+ if storevfs.exists(b".hg/requires"):
+ requires = set(storevfs.read(b".hg/requires").splitlines())
+ # FUTURE when we require generaldelta, this is where we can check
+ # for that.
+ required = {b"dotencode", b"fncache"}
+
+ missing = required - requires
+ if missing:
+ ui.warn(
+ b"(shared store missing requirements: %s; deleting "
+ b"store and destination to ensure optimal behavior)\n"
+ % b", ".join(sorted(missing))
+ )
+ if destvfs.exists():
+ with timeit("remove_dest_missing_requires", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit("remove_shared_store_missing_requires", "remove-store"):
+ storevfs.rmtree(forcibly=True)
+
+ created = False
+
+ if not destvfs.exists():
+ # Ensure parent directories of destination exist.
+ # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
+ if util.safehasattr(util, "ensuredirs"):
+ makedirs = util.ensuredirs
+ else:
+ makedirs = util.makedirs
+
+ makedirs(os.path.dirname(destvfs.base), notindexed=True)
+ makedirs(sharebase, notindexed=True)
+
+ if upstream:
+ ui.write(b"(cloning from upstream repo %s)\n" % upstream)
+
+ if not storevfs.exists():
+ behaviors.add(b"create-store")
+
+ try:
+ with timeit("clone", "clone"):
+ shareopts = {b"pool": sharebase, b"mode": b"identity"}
+ res = hg.clone(
+ ui,
+ {},
+ clonepeer,
+ dest=dest,
+ update=False,
+ shareopts=shareopts,
+ stream=True,
+ )
+ except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
+ if handlepullerror(e):
+ return callself()
+ raise
+ except error.RepoError as e:
+ return handlerepoerror(e)
+ except error.RevlogError as e:
+ ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
+ with timeit("remove_shared_store_revlogerror", "remote-store"):
+ deletesharedstore()
+ return callself()
+
+ # TODO retry here.
+ if res is None:
+ raise error.Abort(b"clone failed")
+
+ # Verify it is using shared pool storage.
+ if not destvfs.exists(b".hg/sharedpath"):
+ raise error.Abort(b"clone did not create a shared repo")
+
+ created = True
+
+ # The destination .hg directory should exist. Now make sure we have the
+ # wanted revision.
+
+ repo = hg.repository(ui, dest)
+
+ # We only pull if we are using symbolic names or the requested revision
+ # doesn't exist.
+ havewantedrev = False
+
+ if revision:
+ try:
+ ctx = scmutil.revsingle(repo, revision)
+ except error.RepoLookupError:
+ ctx = None
+
+ if ctx:
+ if not ctx.hex().startswith(revision):
+ raise error.Abort(
+ b"--revision argument is ambiguous",
+ hint=b"must be the first 12+ characters of a " b"SHA-1 fragment",
+ )
+
+ checkoutrevision = ctx.hex()
+ havewantedrev = True
+
+ if not havewantedrev:
+ ui.write(b"(pulling to obtain %s)\n" % (revision or branch,))
+
+ remote = None
+ try:
+ remote = hg.peer(repo, {}, url)
+ pullrevs = [peerlookup(remote, revision or branch)]
+ checkoutrevision = hex(pullrevs[0])
+ if branch:
+ ui.warn(
+ b"(remote resolved %s to %s; "
+ b"result is not deterministic)\n" % (branch, checkoutrevision)
+ )
+
+ if checkoutrevision in repo:
+ ui.warn(b"(revision already present locally; not pulling)\n")
+ else:
+ with timeit("pull", "pull"):
+ pullop = exchange.pull(repo, remote, heads=pullrevs)
+ if not pullop.rheads:
+ raise error.Abort(b"unable to pull requested revision")
+ except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
+ if handlepullerror(e):
+ return callself()
+ raise
+ except error.RepoError as e:
+ return handlerepoerror(e)
+ except error.RevlogError as e:
+ ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
+ deletesharedstore()
+ return callself()
+ finally:
+ if remote:
+ remote.close()
+
+ # Now we should have the wanted revision in the store. Perform
+ # working directory manipulation.
+
+ # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
+ if noupdate:
+ ui.write(b"(skipping update since `-U` was passed)\n")
+ return None
+
+ # Purge if requested. We purge before update because this way we're
+ # guaranteed to not have conflicts on `hg update`.
+ if purge and not created:
+ ui.write(b"(purging working directory)\n")
+ purge = getattr(commands, "purge", None)
+ if not purge:
+ purge = extensions.find(b"purge").purge
+
+ # Mercurial 4.3 doesn't purge files outside the sparse checkout.
+ # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
+ # purging by monkeypatching the sparse matcher.
+ try:
+ old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
+ if old_sparse_fn is not None:
+ # TRACKING hg50
+ # Arguments passed to `matchmod.always` were unused and have been removed
+ if util.versiontuple(n=2) >= (5, 0):
+ repo.dirstate._sparsematchfn = lambda: matchmod.always()
+ else:
+ repo.dirstate._sparsematchfn = lambda: matchmod.always(
+ repo.root, ""
+ )
+
+ with timeit("purge", "purge"):
+ if purge(
+ ui,
+ repo,
+ all=True,
+ abort_on_err=True,
+ # The function expects all arguments to be
+ # defined.
+ **{"print": None, "print0": None, "dirs": None, "files": None}
+ ):
+ raise error.Abort(b"error purging")
+ finally:
+ if old_sparse_fn is not None:
+ repo.dirstate._sparsematchfn = old_sparse_fn
+
+ # Update the working directory.
+
+ if repo[b"."].node() == nullid:
+ behaviors.add("empty-wdir")
+ else:
+ behaviors.add("populated-wdir")
+
+ if sparse_profile:
+ sparsemod = getsparse()
+
+ # By default, Mercurial will ignore unknown sparse profiles. This could
+ # lead to a full checkout. Be more strict.
+ try:
+ repo.filectx(sparse_profile, changeid=checkoutrevision).data()
+ except error.ManifestLookupError:
+ raise error.Abort(
+ b"sparse profile %s does not exist at revision "
+ b"%s" % (sparse_profile, checkoutrevision)
+ )
+
+ # TRACKING hg48 - parseconfig takes `action` param
+ if util.versiontuple(n=2) >= (4, 8):
+ old_config = sparsemod.parseconfig(
+ repo.ui, repo.vfs.tryread(b"sparse"), b"sparse"
+ )
+ else:
+ old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread(b"sparse"))
+
+ old_includes, old_excludes, old_profiles = old_config
+
+ if old_profiles == {sparse_profile} and not old_includes and not old_excludes:
+ ui.write(
+ b"(sparse profile %s already set; no need to update "
+ b"sparse config)\n" % sparse_profile
+ )
+ else:
+ if old_includes or old_excludes or old_profiles:
+ ui.write(
+ b"(replacing existing sparse config with profile "
+ b"%s)\n" % sparse_profile
+ )
+ else:
+ ui.write(b"(setting sparse config to profile %s)\n" % sparse_profile)
+
+ # If doing an incremental update, this will perform two updates:
+ # one to change the sparse profile and another to update to the new
+ # revision. This is not desired. But there's not a good API in
+ # Mercurial to do this as one operation.
+ with repo.wlock(), timeit("sparse_update_config", "sparse-update-config"):
+ # pylint --py3k: W1636
+ fcounts = list(
+ map(
+ len,
+ sparsemod._updateconfigandrefreshwdir(
+ repo, [], [], [sparse_profile], force=True
+ ),
+ )
+ )
+
+ repo.ui.status(
+ b"%d files added, %d files dropped, "
+ b"%d files conflicting\n" % tuple(fcounts)
+ )
+
+ ui.write(b"(sparse refresh complete)\n")
+
+ op = "update_sparse" if sparse_profile else "update"
+ behavior = "update-sparse" if sparse_profile else "update"
+
+ with timeit(op, behavior):
+ if commands.update(ui, repo, rev=checkoutrevision, clean=True):
+ raise error.Abort(b"error updating")
+
+ ui.write(b"updated to %s\n" % checkoutrevision)
+
+ return None
+
+
+def extsetup(ui):
+ # Ensure required extensions are loaded.
+ for ext in (b"purge", b"share"):
+ try:
+ extensions.find(ext)
+ except KeyError:
+ extensions.load(ui, ext, None)
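The retry comment inside handlenetworkfailure() quotes the backoff windows produced by the default retryjittermin/retryjittermax values of 1000 and 5000 milliseconds; a quick sketch reproducing that arithmetic:

for attempt in (1, 2, 3):
    base = (2 ** attempt - 1) * 1.5
    low, high = base + 1000 / 1000.0, base + 5000 / 1000.0
    print("attempt %d: %.1fs - %.1fs" % (attempt, low, high))
# -> 2.5-6.5, 5.5-9.5 and 11.5-15.5 seconds, matching the comment above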
diff --git a/testing/mozharness/external_tools/tooltool.py b/testing/mozharness/external_tools/tooltool.py
new file mode 100755
index 0000000000..7ae48dce3c
--- /dev/null
+++ b/testing/mozharness/external_tools/tooltool.py
@@ -0,0 +1,1679 @@
+#!/usr/bin/env python
+
+# tooltool is a lookaside cache implemented in Python
+# Copyright (C) 2011 John H. Ford <john@johnford.info>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation version 2
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# A manifest file specifies files in that directory that are stored
+# elsewhere. This file should only list files in the same directory
+# in which the manifest file resides and it should be called
+# 'manifest.tt'
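+#
+# For illustration only (hypothetical digest and size), a manifest.tt is a
+# JSON list of file records along these lines:
+#
+#   [
+#     {
+#       "filename": "clang.tar.xz",
+#       "size": 123456,
+#       "algorithm": "sha512",
+#       "digest": "0a1b2c...",
+#       "unpack": true,
+#       "visibility": "public"
+#     }
+#   ]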
+
+import base64
+import calendar
+import hashlib
+import hmac
+import json
+import logging
+import math
+import optparse
+import os
+import pprint
+import re
+import shutil
+import sys
+import tarfile
+import tempfile
+import threading
+import time
+import zipfile
+from contextlib import contextmanager, closing
+from functools import wraps
+
+from io import open
+from io import BytesIO
+import random  # the retrier jitter path calls random.uniform()
+from subprocess import PIPE
+from subprocess import Popen
+
+__version__ = "1"
+
+# Allowed request header characters:
+# !#$%&'()*+,-./:;<=>?@[]^_`{|}~ and space, a-z, A-Z, 0-9, \, "
+REQUEST_HEADER_ATTRIBUTE_CHARS = re.compile(
+ r"^[ a-zA-Z0-9_\!#\$%&'\(\)\*\+,\-\./\:;<\=>\?@\[\]\^`\{\|\}~]*$"
+)
+DEFAULT_MANIFEST_NAME = "manifest.tt"
+TOOLTOOL_PACKAGE_SUFFIX = ".TOOLTOOL-PACKAGE"
+HAWK_VER = 1
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ six_binary_type = bytes
+ unicode = (
+ str # Silence `pyflakes` from reporting `undefined name 'unicode'` in Python 3.
+ )
+ import urllib.request as urllib2
+ from http.client import HTTPSConnection, HTTPConnection
+ from urllib.parse import urlparse, urljoin
+ from urllib.request import Request
+ from urllib.error import HTTPError, URLError
+else:
+ six_binary_type = str
+ import urllib2
+ from httplib import HTTPSConnection, HTTPConnection
+ from urllib2 import Request, HTTPError, URLError
+ from urlparse import urlparse, urljoin
+
+
+log = logging.getLogger(__name__)
+
+
+# Vendored code from `redo` module
+def retrier(attempts=5, sleeptime=10, max_sleeptime=300, sleepscale=1.5, jitter=1):
+ """
+ This function originates from redo 2.0.3 https://github.com/mozilla-releng/redo
+ A generator function that sleeps between retries, handles exponential
+ backoff and jitter. The action you are retrying is meant to run after
+ retrier yields.
+ """
+ jitter = jitter or 0 # py35 barfs on the next line if jitter is None
+ if jitter > sleeptime:
+ # To prevent negative sleep times
+ raise Exception(
+ "jitter ({}) must be less than sleep time ({})".format(jitter, sleeptime)
+ )
+
+ sleeptime_real = sleeptime
+ for _ in range(attempts):
+ log.debug("attempt %i/%i", _ + 1, attempts)
+
+ yield sleeptime_real
+
+ if jitter:
+ sleeptime_real = sleeptime + random.uniform(-jitter, jitter)
+ # our jitter should scale along with the sleeptime
+ jitter = jitter * sleepscale
+ else:
+ sleeptime_real = sleeptime
+
+ sleeptime *= sleepscale
+
+ if sleeptime_real > max_sleeptime:
+ sleeptime_real = max_sleeptime
+
+ # Don't need to sleep the last time
+ if _ < attempts - 1:
+ log.debug(
+ "sleeping for %.2fs (attempt %i/%i)", sleeptime_real, _ + 1, attempts
+ )
+ time.sleep(sleeptime_real)
+
+
+def retry(
+ action,
+ attempts=5,
+ sleeptime=60,
+ max_sleeptime=5 * 60,
+ sleepscale=1.5,
+ jitter=1,
+ retry_exceptions=(Exception,),
+ cleanup=None,
+ args=(),
+ kwargs={},
+ log_args=True,
+):
+ """
+ This function originates from redo 2.0.3 https://github.com/mozilla-releng/redo
+ Calls an action function until it succeeds, or we give up.
+ """
+ assert callable(action)
+ assert not cleanup or callable(cleanup)
+
+ action_name = getattr(action, "__name__", action)
+ if log_args and (args or kwargs):
+ log_attempt_args = (
+ "retry: calling %s with args: %s," " kwargs: %s, attempt #%d",
+ action_name,
+ args,
+ kwargs,
+ )
+ else:
+ log_attempt_args = ("retry: calling %s, attempt #%d", action_name)
+
+ if max_sleeptime < sleeptime:
+ log.debug("max_sleeptime %d less than sleeptime %d", max_sleeptime, sleeptime)
+
+ n = 1
+ for _ in retrier(
+ attempts=attempts,
+ sleeptime=sleeptime,
+ max_sleeptime=max_sleeptime,
+ sleepscale=sleepscale,
+ jitter=jitter,
+ ):
+ try:
+ logfn = log.info if n != 1 else log.debug
+ logfn_args = log_attempt_args + (n,)
+ logfn(*logfn_args)
+ return action(*args, **kwargs)
+ except retry_exceptions:
+ log.debug("retry: Caught exception: ", exc_info=True)
+ if cleanup:
+ cleanup()
+ if n == attempts:
+ log.info("retry: Giving up on %s", action_name)
+ raise
+ continue
+ finally:
+ n += 1
+
+
+def retriable(*retry_args, **retry_kwargs):
+ """
+ This function originates from redo 2.0.3 https://github.com/mozilla-releng/redo
+ A decorator factory for retry(). Wrap your function in @retriable(...) to
+ give it retry powers!
+ """
+
+ def _retriable_factory(func):
+ @wraps(func)
+ def _retriable_wrapper(*args, **kwargs):
+ return retry(func, args=args, kwargs=kwargs, *retry_args, **retry_kwargs)
+
+ return _retriable_wrapper
+
+ return _retriable_factory
+
+
+# end of vendored code from redo module
+
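+
+# Illustrative sketch of how the vendored redo helpers above are meant to be
+# used. `flaky_fetch` stands for a hypothetical callable; tooltool itself never
+# calls this function.
+def _example_retry_usage(flaky_fetch):
+    # Call retry() directly, giving up after three attempts.
+    result = retry(flaky_fetch, attempts=3, sleeptime=2, jitter=0)
+
+    # Or wrap a function with the decorator factory to give it retry powers.
+    @retriable(attempts=3, sleeptime=2, jitter=0)
+    def fetch_with_retries():
+        return flaky_fetch()
+
+    return result, fetch_with_retries()
+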
+
+def request_has_data(req):
+ if PY3:
+ return req.data is not None
+ return req.has_data()
+
+
+def get_hexdigest(val):
+ return hashlib.sha512(val).hexdigest()
+
+
+class FileRecordJSONEncoderException(Exception):
+ pass
+
+
+class InvalidManifest(Exception):
+ pass
+
+
+class ExceptionWithFilename(Exception):
+ def __init__(self, filename):
+ Exception.__init__(self)
+ self.filename = filename
+
+
+class BadFilenameException(ExceptionWithFilename):
+ pass
+
+
+class DigestMismatchException(ExceptionWithFilename):
+ pass
+
+
+class MissingFileException(ExceptionWithFilename):
+ pass
+
+
+class InvalidCredentials(Exception):
+ pass
+
+
+class BadHeaderValue(Exception):
+ pass
+
+
+def parse_url(url):
+ url_parts = urlparse(url)
+ url_dict = {
+ "scheme": url_parts.scheme,
+ "hostname": url_parts.hostname,
+ "port": url_parts.port,
+ "path": url_parts.path,
+ "resource": url_parts.path,
+ "query": url_parts.query,
+ }
+ if len(url_dict["query"]) > 0:
+ url_dict["resource"] = "%s?%s" % (
+ url_dict["resource"], # pragma: no cover
+ url_dict["query"],
+ )
+
+ if url_parts.port is None:
+ if url_parts.scheme == "http":
+ url_dict["port"] = 80
+ elif url_parts.scheme == "https": # pragma: no cover
+ url_dict["port"] = 443
+ return url_dict
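+
+# For illustration (hypothetical URL): parse_url("https://example.com/up?x=1")
+# returns {"scheme": "https", "hostname": "example.com", "port": 443,
+# "path": "/up", "resource": "/up?x=1", "query": "x=1"}; the default port is
+# filled in from the scheme when the URL does not specify one.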
+
+
+def utc_now(offset_in_seconds=0.0):
+ return int(math.floor(calendar.timegm(time.gmtime()) + float(offset_in_seconds)))
+
+
+def random_string(length):
+ return base64.urlsafe_b64encode(os.urandom(length))[:length]
+
+
+def prepare_header_val(val):
+ if isinstance(val, six_binary_type):
+ val = val.decode("utf-8")
+
+ if not REQUEST_HEADER_ATTRIBUTE_CHARS.match(val):
+ raise BadHeaderValue( # pragma: no cover
+ "header value value={val} contained an illegal character".format(
+ val=repr(val)
+ )
+ )
+
+ return val
+
+
+def parse_content_type(content_type): # pragma: no cover
+ if content_type:
+ return content_type.split(";")[0].strip().lower()
+ else:
+ return ""
+
+
+def calculate_payload_hash(algorithm, payload, content_type): # pragma: no cover
+ parts = [
+ part if isinstance(part, six_binary_type) else part.encode("utf8")
+ for part in [
+ "hawk." + str(HAWK_VER) + ".payload\n",
+ parse_content_type(content_type) + "\n",
+ payload or "",
+ "\n",
+ ]
+ ]
+
+ p_hash = hashlib.new(algorithm)
+ for p in parts:
+ p_hash.update(p)
+
+ log.debug(
+ "calculating payload hash from:\n{parts}".format(parts=pprint.pformat(parts))
+ )
+
+ return base64.b64encode(p_hash.digest())
+
+
+def validate_taskcluster_credentials(credentials):
+ if not hasattr(credentials, "__getitem__"):
+ raise InvalidCredentials(
+ "credentials must be a dict-like object"
+ ) # pragma: no cover
+ try:
+ credentials["clientId"]
+ credentials["accessToken"]
+ except KeyError: # pragma: no cover
+ etype, val, tb = sys.exc_info()
+ raise InvalidCredentials("{etype}: {val}".format(etype=etype, val=val))
+
+
+def normalize_header_attr(val):
+ if isinstance(val, six_binary_type):
+ return val.decode("utf-8")
+ return val # pragma: no cover
+
+
+def normalize_string(
+ mac_type,
+ timestamp,
+ nonce,
+ method,
+ name,
+ host,
+ port,
+ content_hash,
+):
+ return "\n".join(
+ [
+ normalize_header_attr(header)
+ # The blank lines are important. They follow what the Node Hawk lib does.
+ for header in [
+ "hawk." + str(HAWK_VER) + "." + mac_type,
+ timestamp,
+ nonce,
+ method or "",
+ name or "",
+ host,
+ port,
+ content_hash or "",
+ "", # for ext which is empty in this case
+ "", # Add trailing new line.
+ ]
+ ]
+ )
+
+
+def calculate_mac(
+ mac_type,
+ access_token,
+ algorithm,
+ timestamp,
+ nonce,
+ method,
+ name,
+ host,
+ port,
+ content_hash,
+):
+ normalized = normalize_string(
+ mac_type, timestamp, nonce, method, name, host, port, content_hash
+ )
+ log.debug(u"normalized resource for mac calc: {norm}".format(norm=normalized))
+ digestmod = getattr(hashlib, algorithm)
+
+ if not isinstance(normalized, six_binary_type):
+ normalized = normalized.encode("utf8")
+
+ if not isinstance(access_token, six_binary_type):
+ access_token = access_token.encode("ascii")
+
+ result = hmac.new(access_token, normalized, digestmod)
+ return base64.b64encode(result.digest())
+
+
+def make_taskcluster_header(credentials, req):
+ validate_taskcluster_credentials(credentials)
+
+ url = req.get_full_url()
+ method = req.get_method()
+ algorithm = "sha256"
+ timestamp = str(utc_now())
+ nonce = random_string(6)
+ url_parts = parse_url(url)
+
+ content_hash = None
+ if request_has_data(req):
+ if PY3:
+ data = req.data
+ else:
+ data = req.get_data()
+ content_hash = calculate_payload_hash( # pragma: no cover
+ algorithm,
+ data,
+ # maybe we should detect this from req.headers but we anyway expect json
+ content_type="application/json",
+ )
+
+ mac = calculate_mac(
+ "header",
+ credentials["accessToken"],
+ algorithm,
+ timestamp,
+ nonce,
+ method,
+ url_parts["resource"],
+ url_parts["hostname"],
+ str(url_parts["port"]),
+ content_hash,
+ )
+
+ header = u'Hawk mac="{}"'.format(prepare_header_val(mac))
+
+ if content_hash: # pragma: no cover
+ header = u'{}, hash="{}"'.format(header, prepare_header_val(content_hash))
+
+ header = u'{header}, id="{id}", ts="{ts}", nonce="{nonce}"'.format(
+ header=header,
+ id=prepare_header_val(credentials["clientId"]),
+ ts=prepare_header_val(timestamp),
+ nonce=prepare_header_val(nonce),
+ )
+
+ log.debug("Hawk header for URL={} method={}: {}".format(url, method, header))
+
+ return header
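+
+# Illustrative only (hypothetical values): the resulting request header has the
+# shape
+#
+#     Hawk mac="J6Zt...=", id="my-client-id", ts="1700000000", nonce="aB3xQz"
+#
+# with a hash="..." attribute inserted after mac when the request carries a
+# payload.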
+
+
+class FileRecord(object):
+ def __init__(
+ self,
+ filename,
+ size,
+ digest,
+ algorithm,
+ unpack=False,
+ version=None,
+ visibility=None,
+ ):
+ object.__init__(self)
+ if "/" in filename or "\\" in filename:
+ log.error(
+ "The filename provided contains path information and is, therefore, invalid."
+ )
+ raise BadFilenameException(filename=filename)
+ self.filename = filename
+ self.size = size
+ self.digest = digest
+ self.algorithm = algorithm
+ self.unpack = unpack
+ self.version = version
+ self.visibility = visibility
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if (
+ self.filename == other.filename
+ and self.size == other.size
+ and self.digest == other.digest
+ and self.algorithm == other.algorithm
+ and self.version == other.version
+ and self.visibility == other.visibility
+ ):
+ return True
+ else:
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ # pylint --py3k: W1641
+ return hash(
+ (
+ self.filename,
+ self.size,
+ self.digest,
+ self.algorithm,
+ self.version,
+ self.visibility,
+ )
+ )
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ return (
+ "%s.%s(filename='%s', size=%s, digest='%s', algorithm='%s', visibility=%r)"
+ % (
+ __name__,
+ self.__class__.__name__,
+ self.filename,
+ self.size,
+ self.digest,
+ self.algorithm,
+ self.visibility,
+ )
+ )
+
+ def present(self):
+ # Doesn't check validity
+ return os.path.exists(self.filename)
+
+ def validate_size(self):
+ if self.present():
+ return self.size == os.path.getsize(self.filename)
+ else:
+ log.debug("trying to validate size on a missing file, %s", self.filename)
+ raise MissingFileException(filename=self.filename)
+
+ def validate_digest(self):
+ if self.present():
+ with open(self.filename, "rb") as f:
+ return self.digest == digest_file(f, self.algorithm)
+ else:
+ log.debug("trying to validate digest on a missing file, %s', self.filename")
+ raise MissingFileException(filename=self.filename)
+
+ def validate(self):
+ if self.size is None or self.validate_size():
+ if self.validate_digest():
+ return True
+ return False
+
+ def describe(self):
+ if self.present() and self.validate():
+ return "'%s' is present and valid" % self.filename
+ elif self.present():
+ return "'%s' is present and invalid" % self.filename
+ else:
+ return "'%s' is absent" % self.filename
+
+
+def create_file_record(filename, algorithm):
+ fo = open(filename, "rb")
+ stored_filename = os.path.split(filename)[1]
+ fr = FileRecord(
+ stored_filename,
+ os.path.getsize(filename),
+ digest_file(fo, algorithm),
+ algorithm,
+ )
+ fo.close()
+ return fr
+
+
+class FileRecordJSONEncoder(json.JSONEncoder):
+ def encode_file_record(self, obj):
+ if not issubclass(type(obj), FileRecord):
+ err = (
+ "FileRecordJSONEncoder is only for FileRecord and lists of FileRecords, "
+ "not %s" % obj.__class__.__name__
+ )
+ log.warn(err)
+ raise FileRecordJSONEncoderException(err)
+ else:
+ rv = {
+ "filename": obj.filename,
+ "size": obj.size,
+ "algorithm": obj.algorithm,
+ "digest": obj.digest,
+ }
+ if obj.unpack:
+ rv["unpack"] = True
+ if obj.version:
+ rv["version"] = obj.version
+ if obj.visibility is not None:
+ rv["visibility"] = obj.visibility
+ return rv
+
+ def default(self, f):
+ if issubclass(type(f), list):
+ record_list = []
+ for i in f:
+ record_list.append(self.encode_file_record(i))
+ return record_list
+ else:
+ return self.encode_file_record(f)
+
+
+class FileRecordJSONDecoder(json.JSONDecoder):
+
+ """I help the json module materialize a FileRecord from
+ a JSON file. I understand FileRecords and lists of
+ FileRecords. I ignore things that I don't expect for now"""
+
+ # TODO: make this more explicit in what it's looking for
+ # and error out on unexpected things
+
+ def process_file_records(self, obj):
+ if isinstance(obj, list):
+ record_list = []
+ for i in obj:
+ record = self.process_file_records(i)
+ if issubclass(type(record), FileRecord):
+ record_list.append(record)
+ return record_list
+ required_fields = [
+ "filename",
+ "size",
+ "algorithm",
+ "digest",
+ ]
+ if isinstance(obj, dict):
+ missing = False
+ for req in required_fields:
+ if req not in obj:
+ missing = True
+ break
+
+ if not missing:
+ unpack = obj.get("unpack", False)
+ version = obj.get("version", None)
+ visibility = obj.get("visibility", None)
+ rv = FileRecord(
+ obj["filename"],
+ obj["size"],
+ obj["digest"],
+ obj["algorithm"],
+ unpack,
+ version,
+ visibility,
+ )
+ log.debug("materialized %s" % rv)
+ return rv
+ return obj
+
+ def decode(self, s):
+ decoded = json.JSONDecoder.decode(self, s)
+ rv = self.process_file_records(decoded)
+ return rv
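+
+
+# Illustrative sketch (hypothetical manifest text); tooltool itself never calls
+# this helper.
+def _example_decode_manifest(manifest_text):
+    # The decoder materializes FileRecord objects straight from manifest JSON.
+    records = json.loads(manifest_text, cls=FileRecordJSONDecoder)
+    return [record.filename for record in records]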
+
+
+class Manifest(object):
+
+ valid_formats = ("json",)
+
+ def __init__(self, file_records=None):
+ self.file_records = file_records or []
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if len(self.file_records) != len(other.file_records):
+ log.debug("Manifests differ in number of files")
+ return False
+ # sort the file records by filename before comparing
+ mine = sorted((fr.filename, fr) for fr in self.file_records)
+ theirs = sorted((fr.filename, fr) for fr in other.file_records)
+ return mine == theirs
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ # pylint --py3k: W1641
+ return hash(tuple(sorted((fr.filename, fr) for fr in self.file_records)))
+
+ def __deepcopy__(self, memo):
+ # This is required for a deep copy
+ return Manifest(self.file_records[:])
+
+ def __copy__(self):
+ return Manifest(self.file_records)
+
+ def copy(self):
+ return Manifest(self.file_records[:])
+
+ def present(self):
+ return all(i.present() for i in self.file_records)
+
+ def validate_sizes(self):
+ return all(i.validate_size() for i in self.file_records)
+
+ def validate_digests(self):
+ return all(i.validate_digest() for i in self.file_records)
+
+ def validate(self):
+ return all(i.validate() for i in self.file_records)
+
+ def load(self, data_file, fmt="json"):
+ assert fmt in self.valid_formats
+ if fmt == "json":
+ try:
+ self.file_records.extend(
+ json.load(data_file, cls=FileRecordJSONDecoder)
+ )
+ except ValueError:
+ raise InvalidManifest("trying to read invalid manifest file")
+
+ def loads(self, data_string, fmt="json"):
+ assert fmt in self.valid_formats
+ if fmt == "json":
+ try:
+ self.file_records.extend(
+ json.loads(data_string, cls=FileRecordJSONDecoder)
+ )
+ except ValueError:
+ raise InvalidManifest("trying to read invalid manifest file")
+
+ def dump(self, output_file, fmt="json"):
+ assert fmt in self.valid_formats
+ if fmt == "json":
+ return json.dump(
+ self.file_records,
+ output_file,
+ indent=2,
+ separators=(",", ": "),
+ cls=FileRecordJSONEncoder,
+ )
+
+ def dumps(self, fmt="json"):
+ assert fmt in self.valid_formats
+ if fmt == "json":
+ return json.dumps(
+ self.file_records,
+ indent=2,
+ separators=(",", ": "),
+ cls=FileRecordJSONEncoder,
+ )
+
+
+def digest_file(f, a):
+ """I take a file like object 'f' and return a hex-string containing
+ of the result of the algorithm 'a' applied to 'f'."""
+ h = hashlib.new(a)
+ chunk_size = 1024 * 10
+ data = f.read(chunk_size)
+ while data:
+ h.update(data)
+ data = f.read(chunk_size)
+ name = repr(f.name) if hasattr(f, "name") else "a file"
+ log.debug("hashed %s with %s to be %s", name, a, h.hexdigest())
+ return h.hexdigest()
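+
+# For illustration: digest_file(BytesIO(b"tooltool"), "sha512") returns the
+# 128-character hex sha512 digest of those bytes; FileRecord.validate_digest()
+# and create_file_record() both rely on this helper.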
+
+
+def execute(cmd):
+ """Execute CMD, logging its stdout at the info level"""
+ process = Popen(cmd, shell=True, stdout=PIPE)
+ while True:
+ line = process.stdout.readline()
+ if not line:
+ break
+ log.info(line.replace("\n", " "))
+ return process.wait() == 0
+
+
+def open_manifest(manifest_file):
+ """I know how to take a filename and load it into a Manifest object"""
+ if os.path.exists(manifest_file):
+ manifest = Manifest()
+ with open(manifest_file, "r" if PY3 else "rb") as f:
+ manifest.load(f)
+ log.debug("loaded manifest from file '%s'" % manifest_file)
+ return manifest
+ else:
+ log.debug("tried to load absent file '%s' as manifest" % manifest_file)
+ raise InvalidManifest("manifest file '%s' does not exist" % manifest_file)
+
+
+def list_manifest(manifest_file):
+ """I know how print all the files in a location"""
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error(
+ "failed to load manifest file at '%s': %s"
+ % (
+ manifest_file,
+ str(e),
+ )
+ )
+ return False
+ for f in manifest.file_records:
+ print(
+ "{}\t{}\t{}".format(
+ "P" if f.present() else "-",
+ "V" if f.present() and f.validate() else "-",
+ f.filename,
+ )
+ )
+ return True
+
+
+def validate_manifest(manifest_file):
+ """I validate that all files in a manifest are present and valid but
+ don't fetch or delete them if they aren't"""
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error(
+ "failed to load manifest file at '%s': %s"
+ % (
+ manifest_file,
+ str(e),
+ )
+ )
+ return False
+ invalid_files = []
+ absent_files = []
+ for f in manifest.file_records:
+ if not f.present():
+ absent_files.append(f)
+ else:
+ if not f.validate():
+ invalid_files.append(f)
+ if len(invalid_files + absent_files) == 0:
+ return True
+ else:
+ return False
+
+
+def add_files(manifest_file, algorithm, filenames, version, visibility, unpack):
+    # Returns True if all files were successfully added, False if not.
+    # Library exceptions are not caught. If any file is already tracked
+    # in the manifest, the return value will be False because that file
+    # was not added.
+    all_files_added = True
+    # Create an old_manifest object to add to
+ if os.path.exists(manifest_file):
+ old_manifest = open_manifest(manifest_file)
+ else:
+ old_manifest = Manifest()
+ log.debug("creating a new manifest file")
+ new_manifest = Manifest() # use a different manifest for the output
+ for filename in filenames:
+ log.debug("adding %s" % filename)
+ path, name = os.path.split(filename)
+ new_fr = create_file_record(filename, algorithm)
+ new_fr.version = version
+ new_fr.visibility = visibility
+ new_fr.unpack = unpack
+ log.debug("appending a new file record to manifest file")
+ add = True
+ for fr in old_manifest.file_records:
+ log.debug(
+ "manifest file has '%s'"
+ % "', ".join([x.filename for x in old_manifest.file_records])
+ )
+ if new_fr == fr:
+ log.info("file already in old_manifest")
+ add = False
+ elif filename == fr.filename:
+ log.error(
+ "manifest already contains a different file named %s" % filename
+ )
+ add = False
+ if add:
+ new_manifest.file_records.append(new_fr)
+ log.debug("added '%s' to manifest" % filename)
+ else:
+ all_files_added = False
+ # copy any files in the old manifest that aren't in the new one
+ new_filenames = set(fr.filename for fr in new_manifest.file_records)
+ for old_fr in old_manifest.file_records:
+ if old_fr.filename not in new_filenames:
+ new_manifest.file_records.append(old_fr)
+ if PY3:
+ with open(manifest_file, mode="w") as output:
+ new_manifest.dump(output, fmt="json")
+ else:
+ with open(manifest_file, mode="wb") as output:
+ new_manifest.dump(output, fmt="json")
+ return all_files_added
+
+
+def touch(f):
+ """Used to modify mtime in cached files;
+ mtime is used by the purge command"""
+ try:
+ os.utime(f, None)
+ except OSError:
+ log.warn("impossible to update utime of file %s" % f)
+
+
+@contextmanager
+@retriable(sleeptime=2)
+def request(url, auth_file=None):
+ req = Request(url)
+ _authorize(req, auth_file)
+ with closing(urllib2.urlopen(req)) as f:
+ log.debug("opened %s for reading" % url)
+ yield f
+
+
+def fetch_file(base_urls, file_record, grabchunk=1024 * 4, auth_file=None, region=None):
+    # A file which is requested to be fetched and already exists locally will
+    # be overwritten by this function
+ fd, temp_path = tempfile.mkstemp(dir=os.getcwd())
+ os.close(fd)
+ fetched_path = None
+ for base_url in base_urls:
+ # Generate the URL for the file on the server side
+ url = urljoin(base_url, "%s/%s" % (file_record.algorithm, file_record.digest))
+ if region is not None:
+ url += "?region=" + region
+
+ log.info("Attempting to fetch from '%s'..." % base_url)
+
+ # Well, the file doesn't exist locally. Let's fetch it.
+ try:
+ with request(url, auth_file) as f, open(temp_path, mode="wb") as out:
+ k = True
+ size = 0
+ while k:
+ # TODO: print statistics as file transfers happen both for info and to stop
+ # buildbot timeouts
+ indata = f.read(grabchunk)
+ out.write(indata)
+ size += len(indata)
+ if len(indata) == 0:
+ k = False
+ log.info(
+ "File %s fetched from %s as %s"
+ % (file_record.filename, base_url, temp_path)
+ )
+ fetched_path = temp_path
+ break
+ except (URLError, HTTPError, ValueError):
+ log.info(
+ "...failed to fetch '%s' from %s" % (file_record.filename, base_url),
+ exc_info=True,
+ )
+ except IOError: # pragma: no cover
+ log.info(
+ "failed to write to temporary file for '%s'" % file_record.filename,
+ exc_info=True,
+ )
+
+ # cleanup temp file in case of issues
+ if fetched_path:
+ return os.path.split(fetched_path)[1]
+ else:
+ try:
+ os.remove(temp_path)
+ except OSError: # pragma: no cover
+ pass
+ return None
+
+
+def clean_path(dirname):
+ """Remove a subtree if is exists. Helper for unpack_file()."""
+ if os.path.exists(dirname):
+ log.info("rm tree: %s" % dirname)
+ shutil.rmtree(dirname)
+
+
+CHECKSUM_SUFFIX = ".checksum"
+
+
+def unpack_file(filename):
+ """Untar `filename`, assuming it is uncompressed or compressed with bzip2,
+ xz, gzip, or unzip a zip file. The file is assumed to contain a single
+ directory with a name matching the base of the given filename.
+ Xz support is handled by shelling out to 'tar'."""
+ if os.path.isfile(filename) and tarfile.is_tarfile(filename):
+ tar_file, zip_ext = os.path.splitext(filename)
+ base_file, tar_ext = os.path.splitext(tar_file)
+ clean_path(base_file)
+ log.info('untarring "%s"' % filename)
+ tar = tarfile.open(filename)
+ tar.extractall()
+ tar.close()
+ elif os.path.isfile(filename) and filename.endswith(".tar.xz"):
+ base_file = filename.replace(".tar.xz", "")
+ clean_path(base_file)
+ log.info('untarring "%s"' % filename)
+ # Not using tar -Jxf because it fails on Windows for some reason.
+ process = Popen(["xz", "-d", "-c", filename], stdout=PIPE)
+ stdout, stderr = process.communicate()
+ if process.returncode != 0:
+ return False
+ fileobj = BytesIO()
+ fileobj.write(stdout)
+ fileobj.seek(0)
+ tar = tarfile.open(fileobj=fileobj, mode="r|")
+ tar.extractall()
+ tar.close()
+ elif os.path.isfile(filename) and zipfile.is_zipfile(filename):
+ base_file = filename.replace(".zip", "")
+ clean_path(base_file)
+ log.info('unzipping "%s"' % filename)
+ z = zipfile.ZipFile(filename)
+ z.extractall()
+ z.close()
+ else:
+ log.error("Unknown archive extension for filename '%s'" % filename)
+ return False
+ return True
+
+
+def fetch_files(
+ manifest_file,
+ base_urls,
+ filenames=[],
+ cache_folder=None,
+ auth_file=None,
+ region=None,
+):
+ # Lets load the manifest file
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error(
+ "failed to load manifest file at '%s': %s"
+ % (
+ manifest_file,
+ str(e),
+ )
+ )
+ return False
+
+ # we want to track files already in current working directory AND valid
+ # we will not need to fetch these
+ present_files = []
+
+ # We want to track files that fail to be fetched as well as
+ # files that are fetched
+ failed_files = []
+ fetched_files = []
+
+ # Files that we want to unpack.
+ unpack_files = []
+
+ # Lets go through the manifest and fetch the files that we want
+ for f in manifest.file_records:
+ # case 1: files are already present
+ if f.present():
+ if f.validate():
+ present_files.append(f.filename)
+ if f.unpack:
+ unpack_files.append(f.filename)
+ else:
+                # we have an invalid file here, better to clean it up!
+                # this invalid file needs to be replaced with a good one
+                # from the local cache or fetched from a tooltool server
+ log.info(
+ "File %s is present locally but it is invalid, so I will remove it "
+ "and try to fetch it" % f.filename
+ )
+ os.remove(os.path.join(os.getcwd(), f.filename))
+
+ # check if file is already in cache
+ if cache_folder and f.filename not in present_files:
+ try:
+ shutil.copy(
+ os.path.join(cache_folder, f.digest),
+ os.path.join(os.getcwd(), f.filename),
+ )
+ log.info(
+ "File %s retrieved from local cache %s" % (f.filename, cache_folder)
+ )
+ touch(os.path.join(cache_folder, f.digest))
+
+ filerecord_for_validation = FileRecord(
+ f.filename, f.size, f.digest, f.algorithm
+ )
+ if filerecord_for_validation.validate():
+ present_files.append(f.filename)
+ if f.unpack:
+ unpack_files.append(f.filename)
+ else:
+ # the file copied from the cache is invalid, better to
+ # clean up the cache version itself as well
+ log.warn(
+ "File %s retrieved from cache is invalid! I am deleting it from the "
+ "cache as well" % f.filename
+ )
+ os.remove(os.path.join(os.getcwd(), f.filename))
+ os.remove(os.path.join(cache_folder, f.digest))
+ except IOError:
+ log.info(
+ "File %s not present in local cache folder %s"
+ % (f.filename, cache_folder)
+ )
+
+        # now I will try to fetch all files which are not already present
+        # and valid, downloading to a temporary file to avoid race conditions
+        temp_file_name = None
+        # 'filenames' is the list of filenames to be managed; if it is a
+        # non-empty list it is used as a filter. If a filename is in
+        # present_files, I already have it, because it was either in the
+        # working dir or in the cache
+ if (
+ f.filename in filenames or len(filenames) == 0
+ ) and f.filename not in present_files:
+ log.debug("fetching %s" % f.filename)
+ temp_file_name = fetch_file(
+ base_urls, f, auth_file=auth_file, region=region
+ )
+ if temp_file_name:
+ fetched_files.append((f, temp_file_name))
+ else:
+ failed_files.append(f.filename)
+ else:
+ log.debug("skipping %s" % f.filename)
+
+ # lets ensure that fetched files match what the manifest specified
+ for localfile, temp_file_name in fetched_files:
+ # since I downloaded to a temp file, I need to perform all validations on the temp file
+ # this is why filerecord_for_validation is created
+
+ filerecord_for_validation = FileRecord(
+ temp_file_name, localfile.size, localfile.digest, localfile.algorithm
+ )
+
+ if filerecord_for_validation.validate():
+ # great!
+ # I can rename the temp file
+ log.info(
+ "File integrity verified, renaming %s to %s"
+ % (temp_file_name, localfile.filename)
+ )
+ os.rename(
+ os.path.join(os.getcwd(), temp_file_name),
+ os.path.join(os.getcwd(), localfile.filename),
+ )
+
+ if localfile.unpack:
+ unpack_files.append(localfile.filename)
+
+ # if I am using a cache and a new file has just been retrieved from a
+ # remote location, I need to update the cache as well
+ if cache_folder:
+ log.info("Updating local cache %s..." % cache_folder)
+ try:
+ if not os.path.exists(cache_folder):
+ log.info("Creating cache in %s..." % cache_folder)
+ os.makedirs(cache_folder, 0o0700)
+ shutil.copy(
+ os.path.join(os.getcwd(), localfile.filename),
+ os.path.join(cache_folder, localfile.digest),
+ )
+ log.info(
+ "Local cache %s updated with %s"
+ % (cache_folder, localfile.filename)
+ )
+ touch(os.path.join(cache_folder, localfile.digest))
+ except (OSError, IOError):
+ log.warning(
+ "Impossible to add file %s to cache folder %s"
+ % (localfile.filename, cache_folder),
+ exc_info=True,
+ )
+ else:
+ failed_files.append(localfile.filename)
+ log.error("'%s'" % filerecord_for_validation.describe())
+ os.remove(temp_file_name)
+
+ # Unpack files that need to be unpacked.
+ for filename in unpack_files:
+ if not unpack_file(filename):
+ failed_files.append(filename)
+
+ # If we failed to fetch or validate a file, we need to fail
+ if len(failed_files) > 0:
+ log.error("The following files failed: '%s'" % "', ".join(failed_files))
+ return False
+ return True
+
+
+def freespace(p):
+ "Returns the number of bytes free under directory `p`"
+ if sys.platform == "win32": # pragma: no cover
+ # os.statvfs doesn't work on Windows
+ import win32file
+
+ secsPerClus, bytesPerSec, nFreeClus, totClus = win32file.GetDiskFreeSpace(p)
+ return secsPerClus * bytesPerSec * nFreeClus
+ else:
+ r = os.statvfs(p)
+ return r.f_frsize * r.f_bavail
+
+
+def purge(folder, gigs):
+ """If gigs is non 0, it deletes files in `folder` until `gigs` GB are free,
+ starting from older files. If gigs is 0, a full purge will be performed.
+ No recursive deletion of files in subfolder is performed."""
+
+ full_purge = bool(gigs == 0)
+ gigs *= 1024 * 1024 * 1024
+
+ if not full_purge and freespace(folder) >= gigs:
+ log.info("No need to cleanup")
+ return
+
+ files = []
+ for f in os.listdir(folder):
+ p = os.path.join(folder, f)
+ # it deletes files in folder without going into subfolders,
+ # assuming the cache has a flat structure
+ if not os.path.isfile(p):
+ continue
+ mtime = os.path.getmtime(p)
+ files.append((mtime, p))
+
+ # iterate files sorted by mtime
+ for _, f in sorted(files):
+ log.info("removing %s to free up space" % f)
+ try:
+ os.remove(f)
+ except OSError:
+ log.info("Impossible to remove %s" % f, exc_info=True)
+ if not full_purge and freespace(folder) >= gigs:
+ break
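+
+# For illustration (hypothetical path): purge("/builds/tooltool_cache", 2)
+# removes the oldest cached files until at least 2 GB are free, while
+# purge("/builds/tooltool_cache", 0) removes every file in the folder's top
+# level.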
+
+
+def _log_api_error(e):
+ if hasattr(e, "hdrs") and e.hdrs["content-type"] == "application/json":
+ json_resp = json.load(e.fp)
+ log.error(
+ "%s: %s" % (json_resp["error"]["name"], json_resp["error"]["description"])
+ )
+ else:
+ log.exception("Error making RelengAPI request:")
+
+
+def _authorize(req, auth_file):
+ if not auth_file:
+ return
+
+ is_taskcluster_auth = False
+ with open(auth_file) as f:
+ auth_file_content = f.read().strip()
+ try:
+ auth_file_content = json.loads(auth_file_content)
+ is_taskcluster_auth = True
+ except Exception:
+ pass
+
+ if is_taskcluster_auth:
+ taskcluster_header = make_taskcluster_header(auth_file_content, req)
+ log.debug("Using taskcluster credentials in %s" % auth_file)
+ req.add_unredirected_header("Authorization", taskcluster_header)
+ else:
+ log.debug("Using Bearer token in %s" % auth_file)
+ req.add_unredirected_header("Authorization", "Bearer %s" % auth_file_content)
+
+
+def _send_batch(base_url, auth_file, batch, region):
+ url = urljoin(base_url, "upload")
+ if region is not None:
+ url += "?region=" + region
+ data = json.dumps(batch)
+ if PY3:
+ data = data.encode("utf-8")
+ req = Request(url, data, {"Content-Type": "application/json"})
+ _authorize(req, auth_file)
+ try:
+ resp = urllib2.urlopen(req)
+ except (URLError, HTTPError) as e:
+ _log_api_error(e)
+ return None
+ return json.load(resp)["result"]
+
+
+def _s3_upload(filename, file):
+ # urllib2 does not support streaming, so we fall back to good old httplib
+ url = urlparse(file["put_url"])
+ cls = HTTPSConnection if url.scheme == "https" else HTTPConnection
+ host, port = url.netloc.split(":") if ":" in url.netloc else (url.netloc, 443)
+ port = int(port)
+ conn = cls(host, port)
+ try:
+ req_path = "%s?%s" % (url.path, url.query) if url.query else url.path
+ with open(filename, "rb") as f:
+ content = f.read()
+ content_length = len(content)
+ f.seek(0)
+ conn.request(
+ "PUT",
+ req_path,
+ f,
+ {
+ "Content-Type": "application/octet-stream",
+ "Content-Length": str(content_length),
+ },
+ )
+ resp = conn.getresponse()
+ resp_body = resp.read()
+ conn.close()
+ if resp.status != 200:
+ raise RuntimeError(
+ "Non-200 return from AWS: %s %s\n%s"
+ % (resp.status, resp.reason, resp_body)
+ )
+ except Exception:
+ file["upload_exception"] = sys.exc_info()
+ file["upload_ok"] = False
+ else:
+ file["upload_ok"] = True
+
+
+def _notify_upload_complete(base_url, auth_file, file):
+ req = Request(urljoin(base_url, "upload/complete/%(algorithm)s/%(digest)s" % file))
+ _authorize(req, auth_file)
+ try:
+ urllib2.urlopen(req)
+ except HTTPError as e:
+ if e.code != 409:
+ _log_api_error(e)
+ return
+ # 409 indicates that the upload URL hasn't expired yet and we
+ # should retry after a delay
+ to_wait = int(e.headers.get("X-Retry-After", 60))
+ log.warning("Waiting %d seconds for upload URLs to expire" % to_wait)
+ time.sleep(to_wait)
+ _notify_upload_complete(base_url, auth_file, file)
+ except Exception:
+ log.exception("While notifying server of upload completion:")
+
+
+def upload(manifest, message, base_urls, auth_file, region):
+ try:
+ manifest = open_manifest(manifest)
+ except InvalidManifest:
+ log.exception("failed to load manifest file at '%s'")
+ return False
+
+ # verify the manifest, since we'll need the files present to upload
+ if not manifest.validate():
+ log.error("manifest is invalid")
+ return False
+
+    if any(fr.visibility is None for fr in manifest.file_records):
+        log.error("All files in a manifest for upload must have a visibility set")
+        return False
+
+ # convert the manifest to an upload batch
+ batch = {
+ "message": message,
+ "files": {},
+ }
+ for fr in manifest.file_records:
+ batch["files"][fr.filename] = {
+ "size": fr.size,
+ "digest": fr.digest,
+ "algorithm": fr.algorithm,
+ "visibility": fr.visibility,
+ }
+
+ # make the upload request
+ resp = _send_batch(base_urls[0], auth_file, batch, region)
+ if not resp:
+ return None
+ files = resp["files"]
+
+ # Upload the files, each in a thread. This allows us to start all of the
+ # uploads before any of the URLs expire.
+ threads = {}
+ for filename, file in files.items():
+ if "put_url" in file:
+ log.info("%s: starting upload" % (filename,))
+ thd = threading.Thread(target=_s3_upload, args=(filename, file))
+ thd.daemon = 1
+ thd.start()
+ threads[filename] = thd
+ else:
+ log.info("%s: already exists on server" % (filename,))
+
+ # re-join all of those threads as they exit
+ success = True
+ while threads:
+ for filename, thread in list(threads.items()):
+ if not thread.is_alive():
+ # _s3_upload has annotated file with result information
+ file = files[filename]
+ thread.join()
+ if file["upload_ok"]:
+ log.info("%s: uploaded" % filename)
+ else:
+ log.error(
+ "%s: failed" % filename, exc_info=file["upload_exception"]
+ )
+ success = False
+ del threads[filename]
+
+ # notify the server that the uploads are completed. If the notification
+ # fails, we don't consider that an error (the server will notice
+ # eventually)
+ for filename, file in files.items():
+ if "put_url" in file and file["upload_ok"]:
+ log.info("notifying server of upload completion for %s" % (filename,))
+ _notify_upload_complete(base_urls[0], auth_file, file)
+
+ return success
+
+
+def send_operation_on_file(data, base_urls, digest, auth_file):
+ url = base_urls[0]
+ url = urljoin(url, "file/sha512/" + digest)
+
+    data = json.dumps(data)
+    if PY3:
+        data = data.encode("utf-8")
+
+ req = Request(url, data, {"Content-Type": "application/json"})
+ req.get_method = lambda: "PATCH"
+
+ _authorize(req, auth_file)
+
+ try:
+ urllib2.urlopen(req)
+ except (URLError, HTTPError) as e:
+ _log_api_error(e)
+ return False
+ return True
+
+
+def change_visibility(base_urls, digest, visibility, auth_file):
+ data = [
+ {
+ "op": "set_visibility",
+ "visibility": visibility,
+ }
+ ]
+    return send_operation_on_file(data, base_urls, digest, auth_file)
+
+
+def delete_instances(base_urls, digest, auth_file):
+ data = [
+ {
+ "op": "delete_instances",
+ }
+ ]
+ return send_operation_on_file(data, base_urls, digest, auth_file)
+
+
+def process_command(options, args):
+ """I know how to take a list of program arguments and
+ start doing the right thing with them"""
+ cmd = args[0]
+ cmd_args = args[1:]
+ log.debug("processing '%s' command with args '%s'" % (cmd, '", "'.join(cmd_args)))
+ log.debug("using options: %s" % options)
+
+ if cmd == "list":
+ return list_manifest(options["manifest"])
+ if cmd == "validate":
+ return validate_manifest(options["manifest"])
+ elif cmd == "add":
+ return add_files(
+ options["manifest"],
+ options["algorithm"],
+ cmd_args,
+ options["version"],
+ options["visibility"],
+ options["unpack"],
+ )
+ elif cmd == "purge":
+ if options["cache_folder"]:
+ purge(folder=options["cache_folder"], gigs=options["size"])
+ else:
+ log.critical("please specify the cache folder to be purged")
+ return False
+ elif cmd == "fetch":
+ return fetch_files(
+ options["manifest"],
+ options["base_url"],
+ cmd_args,
+ cache_folder=options["cache_folder"],
+ auth_file=options.get("auth_file"),
+ region=options.get("region"),
+ )
+ elif cmd == "upload":
+ if not options.get("message"):
+ log.critical("upload command requires a message")
+ return False
+ return upload(
+ options.get("manifest"),
+ options.get("message"),
+ options.get("base_url"),
+ options.get("auth_file"),
+ options.get("region"),
+ )
+ elif cmd == "change-visibility":
+ if not options.get("digest"):
+ log.critical("change-visibility command requires a digest option")
+ return False
+ if not options.get("visibility"):
+ log.critical("change-visibility command requires a visibility option")
+ return False
+ return change_visibility(
+ options.get("base_url"),
+ options.get("digest"),
+ options.get("visibility"),
+ options.get("auth_file"),
+ )
+ elif cmd == "delete":
+ if not options.get("digest"):
+ log.critical("change-visibility command requires a digest option")
+ return False
+ return delete_instances(
+ options.get("base_url"),
+ options.get("digest"),
+ options.get("auth_file"),
+ )
+ else:
+ log.critical('command "%s" is not implemented' % cmd)
+ return False
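+
+# Illustrative invocations (hypothetical manifest, file names and digest):
+#
+#     python tooltool.py -m releng.manifest fetch
+#     python tooltool.py -m releng.manifest add --visibility public clang.tar.xz
+#     python tooltool.py -c /builds/tooltool_cache -s 20 purge
+#     python tooltool.py -m releng.manifest --message "Bug 123456: add clang" upload
+#     python tooltool.py change-visibility --digest <sha512> --visibility internal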
+
+
+def main(argv, _skip_logging=False):
+ # Set up option parsing
+ parser = optparse.OptionParser()
+ parser.add_option(
+ "-q",
+ "--quiet",
+ default=logging.INFO,
+ dest="loglevel",
+ action="store_const",
+ const=logging.ERROR,
+ )
+ parser.add_option(
+ "-v", "--verbose", dest="loglevel", action="store_const", const=logging.DEBUG
+ )
+ parser.add_option(
+ "-m",
+ "--manifest",
+ default=DEFAULT_MANIFEST_NAME,
+ dest="manifest",
+ action="store",
+ help="specify the manifest file to be operated on",
+ )
+ parser.add_option(
+ "-d",
+ "--algorithm",
+ default="sha512",
+ dest="algorithm",
+ action="store",
+ help="hashing algorithm to use (only sha512 is allowed)",
+ )
+ parser.add_option(
+ "--digest",
+ default=None,
+ dest="digest",
+ action="store",
+ help="digest hash to change visibility for",
+ )
+ parser.add_option(
+ "--visibility",
+ default=None,
+ dest="visibility",
+ choices=["internal", "public"],
+ help='Visibility level of this file; "internal" is for '
+ "files that cannot be distributed out of the company "
+ 'but not for secrets; "public" files are available to '
+ "anyone without restriction",
+ )
+ parser.add_option(
+ "--unpack",
+ default=False,
+ dest="unpack",
+ action="store_true",
+ help="Request unpacking this file after fetch."
+ " This is helpful with tarballs.",
+ )
+ parser.add_option(
+ "--version",
+ default=None,
+ dest="version",
+ action="store",
+ help="Version string for this file. This annotates the "
+ "manifest entry with a version string to help "
+ "identify the contents.",
+ )
+ parser.add_option(
+ "-o",
+ "--overwrite",
+ default=False,
+ dest="overwrite",
+ action="store_true",
+ help="UNUSED; present for backward compatibility",
+ )
+ parser.add_option(
+ "--url",
+ dest="base_url",
+ action="append",
+ help="RelengAPI URL ending with /tooltool/; default "
+ "is appropriate for Mozilla",
+ )
+ parser.add_option(
+ "-c", "--cache-folder", dest="cache_folder", help="Local cache folder"
+ )
+ parser.add_option(
+ "-s",
+ "--size",
+ help="free space required (in GB)",
+ dest="size",
+ type="float",
+ default=0.0,
+ )
+ parser.add_option(
+ "-r",
+ "--region",
+ help="Preferred AWS region for upload or fetch; " "example: --region=us-west-2",
+ )
+ parser.add_option(
+ "--message",
+ help='The "commit message" for an upload; format with a bug number '
+ "and brief comment",
+ dest="message",
+ )
+ parser.add_option(
+ "--authentication-file",
+ help="Use the RelengAPI token found in the given file to "
+ "authenticate to the RelengAPI server.",
+ dest="auth_file",
+ )
+
+ (options_obj, args) = parser.parse_args(argv[1:])
+
+ if not options_obj.base_url:
+ tooltool_host = os.environ.get("TOOLTOOL_HOST", "tooltool.mozilla-releng.net")
+ taskcluster_proxy_url = os.environ.get("TASKCLUSTER_PROXY_URL")
+ if taskcluster_proxy_url:
+ tooltool_url = "{}/{}".format(taskcluster_proxy_url, tooltool_host)
+ else:
+ tooltool_url = "https://{}".format(tooltool_host)
+
+ options_obj.base_url = [tooltool_url]
+
+ # ensure all URLs have a trailing slash
+ def add_slash(url):
+ return url if url.endswith("/") else (url + "/")
+
+ options_obj.base_url = [add_slash(u) for u in options_obj.base_url]
+
+ # expand ~ in --authentication-file
+ if options_obj.auth_file:
+ options_obj.auth_file = os.path.expanduser(options_obj.auth_file)
+
+ # Dictionaries are easier to work with
+ options = vars(options_obj)
+
+ log.setLevel(options["loglevel"])
+
+ # Set up logging, for now just to the console
+ if not _skip_logging: # pragma: no cover
+ ch = logging.StreamHandler()
+ cf = logging.Formatter("%(levelname)s - %(message)s")
+ ch.setFormatter(cf)
+ log.addHandler(ch)
+
+ if options["algorithm"] != "sha512":
+ parser.error("only --algorithm sha512 is supported")
+
+ if len(args) < 1:
+ parser.error("You must specify a command")
+
+ return 0 if process_command(options, args) else 1
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(main(sys.argv))