author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 09:43:15 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 09:43:15 +0000
commit    4fd40781fe3edc4ae5895e3cfddfbfea328ac0e8 (patch)
tree      efb19f2d9385f548990904757cdf44f3b9546fca
parent    Initial commit. (diff)
Adding upstream version 20240222+ds.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  CITATION                         25
-rw-r--r--  CITATION.cff                     15
-rw-r--r--  CREDITS                          41
-rw-r--r--  LICENSES/CC-BY-SA-4.0.txt       170
-rw-r--r--  LICENSES/GFDL-1.3-or-later.txt  149
-rw-r--r--  LICENSES/GPL-3.0-or-later.txt   232
-rw-r--r--  Makefile.am                     198
-rw-r--r--  Makefile.in                     961
-rw-r--r--  NEWS                           5344
-rw-r--r--  README                          146
-rw-r--r--  TODO                              2
-rw-r--r--  aclocal.m4                      786
-rw-r--r--  config.h.in                      25
-rwxr-xr-x  configure                      3846
-rw-r--r--  configure.ac                     14
-rwxr-xr-x  install-sh                      541
-rwxr-xr-x  missing                         215
-rw-r--r--  src/Makefile.am                 405
-rw-r--r--  src/Makefile.in                 975
-rwxr-xr-x  src/env_parallel                145
-rwxr-xr-x  src/env_parallel.ash            636
-rwxr-xr-x  src/env_parallel.bash           640
-rwxr-xr-x  src/env_parallel.csh            142
-rwxr-xr-x  src/env_parallel.dash           636
-rwxr-xr-x  src/env_parallel.fish           194
-rwxr-xr-x  src/env_parallel.ksh            636
-rw-r--r--  src/env_parallel.mksh           605
-rwxr-xr-x  src/env_parallel.pdksh          183
-rw-r--r--  src/env_parallel.pod            942
-rwxr-xr-x  src/env_parallel.sh             636
-rwxr-xr-x  src/env_parallel.tcsh           142
-rwxr-xr-x  src/env_parallel.zsh            636
-rwxr-xr-x  src/niceload                   1173
-rw-r--r--  src/niceload.pod                433
-rwxr-xr-x  src/parallel                  15425
-rw-r--r--  src/parallel.pod               4618
-rw-r--r--  src/parallel_alternatives.pod  4765
-rw-r--r--  src/parallel_book.pod           403
-rw-r--r--  src/parallel_cheat_bw.fodt     1001
-rw-r--r--  src/parallel_design.pod        1480
-rw-r--r--  src/parallel_examples.pod      2041
-rw-r--r--  src/parallel_tutorial.pod      3177
-rwxr-xr-x  src/parcat                      194
-rw-r--r--  src/parcat.pod                  191
-rwxr-xr-x  src/parset                      138
-rw-r--r--  src/parset.pod                  327
-rwxr-xr-x  src/parsort                     459
-rwxr-xr-x  src/pod2graph                   128
-rwxr-xr-x  src/sem                       15425
-rw-r--r--  src/sem.pod                     381
-rwxr-xr-x  src/sql                        1279
51 files changed, 73301 insertions, 0 deletions
diff --git a/CITATION b/CITATION
new file mode 100644
index 0000000..7c92f41
--- /dev/null
+++ b/CITATION
@@ -0,0 +1,25 @@
+When using programs that use GNU Parallel to process data for publication, please cite:
+
+@book{tange_ole_2018_1146014,
+ author = {Tange, Ole},
+ title = {GNU Parallel 2018},
+ publisher = {Ole Tange},
+ month = mar,
+ year = 2018,
+ ISBN = {9781387509881},
+ doi = {10.5281/zenodo.1146014},
+ url = {https://doi.org/10.5281/zenodo.1146014}
+}
+
+(Feel free to use \nocite{tange_ole_2018_1146014})
+
+Or:
+
+ O. Tange (2018): GNU Parallel 2018, Mar 2018, ISBN 9781387509881,
+ DOI https://doi.org/10.5281/zenodo.1146014
+
+This helps fund further development.
+
+SPDX-FileCopyrightText: 2021-2022 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+SPDX-License-Identifier: GFDL-1.3-or-later
+SPDX-License-Identifier: CC-BY-SA-4.0
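As a minimal sketch of how the entry above can be used from LaTeX (the file name refs.bib and the surrounding document are illustrative assumptions, not part of the upstream instructions):

    % Sketch: assumes the @book entry above was saved as refs.bib.
    \documentclass{article}
    \begin{document}
    Data were processed with GNU Parallel \cite{tange_ole_2018_1146014}.
    % Or, to list the reference without an inline citation:
    % \nocite{tange_ole_2018_1146014}
    \bibliographystyle{plain}
    \bibliography{refs}
    \end{document}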
diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 0000000..44e7cc0
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,15 @@
+# SPDX-FileCopyrightText: 2021-2022 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+cff-version: 1.2.0
+title: GNU Parallel 2018
+message: >-
+ If you use this software, please cite it using the
+ metadata from this file.
+type: software
+authors:
+ - given-names: Ole
+ family-names: Tange
+ email: ole@tange.dk
+ orcid: 'https://orcid.org/0000-0002-6345-1437'
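Since CFF is plain YAML, the metadata can also be read programmatically. A minimal Python sketch (assuming PyYAML is installed; the field names follow the file above):

    # Sketch: load CITATION.cff and print the citation metadata.
    import yaml

    with open("CITATION.cff") as f:
        cff = yaml.safe_load(f)

    print(cff["title"])  # GNU Parallel 2018
    for author in cff.get("authors", []):
        name = f'{author["given-names"]} {author["family-names"]}'
        print(name, author.get("orcid", ""))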
diff --git a/CREDITS b/CREDITS
new file mode 100644
index 0000000..6afe666
--- /dev/null
+++ b/CREDITS
@@ -0,0 +1,41 @@
+People who have helped GNU Parallel in different ways.
+
+Wu Zhenyu: Code snippets for --completion in zsh.
+Morten Rønne: Donating his old Mac for testing.
+Renan Valieris: Maintaining GNU Parallel for Anaconda Cloud.
+Jakub Kulík: Maintaining GNU Parallel for Solaris-userland.
+Rich Burridge: Maintaining GNU Parallel for Solaris-userland.
+Jonathan Kamens: Bug patch for chomp.
+John Rusnak: Feedback on all documentation.
+FrithMartin: Bug patch for orphan blocks.
+Rasmus Villemoes: Code snippets for signal processing.
+Martin d'Anjou: Code snippets for signal processing.
+rici@stackoverflow.com: Documentation on exporting arrays using --env.
+Malcolm Cook: The idea of using general Perl expressions as replacement strings.
+Ævar Arnfjörð Bjarmason: Reading the whole code.
+Christian Netrwal: CPU detection code for AIX.
+alpinelinux.org/users/ncopa: Maintaining GNU Parallel for Alpine GNU/Linux.
+Ondřej Surý: Maintaining GNU Parallel for Debian GNU/Linux.
+Rogério Brito: Maintaining GNU Parallel for Ubuntu GNU/Linux.
+Tim Harder: Maintaining GNU Parallel for Gentoo GNU/Linux.
+Chris Howey: Maintaining GNU Parallel for FreeBSD.
+cheusov: Maintaining GNU Parallel for NetBSD.
+https://fedoraproject.org/wiki/User:Golfu: Maintaining GNU Parallel for Fedora GNU/Linux.
+Pascal Stumpf: Maintaining GNU Parallel for OpenBSD.
+Sandro Cazzaniga: Maintaining GNU Parallel for Mandriva/Mageia.
+Alan Coopersmith: Maintaining GNU Parallel for Solaris.
+jengelh@inai.de: Maintaining GNU Parallel for openSUSE.
+Timothy Redaelli: Maintaining GNU Parallel for Arch GNU/Linux.
+Michael Shigorin: Maintaining GNU Parallel for Alt GNU/Linux.
+Jesse Alama: Maintaining GNU Parallel for Fink.
+jacknagel: Maintaining GNU Parallel for Homebrew.
+Jonathan Palardy: Maintaining GNU Parallel for Homebrew.
+Tim Cuthbertson: Maintaining GNU Parallel for 0install.
+Michael Perzl: Maintaining GNU Parallel for AIX.
+Peter Simons: Maintaining GNU Parallel for Arch GNU/Linux.
+Fethican Coşkuner: Maintaining GNU Parallel for Pardus.
+Wayne E. Seguin: CPU detection improvement for Mac OSX.
+Michał Markowski: OpenBSD CPU detection.
+Drew Frank: Work on --results.
+Javier Tarradas: HPUX CPU/core detection.
+Anders F Björklund: niceload: Darwin version of --io --mem --swap.
diff --git a/LICENSES/CC-BY-SA-4.0.txt b/LICENSES/CC-BY-SA-4.0.txt
new file mode 100644
index 0000000..835a683
--- /dev/null
+++ b/LICENSES/CC-BY-SA-4.0.txt
@@ -0,0 +1,170 @@
+Creative Commons Attribution-ShareAlike 4.0 International
+
+ Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.
+
+Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors.
+
+Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described.
+
+Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public.
+
+Creative Commons Attribution-ShareAlike 4.0 International Public License
+
+By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
+
+Section 1 – Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
+
+ e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
+
+ i. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights under this Public License.
+
+ k. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
+
+Section 2 – Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
+
+ A. reproduce and Share the Licensed Material, in whole or in part; and
+
+ B. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section 6(a).
+
+ 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
+
+ B. Additional offer from the Licensor – Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply.
+
+ C. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this Public License.
+
+ 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties.
+
+Section 3 – License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified form), You must:
+
+ A. retain the following if it is supplied by the Licensor with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
+
+ B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
+
+ C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
+
+ 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
+
+ b. ShareAlike. In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply.
+
+Section 4 – Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and
+
+ c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
+
+Section 5 – Disclaimer of Warranties and Limitation of Liability.
+
+ a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.
+
+ b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.
+
+ c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
+
+Section 6 – Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ c. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
+
+ d. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
+
+ e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
+
+Section 7 – Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
+
+Section 8 – Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
+
+ c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
+
+Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/LICENSES/GFDL-1.3-or-later.txt b/LICENSES/GFDL-1.3-or-later.txt
new file mode 100644
index 0000000..b51dc2a
--- /dev/null
+++ b/LICENSES/GFDL-1.3-or-later.txt
@@ -0,0 +1,149 @@
+GNU Free Documentation License
+Version 1.3, 3 November 2008
+
+Copyright (C) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc. <http://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
+
+0. PREAMBLE
+
+The purpose of this License is to make a manual, textbook, or other functional and useful document "free" in the sense of freedom: to assure everyone the effective freedom to copy and redistribute it, with or without modifying it, either commercially or noncommercially. Secondarily, this License preserves for the author and publisher a way to get credit for their work, while not being considered responsible for modifications made by others.
+
+This License is a kind of "copyleft", which means that derivative works of the document must themselves be free in the same sense. It complements the GNU General Public License, which is a copyleft license designed for free software.
+
+We have designed this License in order to use it for manuals for free software, because free software needs free documentation: a free program should come with manuals providing the same freedoms that the software does. But this License is not limited to software manuals; it can be used for any textual work, regardless of subject matter or whether it is published as a printed book. We recommend this License principally for works whose purpose is instruction or reference.
+
+1. APPLICABILITY AND DEFINITIONS
+
+This License applies to any manual or other work, in any medium, that contains a notice placed by the copyright holder saying it can be distributed under the terms of this License. Such a notice grants a world-wide, royalty-free license, unlimited in duration, to use that work under the conditions stated herein. The "Document", below, refers to any such manual or work. Any member of the public is a licensee, and is addressed as "you". You accept the license if you copy, modify or distribute the work in a way requiring permission under copyright law.
+
+A "Modified Version" of the Document means any work containing the Document or a portion of it, either copied verbatim, or with modifications and/or translated into another language.
+
+A "Secondary Section" is a named appendix or a front-matter section of the Document that deals exclusively with the relationship of the publishers or authors of the Document to the Document's overall subject (or to related matters) and contains nothing that could fall directly within that overall subject. (Thus, if the Document is in part a textbook of mathematics, a Secondary Section may not explain any mathematics.) The relationship could be a matter of historical connection with the subject or with related matters, or of legal, commercial, philosophical, ethical or political position regarding them.
+
+The "Invariant Sections" are certain Secondary Sections whose titles are designated, as being those of Invariant Sections, in the notice that says that the Document is released under this License. If a section does not fit the above definition of Secondary then it is not allowed to be designated as Invariant. The Document may contain zero Invariant Sections. If the Document does not identify any Invariant Sections then there are none.
+
+The "Cover Texts" are certain short passages of text that are listed, as Front-Cover Texts or Back-Cover Texts, in the notice that says that the Document is released under this License. A Front-Cover Text may be at most 5 words, and a Back-Cover Text may be at most 25 words.
+
+A "Transparent" copy of the Document means a machine-readable copy, represented in a format whose specification is available to the general public, that is suitable for revising the document straightforwardly with generic text editors or (for images composed of pixels) generic paint programs or (for drawings) some widely available drawing editor, and that is suitable for input to text formatters or for automatic translation to a variety of formats suitable for input to text formatters. A copy made in an otherwise Transparent file format whose markup, or absence of markup, has been arranged to thwart or discourage subsequent modification by readers is not Transparent. An image format is not Transparent if used for any substantial amount of text. A copy that is not "Transparent" is called "Opaque".
+
+Examples of suitable formats for Transparent copies include plain ASCII without markup, Texinfo input format, LaTeX input format, SGML or XML using a publicly available DTD, and standard-conforming simple HTML, PostScript or PDF designed for human modification. Examples of transparent image formats include PNG, XCF and JPG. Opaque formats include proprietary formats that can be read and edited only by proprietary word processors, SGML or XML for which the DTD and/or processing tools are not generally available, and the machine-generated HTML, PostScript or PDF produced by some word processors for output purposes only.
+
+The "Title Page" means, for a printed book, the title page itself, plus such following pages as are needed to hold, legibly, the material this License requires to appear in the title page. For works in formats which do not have any title page as such, "Title Page" means the text near the most prominent appearance of the work's title, preceding the beginning of the body of the text.
+
+The "publisher" means any person or entity that distributes copies of the Document to the public.
+
+A section "Entitled XYZ" means a named subunit of the Document whose title either is precisely XYZ or contains XYZ in parentheses following text that translates XYZ in another language. (Here XYZ stands for a specific section name mentioned below, such as "Acknowledgements", "Dedications", "Endorsements", or "History".) To "Preserve the Title" of such a section when you modify the Document means that it remains a section "Entitled XYZ" according to this definition.
+
+The Document may include Warranty Disclaimers next to the notice which states that this License applies to the Document. These Warranty Disclaimers are considered to be included by reference in this License, but only as regards disclaiming warranties: any other implication that these Warranty Disclaimers may have is void and has no effect on the meaning of this License.
+
+2. VERBATIM COPYING
+
+You may copy and distribute the Document in any medium, either commercially or noncommercially, provided that this License, the copyright notices, and the license notice saying this License applies to the Document are reproduced in all copies, and that you add no other conditions whatsoever to those of this License. You may not use technical measures to obstruct or control the reading or further copying of the copies you make or distribute. However, you may accept compensation in exchange for copies. If you distribute a large enough number of copies you must also follow the conditions in section 3.
+
+You may also lend copies, under the same conditions stated above, and you may publicly display copies.
+
+3. COPYING IN QUANTITY
+
+If you publish printed copies (or copies in media that commonly have printed covers) of the Document, numbering more than 100, and the Document's license notice requires Cover Texts, you must enclose the copies in covers that carry, clearly and legibly, all these Cover Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on the back cover. Both covers must also clearly and legibly identify you as the publisher of these copies. The front cover must present the full title with all words of the title equally prominent and visible. You may add other material on the covers in addition. Copying with changes limited to the covers, as long as they preserve the title of the Document and satisfy these conditions, can be treated as verbatim copying in other respects.
+
+If the required texts for either cover are too voluminous to fit legibly, you should put the first ones listed (as many as fit reasonably) on the actual cover, and continue the rest onto adjacent pages.
+
+If you publish or distribute Opaque copies of the Document numbering more than 100, you must either include a machine-readable Transparent copy along with each Opaque copy, or state in or with each Opaque copy a computer-network location from which the general network-using public has access to download using public-standard network protocols a complete Transparent copy of the Document, free of added material. If you use the latter option, you must take reasonably prudent steps, when you begin distribution of Opaque copies in quantity, to ensure that this Transparent copy will remain thus accessible at the stated location until at least one year after the last time you distribute an Opaque copy (directly or through your agents or retailers) of that edition to the public.
+
+It is requested, but not required, that you contact the authors of the Document well before redistributing any large number of copies, to give them a chance to provide you with an updated version of the Document.
+
+4. MODIFICATIONS
+
+You may copy and distribute a Modified Version of the Document under the conditions of sections 2 and 3 above, provided that you release the Modified Version under precisely this License, with the Modified Version filling the role of the Document, thus licensing distribution and modification of the Modified Version to whoever possesses a copy of it. In addition, you must do these things in the Modified Version:
+
+ A. Use in the Title Page (and on the covers, if any) a title distinct from that of the Document, and from those of previous versions (which should, if there were any, be listed in the History section of the Document). You may use the same title as a previous version if the original publisher of that version gives permission.
+ B. List on the Title Page, as authors, one or more persons or entities responsible for authorship of the modifications in the Modified Version, together with at least five of the principal authors of the Document (all of its principal authors, if it has fewer than five), unless they release you from this requirement.
+ C. State on the Title page the name of the publisher of the Modified Version, as the publisher.
+ D. Preserve all the copyright notices of the Document.
+ E. Add an appropriate copyright notice for your modifications adjacent to the other copyright notices.
+ F. Include, immediately after the copyright notices, a license notice giving the public permission to use the Modified Version under the terms of this License, in the form shown in the Addendum below.
+ G. Preserve in that license notice the full lists of Invariant Sections and required Cover Texts given in the Document's license notice.
+ H. Include an unaltered copy of this License.
+ I. Preserve the section Entitled "History", Preserve its Title, and add to it an item stating at least the title, year, new authors, and publisher of the Modified Version as given on the Title Page. If there is no section Entitled "History" in the Document, create one stating the title, year, authors, and publisher of the Document as given on its Title Page, then add an item describing the Modified Version as stated in the previous sentence.
+ J. Preserve the network location, if any, given in the Document for public access to a Transparent copy of the Document, and likewise the network locations given in the Document for previous versions it was based on. These may be placed in the "History" section. You may omit a network location for a work that was published at least four years before the Document itself, or if the original publisher of the version it refers to gives permission.
+ K. For any section Entitled "Acknowledgements" or "Dedications", Preserve the Title of the section, and preserve in the section all the substance and tone of each of the contributor acknowledgements and/or dedications given therein.
+ L. Preserve all the Invariant Sections of the Document, unaltered in their text and in their titles. Section numbers or the equivalent are not considered part of the section titles.
+ M. Delete any section Entitled "Endorsements". Such a section may not be included in the Modified Version.
+ N. Do not retitle any existing section to be Entitled "Endorsements" or to conflict in title with any Invariant Section.
+ O. Preserve any Warranty Disclaimers.
+
+If the Modified Version includes new front-matter sections or appendices that qualify as Secondary Sections and contain no material copied from the Document, you may at your option designate some or all of these sections as invariant. To do this, add their titles to the list of Invariant Sections in the Modified Version's license notice. These titles must be distinct from any other section titles.
+
+You may add a section Entitled "Endorsements", provided it contains nothing but endorsements of your Modified Version by various parties--for example, statements of peer review or that the text has been approved by an organization as the authoritative definition of a standard.
+
+You may add a passage of up to five words as a Front-Cover Text, and a passage of up to 25 words as a Back-Cover Text, to the end of the list of Cover Texts in the Modified Version. Only one passage of Front-Cover Text and one of Back-Cover Text may be added by (or through arrangements made by) any one entity. If the Document already includes a cover text for the same cover, previously added by you or by arrangement made by the same entity you are acting on behalf of, you may not add another; but you may replace the old one, on explicit permission from the previous publisher that added the old one.
+
+The author(s) and publisher(s) of the Document do not by this License give permission to use their names for publicity for or to assert or imply endorsement of any Modified Version.
+
+5. COMBINING DOCUMENTS
+
+You may combine the Document with other documents released under this License, under the terms defined in section 4 above for modified versions, provided that you include in the combination all of the Invariant Sections of all of the original documents, unmodified, and list them all as Invariant Sections of your combined work in its license notice, and that you preserve all their Warranty Disclaimers.
+
+The combined work need only contain one copy of this License, and multiple identical Invariant Sections may be replaced with a single copy. If there are multiple Invariant Sections with the same name but different contents, make the title of each such section unique by adding at the end of it, in parentheses, the name of the original author or publisher of that section if known, or else a unique number. Make the same adjustment to the section titles in the list of Invariant Sections in the license notice of the combined work.
+
+In the combination, you must combine any sections Entitled "History" in the various original documents, forming one section Entitled "History"; likewise combine any sections Entitled "Acknowledgements", and any sections Entitled "Dedications". You must delete all sections Entitled "Endorsements".
+
+6. COLLECTIONS OF DOCUMENTS
+
+You may make a collection consisting of the Document and other documents released under this License, and replace the individual copies of this License in the various documents with a single copy that is included in the collection, provided that you follow the rules of this License for verbatim copying of each of the documents in all other respects.
+
+You may extract a single document from such a collection, and distribute it individually under this License, provided you insert a copy of this License into the extracted document, and follow this License in all other respects regarding verbatim copying of that document.
+
+7. AGGREGATION WITH INDEPENDENT WORKS
+
+A compilation of the Document or its derivatives with other separate and independent documents or works, in or on a volume of a storage or distribution medium, is called an "aggregate" if the copyright resulting from the compilation is not used to limit the legal rights of the compilation's users beyond what the individual works permit. When the Document is included in an aggregate, this License does not apply to the other works in the aggregate which are not themselves derivative works of the Document.
+
+If the Cover Text requirement of section 3 is applicable to these copies of the Document, then if the Document is less than one half of the entire aggregate, the Document's Cover Texts may be placed on covers that bracket the Document within the aggregate, or the electronic equivalent of covers if the Document is in electronic form. Otherwise they must appear on printed covers that bracket the whole aggregate.
+
+8. TRANSLATION
+
+Translation is considered a kind of modification, so you may distribute translations of the Document under the terms of section 4. Replacing Invariant Sections with translations requires special permission from their copyright holders, but you may include translations of some or all Invariant Sections in addition to the original versions of these Invariant Sections. You may include a translation of this License, and all the license notices in the Document, and any Warranty Disclaimers, provided that you also include the original English version of this License and the original versions of those notices and disclaimers. In case of a disagreement between the translation and the original version of this License or a notice or disclaimer, the original version will prevail.
+
+If a section in the Document is Entitled "Acknowledgements", "Dedications", or "History", the requirement (section 4) to Preserve its Title (section 1) will typically require changing the actual title.
+
+9. TERMINATION
+
+You may not copy, modify, sublicense, or distribute the Document except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, or distribute it is void, and will automatically terminate your rights under this License.
+
+However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
+
+Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
+
+Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, receipt of a copy of some or all of the same material does not give you any rights to use it.
+
+10. FUTURE REVISIONS OF THIS LICENSE
+
+The Free Software Foundation may publish new, revised versions of the GNU Free Documentation License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. See http://www.gnu.org/copyleft/.
+
+Each version of the License is given a distinguishing version number. If the Document specifies that a particular numbered version of this License "or any later version" applies to it, you have the option of following the terms and conditions either of that specified version or of any later version that has been published (not as a draft) by the Free Software Foundation. If the Document does not specify a version number of this License, you may choose any version ever published (not as a draft) by the Free Software Foundation. If the Document specifies that a proxy can decide which future versions of this License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Document.
+
+11. RELICENSING
+
+"Massive Multiauthor Collaboration Site" (or "MMC Site") means any World Wide Web server that publishes copyrightable works and also provides prominent facilities for anybody to edit those works. A public wiki that anybody can edit is an example of such a server. A "Massive Multiauthor Collaboration" (or "MMC") contained in the site means any set of copyrightable works thus published on the MMC site.
+
+"CC-BY-SA" means the Creative Commons Attribution-Share Alike 3.0 license published by Creative Commons Corporation, a not-for-profit corporation with a principal place of business in San Francisco, California, as well as future copyleft versions of that license published by that same organization.
+
+"Incorporate" means to publish or republish a Document, in whole or in part, as part of another Document.
+
+An MMC is "eligible for relicensing" if it is licensed under this License, and if all works that were first published under this License somewhere other than this MMC, and subsequently incorporated in whole or in part into the MMC, (1) had no cover texts or invariant sections, and (2) were thus incorporated prior to November 1, 2008.
+
+The operator of an MMC Site may republish an MMC contained in the site under CC-BY-SA on the same site at any time before August 1, 2009, provided the MMC is eligible for relicensing.
+
+ADDENDUM: How to use this License for your documents
+
+To use this License in a document you have written, include a copy of the License in the document and put the following copyright and license notices just after the title page:
+
+ Copyright (c) YEAR YOUR NAME. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included in the section entitled "GNU Free Documentation License".
+
+If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, replace the "with...Texts." line with this:
+
+ with the Invariant Sections being LIST THEIR TITLES, with the Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.
+
+If you have Invariant Sections without Cover Texts, or some other combination of the three, merge those two alternatives to suit the situation.
+
+If your document contains nontrivial examples of program code, we recommend releasing these examples in parallel under your choice of free software license, such as the GNU General Public License, to permit their use in free software.
diff --git a/LICENSES/GPL-3.0-or-later.txt b/LICENSES/GPL-3.0-or-later.txt
new file mode 100644
index 0000000..d41c0bd
--- /dev/null
+++ b/LICENSES/GPL-3.0-or-later.txt
@@ -0,0 +1,232 @@
+GNU GENERAL PUBLIC LICENSE
+Version 3, 29 June 2007
+
+Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
+
+Preamble
+
+The GNU General Public License is a free, copyleft license for software and other kinds of works.
+
+The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
+
+To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.
+
+For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
+
+Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.
+
+For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.
+
+Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.
+
+Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.
+
+The precise terms and conditions for copying, distribution and modification follow.
+
+TERMS AND CONDITIONS
+
+0. Definitions.
+
+“This License” refers to version 3 of the GNU General Public License.
+
+“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
+
+“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.
+
+To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.
+
+A “covered work” means either the unmodified Program or a work based on the Program.
+
+To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
+
+To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
+
+An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
+
+1. Source Code.
+The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.
+
+A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.
+
+The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.
+
+The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.
+
+The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
+
+The Corresponding Source for a work in source code form is that same work.
+
+2. Basic Permissions.
+All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.
+
+You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.
+
+Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
+
+3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.
+
+When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.
+
+4. Conveying Verbatim Copies.
+You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.
+
+You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
+
+5. Conveying Modified Source Versions.
+You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.
+
+ c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
+
+A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
+
+6. Conveying Non-Source Forms.
+You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
+
+ d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
+
+A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
+
+A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.
+
+“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.
+
+If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).
+
+The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
+
+Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.
+
+7. Additional Terms.
+“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
+
+When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
+
+Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
+
+All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
+
+If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.
+
+Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
+
+8. Termination.
+You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).
+
+However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
+
+Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
+
+Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
+
+9. Acceptance Not Required for Having Copies.
+You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
+
+10. Automatic Licensing of Downstream Recipients.
+Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.
+
+An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
+
+You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.
+
+11. Patents.
+A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”.
+
+A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
+
+Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.
+
+In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
+
+If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.
+
+If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.
+
+A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
+
+Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
+
+12. No Surrender of Others' Freedom.
+If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
+
+13. Use with the GNU Affero General Public License.
+Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.
+
+14. Revised Versions of this License.
+The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.
+
+If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.
+
+Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
+
+15. Disclaimer of Warranty.
+THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+16. Limitation of Liability.
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+17. Interpretation of Sections 15 and 16.
+If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the “copyright” line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an “about box”.
+
+You should also get your employer (if you work as a programmer) or school, if any, to sign a “copyright disclaimer” for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>.
+
+The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..5f7f13c
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,198 @@
+# SPDX-FileCopyrightText: 2002-2022 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+SUBDIRS = src
+
+test:
+ cd testsuite; make 1
+
+web:
+ cd src; make web
+ cp -r src/*.pdf src/sphinx/build/html/* ../parallel-web/parallel
+ cp ../parallel-web/parallel/parallel.html ../parallel-web/parallel/man.html
+
+upload:
+ eval `gpg-agent --daemon`
+# make sure $YYYYMMDD is set
+ echo $(YYYYMMDD) | grep 20
+ cp parallel-$(YYYYMMDD).tar.bz2 parallel-latest.tar.bz2
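+# Copy the GNU upload directive template and stamp in the release date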
+ cp doc/parallel.directive parallel-$(YYYYMMDD).tar.bz2.directive
+ perl -i -pe "s/20\d\d\d\d\d\d/$(YYYYMMDD)/" parallel-*.tar.*directive
+ gpg --clearsign --yes parallel-$(YYYYMMDD).tar.bz2.directive
+
+ cp doc/parallel.latest.directive parallel-latest.tar.bz2.directive
+ perl -i -pe "s/20\d\d\d\d\d\d/$(YYYYMMDD)/" parallel-latest.tar.*directive
+ gpg --clearsign --yes parallel-latest.tar.bz2.directive
+
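+# Each generated .sig file doubles as a shell script and a detached
+# signature: run as a script it passes itself ($0) to gpg, which skips
+# the leading shell lines, auto-retrieves the signing key, and verifies
+# the armored signature at the end against the matching tarball.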
+ (echo '#!/bin/bash'; \
+ echo; \
+ echo "# To check the signature run:"; \
+ echo "# echo | gpg"; \
+ echo "# gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-$(YYYYMMDD).tar.bz2.sig"; \
+ echo; \
+ echo "echo | gpg 2>/dev/null"; \
+ echo 'gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve $$0'; \
+ echo 'exit $$?' ; \
+ echo; \
+ gpg -ab -o - parallel-$(YYYYMMDD).tar.bz2; \
+ ) > parallel-$(YYYYMMDD).tar.bz2.sig
+
+ (echo '#!/bin/bash'; \
+ echo; \
+ echo "# To check the signature run:"; \
+ echo "# echo | gpg"; \
+ echo "# gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-latest.tar.bz2.sig"; \
+ echo; \
+ echo "echo | gpg 2>/dev/null"; \
+ echo 'gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve $$0'; \
+ echo 'exit $$?' ; \
+ echo; \
+ gpg -ab -o - parallel-$(YYYYMMDD).tar.bz2; \
+ ) > parallel-latest.tar.bz2.sig
+
+ gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-latest.tar.bz2.sig
+ gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-$(YYYYMMDD).tar.bz2.sig
+
+# "|| true" needed if already uploaded
+ eval p=parallel-latest.tar.bz2{,.sig}; lftp -e "put $$p; bye" ftp://ftp-upload.gnu.org/incoming/ftp/ || true
+ eval p=parallel-$(YYYYMMDD).tar.bz2{,.sig}; lftp -e "put $$p; bye" ftp://ftp-upload.gnu.org/incoming/ftp/ || true
+
+# ../ftpsync/src/ftpsync parallel-$(YYYYMMDD).tar.bz2{,.sig,*asc} ftp://ftp-upload.gnu.org/incoming/ftp/
+# ../ftpsync/src/ftpsync parallel-latest.tar.bz2{,.sig,*asc} ftp://ftp-upload.gnu.org/incoming/ftp/
+
+# This can take 7 minutes
+ pushd /tmp; \
+ rm -rf /tmp/parallel-$(YYYYMMDD)*; \
+ while ! wget https://ftp.gnu.org/gnu/parallel/parallel-$(YYYYMMDD).tar.bz2 ; do sleep 10; done; \
+ tar xjvf parallel-$(YYYYMMDD).tar.bz2; \
+ cd parallel-$(YYYYMMDD); \
+ ./configure; \
+ make -j && sudo make -j install; \
+ pushd; \
+ sudo cp /usr/local/bin/parallel /usr/local/bin/parallel-$(YYYYMMDD)
+
+alphaupload:
+# Copy of upload
+ eval `gpg-agent --daemon`
+# make sure $YYYYMMDD is set
+ echo $(YYYYMMDD) | grep 20
+ cp parallel-$(YYYYMMDD).tar.bz2 parallel-latest.tar.bz2
+ cp doc/parallel.directive parallel-$(YYYYMMDD).tar.bz2.directive
+ perl -i -pe "s/20\d\d\d\d\d\d/$(YYYYMMDD)/" parallel-*.tar.*directive
+ gpg --clearsign --yes parallel-$(YYYYMMDD).tar.bz2.directive
+
+ cp doc/parallel.latest.directive parallel-latest.tar.bz2.directive
+ perl -i -pe "s/20\d\d\d\d\d\d/$(YYYYMMDD)/" parallel-latest.tar.*directive
+ gpg --clearsign --yes parallel-latest.tar.bz2.directive
+
+ (echo '#!/bin/bash'; \
+ echo; \
+ echo "# To check the signature run:"; \
+ echo "# echo | gpg"; \
+ echo "# gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-$(YYYYMMDD).tar.bz2.sig"; \
+ echo; \
+ echo "echo | gpg 2>/dev/null"; \
+ echo 'gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve $$0'; \
+ echo 'exit $$?' ; \
+ echo; \
+ gpg -ab -o - parallel-$(YYYYMMDD).tar.bz2; \
+ ) > parallel-$(YYYYMMDD).tar.bz2.sig
+
+ (echo '#!/bin/bash'; \
+ echo; \
+ echo "# To check the signature run:"; \
+ echo "# echo | gpg"; \
+ echo "# gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-latest.tar.bz2.sig"; \
+ echo; \
+ echo "echo | gpg 2>/dev/null"; \
+ echo 'gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve $$0'; \
+ echo 'exit $$?' ; \
+ echo; \
+ gpg -ab -o - parallel-$(YYYYMMDD).tar.bz2; \
+ ) > parallel-latest.tar.bz2.sig
+
+ gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-latest.tar.bz2.sig
+ gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-$(YYYYMMDD).tar.bz2.sig
+
+ ../ftpsync/src/ftpsync parallel-$(YYYYMMDD).tar.bz2{,.sig,*asc} ftp://ftp-upload.gnu.org/incoming/alpha/
+
+# This can take 7 minutes
+ pushd /tmp; \
+ rm -rf /tmp/parallel-$(YYYYMMDD)*; \
+ while ! wget http://alpha.gnu.org/gnu/parallel/parallel-$(YYYYMMDD).tar.bz2 ; do sleep 2; done; \
+ tar xjvf parallel-$(YYYYMMDD).tar.bz2; \
+ cd parallel-$(YYYYMMDD); \
+ ./configure; \
+ make -j && sudo make -j install; \
+ pushd; \
+ sudo cp /usr/local/bin/parallel /usr/local/bin/parallel-$(YYYYMMDD)
+
+monitorman:
+# If man page changed: open new pdfman
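+# inotifywait prints each changed file; the {= =} expression skips
+# everything but *.pod files, then the tree is rebuilt and the changed
+# page is opened with pdfman.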
+ inotifywait -qmre MOVED_TO -e CLOSE_WRITE --format %w%f . | parallel -uj1 'echo {=/\.pod$$/ or skip()=};make -j && sudo make install; pdfman {/.} &'
+
+reuse:
+ rm -rf /tmp/reuse
+ mkdir /tmp/reuse
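+# Commit the working tree so the clone below also sees uncommitted
+# changes; the dummy commit is unwound and redone at the end.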
+ git commit -a -m dummy
+ (wd=`pwd`; \
+ cd /tmp/reuse; \
+ git clone "$$wd"; \
+ cd parallel; \
+ reuse lint || \
+ (echo Use; \
+	  echo ' reuse addheader --copyright="Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc." --license="GPL-3.0-or-later" source-files'; \
+ echo or; \
+	  echo ' reuse addheader --copyright="Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc." --license="GPL-3.0-or-later" --explicit-license bin-files' \
+ ) )
+ git reset HEAD~ >/dev/null
+ git commit -c ORIG_HEAD >/dev/null
+
+testurls:
+ mkdir -p urls
+ cd urls && ../src/testurls
+
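+# Regenerate the autotools files from scratch, then configure, build,
+# and install.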
+reconf:
+ rm -fr autom4te.cache aclocal.m4 config.h config.h.in config.log Makefile.in missing install-sh
+ rm -rf src/Makefile.in
+ autoreconf --install -W gnu
+ ./configure
+ make -j
+ sudo make install
+
+pack_unpack_and_test_build:
+ echo '### Building tar.bz2'
+ ./configure
+ make dist
+ make dist-bzip2
+ echo "### Unpack parallel-$(YYYYMMDD).tar.bz2"
+ cp parallel-$(YYYYMMDD).tar.bz2 /tmp
+ cd /tmp && \
+ tar xjf parallel-$(YYYYMMDD).tar.bz2 && \
+ cd parallel-$(YYYYMMDD) && \
+ ./configure && make -j && sudo make -j install
+
+# This ought to be correct, but fails on CentOS
+# bashcompletiondir=$(DESTDIR)${datarootdir}/bash-completion/completions
+# zshcompletiondir=$(DESTDIR)${datarootdir}/zsh/site-functions
+bashcompletiondir=$(DESTDIR)${prefix}/share/bash-completion/completions
+zshcompletiondir=$(DESTDIR)${prefix}/share/zsh/site-functions
+bashcompletion=${bashcompletiondir}/parallel
+zshcompletion=${zshcompletiondir}/_parallel
+install-data-hook:
+ mkdir -p ${zshcompletiondir} ${bashcompletiondir}
+ $(DESTDIR)$(bindir)/parallel --shell-completion bash > ${bashcompletion}
+# Make zsh eval the --shell-completion code dynamically so if
+# a newer version of GNU Parallel is installed by a normal
+# user, this newer version will generate the
+# --shell-completion code.
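+# The generated file looks like this:
+#   #compdef parallel
+#   (( $+functions[_comp_parallel] )) ||
+#    eval "$(parallel --shell-completion auto)" &&
+#    _comp_parallel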
+ echo '#compdef parallel' > ${zshcompletion}
+ echo '(( $$+functions[_comp_parallel] )) ||' >> ${zshcompletion}
+ echo ' eval "$$(parallel --shell-completion auto)" &&' >> ${zshcompletion}
+ echo ' _comp_parallel' >> ${zshcompletion}
+
+uninstall-hook:
+ rm -rf ${zshcompletiondir} ${bashcompletiondir}
+
+EXTRA_DIST = CITATION CITATION.cff CREDITS LICENSES/CC-BY-SA-4.0.txt LICENSES/GFDL-1.3-or-later.txt LICENSES/GPL-3.0-or-later.txt
diff --git a/Makefile.in b/Makefile.in
new file mode 100644
index 0000000..39456d1
--- /dev/null
+++ b/Makefile.in
@@ -0,0 +1,961 @@
+# Makefile.in generated by automake 1.16.5 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2021 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-FileCopyrightText: 2002-2022 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+subdir = .
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \
+ $(am__configure_deps) $(am__DIST_COMMON)
+am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
+ configure.lineno config.status.lineno
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ cscope distdir distdir-am dist dist-all distcheck
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) \
+ config.h.in
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/config.h.in NEWS \
+ README TODO install-sh missing
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+am__remove_distdir = \
+ if test -d "$(distdir)"; then \
+ find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
+ && rm -rf "$(distdir)" \
+ || { sleep 5 && rm -rf "$(distdir)"; }; \
+ else :; fi
+am__post_remove_distdir = $(am__remove_distdir)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+DIST_ARCHIVES = $(distdir).tar.gz
+GZIP_ENV = --best
+DIST_TARGETS = dist-gzip
+# Exists only to be overridden by the user if desired.
+AM_DISTCHECK_DVI_TARGET = dvi
+distuninstallcheck_listfiles = find . -type f -print
+am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \
+ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$'
+distcleancheck_listfiles = find . -type f -print
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CSCOPE = @CSCOPE@
+CTAGS = @CTAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+ETAGS = @ETAGS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+am__leading_dot = @am__leading_dot@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build_alias = @build_alias@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host_alias = @host_alias@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+SUBDIRS = src
+
+# This ought to be correct, but fails on CentOS
+# bashcompletiondir=$(DESTDIR)${datarootdir}/bash-completion/completions
+# zshcompletiondir=$(DESTDIR)${datarootdir}/zsh/site-functions
+bashcompletiondir = $(DESTDIR)${prefix}/share/bash-completion/completions
+zshcompletiondir = $(DESTDIR)${prefix}/share/zsh/site-functions
+bashcompletion = ${bashcompletiondir}/parallel
+zshcompletion = ${zshcompletiondir}/_parallel
+EXTRA_DIST = CITATION CITATION.cff CREDITS LICENSES/CC-BY-SA-4.0.txt LICENSES/GFDL-1.3-or-later.txt LICENSES/GPL-3.0-or-later.txt
+all: config.h
+ $(MAKE) $(AM_MAKEFLAGS) all-recursive
+
+.SUFFIXES:
+am--refresh: Makefile
+ @:
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \
+ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ echo ' $(SHELL) ./config.status'; \
+ $(SHELL) ./config.status;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ $(am__cd) $(srcdir) && $(AUTOCONF)
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
+$(am__aclocal_m4_deps):
+
+config.h: stamp-h1
+ @test -f $@ || rm -f stamp-h1
+ @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1
+
+stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
+ @rm -f stamp-h1
+ cd $(top_builddir) && $(SHELL) ./config.status config.h
+$(srcdir)/config.h.in: $(am__configure_deps)
+ ($(am__cd) $(top_srcdir) && $(AUTOHEADER))
+ rm -f stamp-h1
+ touch $@
+
+distclean-hdr:
+ -rm -f config.h stamp-h1
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscope: cscope.files
+ test ! -s cscope.files \
+ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
+clean-cscope:
+ -rm -f cscope.files
+cscope.files: clean-cscope cscopelist
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+ -rm -f cscope.out cscope.in.out cscope.po.out cscope.files
+distdir: $(BUILT_SOURCES)
+ $(MAKE) $(AM_MAKEFLAGS) distdir-am
+
+distdir-am: $(DISTFILES)
+ $(am__remove_distdir)
+ test -d "$(distdir)" || mkdir "$(distdir)"
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+ -test -n "$(am__skip_mode_fix)" \
+ || find "$(distdir)" -type d ! -perm -755 \
+ -exec chmod u+rwx,go+rx {} \; -o \
+ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
+ || chmod -R a+r "$(distdir)"
+dist-gzip: distdir
+ tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz
+ $(am__post_remove_distdir)
+
+dist-bzip2: distdir
+ tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
+ $(am__post_remove_distdir)
+
+dist-lzip: distdir
+ tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz
+ $(am__post_remove_distdir)
+
+dist-xz: distdir
+ tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
+ $(am__post_remove_distdir)
+
+dist-zstd: distdir
+ tardir=$(distdir) && $(am__tar) | zstd -c $${ZSTD_CLEVEL-$${ZSTD_OPT--19}} >$(distdir).tar.zst
+ $(am__post_remove_distdir)
+
+dist-tarZ: distdir
+ @echo WARNING: "Support for distribution archives compressed with" \
+ "legacy program 'compress' is deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
+ tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
+ $(am__post_remove_distdir)
+
+dist-shar: distdir
+ @echo WARNING: "Support for shar distribution archives is" \
+ "deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
+ shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz
+ $(am__post_remove_distdir)
+
+dist-zip: distdir
+ -rm -f $(distdir).zip
+ zip -rq $(distdir).zip $(distdir)
+ $(am__post_remove_distdir)
+
+dist dist-all:
+ $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:'
+ $(am__post_remove_distdir)
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+ case '$(DIST_ARCHIVES)' in \
+ *.tar.gz*) \
+ eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\
+ *.tar.bz2*) \
+ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
+ *.tar.lz*) \
+ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\
+ *.tar.xz*) \
+ xz -dc $(distdir).tar.xz | $(am__untar) ;;\
+ *.tar.Z*) \
+ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
+ *.shar.gz*) \
+ eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\
+ *.zip*) \
+ unzip $(distdir).zip ;;\
+ *.tar.zst*) \
+ zstd -dc $(distdir).tar.zst | $(am__untar) ;;\
+ esac
+ chmod -R a-w $(distdir)
+ chmod u+w $(distdir)
+ mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst
+ chmod a-w $(distdir)
+ test -d $(distdir)/_build || exit 0; \
+ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
+ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
+ && am__cwd=`pwd` \
+ && $(am__cd) $(distdir)/_build/sub \
+ && ../../configure \
+ $(AM_DISTCHECK_CONFIGURE_FLAGS) \
+ $(DISTCHECK_CONFIGURE_FLAGS) \
+ --srcdir=../.. --prefix="$$dc_install_base" \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) $(AM_DISTCHECK_DVI_TARGET) \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
+ distuninstallcheck \
+ && chmod -R a-w "$$dc_install_base" \
+ && ({ \
+ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
+ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
+ } || { rm -rf "$$dc_destdir"; exit 1; }) \
+ && rm -rf "$$dc_destdir" \
+ && $(MAKE) $(AM_MAKEFLAGS) dist \
+ && rm -rf $(DIST_ARCHIVES) \
+ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
+ && cd "$$am__cwd" \
+ || exit 1
+ $(am__post_remove_distdir)
+ @(echo "$(distdir) archives ready for distribution: "; \
+ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
+ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
+distuninstallcheck:
+ @test -n '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: trying to run $@ with an empty' \
+ '$$(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ $(am__cd) '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left after uninstall:" ; \
+ if test -n "$(DESTDIR)"; then \
+ echo " (check DESTDIR support)"; \
+ fi ; \
+ $(distuninstallcheck_listfiles) ; \
+ exit 1; } >&2
+distcleancheck: distclean
+ @if test '$(srcdir)' = . ; then \
+ echo "ERROR: distcleancheck can only run from a VPATH build" ; \
+ exit 1 ; \
+ fi
+ @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left in build directory after distclean:" ; \
+ $(distcleancheck_listfiles) ; \
+ exit 1; } >&2
+check-am: all-am
+check: check-recursive
+all-am: Makefile config.h
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-hdr distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-data-hook
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf $(top_srcdir)/autom4te.cache
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) uninstall-hook
+.MAKE: $(am__recursive_targets) all install-am install-data-am \
+ install-strip uninstall-am
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \
+ am--refresh check check-am clean clean-cscope clean-generic \
+ cscope cscopelist-am ctags ctags-am dist dist-all dist-bzip2 \
+ dist-gzip dist-lzip dist-shar dist-tarZ dist-xz dist-zip \
+ dist-zstd distcheck distclean distclean-generic distclean-hdr \
+ distclean-tags distcleancheck distdir distuninstallcheck dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-data install-data-am install-data-hook install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ installdirs-am maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags \
+ tags-am uninstall uninstall-am uninstall-hook
+
+.PRECIOUS: Makefile
+
+
+test:
+ cd testsuite; make 1
+
+web:
+ cd src; make web
+ cp -r src/*.pdf src/sphinx/build/html/* ../parallel-web/parallel
+ cp ../parallel-web/parallel/parallel.html ../parallel-web/parallel/man.html
+
+upload:
+ eval `gpg-agent --daemon`
+# make sure $YYYYMMDD is set
+ echo $(YYYYMMDD) | grep 20
+ cp parallel-$(YYYYMMDD).tar.bz2 parallel-latest.tar.bz2
+ cp doc/parallel.directive parallel-$(YYYYMMDD).tar.bz2.directive
+ perl -i -pe "s/20\d\d\d\d\d\d/$(YYYYMMDD)/" parallel-*.tar.*directive
+ gpg --clearsign --yes parallel-$(YYYYMMDD).tar.bz2.directive
+
+ cp doc/parallel.latest.directive parallel-latest.tar.bz2.directive
+ perl -i -pe "s/20\d\d\d\d\d\d/$(YYYYMMDD)/" parallel-latest.tar.*directive
+ gpg --clearsign --yes parallel-latest.tar.bz2.directive
+
+ (echo '#!/bin/bash'; \
+ echo; \
+ echo "# To check the signature run:"; \
+ echo "# echo | gpg"; \
+ echo "# gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-$(YYYYMMDD).tar.bz2.sig"; \
+ echo; \
+ echo "echo | gpg 2>/dev/null"; \
+ echo 'gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve $$0'; \
+ echo 'exit $$?' ; \
+ echo; \
+ gpg -ab -o - parallel-$(YYYYMMDD).tar.bz2; \
+ ) > parallel-$(YYYYMMDD).tar.bz2.sig
+
+ (echo '#!/bin/bash'; \
+ echo; \
+ echo "# To check the signature run:"; \
+ echo "# echo | gpg"; \
+ echo "# gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-latest.tar.bz2.sig"; \
+ echo; \
+ echo "echo | gpg 2>/dev/null"; \
+ echo 'gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve $$0'; \
+ echo 'exit $$?' ; \
+ echo; \
+ gpg -ab -o - parallel-$(YYYYMMDD).tar.bz2; \
+ ) > parallel-latest.tar.bz2.sig
+
+ gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-latest.tar.bz2.sig
+ gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-$(YYYYMMDD).tar.bz2.sig
+
+# "|| true" needed if already uploaded
+ eval p=parallel-latest.tar.bz2{,.sig}; lftp -e "put $$p; bye" ftp://ftp-upload.gnu.org/incoming/ftp/ || true
+ eval p=parallel-$(YYYYMMDD).tar.bz2{,.sig}; lftp -e "put $$p; bye" ftp://ftp-upload.gnu.org/incoming/ftp/ || true
+
+# ../ftpsync/src/ftpsync parallel-$(YYYYMMDD).tar.bz2{,.sig,*asc} ftp://ftp-upload.gnu.org/incoming/ftp/
+# ../ftpsync/src/ftpsync parallel-latest.tar.bz2{,.sig,*asc} ftp://ftp-upload.gnu.org/incoming/ftp/
+
+# This can take 7 minutes
+ pushd /tmp; \
+ rm -rf /tmp/parallel-$(YYYYMMDD)*; \
+ while ! wget https://ftp.gnu.org/gnu/parallel/parallel-$(YYYYMMDD).tar.bz2 ; do sleep 10; done; \
+ tar xjvf parallel-$(YYYYMMDD).tar.bz2; \
+ cd parallel-$(YYYYMMDD); \
+ ./configure; \
+ make -j && sudo make -j install; \
+ pushd; \
+ sudo cp /usr/local/bin/parallel /usr/local/bin/parallel-$(YYYYMMDD)
+
+alphaupload:
+# Copy of upload
+ eval `gpg-agent --daemon`
+# make sure $YYYYMMDD is set
+ echo $(YYYYMMDD) | grep 20
+ cp parallel-$(YYYYMMDD).tar.bz2 parallel-latest.tar.bz2
+ cp doc/parallel.directive parallel-$(YYYYMMDD).tar.bz2.directive
+ perl -i -pe "s/20\d\d\d\d\d\d/$(YYYYMMDD)/" parallel-*.tar.*directive
+ gpg --clearsign --yes parallel-$(YYYYMMDD).tar.bz2.directive
+
+ cp doc/parallel.latest.directive parallel-latest.tar.bz2.directive
+ perl -i -pe "s/20\d\d\d\d\d\d/$(YYYYMMDD)/" parallel-latest.tar.*directive
+ gpg --clearsign --yes parallel-latest.tar.bz2.directive
+
+ (echo '#!/bin/bash'; \
+ echo; \
+ echo "# To check the signature run:"; \
+ echo "# echo | gpg"; \
+ echo "# gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-$(YYYYMMDD).tar.bz2.sig"; \
+ echo; \
+ echo "echo | gpg 2>/dev/null"; \
+ echo 'gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve $$0'; \
+ echo 'exit $$?' ; \
+ echo; \
+ gpg -ab -o - parallel-$(YYYYMMDD).tar.bz2; \
+ ) > parallel-$(YYYYMMDD).tar.bz2.sig
+
+ (echo '#!/bin/bash'; \
+ echo; \
+ echo "# To check the signature run:"; \
+ echo "# echo | gpg"; \
+ echo "# gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-latest.tar.bz2.sig"; \
+ echo; \
+ echo "echo | gpg 2>/dev/null"; \
+ echo 'gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve $$0'; \
+ echo 'exit $$?' ; \
+ echo; \
+ gpg -ab -o - parallel-$(YYYYMMDD).tar.bz2; \
+ ) > parallel-latest.tar.bz2.sig
+
+ gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-latest.tar.bz2.sig
+ gpg --auto-key-locate keyserver --keyserver-options auto-key-retrieve parallel-$(YYYYMMDD).tar.bz2.sig
+
+ ../ftpsync/src/ftpsync parallel-$(YYYYMMDD).tar.bz2{,.sig,*asc} ftp://ftp-upload.gnu.org/incoming/alpha/
+
+# This can take 7 minutes
+ pushd /tmp; \
+ rm -rf /tmp/parallel-$(YYYYMMDD)*; \
+ while ! wget http://alpha.gnu.org/gnu/parallel/parallel-$(YYYYMMDD).tar.bz2 ; do sleep 2; done; \
+ tar xjvf parallel-$(YYYYMMDD).tar.bz2; \
+ cd parallel-$(YYYYMMDD); \
+ ./configure; \
+ make -j && sudo make -j install; \
+ pushd; \
+ sudo cp /usr/local/bin/parallel /usr/local/bin/parallel-$(YYYYMMDD)
+
+monitorman:
+# If man page changed: open new pdfman
+ inotifywait -qmre MOVED_TO -e CLOSE_WRITE --format %w%f . | parallel -uj1 'echo {=/\.pod$$/ or skip()=};make -j && sudo make install; pdfman {/.} &'
+
+reuse:
+ rm -rf /tmp/reuse
+ mkdir /tmp/reuse
+ git commit -a -m dummy
+ (wd=`pwd`; \
+ cd /tmp/reuse; \
+ git clone "$$wd"; \
+ cd parallel; \
+ reuse lint || \
+ (echo Use; \
+ echo ' reuse addheader --copyright="Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc." --license="GPL-3.0-or-later" source-files'; \
+ echo or; \
+ echo ' reuse addheader --copyright="Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc." --license="GPL-3.0-or-later" --explicit-license bin-files' \
+ ) )
+ git reset HEAD~ >/dev/null
+ git commit -c ORIG_HEAD >/dev/null
+
+testurls:
+ mkdir -p urls
+ cd urls && ../src/testurls
+
+reconf:
+ rm -fr autom4te.cache aclocal.m4 config.h config.h.in config.log Makefile.in missing install-sh
+ rm -rf src/Makefile.in
+ autoreconf --install -W gnu
+ ./configure
+ make -j
+ sudo make install
+
+pack_unpack_and_test_build:
+ echo '### Building tar.bz2'
+ ./configure
+ make dist
+ make dist-bzip2
+ echo "### Unpack parallel-$(YYYYMMDD).tar.bz2"
+ cp parallel-$(YYYYMMDD).tar.bz2 /tmp
+ cd /tmp && \
+ tar xjf parallel-$(YYYYMMDD).tar.bz2 && \
+ cd parallel-$(YYYYMMDD) && \
+ ./configure && make -j && sudo make -j install
+install-data-hook:
+ mkdir -p ${zshcompletiondir} ${bashcompletiondir}
+ $(DESTDIR)$(bindir)/parallel --shell-completion bash > ${bashcompletion}
+# Make zsh eval the --shell-completion code dynamically so if
+# a newer version of GNU Parallel is installed by a normal
+# user, this newer version will generate the
+# --shell-completion code.
+ echo '#compdef parallel' > ${zshcompletion}
+ echo '(( $$+functions[_comp_parallel] )) ||' >> ${zshcompletion}
+ echo ' eval "$$(parallel --shell-completion auto)" &&' >> ${zshcompletion}
+ echo ' _comp_parallel' >> ${zshcompletion}
+
+uninstall-hook:
+ rm -rf ${zshcompletiondir} ${bashcompletiondir}
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..9dca680
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,5344 @@
+20240222
+
+New in this release:
+
+* No new functionality
+
+* Bug fixes and man page updates.
+
+
+20240122
+
+New in this release:
+
+* --sshlogin supports ranges: server[01-12,15] 10.0.[1-10].[2-254]
+
+* --plus enables {slot-1} and {seq-1} = {%}-1 and {#}-1 to count
+  from 0 (see the sketch below).
+
+* env_parallel.{sh,ash,dash,bash,ksh,zsh} is now the same script.
+
+* Bug fixes and man page updates.
+
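+  A minimal sketch of the two features above (the host names are
+  assumptions):
+
+    # Run 'hostname' once on every host in the range
+    parallel --nonall --sshlogin 'server[01-12,15]' hostname
+
+    # {seq-1} and {slot-1} count from 0 instead of 1
+    parallel --plus echo job {seq-1} slot {slot-1} ::: a b c
+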
+
+20231222
+
+New in this release:
+
+* --combine-exec combines GNU Parallel with any executable.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* The first GNU Parallel video that seems to be completely AI
+ generated: https://www.youtube.com/watch?v=Ie8eAhlEHOg
+
+* GNU Parallel and “fail immediately if any fails” problem
+ https://antiarchitect.medium.com/gnu-parallel-and-fail-immediately-if-any-fails-problem-481ddea690ba
+
+
+20231122
+
+New in this release:
+
+* -a file1 -a +file2 will link file2 to file1 similar to ::::+
+
+* --bar shows total time when all jobs are done.
+
+* Bug fixes and man page updates.
+
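+  A minimal sketch of linked input sources (the file names are
+  assumptions):
+
+    # Unlinked: 2x2 = 4 jobs; linked with :::+ (or -a file -a +file): 2 jobs
+    parallel echo {1} {2} ::: A B ::: C D
+    parallel echo {1} {2} ::: A B :::+ C D
+    parallel -a args1.txt -a +args2.txt echo {1} {2}
+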
+News about GNU Parallel:
+
+* Using GNU Parallel in bash scripts to optimize python processes
+ https://climate-cms.org/posts/2023-11-02-gnu-parallel.html
+
+
+20231022
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Resume long parallel jobs
+ https://ginolhac.github.io/posts/2023-10-02_resume-parallel/index.html
+
+* Efficiency and Speed with GNU Parallel
+ https://dev.to/0xog_pg/efficiency-and-speed-with-gnu-parallel-loo
+
+
+20230922
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* This CLI Tool is AMAZING | Prime Reacts
+ https://www.youtube.com/watch?v=ry49BZA-tgg
+
+* New Data Engineering Stack - GNU parallel
+ https://www.linkedin.com/feed/update/urn:li:activity:7100509073149743104?updateEntityUrn=urn%3Ali%3Afs_feedUpdate%3A%28V2%2Curn%3Ali%3Aactivity%3A7100509073149743104%29
+
+
+20230822
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* GNU Parallel, where have you been all my life?
+ https://alexplescan.com/posts/2023/08/20/gnu-parallel/
+
+* Parallel (multithreaded) music download from Youtube
+ https://hrna.moe/?p=parallel-multithread-music-download
+
+
+20230722
+
+New in this release:
+
+* Time functions (e.g. yyyymmdd($secs) ) can now take secs since epoch as argument.
+
+* Bug fixes and man page updates.
+
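+  A sketch of passing seconds since the epoch to a time function
+  inside {= =} (the exact output format is as documented in the man
+  page):
+
+    parallel echo '{= $_ = yyyymmdd($_) =}' ::: 0 1000000000
+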
+
+20230622
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Run faster in the terminal with Gnu Parallel https://www.youtube.com/watch?v=fSYVYsKXWP0
+
+* Building an (Evil) Android Phone Bot Army http://blog.faucher.net/2023/05/building-evil-android-phone-bot-army.html
+
+
+20230522
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Introduction to parallel computing https://youtu.be/fvrHXV8yqU4?t=2874
+
+* Script for summarizing GNU parallel joblog file https://github.com/gavinmdouglas/parallel_joblog_summary
+
+* How to copy a single file to multiple directories in Linux or Unix https://www.cyberciti.biz/faq/linux-unix-copy-a-file-to-multiple-directories-using-cp-command/
+
+* Search in your Jupyter notebooks from the CLI, fast. https://dev.to/attilavm/search-in-your-jupyter-notebooks-from-the-cli-fast-1408
+
+* Parallel Job Orchestration with GNU Parallel https://www.youtube.com/watch?v=2tVpUfND3LI
+
+* Optional Individual Submission 4 Job Handling 20266001 - GNU Parallel https://www.youtube.com/watch?v=eC_RPuFCcU8
+
+* DOE Cross-facility Workflows Training - April 12, 2023 https://www.youtube.com/watch?v=Ke3sirM-aQQ
+
+* How to run MiXCR 4.x on multiple patient samples using GNU Parallel https://www.youtube.com/watch?v=OXg-WHlB_dk
+
+
+20230422
+
+New in this release:
+
+* --jobs evaluates expression: +3*log(55)% = ncpu*1.12
+
+* Bug fixes and man page updates.
+
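+  A sketch of an evaluated --jobs expression (log() is the natural
+  logarithm, so +3*log(55)% is roughly +12% on top of ncpu):
+
+    parallel --jobs '+3*log(55)%' 'sleep 1; echo {}' ::: {1..20}
+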
+News about GNU Parallel:
+
+* Running shell script in parallel
+ https://w3toppers.com/running-shell-script-in-parallel/
+
+* Simulating climate risk scenarios for the Amazon Rainforest
+ https://aws.amazon.com/blogs/hpc/simulating-climate-risk-scenarios-for-the-amazon-rainforest/
+
+* How To Use Your Entire CPU In Bash With Parallel
+ https://bash-prompt.net/guides/parallell-bash/
+
+* Bash - parallel command execution
+ https://medium.com/linuxstories/bash-parallel-command-execution-d4bd7c7cc1d6
+
+* 3 tips for faster batch post-pro
+ https://www.cfdengine.com/newsletter/142/
+
+
+20230322
+
+New in this release:
+
+* Better support for wide characters in --latest-line.
+
+* Support for rsync 3.2.7.
+
+* Bug fixes and man page updates.
+
+
+20230222
+
+New in this release:
+
+* parsort: --parallel now does closer to what you expect.
+
+* parallel: --files0 is --files but \0 separated (see the sketch
+  below).
+
+* Bug fixes and man page updates.
+
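+  A sketch of --files0 (it prints the name of the file holding each
+  job's output, NUL-separated, so the list is safe for xargs -0):
+
+    parallel --files0 'echo Job {}' ::: a b c | xargs -0 cat
+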
+News about GNU Parallel:
+
+* 5 great Perl scripts to keep in your sysadmin toolbox
+ https://www.redhat.com/sysadmin/perl-scripts
+
+
+20230122
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* The Best Ethical Hacking Tools of 2023 (and their basic usage)
+ https://www.purevpn.com/blog/the-best-hacking-tools-of-2023/#11_GNU_Parallel
+
+* GNU Parallel: creating parallel activities with shell script
+  (Portuguese)
+ https://www.vivaolinux.com.br/artigo/GNU-Parallel-criando-atividades-em-paralelo-com-shell-script/
+
+
+20221222
+
+New in this release:
+
+* --results works on more file systems (e.g. fat)
+
+* Joblog gives the same exit code as bash.
+
+News about GNU Parallel:
+
+* Linux shell programming: process parallelism with GNU parallel
+  (Portuguese)
+ https://www.youtube.com/watch?v=duheTWLIrp8
+
+* Talk Python: Data Science from the Command Line
+ https://talkpython.fm/episodes/show/392/data-science-from-the-command-line
+
+
+20221122
+
+New in this release:
+
+* Support for IPv6 addresses and _ in hostnames in --sshlogin.
+
+* Use --total-jobs for --eta/--bar if generating jobs is slow.
+
+* A lot of bugs fixed in --latest-line.
+
+* Better support for MSYS2.
+
+* Better Text::CSV error messages.
+
+* --bar supports UTF8.
+
+* Bug fixes and man page updates.
+
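+  A sketch of --total-jobs ('slowgen' and 'process' are assumptions
+  for a slow job generator and a worker):
+
+    # Tell --bar the total up front instead of waiting for all input
+    slowgen | parallel --bar --total-jobs 1000 process {}
+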
+News about GNU Parallel:
+
+* GNU Parallel is now on Mastodon: @GNU_Parallel@hostux.social
+
+
+20221022
+
+New in this release:
+
+* --latest-line chops line length at terminal width.
+
+* Determine max command length faster on Microsoft Windows.
+
+News about GNU Parallel:
+
+* Distributed Task Processing with GNU Parallel
+ https://www.youtube.com/watch?v=usbMLggdMgc
+
+* GNU Parallel workflow for many small, independent runs
+ https://docs.csc.fi/support/tutorials/many/
+
+* Copy a File To Multiple Directories With A Single Command on Linux
+ https://www.linuxfordevices.com/tutorials/linux/copy-file-to-multiple-directories-with-one-command
+
+* Behind The Scenes: The Power Of Simple Command Line Tools At Cloud
+ Scale
+ https://blog.gdeltproject.org/behind-the-scenes-the-power-of-simple-command-line-tools-at-cloud-scale/
+
+* Run lz4 compression in parallel using GNU parallel
+ https://www.openguru.com/2022/09/
+
+* Xargs / Parallel With Code Examples
+ https://www.folkstalk.com/2022/09/xargs-parallel-with-code-examples.html
+
+* Parallel processing on a single node with GNU Parallel
+ https://www3.cs.stonybrook.edu/~cse416/Section01/Slides/SeaWulfIntro_CSE416_09222022.pdf
+
+* Using GNU parallel painlessly -- from basics to bioinformatics job
+ orchestration https://www.youtube.com/watch?v=qypUdm-IE9c
+
+
+20220922
+
+New in this release:
+
+* --colour-failed only changes output for failing jobs.
+
+* Password for --sshlogin can be put in $SSHPASS.
+
+* Examples are moved from `man parallel` to `man parallel_examples`.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* WOMM - Works On My Machine uses GNU Parallel https://pypi.org/project/womm/
+
+
+20220822
+
+New in this release:
+
+* --header 0 allows using {filename} as a replacement string.
+
+* Bug fixes and man page updates.
+
+
+20220722
+
+New in this release:
+
+* --colour-failed will color output red if the job fails.
+
+* sql: support for InfluxDB.
+
+* Polarhome.com is dead, so these OSs are no longer supported: AIX
+ HPUX IRIX Minix OPENSTEP OpenIndiana OpenServer QNX Solaris Syllable
+ Tru64 Ultrix UnixWare.
+
+* Bug fixes and man page updates.
+
+
+20220622
+
+New in this release:
+
+* , can be used in --sshlogin if quoted as \, or ,,
+
+* --plus {/#regexp/str} replaces ^regexp with str.
+
+* --plus {/%regexp/str} replaces regexp$ with str.
+
+* --plus {//regexp/str} replaces every regexp with str (see the
+  sketch below).
+
+* 'make install' installs bash+zsh completion files.
+
+* Bug fixes and man page updates.
+
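+  A sketch of the new --plus replacement strings:
+
+    # {/%regexp/str} replaces the regexp anchored at the end
+    parallel --plus echo '{/%\.jpg/.png}' ::: photo.jpg
+    # {//regexp/str} replaces every match
+    parallel --plus echo '{//o/0}' ::: foobar
+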
+
+20220522
+
+New in this release:
+
+* --latest-line shows only the latest line of running jobs (see the
+  sketch below).
+
+* --color colors output in different colors per job (this obsoletes --ctag).
+
+* xargs compatibility: --process-slot-var foo sets $foo to jobslot-1.
+
+* xargs compatibility: --open-tty opens the terminal on stdin (standard input).
+
+* Bug fixes and man page updates.
+
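+  A sketch of --latest-line (each running job keeps a single,
+  continuously updated line on screen):
+
+    parallel --latest-line 'for i in 1 2 3; do echo {} $i; sleep 1; done' ::: A B C
+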
+News about GNU Parallel:
+
+* Building a fault-tolerant work queue for command-line executions
+ with GNU Parallel https://www.jvt.me/posts/2022/04/28/shell-queue/
+
+* Make your code do more, with less
+ https://www.blopig.com/blog/2022/05/make-your-code-do-more-with-less/
+
+* W09 B IntroHPC II 20220513 gnu parallel
+ https://www.youtube.com/watch?v=2udvC90rIa4
+
+
+20220422
+
+New in this release:
+
+* sash is no longer supported as shell.
+
+* --retries 0 is an alias for --retries 2147483647.
+
+* --shell-completion returns shell completion code.
+
+* --ssh-login-file reloads every second.
+
+* --parset is replaced with --_parset because it is only used internally.
+
+* sem --pipe passes STDIN (standard input) to the command.
+
+* Bug fixes and man page updates.
+
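+  A sketch of --shell-completion (the same call is used by 'make
+  install' above to generate the bash completion file):
+
+    # Load completions into the current bash session
+    source <(parallel --shell-completion bash)
+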
+
+20220322
+
+New in this release:
+
+* --sshlogin user:password@host is now supported by using sshpass.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Bash: GNU Parallel with Curl
+ https://gist.github.com/CMCDragonkai/5914e02df62137e47f32?permalink_comment_id=2617456
+
+* The Pipe Operator Explained
+ https://medium.com/geekculture/the-pipe-operator-explained-cbd41e23775a
+
+
+20220222
+
+New in this release:
+
+* Encoding of remote commands has changed. This limits the maximal
+  length of remote commands to less than half of what it was. It also
+  requires $REMOTE_TMPDIR to be writable. This makes the release beta
+  quality.
+
+* Time functions hh_mm_ss() hhmmss() hh_mm() hhmm() are now available
+ for {= =}.
+
+* Many options in the man page for parallel now have 'see also' for
+  related options.
+
+* parallel_options_map.pdf is a map showing which options other
+ options relate to.
+
+* {uniq} added to --plus to only run jobs with unique values.
+
+* sem --fg exits with the exit code of the job.
+
+* Bug fixes and man page updates.
+
+
+20220122
+
+New in this release:
+
+* GNU Parallel turned 20 on 2022-01-06
+ https://www.gnu.org/software/parallel/20th-birthday.html
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Save Time with Modern Search Techniques
+ https://www.eventbrite.com/e/ohio-information-security-forum-january-2022-monthly-meeting-tickets-240632737997
+
+* Bash "Fail immediately if any fails"
+ https://morioh.com/p/a01b883dc57c
+
+* Re-encoding the EmacsConf videos with FFmpeg and GNU Parallel
+ https://sachachua.com/blog/2021/12/re-encoding-the-emacsconf-videos-with-ffmpeg-and-gnu-parallel/
+
+* Bash parallelism (Japanese)
+  https://learntutorials.net/ja/bash/topic/10778/%E5%B9%B3%E8%A1%8C
+
+
+20211222
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* The first version of Parallel turns 20 years old on 2022-01-06
+
+* HerrComp Intro HPC gnu parallel
+ https://www.youtube.com/watch?v=tcgjKWfgWQY
+
+* Tricks for processing a large number of files (Japanese)
+ https://qiita.com/toramili/items/44a770cfb9ef1edc9e26
+
+* How To Install parallel on Ubuntu 20.04
+ https://installati.one/ubuntu/20.04/parallel/
+
+* Distributing embarrassingly parallel tasks GNU Parallel
+ https://ulhpc-tutorials.readthedocs.io/en/latest/sequential/gnu-parallel/
+
+
+20211122
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Optimizing your hacking with GNU Parallel - Mateus Buogo
+  (Portuguese)
+ https://www.youtube.com/watch?v=xIpPPZXDKGU
+
+* SMACK 12 - Do it faster! Simple ways to use all those cores (and
+ GPUs) efficiently. https://www.youtube.com/watch?v=lXDGY7NvOYk
+
+
+20211022
+
+New in this release:
+
+* Don't postpone output until $jobslots jobs have been started.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* How to Install GNU Parallel using Anaconda | Linux
+ https://www.youtube.com/watch?v=UwDNVP-L0qA
+
+* Install GNU Parallel from source code | Linux
+ https://www.youtube.com/watch?v=PIyJH4ben5o
+
+* Compiling GNU Parallel on CentOS-7
+ https://thelinuxcluster.com/2021/10/19/compiling-gnu-parallel-on-centos-7/
+
+
+20210922
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* 3 Programs that will Level Up your Bash Game
+ https://medium.com/codex/3-programs-that-will-level-up-your-bash-game-bf14e17e2291
+
+* I Summarized 100+ Command-Line Tools for Data Scientists in 15
+ Minutes
+ https://towardsdatascience.com/i-summarized-100-command-line-tools-for-data-scientists-in-15-minutes-976def544ffe
+
+* Speed up Your Builds by Parallelizing
+ https://www.incredibuild.com/blog/speed-up-your-builds-by-parallelizing
+
+* Bash: GNU Parallel with Curl
+ https://gist.github.com/dinh/93b936a55de9a331199d6582bc2921d2
+
+
+20210822
+
+New in this release:
+
+* --ctag/--ctagstring colors the tag in different colors for each job.
+
+* You can use unit prefixes (k, m, g, etc) with -n -N -L.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Parallelising jobs with GNU parallel
+ https://blog.ronin.cloud/gnu-parallel/
+
+* Use multiple CPU Cores with your Linux commands - awk, sed, bzip2,
+ grep, wc, etc. https://cdmana.com/2021/07/20210728132344693t.html
+
+* How to execute commands in parallel in Linux
+ https://net2.com/how-to-execute-commands-in-parallel-in-linux/
+
+
+20210722
+
+New in this release:
+
+* --results no longer prints the result to standard output (stdout) as
+ voted in
+ https://lists.gnu.org/archive/html/parallel/2020-12/msg00003.html
+
+* parset supports associative arrays in bash, ksh, zsh.
+
+* Online HTML is now generated by Sphinx.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Cleaning Up Scanned Documents with Open Source Tools
+ https://kaerumy.medium.com/cleaning-up-scanned-documents-with-open-source-tools-9d87e15305b
+
+
+20210622
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* How to use GNU Parallel
+ https://techtipbits.com/linux/how-to-use-gnu-parallel/
+
+* How to Speed Up Bash Scripts with Multithreading and GNU Parallel
+ https://adamtheautomator.com/how-to-speed-up-bash-scripts-with-multithreading-and-gnu-parallel/
+
+* Use Parallel to split by line
+ https://madflex.de/use-parallel-to-split-by-line/
+
+* Optimizing long batch processes or ETL by using buff/cache properly
+ II (parallelizing network operations)
+ http://www.elsotanillo.net/2021/06/optimizing-long-batch-processes-or-etl-by-using-buff-cache-properly-ii-parallelizing-network-operations/
+
+* Parallelization 3: GNU Parallel https://www.youtube.com/watch?v=Rl06WD60afA
+
+
+20210522
+
+New in this release:
+
+* --plus includes {%%regexp} and {##regexp}.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Batch Calculate and Verify MD5 Checksum With GNU Parallel
+ https://omicx.cc/posts/2021-04-28-calculate-and-verify-md5-checksum-with-gnu-parallel/
+
+* HerrComp Gnu parallel, c++11 threads 2021 04 28
+ https://www.youtube.com/watch?v=wDd9F9nn0qA
+
+* Distributing embarrassingly parallel tasks GNU Parallel
+ https://ulhpc-tutorials.readthedocs.io/en/latest/sequential/gnu-parallel/
+
+* Job Parallelization on Niagara
+ https://www.maryamdaryalal.com/post/job-parallelization-on-niagara
+
+* Use Parallel to split by line
+ https://madflex.de/use-parallel-to-split-by-line/
+
+* m1 multi-core batch convert with gpu parallel + ffmpeg
+ https://www.youtube.com/watch?v=hAuc0YsXv6A
+
+
+20210422
+
+New in this release:
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Bioinformatics tutorials - linux and shell advanced - parallel
+ https://www.youtube.com/watch?v=5leL8pyl0XA
+
+* GNU Parallel for quick gains
+ https://edbennett.github.io/high-performance-python/04-gnu-parallel/index.html
+
+* Processing Linux Commands in Parallel
+ https://www.baeldung.com/linux/processing-commands-in-parallel
+
+* GNU parallel
+ https://docs-research-it.berkeley.edu/services/high-performance-computing/user-guide/running-your-jobs/gnu-parallel/
+
+* GNU Parallel usage guide (Korean) https://genoglobe.com/kribb/gnu_parallel
+
+* Parallel Grep and Awk
+ https://www.highonscience.com/blog/2021/03/21/parallel-grep/
+
+* Getting things done with shell scripting
+ https://doma.dev/blog/get-things-done-with-bash/
+
+
+20210322
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* The very first version of Parallel dated 2002-01-06 was found in an
+ old backup:
+
+ #!/usr/bin/perl
+
+ $processes=shift;
+
+ chomp(@jobs=<>);
+ for (@jobs) {
+ $jobnr++;
+ push @makefile,
+ (".PHONY : job$jobnr\n",
+ "job$jobnr :\n",
+ "\t$_\n");
+ }
+ unshift @makefile, "all : ",(map { "job$_ " } 1 .. $jobnr),"\n";
+
+ open (MAKE, "| make -k -f - -j $processes") || die;
+ print MAKE @makefile;
+ close MAKE;
+
+* Introduction to GNU Parallel
+ https://www.youtube.com/watch?v=Kj-6JkAqw-8
+
+* Using GNU Parallel with GooseSLURM
+ https://readthedocs.org/projects/gooseslurm/downloads/pdf/latest/#chapter.7
+
+* Why GNU-parallel?
+ https://github.com/lijingbu/omics/blob/main/why_gnu_parallel.md
+
+
+20210222
+
+* --filter only runs jobs where the filter is true. The filter can
+  contain replacement strings and Perl code such as: '{1} < {2}+1'
+  (see the sketch below).
+
+* --template takes a text file as a template with replacement
+ strings. Then it replaces the replacement strings and saves it
+ under a new filename.
+
+* --plus implements {0%} {0#} replacement string for zero padded slot
+ and sequence.
+
+* Warn that you probably mean -d '\r\n' if the first 3 values end in
+ \r\n (DOS newline).
+
+* {= perlexpression =} must return the same value if run twice on the
+  same input.
+
+* Man pages are available in .rst format.
+
+* Bug fixes and man page updates.
+
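+  A sketch of --filter using the expression from the bullet above:
+
+    # Run only the combinations where the Perl expression is true
+    parallel --filter '{1} < {2}+1' echo {1} {2} ::: 1 2 3 ::: 1 2 3
+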
+News about GNU Parallel:
+
+* A short tutorial on Gnu Parallel
+ https://www.polarmicrobes.org/a-short-tutorial-on-gnu-parallel/
+
+* 5 reasons why I love coding on Linux
+ https://opensource.com/article/21/2/linux-programming
+
+* Simple Tutorial to install & use GNU Parallel
+ https://medium.com/analytics-vidhya/simple-tutorial-to-install-use-gnu-parallel-79251120d618
+
+* LiDO3 - first contact
+ https://www.lido.tu-dortmund.de/cms/de/LiDO3/LiDO3_first_contact_handout.pdf
+
+* Rill Stage 2-1: Ways of command-line data analysis
+ https://blog.jastix.biz/post/rill-stage-2-1-cli-data-analysis/
+
+* Introduction to GNU Parallel
+ https://blog.knoldus.com/introduction-to-gnu-parallel/
+
+* GNU Parallel https://www.hahwul.com/cullinan/parallel/
+
+
+20210122
+
+* --memsuspend suspends jobs when there is little free memory. This
+  way you can run jobs in parallel that, added together, could take
+  up more RAM than is available.
+
+* $PARALLEL_ARGHOSTGROUPS and the replacement string {agrp} will give
+  the hostgroup given with the argument when using --hostgroup.
+
+* Handy time functions for {= =}: yyyy_mm_dd_hh_mm_ss()
+ yyyy_mm_dd_hh_mm() yyyy_mm_dd() yyyymmddhhmmss() yyyymmddhhmm()
+ yyyymmdd() hash($str)
+
+* Bug fixes and man page updates.
+
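+  A sketch of the time functions inside {= =}:
+
+    # Tag each argument with a datestamp
+    parallel echo '{} {= $_ = yyyy_mm_dd() =}' ::: jobA jobB
+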
+News about GNU Parallel:
+
+* Software Engineering For Scientists
+ https://canvas.stanford.edu/courses/133091
+
+* Optional Individual Submission 4 Job Handling - GNU Parallel
+ https://www.youtube.com/watch?v=-tX0bFAYAxM
+
+* About gnu parallel: GnuParallel: parallelizing scripts on a cluster
+  where the scripts write files to the head node (Chinese)
+  https://www.codenong.com/25172209/
+
+* Work tricks I keep forgetting (Japanese)
+ https://qiita.com/hana_shin/items/53c3c78525c9c758ae7c
+
+
+20201222
+
+* The --pipe engine has changed, making --pipe alpha quality.
+
+* --results -.json outputs results as JSON objects on stdout (standard
+ output).
+
+* --delay 123auto will auto-adjust --delay. If jobs fail due to being
+ spawned too quickly, --delay will exponentially increase.
+
+* --plus sets {hgrp} that gives the intersection of the hostgroups of
+ the job and the sshlogin that the job is run on.
+
+* Bug fixes and man page updates.
+
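+  A sketch of JSON results on standard output:
+
+    # One JSON object per job
+    parallel --results -.json echo ::: A B
+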
+News about GNU Parallel:
+
+* Best practices for accelerating data migrations using AWS Snowball
+ Edge
+ https://aws.amazon.com/blogs/storage/best-practices-for-accelerating-data-migrations-using-aws-snowball-edge/
+
+* Pass the Output of a Command as an Argument for Another
+ https://www.baeldung.com/linux/pass-cmd-output-as-an-argument
+
+* Warwick RSE Drop-in - Workflow Management Part 2 - GNU Parallel
+ https://www.youtube.com/watch?v=t_v2Otgt87g
+
+
+20201122
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* https://bash-prompt.net/guides/gnu-parallel-multi-server/
+
+
+20201022
+
+* --termseq now works for jobs running remotely.
+
+* parsort: Performance optimized for 64-core machines.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Juan Sierra Pons has made a testimonial:
+ http://www.elsotanillo.net/wp-content/uploads/GnuParallel_JuanSierraPons.mp4
+
+* [Translated] Using GNU-Parallel in bioinformatics (Chinese)
+ https://cloud.tencent.com/developer/article/1705499
+
+
+20200922
+
+* New CPU detection for GNU/Linux.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Aug 21, 2020 - More Unix tools
+ https://floki.blog/2020/08/more_unix_tools
+
+
+20200822
+
+New in this release:
+
+* Max command line length is changed for MacOS - making this version
+ beta quality for MacOS.
+
+* Bug fixes and man page updates.
+
+
+20200722
+
+New in this release:
+
+* No new functionality
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Tips for Running Jobs on your VM
+ https://support.ehelp.edu.au/support/solutions/articles/6000089713-tips-for-running-jobs-on-your-vm#parallel
+
+* Introduction to GNU Parallel: Parallelizing Massive Individual Tasks
+ https://www.youtube.com/watch?v=QSFLTEyCKvo
+
+* Slides for same
+ http://www.hpc.lsu.edu/training/weekly-materials/2020-Spring/GNUParallel_Spring_2020.pdf
+
+* Linux-Fu: Parallel Universe
+ https://hackaday.com/2020/06/29/linux-fu-parallel-universe/
+
+* Resumable batch jobs with gnu parallel https://igor.io/parallel/
+
+* Introducing Parallel into Shell
+ https://www.petelawson.com/post/parallel-in-shell/
+
+* AV1 encoding parallelization with GNU Parallel
+ https://www.reddit.com/r/AV1/comments/fjvqp9/av1_encoding_parallelization_with_gnu_parallel/
+
+* Beschleunigte Befehlsbearbeitung mit GNU Parallel
+ https://www.linux-community.de/ausgaben/linuxuser/2013/06/beschleunigte-befehlsbearbeitung-mit-gnu-parallel/
+
+
+20200622
+
+New in this release:
+
+* No new functionality
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* GNU Parallel is used at NERSC:
+ https://www.nersc.gov/assets/Uploads/08-Workflows-20200616.pdf
+
+* Resumable batch jobs with GNU Parallel https://igor.io/parallel/
+
+* Lab Meeting technical talk
+ https://www.slideshare.net/hoffmanlab/gnu-parallel-194030490
+
+* Open Sky Software Website Credits
+ https://www.openskysoftware.com/site-credits.htm
+
+* Speed up your Python Scripts using Ubuntu and GNU Parallel
+ https://ecce.esri.ca/wpecce/2019/12/24/speed-up-your-python-scripts-using-ubuntu-and-gnu-parallel/
+
+* Indispensable command-line tools
+ https://negfeedback.blogspot.com/2020/05/indispensable-command-line-tools.html
+
+* GNU Parallel for simulations
+ http://pdebuyl.be/blog/2020/gnu-parallel-for-simulations.html
+
+* Introduction to GNU parallel
+ https://bioinformaticsworkbook.org/Appendix/GNUparallel/GNU_parallel_examples.html
+
+
+20200522
+
+New in this release:
+
+* While running a job $PARALLEL_JOBSLOT is the jobslot of the job. It
+ is equal to {%} unless the job is being retried. See {%} for
+ details.
+
+* While running a job $PARALLEL_SSHLOGIN is the sshlogin line with
+ number of cores removed. E.g. '4//usr/bin/specialssh user@host'
+ becomes: '/usr/bin/specialssh user@host'
+
+* While running a job $PARALLEL_SSHHOST is the host part of an
+ sshlogin line. E.g. '4//usr/bin/specialssh user@host' becomes:
+ 'host'
+
+* --plus activates the replacement strings {slot} = $PARALLEL_JOBSLOT,
+ {sshlogin} = $PARALLEL_SSHLOGIN, {host} = $PARALLEL_SSHHOST
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Portable Batch System (PBS) & GNU Parallel - Running a Program
+ Multiple Times in Parallel
+ https://www.youtube.com/watch?v=6ccbWu6Befo
+
+* GNU Parallel example (Polish) https://www.youtube.com/watch?v=gs_wG4Kt2G4
+
+* Demo of LINUX APP - GNU PARALLEL - running multiple Gstreamer webcam
+ .sh scripts with only 1 command
+ https://www.youtube.com/watch?v=trQuA_wmWjg
+
+
+20200422
+
+This release celebrates GNU Parallel's 10 years as a GNU tool:
+
+> commit ed2dfb1043768154d4e7678e01e10287155fa834
+> Author: Ole Tange <ole@tange.dk>
+> Date: Thu Apr 22 01:23:00 2010 +0200
+>
+> Name change: Parallel is now GNU Parallel.
+> Basic structure for sshlogin and sshloginfile.
+
+New in this release:
+
+* parsort makes GNU sort run faster for files with more than 1M lines.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* Several clusters have updated their documentation of GNU Parallel:
+ https://www.grid5000.fr/w/GNU_Parallel
+ https://research-it.berkeley.edu/virtual-training-running-jobs-parallel-savio
+ http://www.hpc.lsu.edu/training/archive/tutorials.php
+
+* add_pho uses GNU Parallel: https://github.com/SeedlingsBabylab/add_pho
+
+* GNU parallel usage notes (Chinese) https://zhuanlan.zhihu.com/p/37340011
+
+* Virtual Workshop: Running Jobs in Parallel on Savio
+ https://ais.berkeley.edu/events/virtual-workshop-running-jobs-parallel-savio/2020-04-20
+
+* Free Concurrency with GNU Parallel
+ https://phili.pe/posts/free-concurrency-with-gnu-parallel/
+
+
+20200322
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* The GNU Parallel course in Copenhagen is cancelled
+ https://www.prosa.dk/nc/arrangementer/arrangement/gnu-parallel-med-ole-tange/
+
+* The GNU Parallel course in Århus is cancelled
+ https://www.prosa.dk/nc/arrangementer/arrangement/gnu-parallel-og-parallelisering-i-unix-shellen/
+
+* The 10 year anniversary may be cancelled, see
+ https://www.gnu.org/software/parallel/10-years-anniversary.html
+
+* Running shell commands in parallel, via GNU Parallel
+ https://dev.to/saveriomiroddi/running-shell-commands-in-parallel-via-gnu-parallel-3a72
+
+* Faster nmap scanning with the help of GNU parallel
+ https://badshah.io/experiment/faster-nmap-scanning-with-the-help-of-gnu-parallel/
+
+
+20200222
+
+* Bug fixes and man page updates.
+
+
+20200122
+
+* --blocktimeout dur - Time out for reading block when using
+ --pipe. If it takes longer than dur to read a full block, use the
+ partial block read so far.
+
+* Bug fixes and man page updates.
+
+News about GNU Parallel:
+
+* GNU Parallel course in Copenhagen
+ https://www.prosa.dk/nc/arrangementer/arrangement/gnu-parallel-med-ole-tange/
+
+* GNU Parallel course in Århus
+ https://www.prosa.dk/nc/arrangementer/arrangement/gnu-parallel-og-parallelisering-i-unix-shellen/
+
+* GNU Parallel to speed up your processes on Linux (French)
+ https://www.yvonh.com/gnu-parallel-pour-accelerer-vos-process-sous-linux/
+
+* How to copy a file to multiple directories in Linux
+ https://net2.com/how-to-copy-a-file-to-multiple-directories-in-linux/
+
+* Running linux commands in parallel https://dev.to/voyeg3r/runing-linux-commands-in-parallel-4ff8
+
+
+20191222
+
+* GNU Parallel course in Copenhagen
+ https://www.prosa.dk/nc/arrangementer/arrangement/gnu-parallel-med-ole-tange/
+
+* GNU Parallel course in Århus
+ https://www.prosa.dk/nc/arrangementer/arrangement/gnu-parallel-og-parallelisering-i-unix-shellen/
+
+* GNU Parallel is used in
+ https://github.com/JeffersonLab/rfw_tsf_extractor
+
+* Bug fixes and man page updates.
+
+
+20191122
+
+* GNU Parallel was presented at Driving IT. Slides:
+ cloud.prosa.dk/s/drivingit
+
+* Restarting supervisord processes in parallel
+ https://blog.naderman.de/2019/11/14/restarting-supervisord-processes-in-parallel/
+
+* Using GNU Parallel to Create Archives Faster
+ https://www.reddit.com/r/DataHoarder/comments/dsgmhc/parallel_archiving_techniques/
+
+* Bug fixes and man page updates.
+
+
+20191022
+
+* --tee will use --output-error=warn-nopipe if supported by tee.
+
+* GNU Parallel will be presented at Driving IT 2019:
+ https://ida.dk/arrangementer-og-kurser/konferencer/driving-it/tools
+
+* UMN Duluth: Job parallelization with task arrays and GNU parallel
+ https://www.msi.umn.edu/tutorials/umn-duluth-job-parallelization-task-arrays-and-gnu-parallel
+
+* Genome updater uses GNU Parallel
+ https://github.com/pirovc/genome_updater
+
+* Using GNU-Parallel for bioinformatics
+ https://www.danielecook.com/using-gnu-parallel-for-bioinformatics/
+
+* Speeding up PostgreSQL ETL pipeline with the help of GODS
+ https://cfengine.com/company/blog-detail/speeding-up-postgresql-etl-pipeline-with-the-help-of-gods/
+
+* Running linux commands in parallel
+ https://dev.to/voyeg3r/runing-linux-commands-in-parallel-4ff8
+
+* Research Computing University of Colorado Boulder contains an intro
+ to GNU Parallel
+ https://readthedocs.org/projects/curc/downloads/pdf/latest/
+
+* How to use Parallel to run commands in parallel in the shell
+  (Chinese)
+ https://www.myfreax.com/gnu-parallel/
+
+* How to test drive Amazon Elastic File System (Chinese)
+ https://aws.amazon.com/cn/blogs/china/how-to-test-drive-amazon-elastic-file-system/
+
+* Bug fixes and man page updates.
+
+
+20190922
+
+* --nice now inherits the nice level that GNU Parallel is started
+  at. So 'nice -n10 parallel' will also cause remote jobs to be run
+  at nice level 10.
+
+* --delay is now accurate to within 10 ms
+
+* GNU Parallel will be presented at Driving IT 2019:
+ https://ida.dk/arrangementer-og-kurser/konferencer/driving-it/tools
+
+* Greenland Ice Sheet solid ice discharge from 1986 through 2017
+ https://www.earth-syst-sci-data.net/11/769/2019/essd-11-769-2019.pdf
+
+* App Center Cloud Build Machines > Installed Software > Utilities
+ https://docs.microsoft.com/en-us/appcenter/build/software
+
+* Curryfinger - SNI & Host header spoofing utility
+ https://dualuse.io/blog/curryfinger/
+
+* Hyak parallel-sql
+ https://wiki.cac.washington.edu/display/hyakusers/Hyak+parallel-sql
+
+* A meditation on analysing data with Perl in parallel using GNU Parallel
+ https://www.reddit.com/r/perl/comments/cyo2qc/a_meditation_on_analysing_data_with_perl_in/
+
+* Speed up your tasks with GNU Parallel and job batches (UL) (French)
+ https://www.eventbrite.ca/e/inscription-accelerer-ses-taches-avec-gnu-parallel-et-les-lots-de-taches-ul-71412840729
+
+* RMACC Summit GNU Parallel
+ https://curc.readthedocs.io/en/latest/software/GNUParallel.html
+
+* Introducing Parallel into Shell
+ https://petelawson.com/post/parallel-in-shell/
+
+* Linux Productivity Tools
+ https://www.usenix.org/conference/lisa19/presentation/maheshwari
+
+* Gnu/parallel + Nfs: concurrent computation on a shared network
+  filesystem (Chinese)
+ http://atomicer.cn/2018/01/18/gnu-parallel-nfs-%E5%AE%9E%E7%8E%B0%E5%85%B1%E4%BA%AB%E6%96%87%E4%BB%B6%E7%BD%91%E7%BB%9C%E7%9A%84%E5%B9%B6%E5%8F%91%E8%AE%A1%E7%AE%97%E7%AD%89%E6%93%8D%E4%BD%9C/#more
+
+* Bug fixes and man page updates.
+
+
+20190822
+
+* GNU Parallel will be presented at Driving IT 2019:
+ https://ida.dk/arrangementer-og-kurser/konferencer/driving-it#beskrivelse
+
+* Linux Productivity Tools
+ https://www.usenix.org/conference/lisa19/presentation/maheshwari
+
+* Improve your reconnaissance performance by using GNU Parallel
+ https://mailchi.mp/3f46ceb46357/improve-your-reconnaissance-performance-by-using-gnu-parallel
+
+* How to run parallel jobs / processes / programs in Bash? (Turkish)
+ https://kaleinfo.com/2019/07/29/paralel-isler-islemler-programlar-bashde-nasil-calistirilir-2/
+
+* Use POLYBAR to change WALLPAPER with GNU Parallel
+ https://www.youtube.com/watch?v=rjYg-b_eiRU
+
+* Finding the Balance Between Speed & Accuracy During an Internet-wide
+ Port Scanning
+ https://captmeelo.com/pentest/2019/07/29/port-scanning.html
+
+* GNU Parallel Ryugu released
+ https://linuxreviews.org/GNU_Parallel_Ryugu_released
+
+* CO2017 - CW1 - Operating Systems & Networks - Shell scripting
+ http://www.7daixie.com/2019071646302396.html
+
+* Bug fixes and man page updates.
+
+
+20190722
+
+* {= uq; =} causes the replacement string to be unquoted. Example:
+ parallel echo '{=uq;=}.jpg' ::: '*'
+
+* --tagstring {=...=} is now evaluated for each line with --linebuffer.
+
+* Use -J ./profile to read a profile in current dir.
+
+* Startup is 40% faster: GNU Parallel now finds the parent shell
+  differently on GNU/Linux and caches information about the CPU and
+  which setpgrp method to use.
+
+* $PARALLEL_SSHLOGIN can be used in the command line.
+
+* Occasionally sane: Simple usage of GNU parallel
+ https://vfoley.xyz/parallel/
+
+* How to run multiple bash scripts in parallel
+ https://techieroop.com/how-to-run-multiple-bash-scripts-in-parallel/
+
+* Announcing Oracle Solaris 11.4 SRU10
+ https://blogs.oracle.com/solaris/announcing-oracle-solaris-114-sru10
+
+* A guide to using GNU Parallel (Russian) https://zalinux.ru/?p=2623
+
+* Bug fixes and man page updates.
+
+
+20190622
+
+* --shard can now take a column name and optionally a perl
+ expression. Similar to --group-by and replacement strings.
+
+* Using AWK and R to parse 25tb
+ https://livefreeordichotomize.com/2019/06/04/using_awk_and_r_to_parse_25tb/
+
+* Parallel and Visual testing with Behat
+ http://parallelandvisualtestingwithbehat.blogspot.com/p/blog-page.html
+
+* Wikipedia, the free encyclopedia: GNU parallel (Chinese)
+  https://zh.wikipedia.org/wiki/GNU_parallel
+
+* Bug fixes and man page updates.
+
+
+20190522
+
+* --group-by groups lines depending on the value of a column. The
+  value can be computed.
+
+* How to compress (bzip / gzip) a very large text quickly?
+ https://medium.com/@gchandra/how-to-compress-bzip-gzip-a-very-large-text-quickly-27c11f4c6681
+
+* Simple tutorial to install & use GNU Parallel
+ https://medium.com/@gchandra/simple-tutorial-to-install-use-gnu-parallel-79251120d618
+
+* Introducing Parallel into Shell
+ https://petelawson.com/post/parallel-in-shell/
+
+* Bug fixes and man page updates.
+
+
+20190422
+
+* Invitation to the 10 years anniversary next year.
+
+* Bug fixes and man page updates.
+
+
+20190322
+
+* SIGTERM is changed to SIGHUP, so sending SIGHUP will make GNU
+  Parallel start no more jobs, but wait for running jobs to finish.
+
+* SIGTERM SIGTERM (sending SIGTERM twice) is changed to a single
+  SIGTERM, so sending SIGTERM will make GNU Parallel kill all running
+  jobs.
+
+* GNU Parallel now includes a cheat sheet: parallel_cheat.pdf
+
+* High Throughput Computing on RMACC Summit and Beyond
+ https://calendar.colorado.edu/event/high_throughput_computing_on_rmacc_summit_and_beyond#.XH2NBhB7mV4
+
+* GNU Parallel Cheat Sheet by cpriest
+ https://www.cheatography.com/cpriest/cheat-sheets/gnu-parallel/
+
+* Use Multiple CPU Cores(Parallelize) with Single Threaded Linux
+ Commands
+ http://xensoft.com/use-multiple-cpu-cores-parallelize-with-single-threaded-linux-commands/
+
+* Bug fixes and man page updates.
+
+
+20190222
+
+* --shard makes it possible to send input to the same jobslot based
+  on the value in one column of the input. It is similar to sharding
+  in databases.
+
+* --shellquote --shellquote will shell quote the input twice.
+
+* GNU Parallel is available in Termux
+ https://github.com/termux/termux-packages/tree/master/packages/parallel
+
+* Linux, command line & MetaCentrum
+ https://trapa.cz/sites/default/files/linux_bash_metacentrum_course_4.pdf
+
+* How to supercharge your bash workflows with GNU parallel
+ https://medium.freecodecamp.org/how-to-supercharge-your-bash-workflows-with-gnu-parallel-53aab0aea141
+
+* GNU Parallel Spring 2017 https://www.youtube.com/watch?v=LHb29uW_KyI
+
+* Parallelizing Freesurfer blog.cogneurostats.com/?p=148
+
+* FreeSurfer #4: Running recon-all in Parallel https://www.youtube.com/watch?v=XHN2tm3tNaw
+
+* FreeSurfer: recon-all https://www.neurotrivial.com/2019/01/24/freesurfer-recon-all/
+
+* Come ridimensionare 10k foto con la riga di comando
+ https://pigrecoinfinito.wordpress.com/2018/12/22/come-ridimensionare-10k-foto-con-la-riga-di-comando/
+
+* Parallel processing - Gnu Parallel: nested parallelism (Chinese)
+ https://codeday.me/bug/20190122/552902.html
+
+* How to run commands in parallel in a shell script (Japanese)
+ https://www.kwbtblog.com/entry/2019/01/12/020423
+
+* Bug fixes and man page updates.
+
+
+20190122
+
+* 'env_parallel --end-session' makes it possible to nest 'env_parallel
+ --session'
+
+* Using GNU Parallel in pentesting https://youtu.be/mZ0OUJmkIlA?t=873
+
+* Supercomputer Workshop: Feb 1&7
+ http://www.ece.unm.edu/featured-students/supercomputer-workshop-feb-17.html
+
+* Bug fixes and man page updates.
+
+
+20181222
+
+* Better /proc/cpuinfo parser.
+
+* Simpler job scheduling flow.
+
+* MPGI Workshop: Job Parallelization with Task Arrays and GNU Parallel
+ https://www.msi.umn.edu/tutorials/mpgi-workshop-module-4
+
+* CONCOCT uses GNU Parallel
+ https://media.readthedocs.org/pdf/concoct/latest/concoct.pdf
+
+* If you use SLURM read
+ https://wiki.hpc.uconn.edu/index.php/Advanced_SLURM
+
+* Work tricks I keep forgetting (Japanese)
+ https://qiita.com/hana_shin/items/53c3c78525c9c758ae7c
+
+* Bug fixes and man page updates.
+
+
+20181122
+
+* Experimental simpler job flow control.
+
+* Running time-consuming commands in parallel with GNU parallel
+  (Japanese)
+ https://qiita.com//grohiro/items/4db3fa951a4778c5c479
+
+* Bug fixes and man page updates.
+
+
+20181022
+
+* env_parallel.fish: --session support (alpha quality)
+
+* GNU Parallel was cited in: High-throughput sequencing of murine
+ immunoglobulin heavy chain repertoires using single side unique
+ molecular identifiers on an Ion Torrent PGM
+ https://doi.org/10.18632/oncotarget.25493
+
+* GNU parallel in Japanese Wikipedia
+ https://ja.wikipedia.org/wiki/GNU_parallel
+
+* Task parameterization tools: job batches, GNU Parallel (French)
+ https://www.eventbrite.ca/e/inscription-outils-de-parametrisation-des-taches-lots-de-taches-gnu-parallel-et-bqtools-midi-conference-de-50271076142
+
+* Bug fixes and man page updates.
+
+
+20180922
+
+* Minix is supported again.
+
+* Updated graph showing GNU Parallel's overhead per job for each
+ version. https://www.gnu.org/software/parallel/process-time-j2-1700MHz-3000-1000.pdf
+
+* Intro to Computing at CARC includes introduction to GNU Parallel
+ http://carc.unm.edu/education--training/workshops-and-seminars.html
+
+* Fast-GBS uses GNU Parallel https://bitbucket.org/jerlar73/fast-gbs/wiki/Home
+
+* Belgium at 10 m resolution in July 2018 http://www.cesbio.ups-tlse.fr/multitemp/?p=14116
+
+* Bug fixes and man page updates.
+
+
+20180822
+
+* parset sets exit code.
+
+* How to install GNU Parallel
+ https://www.techrepublic.com/videos/how-to-install-gnu-parallel/
+
+* Bug fixes and man page updates.
+
+
+20180722
+
+* The quoting engine has been changed. Instead of using \-quoting GNU
+ Parallel now uses '-quoting in bash/ash/dash/ksh. This should
+ improve compatibility with different locales. This is a big change
+ causing this release to be alpha quality.
+
+* The CPU calculation has changed. By default GNU Parallel uses the
+ number of CPU threads as the number of CPUs. This can be changed to
+ the number of CPU cores or number of CPU sockets with
+ --use-cores-instead-of-threads or --use-sockets-instead-of-threads.
+
+* The detected number of sockets, cores, and threads can be shown
+  with --number-of-sockets, --number-of-cores, and
+  --number-of-threads (see the sketch below).
+
+* env_parallel now support mksh using env_parallel.mksh.
+
+* GNU Parallel is distributed as part of Snippy
+ https://github.com/tseemann/snippy
+
+* GNU Parallel: Run commands simultaneously on Linux (Spanish)
+ https://esgeeks.com/gnu-parallel-ejecutar-comandos-simultaneo-linux/
+
+* Parallel download genomic data with GNU-Parallel
+ https://digibio.blogspot.com/search/label/GNU-Parallel
+
+* Bug fixes and man page updates.
+
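+  A sketch of the CPU-counting options mentioned above:
+
+    # Show what GNU Parallel detected on this machine
+    parallel --number-of-sockets
+    parallel --number-of-cores
+    parallel --number-of-threads
+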
+
+20180622
+
+* Deal better with multibyte chars by forcing LC_ALL=C.
+
+* GNU Parallel was introduced in Pincount Podcast:
+ https://pincountpodcast.com/episodes/29.html
+
+* GNU Parallel was shown on Danish national news for 1.7 seconds: dr.dk
+
+* How to run Linux commands simultaneously with GNU Parallel
+ https://www.techrepublic.com/article/how-to-run-commands-simultaneously-with-gnu-parallel/#ftag=RSS56d97e7
+
+* Easy parallelization with GNU parallel
+ http://mpharrigan.com/2016/08/02/parallel.html
+
+* GNU Parallel for multithreaded tasks
+ http://slhogle.github.io/2016/gnu-parallel/
+
+* A GNU Parallel Clustering Demo
+ https://www.crowdsupply.com/ground-electronics/circumference/updates/a-gnu-parallel-clustering-demo
+
+* Using GNU Parallel to improve Linux command-line efficiency
+  (Chinese)
+ https://linux.cn/article-9718-1.html
+
+* Running tasks from the console simultaneously with GNU Parallel
+  (Spanish)
+ https://poesiabinaria.net/2018/06/ejecutando-tareas-desde-consola-simultaneamente-gnu-parallel/
+
+* How to run several tasks simultaneously in the terminal with GNU
+  Parallel (Spanish)
+ https://www.ubuntizando.com/como-ejecutar-varias-tareas-simultaneamente-en-el-terminal-con-gnu-parallel/
+
+* GNU Parallel usage guide (Korean) http://genoglobe.kr/kribb/gnu_parallel
+
+* Bug fixes and man page updates.
+
+
+20180522
+
+* --tty allows for more programs accessing /dev/tty in parallel. Some
+ programs require tty access without using it.
+
+* env_parallel --session will record the names in the current
+  environment in $PARALLEL_IGNORED_NAMES and exit. It is only used
+  with env_parallel, and can work like --record-env but in a single
+  session.
+
+* 35 day course: Linux shell scripting and text processing for big
+ data https://comquest.in/training
+
+* How To Use Your Entire CPU In Bash With Parallel
+ https://bash-prompt.net/guides/parallell-bash/
+
+* An easy way to run parallel jobs on Stampede2
+ https://blog.uta.edu/xus/2017/09/29/an-easy-way-to-run-parallel-jobs-on-stampede2/
+
+* Get more done at the Linux command line with GNU Parallel
+ https://opensource.com/article/18/5/gnu-parallel
+
+* Save Time with Modern Search Techniques
+ https://www.sans.org/summit-archives/file/summit-archive-1524582079.pdf
+
+* GNU Parallel: do more things at once in the terminal (Spanish)
+ https://blog.desdelinux.net/gnu-parallel-haz-mas-cosas-simultaneamente-en-el-terminal/
+
+* An introduction to parallel processing with GNU Parallel you can
+  start using today (Japanese)
+ https://www.slideshare.net/koji_matsuda/gnu-parallel
+
+* Bug fixes and man page updates.
+
+
+20180422
+
+* --csv makes GNU Parallel parse the input sources as CSV. When used
+ with --pipe it only passes full CSV-records.
+
+* Time in --bar is printed as 1d02h03m04s.
+
+* Optimization of --tee: It spawns a process less per value.
+
+* The GNU Parallel 2018 book is now available:
+ http://www.lulu.com/shop/ole-tange/gnu-parallel-2018/paperback/product-23558902.html
+
+* Modern pentest tricks for faster, wider, greater engagement (15)
+ https://conference.hitb.org/hitbsecconf2018ams/materials/D1%20COMMSEC%20-%20Thomas%20Debize%20-%20Modern%20Pentest%20Tricks%20for%20Faster,%20Wider,%20Greater%20Engagements.pdf
+
+* Running many serial jobs efficiently
+ https://userinfo.surfsara.nl/documentation/running-many-serial-jobs-efficiently
+
+* GNU Parallel: how to save the results of several commands to a
+  variable? (Russian) http://fliplinux.com/gnu-parallel-6.html
+
+* Running Multiple Commands in Parallel on a GNU Linux
+ https://www.youtube.com/watch?v=sHpTywpb4_4
+
+* Klaatu covers the documentation of GNU parallel in episode 12x15
+ http://gnuworldorder.info/
+
+* Bug fixes and man page updates.
+
+
+20180322
+
+* niceload -p can now take multiple pids separated by comma
+
+* --timeout gives a warning when killing processes
+
+* --embed now uses the same code for all supported shells
+
+* --delay can now take arguments like 1h12m07s
+
+* Parallel. Straight from your command line
+ https://medium.com/@alonisser/parallel-straight-from-your-command-line-feb6db8b6cee
+
+* GNU Parallel was used in POAP-Parallelized Open Babel & Autodock
+ suite Pipeline https://github.com/inpacdb/POAP
+
+* Pi Cluster Parallel Script First Fire
+ https://chiefio.wordpress.com/2018/02/23/pi-cluster-parallel-script-first-fire/
+
+* How to Restore MySQL Logical Backup at Maximum Speed
+ https://www.percona.com/blog/2018/02/22/restore-mysql-logical-backup-maximum-speed/
+
+* Bug fixes and man page updates.
+
+
+20180222
+
+* --embed makes it possible to embed GNU parallel in a shell
+ script. This is useful if you need to distribute your script to
+ someone who does not want to install GNU parallel.
+
+* Parallelize Your Large Batches of Small Jobs
+ https://amito.me/2018/02/Parallelize-Your-Large-Batches-of-Small-Jobs/
+
+* Klaatu expounds upon xargs and talks GNU parallel in episode 12x6
+ http://gnuworldorder.info/
+
+* How to use parallel to speed up your work
+ https://mike42.me/blog/how-to-use-parallel-to-speed-up-your-work
+
+* Transient compute clustering with GNU Parallel and sshfs
+ https://dennismungai.wordpress.com/2018/02/06/transient-compute-clustering-with-gnu-parallel-and-sshfs/
+
+* Useful Examples of ffmpeg and GNU parallel on the command-line
+ https://dennismungai.wordpress.com/2018/02/06/useful-examples-of-ffmpeg-and-gnu-parallel-on-the-command-line/
+
+* Data Science at the Command Line (Chapter 8.3)
+ https://www.datascienceatthecommandline.com/
+
+* Bug fixes and man page updates.
+
+
+20180122
+
+* bash / parallel (Korean) https://code.i-harness.com/ko/docs/bash/2a1a
+
+* A 15-minute introduction to the power tool gnu parallel (Chinese)
+ http://www.ezilin.com/2018/01/15gnu-parallel.html
+
+* How To Run Multiple Commands In Parallel on Linux
+ https://www.slashroot.in/how-run-multiple-commands-parallel-linux
+
+* Bug fixes and man page updates.
+
+
+20171222
+
+* env_parset for ash, dash, ksh, sh, zsh
+
+* Automatically create hostgroups if argument ends in @sshlogin
+
+* Tips for running jobs on your VM
+ https://support.ehelp.edu.au/support/solutions/articles/6000089713-tips-for-running-jobs-on-your-vm#parallel
+
+* Node-local scheduling
+ https://mogonwiki.zdv.uni-mainz.de/dokuwiki/node_local_scheduling
+
+* GNU Parallel https://docs.computecanada.ca/wiki/GNU_Parallel
+
+* Error handling in GNU Parallel (Japanese)
+  https://qiita.com/inouet/items/bfc208668c86caf8ff74
+
+* Using GNU Parallel
+ https://wiki.ncsa.illinois.edu/display/ROGER/Using+GNU+Parallel
+
+* Bug fixes and man page updates.
+
+
+20171122
+
+* Using GNU Parallel to speed up Google Cloud Infrastructure
+ management
+ https://medium.com/@pczarkowski/using-gnu-parallel-to-speed-up-google-cloud-infrastructure-management-53e5c555ec05
+
+* What is gnu parallel
+ http://wiki.chpc.ac.za/howto:bioinformatics:gnu-parallel
+
+* Parallel, Scripts, Clusters, & Easy Use
+ https://chiefio.wordpress.com/2017/11/10/parallel-scripts-clusters-easy-use/
+
+* Bug fixes and man page updates.
+
+
+20171022
+
+* Initial support for XDG Base Directory $XDG_* environment variables.
+
+* Validating half a million TIFF files. Part
+ Two
+ https://www.dpoc.ac.uk/2017/08/17/validating-half-a-million-tiff-files-part-two/
+
+* Turing data science class: Cluster computing
+ https://www.turing.ac.uk/events/masterclass-cluster-computing/
+
+* Butlin:Unix for Bioinformatics - advanced tutorial
+ https://openwetware.org/wiki/Butlin:Unix_for_Bioinformatics_-_advanced_tutorial
+
+* Trying to use every CPU core (Thai)
+ https://veer66.wordpress.com/2017/09/02/use-multi-core/
+
+* Examples on using GNU Parallel
+ https://gist.github.com/drmalex07/de8ff1379a560f6d84a0d863e60ae378
+
+* Bug fixes and man page updates.
+
+
+20170922
+
+* Use '--limit myprog' to make a dynamic job limit. Just return 0 to
+  spawn another job, 1 to not spawn another job, and 2 to kill the
+  youngest job. A sketch of such a limit program is shown below.
+
+* PARALLEL_RSYNC_OPTS and --rsync-opts sets the options for rsync
+ (Default: -rlDzR).
+
+* Download all of humble bundle books in parallel
+ https://til.hashrocket.com/posts/ggt1jaes4y-download-all-of-humble-bundle-books-in-parallel
+
+* Bug fixes and man page updates.
+
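+  A sketch of a dynamic limit program for --limit ('mylimit' is an
+  assumption, and /proc/loadavg is Linux-specific):
+
+    #!/bin/sh
+    # mylimit: exit 0 => spawn another job, 1 => do not spawn (yet)
+    [ "$(cut -d . -f 1 /proc/loadavg)" -lt 8 ]
+
+    # Used as: parallel --limit ./mylimit dowork ::: task1 task2
+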
+
+20170822
+
+* --line-buffer no longer uses tempfiles. This is faster and makes it
+ possible for a single process to output more data than there is free
+ disk space.
+
+* Vote for GNU Parallel's community ads on
+ http://meta.unix.stackexchange.com/a/4356/2972
+ http://meta.askubuntu.com/a/16750/22307
+ http://meta.serverfault.com/a/9040/45704
+
+* Edit images with GNU Parallel and ImageMagick
+ https://fedoramagazine.org/edit-images-parallel-imagemagick/
+
+* Running Bash Commands in Parallel
+ https://dzone.com/articles/running-bash-commands-in-parallel-1
+
+* Today I Learned: GNU parallel, plate tectonics
+ https://medium.com/@nornagon/today-i-learned-gnu-parallel-plate-tectonics-9fcf24045e63
+
+* GNU Parallel Tutorial
+ https://www.upf.edu/web/sct-sit/gnu-parallel-tutorial
+
+* Validating half a million TIFF files
+ http://www.dpoc.ac.uk/2017/08/17/validating-half-a-million-tiff-files-part-two/
+
+* Blender and GNU Parallel
+ https://www.blendernation.com/2017/08/16/blender-gnu-parallel/
+
+* Quick Introduction to GNU Parallel
+ https://github.com/LangilleLab/microbiome_helper/wiki/Quick-Introduction-to-GNU-Parallel
+
+* A simple distributed animation render example using GNU Parallel
+ https://gitlab.com/skororu/scripts
+
+* Do stuff on things, in parallel
+ http://blogs.fluidinfo.com/terry/2017/08/05/do-stuff-on-things-in-parallel/
+
+* Bash And GNU Parallel NGS Bidniz
+ http://ino.pm/outreach/presentations/2014/03/genomics-wranglers/index.html
+
+* Using for loop vs Gnu parallel for BLAST
+ http://www.ettemalab.org/using-for-loop-vs-gnu-parallel-for-blast/
+
+* Parallel rsync https://gxnotes.com/article/175866.html
+
+* Bug fixes and man page updates.
+
+
+20170722
+
+* Vote for GNU Parallel's community ads on
+ http://meta.unix.stackexchange.com/a/4356/2972
+ http://meta.askubuntu.com/a/16750/22307
+ http://meta.serverfault.com/a/9040/45704
+
+* GNU Parallel was cited in: Hayabusa: Simple and Fast Full-Text
+ Search Engine for Massive System Log Data
+ http://dl.acm.org/citation.cfm?id=3095788
+
+* A personal round-up of ways to use GNU parallel to parallelize commands
+  https://orebibou.com/2017/07/%E3%82%B3%E3%83%9E%E3%83%B3%E3%83%89%E3%81%AE%E4%B8%A6%E5%88%97%E5%8C%96%E3%82%92%E8%A1%8C%E3%81%88%E3%82%8B%E3%80%8Egnu-parallel%E3%80%8F%E3%81%AE%E5%80%8B%E4%BA%BA%E7%9A%84%E4%BD%BF%E3%81%84/
+
+* How to play and play with thousands of digitized 78rpm records
+ https://blog.archive.org/2017/07/10/how-to-play-and-play-with-78rpm-record-transfers/
+
+* Parallelizing a for loop https://gxnotes.com/article/130363.html
+
+* RFC 8142: GeoJSON Text Sequences
+ https://sgillies.net/2017/05/18/rfc-8142-geojson-text-sequences.html
+
+* Git auto fetch script I run every day
+ https://lukas.zapletalovi.com/2017/07/git-auto-fetch-script-i-run-every-day.html
+
+* cores, cpus and threads
+ http://crazyhottommy.blogspot.de/2017/07/cores-cpus-and-threads.html
+
+* Bug fixes and man page updates.
+
+
+20170622
+
+* The command line can now contain \257 (macron) or UTF-8 characters
+  whose encoding contains the byte \257.
+
+* Vote for GNU Parallel's community ads on
+ http://meta.unix.stackexchange.com/a/4356/2972
+ http://meta.askubuntu.com/a/16750/22307
+ http://meta.serverfault.com/a/9040/45704
+
+* The graph of the overhead for running 1000 jobs has been updated.
+  The overhead remains steady at 3.5 ms per job on a 1.7 GHz machine
+  https://www.gnu.org/software/parallel/process-time-j2-1700MHz-3000-1000.pdf
+
+* GNU Parallel was cited in: The topology of the directed clique
+ complex as a network invariant
+ https://springerplus.springeropen.com/articles/10.1186/s40064-016-2022-y
+
+* GNU Parallel was cited in: Improving Optical Character Recognition
+ of Finnish Historical Newspapers with a Combination of Fraktur &
+ Antiqua Models and Image
+ http://www.ep.liu.se/ecp/131/038/ecp17131038.pdf
+
+* GNU Parallel was cited in: Parallel Execution of Structural Mechanic
+ Tasks with Use of Small Supercomputers
+ http://www.sciencedirect.com/science/article/pii/S1877705817324815
+
+* GNU Parallel was cited in: Integration of a Multilingual Preordering
+ Component into a Commercial SMT Platform
+ https://www.degruyter.com/downloadpdf/j/pralin.2017.108.issue-1/pralin-2017-0009/pralin-2017-0009.pdf
+
+* Tips For Running Jobs On Your VM
+ https://support.ehelp.edu.au/support/solutions/articles/6000089713-tips-for-running-jobs-on-your-vm#parallel
+
+* Speed up ML model development with GNU Parallel
+ https://pzelasko.github.io/posts/speed-up-ml-model-development-with-gnu-parallel
+
+* Parallelizing a Python Function for the Extremely Lazy
+ http://tdhopper.com/blog/2017/Jun/07/parallelizing-a-python-function-for-the-extremely-lazy/
+
+* Using GNU parallel
+  http://www.android5.online/Android/androidjc/androidkf/gykf/201603/8741.html
+
+* Bug fixes and man page updates.
+
+
+20170522
+
+* --timeout can now use s=second,m=minute,h=hour,d=day.
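+  For example, the following kills the second job after one minute,
+  while the first job finishes normally:
+
+    parallel --timeout 1m 'sleep {}; echo {} done' ::: 5 120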
+
+* --dr is now an alias for --dry-run.
+
+* Vote for GNU Parallel's community ads on
+ http://meta.unix.stackexchange.com/a/4356/2972
+ http://meta.askubuntu.com/a/16750/22307
+ http://meta.serverfault.com/a/9040/45704
+
+* GNU Parallel was cited in: Feature Detection in Visual Cortex during
+ Different Functional States
+ https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5397493/
+
+* GNU Parallel was cited in: From genotype to phenotype: inferring
+ relationships between microbial traits and genomic components
+ https://docserv.uni-duesseldorf.de/servlets/DerivateServlet/Derivate-44622/main.pdf
+
+* GNU Parallel was cited in: Designing fecal microbiota transplant
+ trials that account for differences in donor stool efficacy
+ http://biorxiv.org/content/biorxiv/early/2016/07/25/065383.full.pdf
+
+* GNU Parallel was cited in: Assessing the predictability of
+ anharmonic vibrational modes at the example of hydroxyl groups - ad
+ hoc construction of localised modes and the influence of structural
+ solute–solvent motifs
+ http://pubs.rsc.org/-/content/articlelanding/2017/cp/c7cp01662j#!divAbstract
+
+* GNU Parallel was cited in: Mapping Environmental Influences on
+ Utility Fire Threat
+ ftp://frapftp.fire.ca.gov/outgoing/transfer/PUC_Map1/Utility%20Fire%20map%201%20final%20review%20and%20development%20report21616.pdf
+
+* GNU Parallel was cited in: Weak vs. Strong Disorder Superfluid-Bose
+ Glass Transition in One Dimension
+ https://arxiv.org/pdf/1704.02257.pdf
+
+* GNU Parallel was cited in: Online Automatic Post-editing for MT in a
+ Multi-Domain Translation Environment
+ https://www.aclweb.org/anthology/E/E17/E17-1050.pdf
+
+* GNU Parallel was cited in: Cross-calibration of S-NPP VIIRS
+ moderate-resolution reflective solar bands against MODIS Aqua over
+ dark water scenes
+ http://www.atmos-meas-tech.net/10/1425/2017/amt-10-1425-2017.pdf
+
+* GNU Parallel was cited in: Scattering Networks: Efficient 2D
+ Implementation And Application To Melanoma Classification
+ http://amslaurea.unibo.it/12261/1/nurrito_eugenio_tesi_fisica_magistrale.pdf
+
+* GNU Parallel was cited in: Genome-scale detection of positive
+ selection in 9 primates predicts human-virus evolutionary conflicts
+ http://biorxiv.org/content/biorxiv/early/2017/04/27/131680.full.pdf
+
+* GNU Parallel is used in: Creates a speech dataset for deep learning
+ https://github.com/jupiter126/Create_Speech_Dataset
+
+* GNU Parallel is used in: Parse and process the demixing secrets
+ dataset (DSD100) https://github.com/faroit/dsdtools
+
+* First glance at the gnu parallel utility
+ https://ibiscybernetics.com/blog/2017-04-05.html
+
+* Installing parallel from source
+ https://bitsanddragons.wordpress.com/2017/05/03/installing-parallel-from-source/
+
+* How to run command or code in parallel in bash shell under Linux or
+ Unix
+ https://www.cyberciti.biz/faq/how-to-run-command-or-code-in-parallel-in-bash-shell-under-linux-or-unix/
+
+* Parallel Serial Jobs Using GNU Parallel
+ http://www.hpc.lsu.edu/training/weekly-materials/2017-Spring/gnuparallel-Feb2017.pdf
+
+* GNU Parallel or How to list millions of S3 objects
+ https://joeray.me/gnu-parallel-how-to-list-millions-objects.html
+
+* Which Is the Best Tool for Copying a Large Directory Tree Locally?
+ http://arjon.es/2017/05/11/which-is-the-best-tool-for-copying-a-large-directory-tree-locally/
+
+* Bug fixes and man page updates.
+
+
+20170422
+
+* --halt supports done in addition to success and fail.
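+  For example, to stop all jobs as soon as any single job has
+  finished, whether it succeeded or failed:
+
+    parallel --halt now,done=1 'sleep {}; echo {}' ::: 3 10 20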
+
+* parset will set variables in parallel in Bash.
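+  A small sketch, assuming parset has been activated as described in
+  its man page (e.g. by sourcing env_parallel.bash):
+
+    parset a,b,c echo ::: 1 2 3
+    echo "$a $b $c"   # prints: 1 2 3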
+
+* Perl 5.6 on Irix is no longer supported.
+
+* Vote for GNU Parallel's community ads on
+ http://meta.unix.stackexchange.com/a/4356/2972
+ http://meta.askubuntu.com/a/16750/22307
+ http://meta.serverfault.com/a/9040/45704
+
+* GNU Parallel was cited in: Halvade-RNA: Parallel variant calling
+ from transcriptomic data using MapReduce
+ http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0174575
+
+* GNU Parallel was cited in: StrAuto: automation and parallelization
+ of STRUCTURE analysis
+ http://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-017-1593-0
+
+* GNU Parallel was cited in: Trespassing the Boundaries: Labeling
+ Temporal Bounds for Object Interactions in Egocentric Video
+ https://128.84.21.199/pdf/1703.09026.pdf
+
+* GNU Parallel is used in: https://github.com/lucascbeyeler/zmbackup
+
+* Using GNU Parallel
+ https://wiki.ncsa.illinois.edu/display/ROGER/Using+GNU+Parallel
+
+* Find and xargs and parallel
+ http://helpful.knobs-dials.com/index.php/Find_and_xargs_and_parallel#Parallel
+
+* Quick Introduction to GNU Parallel
+ https://github.com/mlangill/microbiome_helper/wiki/Quick-Introduction-to-GNU-Parallel
+
+* Data science at the command line https://www.slideshare.net/sharatsc/data-science-at-the-command-line
+
+* Bedtools notes http://www.jianshu.com/p/67b0665490ac
+
+* Bug fixes and man page updates.
+
+
+20170322
+
+* --rpl can now take arguments by adding '(regexp)' in the replacement
+ string.
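+  A sketch following the pattern from the man page, where the matched
+  group is available as $$1 in the Perl expression:
+
+    parallel --rpl '{%(.+?)} s/$$1$//;' echo {%.tar.gz} ::: my.tar.gz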
+
+* Vote for GNU Parallel's community ads on
+ http://meta.unix.stackexchange.com/a/4356/2972
+ http://meta.askubuntu.com/a/16750/22307
+ http://meta.serverfault.com/a/9040/45704
+
+* GNU Parallel was cited in: shmlast: An improved implementation of
+ Conditional Reciprocal Best Hits with LAST and Python
+ https://joss.theoj.org/papers/3cde54de7dfbcada7c0fc04f569b36c7
+
+* GNU Parallel was cited in: Prediction of relativistic electron flux
+ in the Earth’s outer radiation belt at geostationary orbit by
+ adaptive methods
+ https://link.springer.com/article/10.1134/S0016793217010108
+
+* GNU Parallel was cited in: Higher likelihood of multiple bit-flips
+ due to neutron-induced strikes on logic gates
+ https://arxiv.org/pdf/1612.08239.pdf
+
+* GNU Parallel was cited in: A Monte Carlo Resampling Approach for the
+ Calculation of Hybrid Classical and Quantum Free Energies
+ http://pubs.acs.org/doi/pdfplus/10.1021/acs.jctc.6b00506
+
+* GNU Parallel was cited in: Learning string distance with smoothing
+ for OCR spelling correction
+ https://link.springer.com/article/10.1007/s11042-016-4185-5
+
+* GNU Parallel was cited in: The Breakdown of String Perturbation
+ Theory for Many External Particles
+ https://arxiv.org/pdf/1611.08003.pdf
+
+* GNU Parallel was cited in: Operations Research Applications in
+ Nuclear Energy
+ https://dspace.library.colostate.edu/bitstream/handle/11124/170687/Johnson_mines_0052E_11207.pdf?sequence=1&isAllowed=y
+
+* GNU Parallel was cited in: Parallel Computing: GNU Parallel
+ http://www.blopig.com/blog/2017/02/parallel-computing-gnu-parallel/
+
+* Stig Sandbeck Mathisen: Change All The Passwords (Again)
+ http://garf.us/2017/02/stig-sandbeck-mathisen-change-all-the-passwords-again/
+
+* Working with parallelism in Bash using GNU Parallel
+  http://www.beyeler.com.br/2017/03/trabalhando-como-paralelismo-no-bash-usando-o-gnu-parallel/
+
+* An example of combining GNU parallel and SQL*Loader
+  https://blogs.oracle.com/LetTheSunShineIn/entry/gnu_parallel_%E3%81%A8_sql_loader
+
+* Bug fixes and man page updates.
+
+
+20170222
+
+* env_parallel now supports /bin/sh, ash, dash.
+
+* --tee will pipe all data to all jobs. Used with --pipe/--pipepart
+ and :::. Small example: seq 3333 | parallel --pipe --tee --tag 'grep
+ {} | wc -l' ::: {0..9}
+
+* If --results {replstr} ends in /: save in dir. Otherwise save as
+ file. Example: parallel --results {} echo {} ::: a b c
+
+* Vote for GNU Parallel's community ads on
+ http://meta.unix.stackexchange.com/a/4356/2972
+ http://meta.askubuntu.com/a/16750/22307
+ http://meta.serverfault.com/a/9040/45704
+
+* GNU Parallel was cited in: Low-delay forwarding with multiple
+ candidates for VANETs using multi-criteria decision making
+ http://ieeexplore.ieee.org/document/7835961/
+
+* GNU Parallel was cited in: An Integrated Simulator and Dataset that
+ Combines Grasping and Vision for Deep Learning
+ https://arxiv.org/pdf/1702.02103.pdf
+
+* GNU Parallel was cited in: A Bayesian model selection approach for
+ identifying differentially expressed transcripts from RNA sequencing
+ data http://onlinelibrary.wiley.com/doi/10.1111/rssc.12213/full
+
+* GNU Parallel was cited in: The impact of copy number variation on
+ gout, and the apple genome
+ https://ourarchive.otago.ac.nz/bitstream/handle/10523/7060/BoocockJames2016MSc.pdf?sequence=3
+
+* GNU Parallel was cited in: Reaction rates for reaction-diffusion
+ kinetics on unstructured meshes
+ http://aip.scitation.org/doi/10.1063/1.4975167
+
+* GNU Parallel was cited in: Modeling prediction error improves power
+ of transcriptome-wide association studies
+ http://biorxiv.org/content/biorxiv/early/2017/02/14/108316.full.pdf
+
+* GNU Parallel was cited in: Proper experimental design requires
+ randomization/balancing of molecular ecology experiments
+ http://biorxiv.org/content/biorxiv/early/2017/02/17/109280.full.pdf
+
+* GNU Parallel was cited in: Experiment-based thermal micromagnetic
+ simulations of the magnetization reversal for ns-range clocked
+ nanomagnetic logic
+ http://aip.scitation.org/doi/full/10.1063/1.4974021
+
+* GNU Parallel was cited in: The University of Edinburgh’s systems
+ submission to the MT task at IWSLT
+ http://workshop2016.iwslt.org/downloads/IWSLT_2016_paper_27.pdf
+
+* Fast-GBS uses GNU Parallel: https://bitbucket.org/jerlar73/fast-gbs
+
+* Job: DevOps Engineer/Senior Systems Administrator in New York, New
+ York
+ http://rockefeller.jobs/new-york-ny/devops-engineersenior-systems-administrator/2DB3408EAC9E4617AD238550F31C7641/job/
+
+* Training in GNU Parallel:
+ https://www.carc.unm.edu/education-outreach/workshops--training/
+
+* Training in GNU Parallel:
+ http://www.hpc.lsu.edu/training/registration.php?topic=Parallel%20Serial%20Jobs%20Using%20GNU%20Parallel&date=February%2022,%202017&time=9:00%20AM%20-%2011:00%20AM&place=307%20Frey&deadline=2017-02-21%2016:30:00
+
+* Batch Convert Images from PNG to JPEG
+ http://coreygoldberg.blogspot.dk/2017/02/batch-convert-images-from-png-to-jpeg.html
+
+* Accelerating Oxford Nanopore Basecalling
+ http://fowlerlab.org/2017/01/26/accelerating-oxford-nanopore-basecalling/
+
+* parallel and imagemagick in a bash loop
+  https://www.linux.org.ru/forum/development/13190247
+
+* Bug fixes and man page updates.
+
+
+20170206alpha
+
+* --tee introduced.
+
+
+20170122
+
+* sql now uses a temporary credentials file for MySQL to avoid a
+  warning.
+
+* --sqlmaster and --sqlworker have been reworked.
+
+* --sqlworker only reads from the DBURL: any command and any arguments
+ are ignored.
+
+* --sqlmaster +DBURL will append jobs, even with different commands,
+  to the table behind the DBURL and not erase the table.
+
+* If --results contains a replacement string, no tree structure with
+  the arguments will be generated.
+
+* --fg --tmux or --fg --tmuxpane will connect to the tmux session.
+
+* GNU Parallel was cited in: Fast, Automated Indoor Light Detection,
+ Classification, and Measurement
+ https://www2.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-212.pdf
+
+* GNU Parallel was cited in: Unbiased RNA Shotgun Metagenomics in
+ Social and Solitary Wild Bees Detects Associations with Eukaryote
+ Parasites and New Viruses
+ http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0168456
+
+* GNU Parallel was cited in: Almost Event-Rate Independent Monitoring
+ of Metric Temporal Logic
+ http://people.inf.ethz.ch/trayteld/papers/tacas17-aerial/aerial.pdf
+
+* Preprocessing data in parallel with the GNU parallel command:
+  https://blog.razrlele.com/p/1843
+
+* GNU Parallel as a queuing system:
+ http://puntoblogspot.blogspot.com/2017/01/gnu-parallel-as-queuing-system.html
+
+* AWS re:Invent 2016: Deep Dive on Amazon Elastic File System (STG202)
+ slide 45
+ http://www.slideshare.net/AmazonWebServices/aws-reinvent-2016-deep-dive-on-amazon-elastic-file-system-stg202
+ and https://youtu.be/PlTuJx4VnGw?t=30m16s
+
+* Bug fixes and man page updates.
+
+
+20161222
+
+* --results foo.csv will save output as a CSV file. It can be directly
+  imported into R or LibreCalc.
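+  For example (file name made up), this writes one row per job to
+  out.csv, ready for read.csv("out.csv") in R:
+
+    parallel --results out.csv echo ::: a b c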
+
+* When using --pipepart a negative --block-size is not interpreted as
+ a block size but as the number of blocks each jobslot should have.
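+  For example (file name made up), this chops bigfile into 2 blocks
+  per jobslot instead of using a fixed block size:
+
+    parallel --pipepart -a bigfile --block -2 wc -l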
+
+* --sqlmaster/--sqlandworker will append jobs to the DBURL if the
+ DBURL is prepended with +.
+
+* GNU Parallel was cited in: A cloud-based workflow to quantify
+ transcript-expression levels in public cancer compendia
+ http://www.nature.com/articles/srep39259
+
+* GNU Parallel was cited in: Determination of crystal structures of
+ proteins of unknown identity using a marathon molecular replacement
+ procedure: Structure of Stenotrophomonas maltophilia
+ phosphate-binding protein
+ https://www.researchgate.net/publication/308186413_Determination_of_crystal_structures_of_proteins_of_unknown_identity_using_a_marathon_molecular_replacement_procedure_Structure_of_Stenotrophomonas_maltophilia_phosphate-binding_protein
+
+* GNU Parallel was cited in: Large-scale benchmarking reveals false
+ discoveries and count transformation sensitivity in 16S rRNA gene
+ amplicon data analysis methods used in microbiome studies
+ http://microbiomejournal.biomedcentral.com/articles/10.1186/s40168-016-0208-8
+
+* GNU Parallel was cited in: Decomposing Images into Layers via
+ RGB-space Geometry
+ https://cs.gmu.edu/~ygingold/singleimage/Decomposing%20Images%20into%20Layers%20via%20RGB-space%20Geometry%20(Tan%20et%20al%202016%20TOG)%20small.pdf
+
+* Transient compute clustering with GNU Parallel and sshfs
+ https://gist.github.com/Brainiarc7/24c966c8a001061ee86cc4bc05826bf4
+
+* 4 Ways to Batch Convert Your PNG to JPG and Vice-Versa
+ http://www.tecmint.com/linux-image-conversion-tools/
+
+* All's Fair in Love and Distributed Storage
+ http://cohesity.com/blog/alls-fair-love-distributed-storage/
+
+* How can I use GNU Parallel to run a lot of commands in parallel
+ https://www.msi.umn.edu/support/faq/how-can-i-use-gnu-parallel-run-lot-commands-parallel
+
+* JEONG Haeyoung's blog
+  http://blog.genoglobe.com/2016/11/gnu-parallel.html
+
+* Four ways to batch-convert between PNG and JPG on Linux
+  http://os.51cto.com/art/201612/524182.htm
+
+* Running in parallel http://tomkimpson.com/howto/gnuparallel/
+
+* Taco Bell Parallel Programming
+ https://giorgos.sealabs.net/taco-bell-parallel-programming.html
+
+* Bug fixes and man page updates.
+
+
+20161122
+
+* --record-env can now be used with env_parallel for bash, ksh, pdksh,
+ and zsh.
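+  A sketch of the intended workflow (variable name made up):
+
+    env_parallel --record-env     # run once in a clean shell
+    myvar="Hello"                 # defined after the recording ...
+    env_parallel --env _ echo '$myvar {}' ::: world   # ... so exported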
+
+* GNU Parallel was cited in: An improved implementation of Conditional
+ Reciprocal Best Hits with LAST and Python
+ https://pypi.python.org/pypi/shmlast/1.0.3
+
+* GNU Parallel was cited in: miRPathDB: a new dictionary on microRNAs
+ and target pathways
+ http://nar.oxfordjournals.org/content/early/2016/10/13/nar.gkw926.full
+
+* GNU/Parallel – Run multiple commands simultaneously
+ https://vinusumi.wordpress.com/2016/11/08/gnuparallel-run-multiple-commands-simultaneously/
+
+* Tutorial: Parallel web scraping with CasperJS and GNU Parallel
+ http://g-liu.com/blog/2016/10/tutorial-parallel-web-scraping-with-casperjs-and-gnu-parallel/
+
+* Bug fixes and man page updates.
+
+
+20161022
+
+* The first command no longer gets stdin, as it caused unpredictable
+  issues.
+
+* GNU Parallel was cited in: Efficiently parallelized modeling of
+ tightly focused, large bandwidth laser pulses
+ https://arxiv.org/pdf/1609.08146v1.pdf
+
+* How to crunch lots of geodata in parallel
+ http://blog.faraday.io/how-to-crunch-lots-of-geodata-in-parallel/
+
+* GNU parallel is my new toaster
+ http://amedee.me/gnu-parallel-is-my-new-toaster/index.html
+
+* Bug fixes and man page updates.
+
+
+20160922
+
+* parcat - cat files or fifos in parallel
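+  For example (fifo names made up), two fifos can be read in parallel
+  and merged to stdout:
+
+    mkfifo fifo1 fifo2
+    (echo one > fifo1) & (echo two > fifo2) &
+    parcat fifo1 fifo2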
+
+* GNU Parallel was cited in: Velocity: A Netflow Based Optimized
+ Geo-IP Lookup Tool
+ https://mospace.umsystem.edu/xmlui/bitstream/handle/10355/50842/Thesis_2016_Pakhare.pdf?sequence=1&isAllowed=y
+
+* GNU Parallel was cited in: Viral deep sequencing needs an adaptive
+ approach: IRMA, the iterative refinement meta-assembler
+ http://www.ncbi.nlm.nih.gov/pmc/articles/PMC5011931/
+
+* Using GNU Parallel and ImageMagick for Conversions
+ https://blog.fractalcomic.com/tutorials/using-gnu-parallel-and-imagemagick-for-conversions/
+
+* Training in GNU Parallel
+ http://www.cism.ucl.ac.be/Services/Formations/parcomp.pdf
+
+* Chown Ceph OSD data directory using GNU Parallel
+ http://blog.widodh.nl/2016/08/chown-ceph-osd-data-directory-using-gnu-parallel/
+
+* FastGBS uses GNU Parallel https://bitbucket.org/jerlar73/fastgbs
+
+* dDocent: A guide to ddRAD sequencing
+ https://ddocent.wordpress.com/ddocent-pipeline-user-guide/
+
+* SOFTWARE GNU Parallel
+ http://msi-riss.readthedocs.io/en/latest/software/gnu-parallel.html
+
+* Bug fixes and man page updates.
+
+
+20160822
+
+* --tmuxpane opens jobs in panes in tmux. Useful if you want to
+  monitor the progress of fewer than 100 simultaneous jobs.
+
+* --linebuffer now treats \r as a line ending, too.
+
+* Changes in Perl force floats to be given with a leading zero, so
+  0.1 and not .1.
+
+* --xapply renamed to --link.
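+  For example, --link pairs the nth argument from each input source:
+
+    parallel --link echo {1}-{2} ::: A B C ::: 1 2 3
+    # prints A-1, B-2, C-3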
+
+* Support for pdksh will fade until someone packages it for Ubuntu 16.
+
+* GNU Parallel was cited in: Exome sequencing of geographically
+ diverse barley landraces and wild relatives gives insights into
+ environmental adaptation
+ http://www.nature.com/ng/journal/vaop/ncurrent/full/ng.3612.html?WT.feed_name=subjects_genetics#references
+
+* GNU Parallel was cited in: Vision-Enhanced Lidar Odometry and
+ Mapping https://www.ri.cmu.edu/pub_files/2016/8/DLL-thesis.pdf
+
+* Easy parallelization with GNU parallel
+ http://mpharrigan.com/2016/08/02/parallel.html
+
+* Facebook V: Predicting Check Ins, Winner's Interview: 2nd Place,
+ Markus Kliegl
+ http://blog.kaggle.com/2016/08/02/facebook-v-predicting-check-ins-winners-interview-2nd-place-markus-kliegl/
+
+* Parallel import
+ http://www.manitou-mail.org/blog/2016/07/parallel-import/
+
+* Large file batch processing using NodeJs and GNU Parallel
+ http://www.zacorndorff.com/2016/07/27/large-file-batch-processing-using-nodejs-and-gnu-parallel/
+
+* Bug fixes and man page updates.
+
+
+20160722
+
+* env_parallel is now ready for wider testing. It is still beta
+ quality.
+
+* env_parallel is heavily modified for all shells and testing has been
+ increased.
+
+* Selectively choosing what to export using --env now works for
+ env_parallel (bash, csh, fish, ksh, pdksh, tcsh, zsh).
+
+* --round-robin now gives more work to a job that processes faster
+  instead of giving the same amount to all jobs.
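+  For example, the input below is chopped into blocks that are dealt
+  out to the jobslots, so the printed counts sum to 10000000 but need
+  not be equal:
+
+    seq 10000000 | parallel --pipe --round-robin --block 10M wc -l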
+
+* --pipepart works on block devices on GNU/Linux.
+
+* GNU Parallel was cited in: HybPiper: Extracting Coding Sequence and
+ Introns for Phylogenetics from High-Throughput Sequencing Reads
+ Using Target Enrichment
+ http://www.bioone.org/doi/full/10.3732/apps.1600016
+
+* GNU Parallel was cited in: StrAuto - Automation and Parallelization
+ of STRUCTURE Analysis
+ http://www.crypticlineage.net/download/strauto/strauto_doc.pdf
+
+* GNU Parallel was cited in: Tools and techniques for computational
+ reproducibility
+ http://gigascience.biomedcentral.com/articles/10.1186/s13742-016-0135-4
+
+* GNU Parallel was cited in: FlashPCA: fast sparse canonical
+ correlation analysis of genomic data
+ http://biorxiv.org/content/biorxiv/suppl/2016/04/06/047217.DC1/047217-1.pdf
+
+* GNU Parallel was cited in: Math Indexer and Searcher under the Hood:
+ Fine-tuning Query Expansion and Unification Strategies
+ http://research.nii.ac.jp/ntcir/workshop/OnlineProceedings12/pdf/ntcir/MathIR/05-NTCIR12-MathIR-RuzickaM.pdf
+
+* GNU Parallel was cited in: The Evolution and Fate of
+ Super-Chandrasekhar Mass White Dwarf Merger Remnants
+ http://arxiv.org/pdf/1606.02300.pdf
+
+* GNU Parallel was cited in: Evaluation of Coastal Scatterometer
+ Products
+ https://mdc.coaps.fsu.edu/scatterometry/meeting/docs/2016/Thu_AM/coastal-poster.pdf
+
+* GNU Parallel was used in: https://github.com/splitice/bulkdnsblcheck
+
+* The iconv slurp misfeature
+ http://www.openfusion.net/linux/iconv_slurp_misfeature
+
+* When there is CPU to spare https://veer66.wordpress.com/2016/06/15/gnu-parallel/
+
+* Bug fixes and man page updates.
+
+
+20160622
+
+* $PATH can now be exported using --env PATH. Useful if GNU Parallel
+ is not in your path on remote machines.
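+  For example (server name made up), assuming GNU Parallel lives in
+  $HOME/bin on the remote machine:
+
+    PATH=$PATH:$HOME/bin
+    parallel --env PATH -S server -N0 parallel --version ::: run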
+
+* If --block is left out, --pipepart will use a block size that will
+ result in 10 jobs per jobslot.
+
+* The cookie from 2016-01-04 was won by Morgan Rodgers on 2016-06-06,
+  after 5 months.
+
+* Vote for GNU Parallel's community ad on
+ https://meta.askubuntu.com/questions/14925/community-promotion-ads-2016/15046#15046
+
+* GNU Parallel was cited in: Genotypic Resistance Tests Sequences
+ Reveal the Role of Marginalized Populations in HIV-1 Transmission in
+ Switzerland http://www.nature.com/articles/srep27580
+
+* GNU Parallel was cited in: A Deep-Learning Approach for Operation of
+ an Automated Realtime Flare Forecast
+ https://arxiv.org/pdf/1606.01587.pdf
+
+* Unix parallel: Populating all the USB sticks
+ http://www.markhneedham.com/blog/2016/06/01/unix-parallel-populating-all-the-usb-sticks/
+
+* GNU Parallel is a fantastic utility https://www.dray.be/parallel/
+
+* Combining a large number of VCF files
+ https://gtamazian.com/2016/06/16/combining-a-large-number-of-vcf-files/
+
+* Useful Scraping Techniques
+ http://blog.apps.npr.org/2016/06/17/scraping-tips.html
+
+* Tools of the trade
+ http://onox.com.br/2015/05/tools-of-the-trade/#more-198
+
+* Armslist scraper https://github.com/nprapps/armslist-scraper/
+
+* Bug fixes and man page updates.
+
+
+20160522
+
+* niceload --net pauses the program if the internet connection is
+ overloaded.
+
+* Vote for GNU Parallel's community ad on
+ https://meta.askubuntu.com/questions/14925/community-promotion-ads-2016/15046#15046
+
+* Updated speed comparison between versions
+ https://www.gnu.org/software/parallel/process-time-j2-1700MHz-3000-1000.pdf
+
+* GNU Parallel was cited in: Improving computation efficiency by
+ parallel programming
+ http://www.irbis-nbuv.gov.ua/cgi-bin/irbis_nbuv/cgiirbis_64.exe?C21COM=2&I21DBN=UJRN&P21DBN=UJRN&IMAGE_FILE_DOWNLOAD=1&Image_file_name=PDF/ape_2013_3_44.pdf
+
+* GNU Parallel was cited in: A supernova feedback implementation for
+ the astrophysical simulation software Arepo
+ https://arxiv.org/abs/1604.06071
+
+* GNU Parallel was cited in: Lorenz-Mie theory for 2D scattering and
+ resonance calculations https://arxiv.org/pdf/1505.07691v2.pdf
+
+* GNU Parallel was cited in: Host-pathogen co-evolution and the
+ emergence of broadly neutralizing antibodies in chronic infections
+ https://arxiv.org/abs/1512.06296
+
+* GNU Parallel was cited in: Pacific People, Metabolic Disease and
+ Evolutionary Processes: a mitochondrial DNA study
+ https://otago.ourarchive.ac.nz/handle/10523/6340
+
+* GNU Parallel was cited in: The effect of domain modeling on
+ efficiency of planning: Lessons from the Nomystery domain
+ http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7407131
+
+* GNU Parallel was cited in: Oops, my tests broke the build: An
+ analysis of Travis CI builds with GitHub
+ https://peerj.com/preprints/1984/
+
+* GNU Parallel was cited in: Drosophila Muller F Elements Maintain a
+ Distinct Set of Genomic Properties Over 40 Million Years of
+ Evolution http://www.g3journal.org/content/5/5/719.full.pdf+html
+
+* GNU Parallel was cited in: An Empirical Comparison of Neural
+ Architectures for Reinforcement Learning in Partially Observable
+ Environments
+ http://biorxiv.org/content/biorxiv/early/2016/03/24/022707.full.pdf
+
+* GNU Parallel was cited in: PleaseTM: Enabling Transaction Conflict
+ Management in Requester-wins Hardware Transactional Memory
+ http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7446072
+
+* GNU Parallel was cited in: EASE-MM: Sequence-Based Prediction of
+ Mutation-Induced Stability Changes with Feature-Based Multiple
+ Models
+ http://www.sciencedirect.com/science/article/pii/S0022283616000310
+
+* GNU Parallel was cited in: A new orthology assessment method for
+ phylogenomic data: Unrooted Phylogenetic Orthology
+ http://mbe.oxfordjournals.org/content/early/2016/04/06/molbev.msw069.short
+ https://github.com/ballesterus/UPhO
+
+* GNU Parallel was cited in: Distinctive Interest Point Selection for
+ Efficient Near-duplicate Image Retrieval
+ http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7459172
+
+* GNU Parallel was cited in: The Evolution of C Programming Practices:
+ A Study of the Unix Operating System 1973–2015
+ https://dl.acm.org/citation.cfm?id=2884799 (It has the cutest
+ thumbnail graphs I have ever seen scattered all over the text)
+
+* GNU Parallel was cited in: Data processing pipeline for serial
+ femtosecond crystallography at SACLA
+ http://journals.iucr.org/j/issues/2016/03/00/zw5001/index.html
+
+* GNU Parallel was cited in: Reconstruction of Fine-Scale Auroral
+ Dynamics http://arxiv.org/pdf/1512.01460.pdf
+
+* GNU Parallel was cited in: Genomic legacy of the African cheetah,
+ Acinonyx jubatus
+ https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4676127/pdf/13059_2015_Article_837.pdf
+
+* GNU Parallel was cited in: Don’t Forget to Lock the Back Door! A
+ Characterization of IPv6 Network Security Policy
+ http://benign-research-probe2.eecs.umich.edu/ndss16_ipv6_final.pdf
+
+* GNU Parallel was cited in: Comprehensive Annotation of the
+ Parastagonospora nodorum Reference Genome Using Next-Generation
+ Genomics, Transcriptomics and Proteogenomics
+ http://journals.plos.org/plosone/article/asset?id=10.1371%2Fjournal.pone.0147221.PDF
+
+* GNU Parallel was cited in: How attention influences perceptual
+ decision making: Single-trial EEG correlates of drift-diffusion
+ model parameters http://www.cidlab.com/prints/nunez2016attention.pdf
+
+* GNU Parallel was cited in: Telomere And Proximal Sequence Analysis
+ Using High-Throughput Sequencing Reads
+ http://repository.upenn.edu/edissertations/1460/
+
+* GNU Parallel was cited in: AT-GIS: Highly Parallel Spatial Query
+ Processing with Associative Transducers
+ http://lsds.doc.ic.ac.uk/sites/default/files/ATGIS-SIGMOD16.pdf
+
+* GNU Parallel was cited in: PVAIR: Partial Variable Assignment
+ InterpolatoR http://verify.inf.usi.ch/sites/default/files/main-2.pdf
+
+* GNU Parallel was cited in: Comparative Cladistics: Fossils,
+ Morphological Data Partitions and Lost Branches in the Fossil Tree
+ of Life http://opus.bath.ac.uk/43955/
+
+* GNU Parallel was cited in: Shannon: An Information-Optimal de
+ NovoRNA-Seq Assembler
+ http://biorxiv.org/content/biorxiv/early/2016/02/09/039230.full.pdf
+
+* GNU Parallel was cited in: Adaptive Measure-Theoretic Parameter
+ Estimation for Coastal Ocean Modeling
+ https://repositories.lib.utexas.edu/handle/2152/32435
+
+* GNU Parallel was cited in: Computational Design of DNA-Binding
+ Proteins
+ http://link.springer.com/protocol/10.1007/978-1-4939-3569-7_16
+
+* GNU Parallel was cited in: Reference genotype and exome data from an
+ Australian Aboriginal population for health-based research
+ http://www.nature.com/articles/sdata201623
+
+* GNU Parallel was cited in: Do aye-ayes echolocate?
+ http://biorxiv.org/content/biorxiv/early/2016/04/11/048165.full.pdf
+
+* GNU Parallel was cited in: Functional enrichments of disease
+ variants across thousands of independent loci in eight diseases
+ http://biorxiv.org/content/biorxiv/early/2016/04/11/048066.full.pdf
+
+* GNU Parallel was cited in: From genomes to phenotypes: Traitar, the
+ microbial trait analyzer
+ http://biorxiv.org/content/biorxiv/early/2016/03/12/043315.full.pdf
+
+* GNU Parallel was mentioned in: Fast Playback Framework for Analysis
+ of Ground-Based Doppler Radar Observations Using MapReduce
+ Technology
+ http://journals.ametsoc.org/doi/pdf/10.1175/JTECH-D-15-0118.1
+
+* NCBI blast tutorial
+ https://github.com/enormandeau/ncbi_blast_tutorial
+
+* Distributed Preservation Made Simple
+ https://blog.archive.org/2016/02/26/distributed-preservation-made-simple/
+
+* Parallel Jobs in Luigi http://rjbaxley.com/posts/2016/03/13/parallel_jobs_in_luigi.html
+
+* Bug fixes and man page updates.
+
+
+20160422
+
+* :::+ and ::::+ work like ::: and :::: but link this input source to
+  the previous input source in a --xapply fashion. Contrary to
+  --xapply, values do not wrap: the shortest input source determines
+  the length.
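+  For example, the second input source below is linked to the first,
+  and being shortest it limits the run to two jobs:
+
+    parallel echo {1}-{2} ::: a b c :::+ 1 2
+    # prints a-1, b-2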
+
+* --line-buffer --keep-order now outputs continuously from the oldest
+  job still running. This is more what you would expect than the
+  earlier behaviour, where --keep-order had no effect with
+  --line-buffer.
+
+* env_parallel supports tcsh, csh, pdksh. In fish it now supports
+  arrays. In csh/tcsh it now supports variables, aliases, and arrays
+  with no special characters. In pdksh it supports aliases, functions,
+  variables, and arrays.
+
+* Function exporting on Mac OS X works around the old Bash version.
+
+* Better CPU detection on OpenIndiana.
+
+* GNU Parallel was cited in: Automatic Methods for Assisted
+ Recruitment
+ https://www.researchgate.net/publication/297738658_Automatic_Methods_for_Assisted_Recruitment
+
+* GNU Parallel was cited in: Reinterpretation of ATLAS 8 TeV searches
+ for Natural SUSY with a R-Sneutrino LSP
+ http://arxiv.org/pdf/1603.06130.pdf
+
+* GNU Parallel was cited in: An Operational Radiometric Landsat
+ Preprocessing Framework for Large-Area Time Series Applications
+ https://www.uni-trier.de/fileadmin/fb6/prof/FER/Publikationen/frantz_et_al_ieee-tgrs-2016-post-print.pdf
+
+* A basic demo of how GNU Parallel can speed up execution of commands
+ https://www.youtube.com/watch?v=kl8LO2jcvMc
+
+* qbatch uses GNU Parallel: https://pypi.python.org/pypi/qbatch/1.0rc2
+
+* FaceCrop uses GNU Parallel: https://github.com/EderSantana/FaceCrop
+
+* Parallel Processing with Catmandu
+ https://librecatproject.wordpress.com/2016/04/20/parallel-processing-with-catmandu/
+
+* GNU parallel usage examples
+  http://staypython.blogspot.dk/2016/04/gnu-parallel.html
+
+* Bug fixes and man page updates.
+
+
+20160322
+
+* env_parallel is a function that exports the environment (functions,
+ aliases, variables, and arrays) to GNU Parallel. Run 'man
+ env_parallel' for details.
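+  A minimal Bash example:
+
+    . `which env_parallel.bash`
+    myfunc() { echo "Hello $1"; }
+    env_parallel myfunc ::: world universe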
+
+* niceload --prg now searches for substrings if no process with the
+ name is found.
+
+* GNU Parallel was cited in: Random Forest DBSCAN for USPTO Inventor
+ Name Disambiguation http://arxiv.org/pdf/1602.01792.pdf
+
+* GNU Parallel was mentioned (with wrong citation) in: Dual Level
+ Parallel Computations for LargeScale High-Fidelity Database to
+ Design Aerospace Vehicles
+ http://www.nas.nasa.gov/assets/pdf/papers/Guruswamy_2013_DualLevelParallelComputations.pdf
+
+* Using ‘Parallel’ in Unix
+ https://shearnrylan.wordpress.com/2016/02/22/using-parallel-in-unix/
+
+* JPEG Squish uses (and co-distributes) GNU Parallel:
+ http://dantidswell.co.uk/jpeg-squish/
+
+* Bug fixes and man page updates.
+
+
+20160222
+
+* Vote for new community ads on Superuser, Ask Ubuntu, Unix&Linux
+ http://meta.superuser.com/questions/11127/community-promotion-ads-2016/11225#11225
+ http://meta.askubuntu.com/questions/14925/community-promotion-ads-2016/15046#15046
+ http://meta.unix.stackexchange.com/questions/3854/community-promotion-ads-2016/3893#3893
+
+* Daily Quick Tip: parallel
+ https://medium.com/@jacaetevha/daily-quick-tip-parallel-8772435f6582
+
+* Downloading a list of URLs
+ http://blog.gypsydave5.com/2016/02/04/xargs-and-curl/
+
+* Using GNU Parallel and ImageMagick for Conversions
+ http://blog.fractalcomic.com/tutorials/using-gnu-parallel-and-imagemagick-for-conversions/
+
+* Running bash commands in parallel
+ http://linuxcommando.blogspot.dk/2016/01/running-bash-commands-in-parallel.html
+
+* parallel processing in PDAL
+ https://smathermather.wordpress.com/2016/01/28/parallel-processing-in-pdal/
+
+* A Million Text Files And A Single Laptop
+ http://randyzwitch.com/gnu-parallel-medium-data/
+
+* An introduction to parallel processing with GNU Parallel that you
+  can start using today
+  http://www.slideshare.net/koji_matsuda/gnu-parallel
+
+* Bug fixes and man page updates.
+
+
+20160122
+
+* --sql DBURL uses DBURL as storage for jobs and output. It does not
+ run any jobs so it requires at least one --sqlworker. DBURL must
+ point to a table.
+
+* --sqlworker DBURL gets jobs from DBURL and stores the result back to
+ DBURL.
+
+* --sqlandworker is a shorthand for --sql and --sqlworker.
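+  A sketch with a made-up DBURL (user, password, host, database, and
+  table are placeholders):
+
+    parallel --sqlandworker mysql://user:pass@host/mydb/mytable \
+      echo ::: a b c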
+
+* --sqlworker requires the output of a single job to fit in memory.
+
+* --results now also saves a file called 'seq' containing the sequence
+ number.
+
+* If $PARALLEL_ENV is a file, then that file will be read into
+ $PARALLEL_ENV.
+
+* man parallel_tutorial has been given an overhaul.
+
+* --workdir now accepts replacement strings.
+
+* GNU Parallel was cited in: Possum - A Framework for
+ Three-Dimensional Reconstruction of Brain Images from Serial
+ Sections http://link.springer.com/article/10.1007/s12021-015-9286-1
+
+* GNU Parallel was cited in: A Genetic Algorithm for the selection of
+ structural MRI features for classification of Mild Cognitive
+ Impairment and Alzheimer's Disease
+ http://ieeexplore.ieee.org/xpl/abstractReferences.jsp?tp=&arnumber=7359909&url=http%3A%2F%2Fieeexplore.ieee.org%2Fxpls%2Fabs_all.jsp%3Farnumber%3D7359909
+
+* GNU Parallel was cited in: The Effect of Domain Modeling on
+ Performance of Planning Algorithms
+ http://isaim2016.cs.virginia.edu/papers/ISAIM2016_Bartak_Vodrazka.pdf
+
+* Plotting time tables for jobs run by GNU Parallel
+ https://github.com/ziotom78/plot_joblog
+
+* GNU Parallel was used in: Mission Impossible: you have 1 minute to
+ analyze the Ebola Genome https://www.biostars.org/p/119397
+
+* GNU Parallel is used in LAST: http://last.cbrc.jp/
+
+* Distributed Log Search Using GNU Parallel
+ http://codehate.com/blog/2015/12/01/distributed-log-search-using-gnu-parallel/
+
+* Bug fixes and man page updates.
+
+
+20151222
+
+* --transfer is now an alias for --transferfile {}.
+
+* --transferfile works like --transfer, but takes an argument like
+ --return. This makes it possible to combine transferring files
+ with multiple input sources: parallel -S server --tf {1} wc {2}
+ {1} ::: * ::: -l -w -c
+
+* total_jobs() can now be used in {= =}: parallel echo job {#} of '{=
+ $_=total_jobs() =}' ::: {1..50}
+
+* GNU Parallel was cited in: Evolution and Learning in Heterogeneous
+ Environments
+ http://research.gold.ac.uk/15078/1/COM_thesis_JonesD_2015.pdf
+
+* GNU Parallel was cited in: Achieving Consistent Doppler Measurements
+ from SDO/HMI Vector Field Inversions
+ http://arxiv.org/pdf/1511.06500.pdf
+
+* Flo uses GNU Parallel: https://github.com/wurmlab/flo
+
+* Using GNU parallel for parallel computing
+  http://mutolisp.logdown.com/posts/316959-using-gnu-parallel-to-parallel-computing
+
+* Bug fixes and man page updates.
+
+
+20151122
+
+* GNU Parallel packaged for CERN CentOS:
+ http://linuxsoft.cern.ch/cern/centos/7/cern/x86_64/repoview/parallel.html
+
+* GNU Parallel was cited in: The Outer Solar System Origins Survey:
+ I. Design and First-Quarter Discoveries
+ http://arxiv.org/pdf/1511.02895.pdf
+
+* GNU Parallel was cited in: Contrasting genetic architectures of
+ schizophrenia and other complex diseases using fast
+ variance-components analysis
+ http://www.nature.com/ng/journal/vaop/ncurrent/full/ng.3431.html
+
+* GNU Parallel was cited in: Named-Entity Chunking for Norwegian Text
+ using Support Vector Machines
+ http://ojs.bibsys.no/index.php/NIK/article/viewFile/248/211
+
+* GNU Parallel was cited in: Multiscale Estimation of Binding Kinetics
+ Using Brownian Dynamics, Molecular Dynamics and Milestoning
+ http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004381#pcbi.1004381.ref072
+
+* GNU Parallel was cited in: A Detailed Characterization of Errors in
+ Logic Circuits due to Single-Event Transients
+ http://ieeexplore.ieee.org/xpl/articleDetails.jsp?tp=&arnumber=7302348&url=http%3A%2F%2Fieeexplore.ieee.org%2Fxpls%2Fabs_all.jsp%3Farnumber%3D7302348
+
+* GNU Parallel was cited in: Arabic Speaker Emotion Classification
+ Using Rhythm Metrics and Neural Networks
+ http://www.eurasip.org/Proceedings/Eusipco/Eusipco2015/papers/1570104855.pdf
+
+* GNU Parallel was cited in: Stride Search: a general algorithm for
+ storm detection in high resolution climate data
+ http://www.geosci-model-dev-discuss.net/8/7727/2015/gmdd-8-7727-2015.pdf
+
+* GNU Parallel was cited in: Decomposing Digital Paintings into Layers
+ via RGB-space Geometry http://arxiv.org/pdf/1509.03335.pdf
+
+* GNU Parallel was cited in: Structure and evolutionary history of a
+ large family of NLR proteins in the zebrafish
+ http://www.biorxiv.org/content/biorxiv/early/2015/09/18/027151.full.pdf
+
+* GNU Parallel was cited in: Evolution of movement strategies under
+ competitive interactions
+ http://digital.csic.es/bitstream/10261/115973/1/evolution_movement_strategies_Kiziridis.pdf
+
+* Automating large numbers of tasks
+ https://rcc.uchicago.edu/docs/tutorials/kicp-tutorials/running-jobs.html
+
+* Max out your IOPs with GNU Parallel
+ http://blog.bitratchet.com/2015/11/11/max-out-your-iops-with-gnu-parallel/
+
+* Bug fixes and man page updates.
+
+
+20151022
+
+* --plus makes it possible to use {##} as a shorthand for
+  {= $_=$Global::JobQueue->total_jobs() =} which gives the number of
+  jobs to run in total.
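+  For example, this prints 'job 1 of 3' through 'job 3 of 3':
+
+    parallel --plus echo job {#} of {##} ::: a b c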
+
+* {= $_=$Global::JobQueue->total_jobs() =} is incompatible with -X,
+ -m, and --xargs.
+
+* GNU Parallel is now mostly compatible with lsh
+ (http://www.lysator.liu.se/~nisse/lsh/) and somewhat compatible with
+ autossh (http://www.harding.motd.ca/autossh/).
+
+* --workdir ... now also works when run locally.
+
+* GNU Parallel was cited in: There is no (75, 32, 10, 16) strongly
+ regular graph http://arxiv.org/pdf/1509.05933.pdf
+
+* GNU Parallel was cited in: Roary: rapid large-scale prokaryote pan
+ genome analysis
+ http://bioinformatics.oxfordjournals.org/content/early/2015/08/05/bioinformatics.btv421.full.pdf+html
+
+* GNU Parallel is used in TraitAR:
+ https://testpypi.python.org/pypi/traitar/0.1.4
+
+* A parallel and fast way to download multiple files
+ http://onetipperday.blogspot.com/2015/10/a-parallel-and-fast-way-to-download.html
+
+* Use GNU Parallel to boost the performance of your scripts
+  http://adrianarroyocalle.github.io/blog/2015/10/20/usar-gnu-parallel/
+
+* Bug fixes and man page updates.
+
+
+20150922
+
+* GNU Parallel was cited in: Flexible Modeling of Epidemics with an
+ Empirical Bayes Framework
+ http://journals.plos.org/ploscompbiol/article?id=10.1371%2Fjournal.pcbi.1004382
+
+* GNU Parallel was cited in: BL1: 2D Potts Model with a Twist
+ https://sucs.swan.ac.uk/~rjames93/Dissertation.pdf
+
+* GNU Parallel was cited in: A Scalable Parallel Implementation of
+ Evolutionary Algorithms for Multi-Objective Optimization on GPUs
+ http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7257074
+
+* GNU Parallel was cited in: How Can We Measure the Similarity Between
+ Résumés of Selected Candidates for a Job?
+ https://www.researchgate.net/publication/275954089_How_can_we_measure_the_similarity_between_rsums_of_selected_candidates_for_a_job
+
+* GNU Parallel was cited in: Interplay of cell dynamics and epithelial
+ tension during morphogenesis of the Drosophila pupal wing
+ http://www.researchgate.net/profile/Raphael_Etournay/publication/279061859_Interplay_of_cell_dynamics_and_epithelial_tension_during_morphogenesis_of_the_Drosophila_pupal_wing/links/558a95ad08aeae8413bcceea.pdf
+
+* Third-party selling GNU Parallel T-shirts
+ http://www.aliexpress.com/item/2015F-BSO-GNU-LOGO-GNU-PARALLEL-men-s-shirt-sleeve-visual-illusion-error/32464827966.html
+
+* Joys of gnu parallel
+ http://scottolesen.com/index.php/2015/08/26/joys-of-gnu-parallel/
+
+* Crop and resize images with bash and ImageMagick
+ https://www.simonholywell.com/post/2015/08/image-resize-crop-bash-imagemagick/
+
+* Three Ways to Script Processes in Parallel
+ http://www.codeword.xyz/2015/09/02/three-ways-to-script-processes-in-parallel/
+
+* What It Looks Like to Process 3.5 Million Books
+ http://blog.gdeltproject.org/what-it-looks-like-to-process-3-5-million-books/
+
+* Bug fixes and man page updates.
+
+
+20150822
+
+* If $job->skip() is called in {= =} the job will not be run.
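+  For example, this skips the odd numbers and only echoes the even
+  ones:
+
+    parallel echo '{= $_ % 2 == 0 or $job->skip() =}' ::: 1 2 3 4 5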
+
+* @arg can now be used in {= =}: parallel echo '{= $arg[2] < 5 and
+ $_="j" =}' ::: 1 2 3 ::: 4 5 6
+
+* Version of GNU Parallel that uses PostgreSQL as backend
+ https://github.com/stephen-fralich/parallel-sql/
+
+* SpeedSeq: ultra-fast personal genome analysis and interpretation
+ http://www.nature.com/nmeth/journal/vaop/ncurrent/full/nmeth.3505.html
+
+* GNU Parallel is used in elPrep: https://github.com/ExaScience/elprep
+
+* Who actually reads the code?
+ http://www.fsf.org/blogs/community/who-actually-reads-the-code
+
+* Introduction to GNU Parallel
+ http://erictleung.com/2015/08/06/intro-to-gnu-parallel/
+
+* Does anyone actually read the code?
+  http://www.taringa.net/post/linux/18854759/Alguien-se-lee-el-codigo.html
+
+* Who reads the source code?
+  https://victorhckinthefreeworld.wordpress.com/2015/08/12/quien-se-lee-el-codigo-fuente/
+
+* Distributing workloads with GNU Parallel
+  http://blog.inovia-conseil.fr/?p=226#4
+
+* An experiment to check whether people read the code of open source
+  projects http://www.opennet.ru/opennews/art.shtml?num=42718
+
+* Multi-core machines with GNU parallel
+  http://blog.zeroplex.tw/2015/08/gnu-parallel.html
+
+* Bug fixes and man page updates.
+
+
+20150722
+
+* If a record fits in a block, --block only reads up to the size of
+  --block. If not, --block reads one full --block more.
+
+* Because widely spread distributions still use --tollef even after a
+  year of it being retired (which followed a year of being obsolete),
+  --tollef now gives an error unless used with --gnu.
+
+* --nice now uses a perl wrapper instead of the nice command.
+
+* Cookie 2013-08-18 was won by Mark Maimone, 1 year and 11 months
+ after release.
+
+* A parser for GNU Parallel --joblog files:
+ https://github.com/harryjubb/joblog
+
+* GNU Parallel was cited in: Dynamical formation & scattering of
+ hierarchical triples: Cross sections, Kozai-Lidov oscillations, and
+ collisions http://arxiv.org/pdf/1507.03593.pdf
+
+* GNU Parallel was cited in: A Fast Compiler for NetKAT
+ http://arxiv.org/pdf/1506.06378.pdf
+
+* GNU Parallel was cited in: Decomposing Time-Lapse Paintings into
+ Layers
+ http://cs.gmu.edu/~ygingold/timemap/Decomposing%20Time-Lapse%20Paintings%20into%20Layers%20(Tan%20et%20al%202015%20SIGGRAPH)%20600dpi.pdf
+
+* GNU Parallel was used in: Somatic mutation calling from the matched
+ tumor-normal exome-sequencing
+ http://fafner.meb.ki.se/biostatwiki/somac/
+
+* Parallel Processing from the Command Line
+ https://www.perpetual-beta.org/weblog/parallel-processing-from-the-command-line.html
+
+* Using GNU Parallel to speed up network operations
+ http://vielmetti.typepad.com/vacuum/2015/06/using-gnu-parallel-to-speed-up-network-operations.html
+
+* Crawl faster with "parallel" - but how fast?
+ http://andreas-wpv.blogspot.com/2015/07/crawl-faster-with-parallel-but-how-fast.html
+
+* Simultaneously benchmark many URLs with ApacheBench and GNU parallel
+ https://www.simonholywell.com/post/2015/06/parallel-benchmark-many-urls-with-apachebench/
+
+* Bug fixes and man page updates.
+
+
+20150622
+
+* --halt has been rewritten completely. You can now combine
+ percentages with success or fail. See the man page.
+
+* Exit values 102..254 have been removed. 101 means more than 100 jobs
+ failed.
+
+* Killing through --timeout, --memfree, or --halt is now done as a
+ process group.
+
+* --termseq determines which signals are sent when a job is killed.
+
+* An empty argument would previously cause no string to be inserted.
+  This has been changed so that '' is inserted, which prepends a space
+  to the output of: parallel echo {} b ::: ''
+
+* $PARALLEL_ENV can now be set to an environment that is prepended to
+  the command. It is used in env_parallel as mentioned in the man page.
+
+* --retry-failed will retry all failed jobs in a joblog. It will
+ ignore any command given.
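+  For example (log path made up), the first run records the exit
+  values in the joblog and the second run re-runs only the jobs that
+  failed:
+
+    parallel --joblog /tmp/log exit ::: 1 2 0
+    parallel --retry-failed --joblog /tmp/log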
+
+* --ssh and $PARALLEL_SSH can be used to set the command used for
+ ssh. The command is assumed to behave the same way as ssh.
+
+* --fifo now works in csh, too.
+
+* Q(...) can be used in {= =} to shell quote a string.
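+  A small sketch: quoting keeps the shell from expanding $b, so the
+  argument is echoed literally:
+
+    parallel echo '{= $_=Q($_) =}' ::: 'a$b'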
+
+* GNU Parallel was cited in: DockBench: An Integrated Informatic
+ Platform Bridging the Gap between the Robust Validation of Docking
+ Protocols and Virtual Screening Simulations
+ http://www.mdpi.com/1420-3049/20/6/9977
+
+* GNU Parallel was cited in: Swedes Online: You Are More Tracked Than
+ You Think
+ http://www.diva-portal.org/smash/get/diva2:807623/FULLTEXT01.pdf
+
+* GNU Parallel was cited in: Tutorial: Lorenz-Mie theory for 2D
+ scattering and resonance calculations
+ http://arxiv.org/pdf/1505.07691.pdf
+
+* GNU Parallel was cited in: A quantitative assessment of the Hadoop
+ framework for analyzing massively parallel DNA sequencing data
+ http://link.springer.com/article/10.1186/s13742-015-0058-5
+
+* GNU Parallel was cited in: A composite genome approach to identify
+ phylogenetically informative data from next-generation sequencing
+ http://www.biomedcentral.com/content/pdf/s12859-015-0632-y.pdf
+
+* GNU Parallel is used in: taxator-tk
+ http://algbio.cs.uni-duesseldorf.de/webapps/wa-download/
+
+* GNU Parallel is used in: https://github.com/d2207197/local-mapreduce
+
+* GNU Parallel is used in: wiki-edit-news https://github.com/albert-decatur/wiki-edit-news
+
+* GNU Parallel is used in: A Fingerprint Identification System
+ https://curve.carleton.ca/system/files/theses/28733.pdf
+
+* Job ad asking for GNU Parallel experience:
+ http://searchjobs.intel.com/gdansk-pol/software-validation-engineer/63A06826DAF24797AB414DC146201C2E/job/
+
+* Using BLAT http://wangzhengyuan.blogspot.dk/2015/06/using-blat.html
+
+* Bug fixes and man page updates.
+
+
+20150522
+
+* Security: The security issue for --sshlogin + --fifo/--cat has been
+  fixed. With this, all issues raised in
+  http://lists.gnu.org/archive/html/parallel/2015-04/msg00045.html
+  have been fixed.
+
+* Security: Further security analysis showed that the issue fixed in
+  20150422 also fixed the problem for --tmux.
+
+* GNU Parallel was cited in: CIDER: a pipeline for detecting waves of
+ coordinated transcriptional regulation in gene expression
+ time-course data
+ http://biorxiv.org/content/biorxiv/early/2015/03/17/012518.full.pdf
+
+* GNU Parallel was cited in: Building Genomic Analysis Pipelines in a
+ Hackathon Setting with Bioinformatician Teams: DNA-seq, Epigenomics,
+ Metagenomics and RNA-seq
+ http://biorxiv.org/content/biorxiv/early/2015/05/05/018085.full.pdf
+
+* GNU Parallel was cited in: Sequencing the cap-snatching repertoire
+ of H1N1 influenza provides insight into the mechanism of viral
+ transcription initiation
+ http://nar.oxfordjournals.org/content/early/2015/04/20/nar.gkv333.full.pdf
+
+* GNU Parallel was cited in: Genome assembly using Nanopore-guided
+ long and error-free DNA reads
+ http://www.biomedcentral.com/content/pdf/s12864-015-1519-z.pdf
+
+* GNU Parallel was used (unfortunately with wrong citation) in:
+ Comparing the CarbonTracker and TM5-4DVar data assimilation systems
+ for CO2 surface flux inversions
+ http://www.atmos-chem-phys-discuss.net/15/8883/2015/acpd-15-8883-2015-discussion.html
+
+* GNU Parallel was used in: Gene Set Omic Analysis (GSOA) method
+ https://bitbucket.org/srp33/gsoa
+
+* A Quick and Neat :) Orchestrator using GNU Parallel
+ http://www.elsotanillo.net/2015/05/a-quick-and-neat-orchestrator-using-gnu-parallel/
+
+* Execute commands on multiple computers using GNU Parallel (setting
+ up a cluster on the cheap)
+ https://spectraldifferences.wordpress.com/2015/04/26/execute-commands-on-multiple-computers-using-gnu-parallel-setting-up-a-cluster-on-the-cheap/
+
+* Functions and GNU parallel for effective cluster load management
+ http://genomespot.blogspot.dk/2015/04/functions-and-gnu-parallel-for.html
+
+* Use parallel processing to save time importing databases
+ http://drupalsun.com/node/41854
+
+* Run multiple ssh commands in parallel with GNU Parallel
+ http://www.ameir.net/blog/archives/380-run-multiple-ssh-commands-in-parallel-with-gnu-parallel.html
+
+* Importing huge databases faster
+ https://www.lullabot.com/blog/article/importing-huge-databases-faster
+
+* Parallel? Gnu parallel! https://debian.pro/1834
+
+* Bug fixes and man page updates.
+
+
+20150422
+
+New in this release:
+
+* Security fix. An attacker on the local system could make you
+  overwrite one of your own files with a single byte. The problem
+  exists when you use --compress, --tmux, --pipe, --cat, or --fifo.
+  The attacker must figure out the randomly chosen file name and
+  create a symlink within a time window of 15 ms.
+
+* --shuf will generate all jobs, and shuffle them before running
+ them. This is useful to get a quick preview of the results before
+ running the full batch.
+
+* GNU Parallel now has a DOI: https://dx.doi.org/10.5281/zenodo.16303
+
+* GNU Parallel was cited in: Scaling Machine Learning for Target
+ Prediction in Drug Discovery using Apache Spark
+ https://cris.cumulus.vub.ac.be/portal/files/5147244/spark.pdf
+
+* GNU Parallel was cited in: Bayesian inference of protein structure
+ from chemical shift data https://peerj.com/articles/861/
+
+* GNU Parallel was cited in: Toward Enhanced Metadata Quality of
+ Large-Scale Digital Libraries: Estimating Volume Time Range
+ https://www.ideals.illinois.edu/bitstream/handle/2142/73656/186_ready.pdf
+
+* GNU Parallel was cited in: Experimental study on the Wind Farm
+ Substation Cable Installation Problem
+ http://i11www.iti.uni-karlsruhe.de/_media/teaching/theses/ma-schmitz-14.pdf
+
+* GNU Parallel was used in: https://github.com/KAPPS-/landsat-gifworks
+
+* GNU Parallel was used in: https://github.com/exascience/elprep
+
+* GNU Parallel was used in:
+ http://search.cpan.org/~jillrowe/HPC-Runner-GnuParallel-0.01/
+
+* GNU Parallel was used in:
+ http://search.cpan.org/~ajpage/Bio-Roary-2.0.7/lib/Bio/Roary/JobRunner/Parallel.pm
+
+* GNU Parallel was used in: Rapid haploid SNP calling
+ https://github.com/tseemann/snippy
+
+* Pictures and Metadata
+ http://www.ozzy.no/2015/02/05/pictures-and-metadata/
+
+* Task automation with bash and parallel
+ https://biowize.wordpress.com/2015/03/23/task-automation-with-bash-and-parallel/
+
+* How To: Speed Up File Transfers in Linux using RSync with GNU
+ Parallel
+ http://www.yourownlinux.com/2015/04/speed-up-file-transfers-using-rsync-with-gnu-parallel.html
+
+* Mirroring a Git Repository
+ https://avacariu.me/articles/mirroring-a-git-repository.html
+
+* Add an ENVI header to JAXA Global Mangrove Watch PALSAR tiles
+ https://spectraldifferences.wordpress.com/2015/03/24/add-an-envi-header-to-jaxa-global-mangrove-watch-palsar-tiles/
+
+* What is the most impressive command-line program you have ever seen?
+  http://www.zhihu.com/question/29257300
+
+* Bug fixes and man page updates.
+
+
+20150322
+
+New in this release:
+
+* --number-of-cores respects 'taskset' on GNU/Linux.
+
+* --joblog --pipe gives the data sent and received in the log.
+
+* GNU Parallel was tested to support 100 GB sized records in --pipe. A
+  few bugs were fixed to support >2 GB records. It works, but is
+  rather slow.
+
+* GNU Parallel was cited in: RIG: Recalibration and Interrelation of
+ genomic sequence data with the GATK
+ http://www.g3journal.org/content/early/2015/02/13/g3.115.017012.full.pdf+html
+
+* GNU Parallel was cited in: MPI-blastn and NCBI-TaxCollector:
+ Improving metagenomic analysis with high performance classification
+ and wide taxonomic attachment
+ http://www.worldscientific.com/doi/abs/10.1142/S0219720014500139?af=R&
+
+* GNU Parallel was cited in: Recent evolution in Rattus norvegicus is
+ shaped by declining effective population size
+ http://biorxiv.org/content/biorxiv/early/2015/03/01/015818.full.pdf
+
+* GNU Parallel was cited in: Evidence for DCO+ as a probe of
+ ionization in the warm disk surface
+ http://arxiv.org/pdf/1503.02659.pdf
+
+* GNU Parallel was cited in: De novo assembly and annotation of the
+ Asian tiger mosquito (Aedes albopictus) repeatome with dnaPipeTE
+ from raw genomic reads and comparative analysis with the yellow
+ fever mosquito (Aedes aegypti)
+ http://gbe.oxfordjournals.org/content/early/2015/03/11/gbe.evv050.full.pdf
+
+* GNU Parallel was cited in: A General Approach to Network
+ Configuration Analysis
+ http://research.microsoft.com/en-us/um/people/ratul/papers/nsdi2015-batfish.pdf
+
+* GNU Parallel was cited in: Scrimer: designing primers from
+ transcriptome data
+ http://onlinelibrary.wiley.com/doi/10.1111/1755-0998.12403/pdf
+
+* GNU Parallel was cited in: Efficient Retrieval of Key Material for
+ Inspecting Potentially Malicious Traffic in the Cloud
+ http://sacko.uk/pdf/2015.1.pdf
+
+* GNU Parallel will be presented at Strataconf: Poor Man's Parallel
+ Pipelines
+ http://strataconf.com/big-data-conference-uk-2015/public/schedule/detail/40031
+
+* GNU Parallel was used in:
+ https://github.com/alexbyrnes/FCC-Political-Ads_The-Code
+
+* GNU Parallel was used in: https://github.com/martymac/fpart
+
+* GNU Parallel was used in:
+ https://github.com/hoytak/diabetic-retinopathy-code
+
+* GNU Parallel was used in:
+ https://github.com/mehmattski/HybSeqPipeline
+
+* GNU Parallel was used in:
+ http://search.cpan.org/~ajpage/Bio-Roary-2.0.0/lib/Bio/Roary/JobRunner/Parallel.pm
+
+* Using GNU Parallel on a Raspberry Pi cluster:
+ http://www.dcglug.org.uk/cluster-progress/
+
+* An introduction to vector tiling and map reduce in postgis:
+ http://dimensionaledge.com/intro-vector-tiling-map-reduce-postgis/
+
+* Running scripts in parallel with GNU Parallel:
+ http://code.jasonbhill.com/2015/03/
+
+* Mahout: Parallelising the creation of DecisionTrees:
+ http://www.markhneedham.com/blog/2012/12/27/mahout-parallelising-the-creation-of-decisiontrees/
+
+* High-Performance Scientific Computing: Running serial jobs in
+ parallel
+ https://support.scinet.utoronto.ca/education/staticpublic/course178/serial.pdf
+
+* Parallel cardinality on your laptop
+ https://highonscience.wordpress.com/2015/03/15/parallel-cardinality-on-your-laptop/
+
+* Experiments about a better locate using grep
+ http://a3nm.net/blog/better_locate.html
+
+* Homework with GNU Parallel:
+ https://support.scinet.utoronto.ca/education/staticpublic/course178content349.html
+
+* Parallel - Jobs in Skripten parallelisieren:
+ https://slzm.de/blog/linux-tool-des-tages-parallel-jobs-in-skripten-parallelisieren/#more-612
+
+* GNU Parallel, czyli 100% użycia procesora
+ http://matmatyk.blogspot.dk/2015/03/gnu-parallel-czyli-100-uzycia-procesora.html
+
+* Bug fixes and man page updates.
+
+
+20150222
+
+* --tmux has gotten a major overhaul.
+
+* GNU Parallel was cited in: RaftLib: A C++ Template Library for High
+ Performance Stream Parallel Processing
+ http://www.cs.wustl.edu/~lip/pubs/pmam15_jbeard.pdf
+
+* GNU Parallel was cited in: Quantifying properties of hot and dense
+ QCD matter through systematic model-to-data comparison
+ http://arxiv.org/pdf/1502.00339.pdf
+
+* GNU Parallel was cited in: Towards Collaborative Exploration and
+ Analysis of Big Data from Mars: A Noachis Terra Case Study
+ http://link.springer.com/chapter/10.1007/978-3-319-13865-7_25
+
+* GNU Parallel was used (unfortunately with wrong citation) in:
+ Performance and Scaling Comparison Study of RDBMS and NoSQL
+ (MongoDB)
+ http://ijact.in/wp-content/uploads/2014/11/COMPUSOFT-311-1270-1275.pdf
+
+* GNU Parallel was used (unfortunately without citation) in: Parallel
+ Implementation of Big Data Pre-Processing Algorithms for Sentiment
+ Analysis of Social Networking Data
+ http://www.researchmathsci.org/IJFMAart/ijfma-v6n2-7.pdf
+
+* GNU Parallel was used (unfortunately without citation) in: SpeedSeq:
+ Ultra-fast personal genome analysis and interpretation
+ http://biorxiv.org/content/biorxiv/early/2014/12/05/012179.full.pdf
+
+* Zip Folders with GNU Parallel
+ http://fazky.github.io/posts/Linux/2015-01-07-GNU-Parallel.html
+
+* Using GNU Parallel with Freesurfer
+ http://programminginadarkroom.blogspot.dk/2015/02/using-gnu-parallel-with-freesurfer.html
+
+* GNU Parallel is used in Velociraptor: https://github.com/ericwhyne/Velociraptor
+
+* Marcus Beach GNU Parallel http://marcusbeach.co/gnu-parallel/
+
+* Bug fixes and man page updates.
+
+
+20150122
+
+* Remote jobs now send stderr (standard error) to stderr (standard
+ error) instead of stdout (standard output).
+
+* Remote execution command is now packed using base64 encoded
+  bzip2. This means that big environments (approx. 100 KB) can be
+  transferred. This change makes remote execution alpha quality.
+
+* --semaphoretimeout now takes a negative value. This means give up
+  if the semaphore is not acquired within the timeout.
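+  A minimal sketch ('mycmd' is a placeholder):
+    sem --id mylock --semaphoretimeout -10 mycmd
+  gives up without running mycmd if the semaphore is not acquired
+  within 10 seconds.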
+
+* --halt -1 and -2 now mean halt if a job succeeds (so the opposite
+  of 1 and 2).
+
+* --no-keep-order will reverse --keep-order.
+
+* Bash's second fix of shellshock caused --env to break again when
+ exporting functions. This has been fixed again.
+
+* A semi-large refactoring of big functions: all non-trivial functions
+  are now less than 100 lines. The refactoring makes this release beta
+  quality.
+
+* A description of the design decisions for GNU Parallel can be found
+ in 'man parallel_design'.
+
+* A bug fix in replacement strings caused rewrite of the replacement
+ function. This makes use of replacement strings alpha quality.
+
+* GNU Parallel was cited in: Improving Energy and Performance with
+ Spintronics Caches in Multicore Systems
+ http://link.springer.com/chapter/10.1007/978-3-319-14313-2_24
+
+* GNU Parallel was cited in: Parallel post-processing with MPI-Bash
+ http://dl.acm.org/citation.cfm?id=2691137
+
+* GNU Parallel was cited in: Distinguishing cause from effect using
+ observational data: methods and benchmarks
+ http://arxiv-web3.library.cornell.edu/pdf/1412.3773.pdf
+
+* GNU Parallel was cited in: Data Science at the Command Line
+ http://cdn.oreillystatic.com/oreilly/booksamplers/9781491947852_sampler.pdf
+
+* GNU Parallel was cited in: Resolution of Genetic Map Expansion
+ Caused by Excess Heterozygosity in Plant Recombinant Inbred
+ Populations http://www.g3journal.org/content/4/10/1963.full.pdf
+
+* GNU Parallel was cited in: Bayesian Inference of Protein Structure
+ from Chemical Shift Data https://peerj.com/preprints/692v1.pdf
+
+* GNU Parallel was cited in: Clonal interference and Mullerʼs ratchet
+ in spatial habitats http://iopscience.iop.org/1478-3975/11/5/056003
+
+* GNU Parallel was cited in: Computação Verde na Camada de Aplicação
+ https://linux.ime.usp.br/~tpaiva/mac0499/files/monografia.pdf
+
+* GNU Parallel was cited in: Frameshift alignment: statistics and
+ post-genomic applications
+ http://bioinformatics.oxfordjournals.org/content/30/24/3575.full.pdf+html
+
+* GNU Parallel was cited in: Towards Recommender Engineering Tools and
+ Experiments for Identifying Recommender Differences
+ http://elehack.net/research/thesis/mde-thesis.pdf
+
+* GNU Parallel was cited in: Speaker Environment Classification Using
+ Rhythm Metrics in Levantine Arabic Dialect
+ http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6923918
+
+* GNU Parallel was cited in: A Formal Proof Of The Kepler Conjecture
+ http://arxiv.org/pdf/1501.02155.pdf
+
+* GNU Parallel was cited in: Allocation Policy Analysis for Cache
+ Coherence Protocols for STT-MRAM-based caches
+ http://conservancy.umn.edu/bitstream/handle/11299/169362/Nandkar_umn_0130M_15551.pdf
+
+* GNU Parallel was cited in: Using Trusted Platform Module for
+ securing virtual environment access in Cloud
+ http://dar.aucegypt.edu:8080/bitstream/handle/10526/4237/Using%20Trusted%20Platform%20Module%20for%20Securing%20Virtual%20Environment%20Access%20in%20Cloud.pdf
+
+* GNU Parallel was cited in: Weakly Supervised Learning for
+ Compositional Sentiment Recognition
+ http://computerlinguist.org/static/downloads/ma-thesis/Master_Thesis-Michael_Haas-Weakly_Supervised_Learning_for_Compositional_Sentiment_Recognition.pdf
+
+* GNU Parallel was cited in: Agility and Performance in Elastic
+ Distributed Storage http://dl.acm.org/citation.cfm?id=2668129
+
+* GNU Parallel was used (unfortunately without citation) in: Large
+ Scale Author Name Disambiguation in Digital Libraries
+ http://semanticommunity.info/@api/deki/files/31523/P227_3546.pdf
+
+* GNU Parallel is used in BEDOPS:
+ https://media.readthedocs.org/pdf/bedops/v2p5p0/bedops.pdf
+
+* Job ad asking for GNU Parallel skills:
+ http://www.aplitrak.com/?adid=dXMzMTYxMjQuMzg0MTkuMTU1MEBtYW5wb3dlcmZveG5hLmFwbGl0cmFrLmNvbQ#sthash.kYCIhhty.KcoT6HnJ.dpbs
+
+* GNU Parallel: Open Source For You (OSFY) magazine, October 2013
+ edition
+ http://www.shakthimaan.com/posts/2014/11/27/gnu-parallel/news.html
+
+* Multithreaded Encryption and Compression
+ http://www.krazyworks.com/multithreaded-encryption-and-compression/
+
+* GNU Parallel on OSX and Linux
+ https://darknightelf.wordpress.com/2015/01/01/gnu-parallel-on-osx/
+
+* How to build gnu parallel from source on Ubuntu PC?
+ http://blog.ovidiuparvu.com/build-gnu-parallel-source-ubuntu-pc/
+
+* GNU parallel for openaddr-process-one
+ https://nelsonslog.wordpress.com/2015/01/09/gnu-parallel-for-openaddr-process-one/
+
+* From SRA Project to FastQ http://danielecook.com/sra-project-fastq/
+
+* Software Development Tooling
+ http://www.computer.org/csdl/mags/so/2014/06/mso2014060021.pdf
+
+* コマンドを並列に実行するGNU parallelがとても便利
+ http://bicycle1885.hatenablog.com/entry/2014/08/10/143612
+
+* Bug fixes and man page updates.
+
+
+20141122
+
+* Remote systems can be divided into hostgroups (e.g. web and db) by
+ prepending '@groupname/' to the sshlogin. Multiple groups can be
+ given by separating groups with '+'. E.g. @web/www1 @web+db/www2
+ @db/mariadb
+
+* Remote execution can be restricted to servers that are part of one
+ or more groups by '@groupname' as an sshlogin. Multiple groups can
+ be given by separating groups with '+'. E.g. -S @web or -S @db+web
+
+* With --hostgroup you can restrict arguments to certain hostgroups by
+ appending '@groupname' to the argument. Multiple groups can be
+ given by separating groups with '+'. E.g. my_web_arg@web
+ db-or-web-arg@db+web db-only-arg@db Thanks to Michel Courtine for
+ developing a prototype for this.
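+  An illustrative sketch (www1 and db1 are made-up hostnames):
+    parallel --hostgroup -S @web/www1 -S @db/db1 echo \
+      ::: job-a@web job-b@db
+  runs job-a only on web-group hosts and job-b only on db-group
+  hosts.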
+
+* GNU Parallel was cited in: HTSeq-Hadoop: Extending HTSeq for
+ Massively Parallel Sequencing Data Analysis using Hadoop
+ http://essenceofescience.se/wp-content/uploads/2014/11/Siretskiy.pdf
+
+* GNU Parallel was cited in: SlideToolkit: An Assistive Toolset for
+ the Histological Quantification of Whole Slide Images
+ http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0110289
+
+* GNU Parallel was cited in: Exploring a multiprocessor design space
+ to analyze the impact of using STT-RAM in the memory hierarchy
+ http://conservancy.umn.edu/bitstream/handle/11299/167286/Borse_umn_0130M_15431.pdf
+
+* Command-Line OCR with Tesseract on Mac OS X
+ https://ryanfb.github.io/etc/2014/11/13/command_line_ocr_on_mac_os_x.html
+
+* Bug fixes and man page updates.
+
+
+20141022
+
+* --env was changed to deal with Bash's new way of exporting a function.
+
+* GNU Parallel was cited in: Exclusion of cosmic rays in
+ protoplanetary disks. II. Chemical gradients and observational
+ signatures http://iopscience.iop.org/0004-637X/794/2/123/article
+
+* GNU Parallel was cited in: Application of Machine Learning to
+ Algorithm Selection for TSP
+ http://www.dbai.tuwien.ac.at/staff/musliu/art_ictai_cam.pdf
+
+* GNU Parallel was cited in: Performance Models for LTE-Advanced
+ Random Access
+ http://repository.asu.edu/attachments/137242/content/Tyagi_asu_0010E_14116.pdf
+
+* GNU Parallel was used (unfortunately without citation) in: De Novo
+ Insertions and Deletions of Predominantly Paternal Origin Are
+ Associated with Autism Spectrum Disorder
+ http://www.cell.com/cms/attachment/2019079667/2039310868/mmc1.pdf
+
+* GNU Parallel was determined 'popular' by Statistical Analysis of
+ Popular Open Source Software Projects and Their Communities
+ http://andiwre.itmaranatha.org/prosiding/ICITEE2014/PaperAndiWRE-ICITEE2014.pdf
+
+* GNU Parallel is demonstrated in: Data Science at the Command Line:
+ Facing the Future with Time-Tested Tools
+
+* GNU Parallel was covered in a talk at Sydney Perl Mongers group
+ http://www.openfusion.net/talks/pwp/#24
+
+* GNU Parallel and how to get started with it
+ http://www.jduck.net/blog/2014/09/30/gnu-paralell/
+
+* Behat/Selenium/Parallel
+ http://jaffamonkey.com/behat-selenium-parallel/
+
+* Comparing Golang, Scala, Elixir and Ruby for ETL
+ http://www.dimroc.com/2014/09/29/etl-language-showdown/
+
+* Assemble a list of which of these hosts are up or down
+ http://opsfun.wordpress.com/2014/03/31/gnu-parallel/
+
+* Running commands on many nodes simultaneously
+ http://opsfun.wordpress.com/2014/03/31/19/
+
+* Ubuntu 13.04でGNU ParallelをGNUモードで動かす
+ http://qiita.com/YusukeSuzuki@github/items/7b96b5876bf980f21e85
+
+* Bug fixes and man page updates.
+
+
+20140922
+
+* If the file given as --sshloginfile is changed it will be re-read
+ when a job finishes though at most once per second. This makes it
+ possible to add and remove hosts while running.
+
+* Brutha uses GNU Parallel https://pypi.python.org/pypi/brutha/1.0.2
+
+* OCRmyPDF uses GNU Parallel https://github.com/fritz-hh/OCRmyPDF/
+
+* GNU Parallel was presented at Balti and Bioinformatics "On-Air"
+ http://youtu.be/UtXlr19xTh8?t=2h5m0s
+
+* Pleiades Plus uses GNU Parallel
+ https://github.com/ryanfb/pleiades-plus
+
+* Imagemagick and GNU Parallel
+ http://deepdish.io/2014/09/15/gnu-parallel/
+
+* GNU Parallel (Sebuah Uji Coba)
+ http://pr4ka5a.wordpress.com/2014/09/04/gnu-parallel-sebuah-uji-coba/
+
+* GNU Parallel: 并行执行Linux命令
+ http://blog.csdn.net/xzz_hust/article/details/39183837
+
+
+20140822
+
+* GNU Parallel now uses the same shell it was started from as the
+ command shell for local jobs. So if GNU Parallel is started from
+ tcsh it will use tcsh as its shell even if the login $SHELL is
+ different. For remote jobs the login $SHELL will be used.
+
+* The whole current environment in bash can be copied by using a
+  shell wrapper function (search the manual for env_parallel).
+
+* --plus adds the replacement strings {+/} {+.} {+..} {+...} {..}
+ {...} {/..} {/...}. The idea being that '+foo' matches the opposite
+ of 'foo' and {} = {+/}/{/} = {.}.{+.} = {+/}/{/.}.{+.} = {..}.{+..}
+ = {+/}/{/..}.{+..} = {...}.{+...} = {+/}/{/...}.{+...}
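+  For example:
+    parallel --plus echo {..} {+..} ::: archive.tar.gz
+  prints 'archive tar.gz'.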
+
+* GNU Parallel now deals correctly with the combination of an
+  rsync-3.1.X client and an rsync-2.5.7 server.
+
+* GNU Parallel was cited in: A Web Service for Scholarly Big Data
+ Information Extraction
+ http://patshih.ist.psu.edu/publications/Williams-CiteSeerExtractor-ICWS14.pdf
+
+* Comparison of the speed of different GNU Parallel versions:
+ http://lists.gnu.org/archive/html/parallel/2014-08/msg00030.html
+
+* GNU Parallel was covered in the webcast 2014-08-20: Data Science at
+ the Command Line http://www.oreilly.com/pub/e/3115
+
+* Distributed processing with GNU parallel
+ http://kazjote.eu/2014/08/11/distributed-processing-with-gnu-parallel
+
+* A Peek into GNU Parallel
+ http://blog.dataweave.in/post/94238943763/a-peek-into-gnu-parallel
+
+* Сборка GNU parallel для CentOS/RHEL
+ http://www.stableit.ru/2014/07/gnu-parallel-centosrhel.html
+
+* Bug fixes and man page updates.
+
+
+20140722
+
+* {= perl expression =} can be used as replacement string. The
+ expression should modify $_. E.g. {= s/\.gz$// =} to remove .gz from
+ the string. This makes replacement strings extremely flexible.
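+  For example:
+    parallel echo {= s/\.gz$// =} ::: data.gz
+  prints 'data'.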
+
+* Positional perl expressions (similar to {2}) are given as {=2 perl
+ expression=} where 2 is the position.
+
+* One small backwards incompatibility: {1}_{2} will replace {2} with
+ the empty string if there is only one argument. Previously {2} would
+ have been left untouched.
+
+* Replacement strings can be defined using --rpl. E.g. parallel --rpl
+ '{.gz} s/\.gz$//' echo {.gz} ::: *.gz
+
+* The parenthesis around {= perl expression =} can be changed with
+ --parens.
+
+* --tmux will direct the output to a tmux session instead of
+  files. Each running job will be in its own window.
+
+* --halt 10% will stop spawning new jobs if 10% failed so far.
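+  An illustrative sketch ('./task' is a placeholder):
+    parallel --halt 10% ./task ::: input*
+  stops starting new jobs once 10% of the finished jobs have failed.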
+
+* GNU Parallel was cited in: bammds: A tool for assessing the ancestry
+ of low depth whole genome data using multidimensional scaling (MDS)
+ http://bioinformatics.oxfordjournals.org/content/early/2014/06/28/bioinformatics.btu410.abstract
+
+* GNU Parallel was cited in: Molecular ferroelectric contributions to
+ anomalous hysteresis in hybrid perovskite solar cells
+ http://people.bath.ac.uk/aw558/publications/2014/arxiv_hysteresis_14.pdf
+
+* GNU Parallel was cited in: Energy Sorghum-a genetic model for the
+ design of C4 grass bioenergy crops
+ http://jxb.oxfordjournals.org/content/early/2014/06/20/jxb.eru229.short
+
+* GNU Parallel was cited in: Web-scale Content Reuse Detection
+ www.isi.edu/~johnh/PAPERS/Ardi14a.pdf
+
+* Tell your friends to sign up for the Webcast at 2014-08-20 covering
+ GNU Parallel: Data Science at the Command Line
+ http://www.oreilly.com/pub/e/3115
+
+* GNU Parallel all the things!
+ http://longwayaround.org.uk/notes/gnu-parallel-all-the-things/
+
+* Shell command composition and dispatch
+ http://lukeluo.blogspot.dk/2014/07/linux-virtual-console6-shell-command.html
+
+* Parallelising plink (or anything else) the easy way
+ http://chrisladroue.com/2012/03/parallelising-plink-or-anything-else-the-easy-way/
+
+* Easy and cheap cluster building on AWS
+ https://grapeot.me/easy-and-cheap-cluster-building-on-aws.html
+
+* Paralelizace běžných činností v konzoli pomocí GNU Parallel
+ http://www.abclinuxu.cz/clanky/paralelizace-beznych-cinnosti-v-konzoli-pomoci-gnu-parallel
+
+* [原] Ubuntu 下使用 parallel 命令的注意事项
+ http://blog.ailms.me/2014/06/28/ubuntu-with-parallel.html
+
+* 简单的并行, 可使用GNU parallel
+ http://jerkwin.github.io/2014/06/10/GNU-parallel/
+
+* Bug fixes and man page updates.
+
+
+20140622
+
+* --shebang and --shebang-wrap now work on FreeBSD.
+
+* --pipepart works with --header, --regexp, --cat and --fifo.
+
+* ./configure --disable-documentation will not build documentation.
+
+* {%} works as job slot.
+
+* GNU Parallel was cited in: Energy Efficient, High-speed
+ Communication in WSNs
+ https://gupea.ub.gu.se/bitstream/2077/35801/1/gupea_2077_35801_1.pdf
+
+* GNU Parallel was cited in: Ferroelectric contributions to anomalous
+ hysteresis in hybrid perovskite solar cells
+ http://arxiv.org/pdf/1405.5810.pdf
+
+* Processes Paralleling to Speed up Computing and Tasks Execution in
+ Linux
+ http://kukuruku.co/hub/nix/processes-paralleling-to-speed-up-computing-and-tasks-execution-in-linux
+
+* Moving / Copying lots of s3 files quickly using gnu parallel
+ http://blog.aclarke.eu/moving-copying-lots-of-s3-files-quickly-using-gnu-parallel/
+
+* Speeding up grep log queries with GNU Parallel
+ http://www.tripwire.com/state-of-security/incident-detection/speeding-grep-queries-gnu-parallel/
+
+* Ubuntu - Gnu parallel - It's awesome
+ http://kasunweranga.blogspot.dk/2014/06/ubuntu-gnu-parallel-its-awesome.html
+
+* Export quicklooks with vector overlay using TuiView
+ http://spectraldifferences.wordpress.com/2014/03/08/export-quicklooks-with-vector-overlay-using-tuiview/
+
+* GNU Parallel指南 http://my.oschina.net/enyo/blog/271612
+
+* GNU parallelで違和感 http://jarp.does.notwork.org/diary/201405c.html#201405301
+
+* Bug fixes and man page updates.
+
+
+20140522
+
+* Use --semaphoretimeout to ignore the semaphore lock after a while.
+
+* {%} introduced as job slot replacement string. It has known bugs.
+
+* --slotreplace changes slot replacement string.
+
+* GNU Parallel officially in Solaris 11.2beta
+ https://blogs.oracle.com/alanc/entry/solaris_11_2_changes_to
+
+* GNU Parallel was cited in: A Comparison of Antenna Placement
+ Algorithms http://abhinavjauhri.com/publications/eap.pdf
+
+* Using GNU Parallel on a PBS cluster (nice use of --shebang-wrap)
+ https://docs.it4i.cz/anselm-cluster-documentation/resource-allocation-and-job-execution/capacity-computing
+
+* BioLite uses GNU Parallel: https://bitbucket.org/caseywdunn/biolite
+
+* Isitdown uses GNU Parallel: http://www.isi.edu/~calvin/isitdown.md
+
+* Convert FLAC to MP3 with GNU parallel
+ http://www.blogobramje.nl/posts/Convert_FLAC_to_MP3_with_GNU_parallel/
+
+* JD Edwards Business Function Documentation
+ http://www.azertech.net/content/jd-edwards-business-function-documentation
+
+* Tools of the trade http://blog.eikeland.se/2014/05/01/tools-of-the-trade/
+
+* How I start a bioinformatics project
+ http://nickloman.github.io/2014/05/14/how-i-start-a-bioinformatics-project/
+
+* No:The-Me GNU Parallel http://adityalaghate.in/gnu-parallel.html
+
+* Bug fixes and man page updates.
+
+
+20140422
+
+* --pipepart is a highly efficient alternative to --pipe if the input
+ is a real file and not a pipe.
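+  A minimal sketch ('bigfile' is a placeholder):
+    parallel --pipepart -a bigfile --block 100M wc -l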
+
+* If using --cat or --fifo with --pipe the {} in the command will be
+ replaced with the name of a physical file and a fifo respectively
+ containing the block from --pipe. Useful for commands that cannot
+ read from standard input (stdin).
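+  An illustrative sketch ('bigfile' is a placeholder):
+    cat bigfile | parallel --pipe --cat wc {}
+  where {} is replaced by a temporary file holding each block.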
+
+* --controlmaster has gotten an overhaul and is no longer
+ experimental.
+
+* --env is now copied when determining CPUs on the remote system.
+  Useful for copying $PATH if parallel is not in the normal path.
+
+* --results now chops the argument if the argument is longer than the
+ allowed path length.
+
+* Build now survives if pod2* are not installed.
+
+* The git repository now contains tags of releases.
+
+* GNU Parallel was cited in: Proactive System for Digital Forensic
+ Investigation
+ http://dspace.library.uvic.ca:8080/bitstream/handle/1828/5237/Alharbi_Soltan_PhD_2014.pdf
+
+* GNU Parallel was cited in: Beyond MAP estimation with the
+ track-oriented multiple hypothesis tracker
+ http://ieeexplore.ieee.org/xpl/abstractReferences.jsp?tp=&arnumber=6766651&url=http%3A%2F%2Fieeexplore.ieee.org%2Fxpls%2Fabs_all.jsp%3Farnumber%3D6766651
+
+* GNU Parallel was cited in: Prokka: rapid prokaryotic genome
+ annotation
+ http://bioinformatics.oxfordjournals.org/content/early/2014/03/18/bioinformatics.btu153.short
+
+* GNU Parallel was used (unfortunately with improper citation) in:
+ Perspectives in magnetic resonance: NMR in the post-FFT era
+ http://www.sciencedirect.com/science/article/pii/S1090780713003054
+
+* GNU Parallel is used in https://github.com/cc2qe/speedseq
+
+* Batch XML validation at the command line.
+ http://horothesia.blogspot.dk/2014/04/batch-xml-validation-at-command-line.html
+
+* freebayes-parallel uses GNU Parallel
+ https://github.com/ekg/freebayes/commit/31ee997984cebe8a196381c3de57e618e34a2281
+
+* Org-mode with Parallel Babel http://draketo.de/english/emacs/parallel-babel#sec-2
+
+* Speeding Up Grep Log Queries with GNU Parallel
+ http://www.cybersecurity.io/speeding-grep-queries-gnu-parallel/
+
+* How to run tbss_2_reg in parallel
+ http://tadpolebrainimaging.blogspot.dk/2014/03/how-to-run-tbss2reg-in-parallel.html
+
+* GNU parallel example: blastn https://asciinema.org/a/8775
+
+* Iterative DNS Brute Forcing
+ http://www.room362.com/blog/2014/02/19/iterative-dns-brute-forcing/
+
+* Ejecutando comandos en paralelo
+ http://jesusmercado.com/guias/ejecutando-comandos-en-paralelo/
+
+* Ejecutando en paralelo en bash (ejemplo con rsync)
+ http://eithel-inside.blogspot.dk/2014/04/ejecutando-en-paralelo-en-bash-ejemplo.html
+
+* Bug fixes and man page updates.
+
+
+20140322
+
+* Official package for Alpine Linux now exists:
+ http://alpinelinux.org/apk/main/x86/parallel
+
+* GNU Parallel was cited in: Estimating uncertainties in statistics
+ computed from direct numerical simulation
+ http://dx.doi.org/10.1063/1.4866813
+
+* GNU Parallel was cited in: Vehicle routing problems with time
+ windows and multiple service workers: a systematic comparison
+ between ACO and GRASP
+ http://link.springer.com/article/10.1007/s10100-014-0341-z
+
+* GNU Parallel, quick and easy
+ http://www.reganmian.net/blog/2014/03/09/gnu-parallel-quick-and-easy/
+
+* Using Multiple CPU Cores With Command Line Tools
+ http://prateekvjoshi.com/2014/03/07/using-multiple-cpu-cores-with-command-line-tools/
+
+* Batch convert mp3 CBR to VBR
+ http://blogs.zercle.com/bouroo/2013/03/batch-convert-mp3-cbr-to-vbr/
+
+* Running Behat in parallel with Selenium grid
+ http://zalas.eu/running-behat-in-parallel-with-selenium-grid/
+
+* Git-Mirror per cronjob aktuell halten
+ https://www.schtobia.de/2013/git-mirror-per-cronjob-aktuell-halten/
+
+* Пишем многопоточный скрипт на bash
+ http://dafter.ru/duf/howto/130.html
+
+* 如何利用多核CPU来加速你的Linux命令
+ http://www.chinadmd.com/file/xpcr6wpctzwtuu3ivwiaatcs_1.html
+
+* Bug fixes and man page updates.
+
+
+20140222
+
+* --tollef has been retired.
+
+* --compress has been redesigned due to bugs.
+
+* Format of --eta and --joblog has changed slightly.
+
+* GNU Parallel was cited in: Speaker Verification and Keyword Spotting
+ Systems for Forensic Applications
+ http://lantana.tenet.res.in/website_files/thesis/Phd/srikanth.pdf
+
+* GNU Parallel was cited in: Scaleable Code Clone Detection
+ http://scg.unibe.ch/archive/phd/schwarz-phd.pdf
+
+* GNU Parallel was cited in: Clone Detection that scales
+ http://scg.unibe.ch/archive/masters/Vogt14a.pdf
+
+* GNU Parallel was cited in: Scalable Positional Analysis for Studying
+ Evolution of Nodes in Networks
+ http://arxiv-web3.library.cornell.edu/pdf/1402.3797
+
+* GNU Parallel was cited in: Aggregate Characterization of User
+ Behavior in Twitter and Analysis of the Retweet Graph
+ http://arxiv.org/pdf/1402.2671v1
+
+* GNU Parallel was used (unfortunately without citation) in: Causal
+ Network Models for Predicting Compound Targets and Driving Pathways
+ in Cancer
+ http://jbx.sagepub.com/content/early/2014/02/10/1087057114522690.full
+
+* GNU Parallel was used (unfortunately without citation) in:
+ Performance and Energy Efficiency of Common Compression /
+ Decompression Utilities
+ http://www.researchgate.net/publication/243962643_Performance_and_Energy_Efficiency_of_Common_CompressionDecompression_Utilities_An_Experimental_Study_in_Mobile_and_Workstation_Computer_Platforms/file/3deec51d1dbc0474f9.pdf
+
+* GNU Parallel was used (unfortunately without citation) in: SpringFS:
+ Bridging Agility and Performance in Elastic Distributed Storage
+ http://www.istc-cc.cmu.edu/publications/papers/2014/SpringFS-FAST14.pdf
+
+* GNU Parallel was recommended (without citation) in: Redesigning the
+ Specificity of Protein-DNA Interactions with Rosetta
+ http://link.springer.com/protocol/10.1007/978-1-62703-968-0_17
+
+* GNU Parallel is co-distributed with RepeatExplorer
+ http://www.vcru.wisc.edu/simonlab/bioinformatics/programs/repeatexplorer/README.txt
+
+* Convert MP3 to a scrolling spectrum waterfall plot video
+ http://jdesbonnet.blogspot.dk/2014/02/convert-mp3-to-scrolling-spectrum.html
+
+* PHP wrapper class for the GNU Parallel tool
+ https://github.com/geopal-solutions/gnu-parallel-wrapper/tree/master
+
+* Exploratory Data Analysis http://www.slideshare.net/thinrhino/gnunify
+
+* Copias de archivos con rsync y parallel
+ http://linuxmanr4.com/2014/02/20/copias-de-archivos-con-rsync-y-parallel
+
+* Single-Thread-Programme auf Multicore-Rechnern parallelisieren
+ http://www.adlerweb.info/blog/2014/02/08/linux-single-thread-programme-auf-multicore-rechnern-parallelisieren
+
+* Распараллеливаем процессы для ускорения вычислений и выполнения
+ заданий в Linux http://savepearlharbor.com/?p=210480
+
+* Bug fixes and man page updates.
+
+
+20140122
+
+* GNU Parallel was cited in: On the likelihood of multiple bit upsets
+ in logic circuits
+ http://arxiv-web3.library.cornell.edu/pdf/1401.1003
+
+* HaploClique uses GNU Parallel
+ https://github.com/armintoepfer/haploclique
+
+* Scraping NSScreencast
+ https://blog.nicolai86.eu/posts/2014-01-12/scraping-nsscreencast/
+
+* 30 Cool Open Source Software I Discovered in 2013
+ http://www.cyberciti.biz/open-source/30-cool-best-open-source-softwares-of-2013/
+
+* [Unix] 13. The power of doing things in Parallel
+ http://leetaey.tistory.com/384
+
+* Parallel the execution of a job that read from stdin
+ http://www.linuxask.com/questions/parallel-the-execution-of-a-job-that-read-from-stdin
+
+* Mon Make à moi (6:38-11:50)
+ http://videos.rennes.inria.fr/ReNaBI-GO2013/indexPierreLindenbaum.html
+
+* Shell-Abarbeitung beschleunigen: Wie Sie mit parallelen Prozesse
+ effizienter in der Shell arbeiten
+ https://www.hosteurope.ch/blog/shell-abarbeitung-beschleunigen-wie-sie-mit-parallelen-prozesse-effizienter-in-der-shell-arbeiten/
+
+* Summary of GNU Parallel tutorial
+ http://hacktracking.blogspot.dk/2014/01/gnu-parallel-tutorial.html
+
+* Bug fixes and man page updates.
+
+
+20131222
+
+* GNU Parallel now has a motto: For people who live life in the
+ parallel lane.
+
+* Detect if the buffer dir $TMPDIR runs out of space. This changes a
+ central piece of code, making this release alpha quality.
+
+* --keep-order --(n)onall will sort according to the sshlogin.
+
+* Detect the number of CPUs on DEC Tru64.
+
+* GNU sql: --list-tables for sqlite3.
+
+* GNU Parallel was cited in: Variational Message-Passing: Extension to
+ Continuous Variables and Applications in Multi-Target Tracking
+ http://www.ics.uci.edu/~ajfrank/pubs/thesis.pdf
+
+* GNU Parallel was cited in: Genome Assembly: Scaffolding Guided by
+ Related Genomes
+ https://www.duo.uio.no/handle/10852/37431
+
+* GNU Parallel was cited in: Metagenomic analysis of Mariana Trench
+ sediment samples
+ http://repositorio.ul.pt/handle/10451/9436
+
+* GNU Parallel was cited in: Diacritization, automatic segmentation
+ and labeling for Levantine Arabic speech
+ http://ieeexplore.ieee.org/xpl/articleDetails.jsp?tp=&arnumber=6642556
+
+* GNU Parallel was cited in: Investigation of emotion classification
+ using speech rhythm metrics
+ http://ieeexplore.ieee.org/xpl/articleDetails.jsp?tp=&arnumber=6642591
+
+* GNU Parallel was cited in: A Rhythm-Based Analysis of Arabic Native
+ and Non-Native Speaking Styles
+ http://www.ijsps.com/uploadfile/2013/1128/20131128014413279.pdf
+
+* GNU Parallel was cited in: Evolutionary optimization of wetlands
+ design
+ http://dl.acm.org/citation.cfm?id=2480400
+
+* GNU Parallel was cited (unfortunately with wrong citation) in:
+ RDBMS vs NoSQL: Performance and Scaling Comparison
+ http://www.epcc.ed.ac.uk/sites/default/files/Dissertations/2012-2013/RDBMS%20vs%20NoSQL%20-%20Performance%20and%20Scaling%20Comparison.pdf
+
+* GNU Parallel was used (without proper citation) in: Comprehensive
+ Analysis Pipeline for Discovery of Human Genetic Variation
+ http://www.google.com/patents/US20130311106
+
+* Encrypt and decrypt in parallel with GPG
+ http://blog.tkassembled.com/412/encrypt-and-decrypt-in-parallel-with-gpg/
+
+* Using GNU Parallel to create a SVM classifier
+ http://ivanyu.me/blog/2013/12/01/parallel/
+
+* Parallel xz compression
+ http://anthon.home.xs4all.nl/rants/2013/parallel_xz/
+
+* Got CPUs to burn? Put 'em to work with GNU parallel
+ http://baoilleach.blogspot.dk/2013/11/got-cpus-to-burn-put-em-to-work-with.html
+
+* Parallel rsync
+ http://pastebin.com/JmnB9ffq
+
+* Gnu Parallel for fun and profit
+ https://gist.github.com/celoyd/f7eb55ad69c9b33fd8c3
+
+* Parallelization of Exploitation
+ http://rantsideasstuff.com/parallelization-of-exploitation/
+
+* Running assemblies in parallel
+ https://khmer-protocols.readthedocs.org/en/v0.8.3/metagenomics/4-assemble.html
+
+* Procesando la contabilidad del PP
+ http://www.neorazorx.com/2013/07/procesando-la-contabilidad-del-pp.html
+
+* Mon make à moi (French)
+ http://www.slideshare.net/lindenb/mon-make-moi-tout-sauf-galaxy
+
+* Bug fixes and man page updates.
+
+
+20131122
+
+* A citation notice is printed on stderr only if stderr is a terminal,
+ the user has not specified --no-notice and the user has not run
+ --bibtex once. This makes the release alpha quality.
+
+* --compress will compress temporary files. If the output is big and
+ very compressible this will take up less disk space in $TMPDIR and
+ possibly be faster due to less disk I/O.
+
+* --compress-program controls which program to use for compressing
+  temporary files.
+
+* --bar shows progress as a progress bar compatible with zenity.
+
+* --resume can now be used with --result: Jobs already run will be
+ skipped.
+
+* --transfer and --basefile support paths relative to the --workdir by
+ inserting /./ into the path.
+
+* GNU Parallel was cited in: fastphylo: Fast tools for phylogenetics
+ http://www.biomedcentral.com/1471-2105/14/334/abstract
+
+* Using GNU parallel
+ http://davetang.org/muse/2013/11/18/using-gnu-parallel/
+
+* Techlux - GNU - Parallel (German)
+ https://techlux.de/blog/2013/11/07/gnu-parallel/
+
+* awk, sed, bzip2, grep, wc на всех ядрах
+ http://vk.com/page-30666517_45528467
+
+* 如何利用多核CPU來加速你的Linux命令 — awk, sed, bzip2, grep, wc等
+ http://www.hksilicon.com/kb/articles/290543/CPULinuxawk-sed-bzip2-grep-wc
+
+* GNU Parallel (Japanese)
+ http://jarp.does.notwork.org/diary/201311b.html#20131117
+
+* Bug fixes and man page updates.
+
+
+20131022
+
+* --transfer: files with /./ in the path will be copied relative
+  to the --workdir.
+
+* The maximal command length is now cached in a file, halving the
+  startup time.
+
+* GNU Parallel was cited in: Investigating speaker gender using rhythm
+ metrics in Arabic dialects
+ http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=6602389
+
+* GNU Parallel was cited in: Tiedon eristäminen ja visualisointi
+  lastensuojelun asiakaskertomuksista
+  https://tampub.uta.fi/handle/10024/84978
+
+* Some Additional Useful Bioinformatics Software
+ http://nix-bio.blogspot.dk/2013/10/additional-software-for-bioinformatics.html
+
+* GNU Parallel Example Screen Cast (bil to Geotiff conversion)
+ http://www.youtube.com/watch?v=PpF27eC7Mys
+
+* Using GNU Parallel to roll-your-own Map Reduce!
+ http://www.rankfocus.com/hello-world/
+
+* Use multiple CPU Cores with your Linux commands
+ http://www.rankfocus.com/use-cpu-cores-linux-commands/
+
+* Using GNU Parallel at HPC @ Uni.lu
+ https://hpc.uni.lu/users/use_cases/
+
+* Scaling up with parallelization
+ https://www.msi.umn.edu/sites/default/files/AdvPython_1.pdf
+
+* How to teach yourself to be a technical tester
+ http://infiniteundo.com/post/62421363463/qa-toc
+
+* Faster Rasters For All
+ http://2013.foss4g.org/conf/programme/presentations/52/
+
+* Optimizing translated file downloads
+ http://www.smartling.com/blog/2013/05/20/optimizing-translated-file-downloads/
+
+* EMC NetWorker parallel saveset cloning with nsrclone and GNU parallel
+ http://www.beebotech.com.au/2013/10/emc-networker-parallel-saveset-cloning-with-nsrclone-and-gnu-parallel/
+
+* Best Practices for Amazon EMR
+ http://media.amazonwebservices.com/AWS_Amazon_EMR_Best_Practices.pdf
+
+* Computing checksums for backup
+ https://identi.ca/evan/note/6yf1GzAARtyBhj__xzMvAg
+
+* Un exemplu de utilizare a programului GNU parallel
+ http://vundicind.blogspot.dk/2013/10/exemplu-utilizare-gnu-parallel.html
+
+* 平行化你的工作
+ http://www.slideshare.net/drakeguan/part1-23705978
+
+* なんか気持ちよかです Gnu Parallel
+ http://a3-lab.jpn.org/note/%E3%81%AA%E3%82%93%E3%81%8B%E6%B0%97%E6%8C%81%E3%81%A1%E3%82%88%E3%81%8B%E3%81%A7%E3%81%99-gnu-parallel
+
+* Bug fixes and man page updates.
+
+
+20130922
+
+* PDF files of the documentation are now included.
+
+* Cloning at warp speed 100 VMs in 1 minute using one command line
+ http://www.beebotech.com.au/tag/command-line/
+
+* Calcul Quebec's introduction to GNU Parallel
+ https://wiki.calculquebec.ca/w/GNU_parallel
+
+* Brincando com o GNU parallel
+ http://blog.tadeucruz.com/2013/09/brincando-com-o-gnu-parallel/
+
+* GNU Parallelで並列処理を行う
+ http://qiita.com/PENGUINANA_/items/af27a833e835fe17f09b
+
+* Bug fixes and man page updates.
+
+
+20130822
+
+* A tutorial showing much of GNU Parallel's functionality. Spend an
+ hour walking through the tutorial. Your commandline will love you
+ for it. man parallel_tutorial
+
+* --line-buffer will buffer output on line basis. --group keeps the
+ output together for a whole job. --ungroup allows output to mixup
+ with half a line coming from one job and half a line coming from
+ another job. --line-buffer fits between these two; it prints a full
+ line, but will allow for mixing lines of different jobs.
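+  An illustrative sketch:
+    parallel --line-buffer 'echo {} start; sleep 1; echo {} end' ::: a b
+  may interleave lines from the two jobs, but every printed line
+  comes from a single job.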
+
+* --record-env records all variables and functions set. Useful to
+ record the normal environment before using --env _.
+
+* --env _ exports all functions and variables to the remote server
+ except for the ones recorded.
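+  A hedged sketch ('myfunc' and 'server' are placeholders):
+    parallel --record-env       # run once in a clean login shell
+    myfunc() { echo job "$1"; }; export -f myfunc
+    parallel --env _ -S server myfunc ::: 1 2 3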
+
+* New signing key. Due to recommendations from NIST
+ http://www.keylength.com/en/4/ the signing key was changed from
+ 1024D/ID:FFFFFFF1 to 9888R/ID:88888888.
+
+* Agalma: an automated phylogenomics workflow
+ http://arxiv.org/pdf/1307.6432
+
+* Job ad asking for GNU Parallel expertise
+ http://seattle.craigslist.org/est/sof/4006079352.html
+
+* Transient Beowulf Clustering with GNU Parallel and SSHfs
+ http://www.reddit.com/r/linux/comments/1ka8mn/transient_beowulf_clustering_with_gnu_parallel/
+
+* Aligning to unique regions
+ http://davetang.org/muse/2013/07/22/aligning-to-unique-regions/
+
+* Top 10 scripting tricks for basic bioinformatics
+ http://students.washington.edu/bowmanjs/wordpress/?p=873
+
+* Using GNU Parallel to speed up and simplify data analyzes
+ http://www.sergeymarkov.com/blog/2013/07/using-gnu-parallel-to-speed-up-and-simplify-data-analyzes/
+
+* Compression of files in parallel using GNU parallel
+ http://codextechnicanum.blogspot.dk/2013/07/compression-of-files-in-parallel-using.html
+
+* Using GNU Parallel with Amazon
+ media.amazonwebservices.com/AWS_Amazon_EMR_Best_Practices.pdf
+
+* Some useful comments on GNU Parallel
+ https://news.ycombinator.com/item?id=6209767
+
+* Using GNU Parallel to count reads mapping to genes for multiple BAMs
+ http://drduanehassane.com/blog/sequencing-resources
+
+* TimeMachineっぽいバックアップスクリプト
+ http://rio.tc/2013/07/timemachine-1.html
+
+* GNU ParallelでAndroid NDKの全バージョンを一括ダウンロード
+ http://qiita.com/mazgi/items/b10bf0ff3da2045d19ab
+
+* Bug fixes and man page updates.
+
+
+20130722
+
+* --round-robin with --pipe will write all blocks to already running
+ jobs.
+
+* --env can now transfer Bash functions for remote execution. That
+  is pretty cool!
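+  An illustrative sketch ('server' is a placeholder):
+    doit() { echo "on $(hostname): $1"; }; export -f doit
+    parallel --env doit -S server doit ::: a b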
+
+* GNU Parallel was used (unfortunately with improper citation) in:
+ Understanding the Impact of E-Commerce Software on the Adoption of
+ Structured Data on the Web
+ http://link.springer.com/chapter/10.1007/978-3-642-38366-3_9#page-1
+
+* GNU Parallel was used (unfortunately with improper citation) in:
+ CWI at TREC 2012, KBA track and Session Track
+ http://trec.nist.gov/pubs/trec21/papers/CWI.kba.session.final.pdf
+
+* Mitigation of Adverse Effects Caused by Shock Wave Boundary Layer
+ Interactions through Optimal Wall Shaping.
+ http://arc.aiaa.org/doi/abs/10.2514/6.2013-2653
+
+* Using GNU parallel to convert images.
+ http://www.brunokim.com.br/blog/?p=18
+
+* A quick way to parallelize.
+ http://timotheepoisot.fr/2013/07/08/parallel/
+
+* GNU Parallel 20130522 ('Rana Plaza') 发布,并行作业执行.
+ http://www.open-open.com/news/view/371301
+
+* Quite a few bug fixes and man page updates.
+
+
+20130622
+
+* --xapply now recycles arguments if an input source has more
+  arguments than the others.
+
+* The sleep time between jobs is now both increased and decreased
+ exponentially.
+
+* The 10 seconds installation now checks the signature using GnuPG
+  if GnuPG is installed.
+
+* Developer job asking for GNU Parallel expertise.
+ http://careers.stackoverflow.com/jobs/35562/developer-big-data-geo-and-web-climate-central
+
+* A small utility program to run youtube-dl in parallel.
+ https://github.com/dlh/youtube-dl-parallel
+
+* Parallelizing Freesurfer:
+ http://blog.cogneurostats.com/?p=148
+
+* Bug fixes and man page updates.
+
+
+20130522
+
+* --ctrlc is now default if --pipe is not used, making it possible
+  to kill remotely started jobs simply by pressing Ctrl-C.
+
+* --timeout 200% now means kill jobs that take more than twice the
+  median run time of a job.
+
+* Jobs are now distributed round robin when using multiple
+  --sshlogins instead of filling up one --sshlogin at a time.
+
+* niceload: darwin version of --io --mem --swap. Thanks to Anders F
+ Björklund.
+
+* Bug fixes and man page updates.
+
+
+20130422
+
+* 10 seconds installation: wget -O - pi.dk/3 | sh
+
+* HPUX CPU/core detection. Thanks to Javier Tarradas.
+
+* CPU detection for HURD, IRIX, SCO OpenServer and (old) HPUX.
+
+* --ctrlc will send SIGINT to tasks running on remote computers and
+  thus kill them.
+
+* --load now uses 'ps' to immediately see the number of running
+  processes instead of 'uptime', thus making --load react much faster.
+
+* Testing on CentOS 3.9 and FreeBSD 7 revealed a lot of compatibility
+  bugs. Some of these required quite extensive changes, making this
+  release beta quality.
+
+* --retries works with --onall.
+
+* The new --load computation now works on FreeBSD 7.
+
+* --nice works under tcsh.
+
+* GNU Parallel is officially supported in NetBSD.
+ http://ftp.netbsd.org/pub/pkgsrc/current/pkgsrc/parallel/parallel/README.html
+
+* GNU Parallel is accepted for openSUSE.
+ http://software.opensuse.org/package/gnu_parallel
+
+* GNU Parallel can be installed under Microsoft Windows (Cygwin).
+ http://blogs.msdn.com/b/hpctrekker/archive/2013/03/30/preparing-and-uploading-datasets-for-hdinsight.aspx
+
+* Excuses for not installing GNU Parallel.
+ http://oletange.blogspot.dk/2013/04/why-not-install-gnu-parallel.html
+
+* Job advert that requires GNU Parallel competence.
+ http://versium.com/about/careers/
+
+* Parallelizing Batch Jobs for Fun and Profit.
+ http://mikeseidle.com/tech/programming/2013/03/parallelizing-batch-jobs
+
+* Processing Transcription Start Sites(TSS) for the entire Mouse genome.
+ http://qbrc.swmed.edu/2013/03/gnu-parallel-speeding-up-unix-commands-and-scripts/
+
+* GNU parallel is used throughout Scrimer.
+ http://scrimer.readthedocs.org/en/latest/
+
+* GNU Parallel helped making public documents searchable.
+ http://danpalmer.me/blog/articles/2013-04-06-unlocking-hillsborough.html
+
+* Identifying big movie files (German).
+ http://blackappsolutions.wordpress.com/2013/03/23/wenn-der-plattenplatz-knapp-wird-filme-identifizieren-die-viel-platz-belegen/
+
+* Bug fixes and man page updates.
+
+
+20130222
+
+* --resume works with --pipe.
+
+* --resume-failed will go through --joblog, redo the failed jobs and
+ then continue like --resume.
+
+* Negative positional arguments count from the end: {-1} means the
+ last argument, {-2} the second to last.
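+  For example:
+    parallel echo {-1} {1} ::: A ::: B ::: C
+  prints 'C A'.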
+
+* NetBSD CPU detection.
+
+* --blocksize increases exponentially if it is smaller than a full
+  record.
+
+* Processing n-line records (--pipe -L n) is now much faster.
+
+* --tollef is obsoleted after discussion on the mailing list.
+
+* GNU Parallel is the highest rated tool on:
+ http://www.biostars.org/show/tools/?sort=votes&since=all%20time
+
+* GNU Parallel was loved during FSFE's #ilovefs campaign.
+ http://fsfe.org/news/2013/news-20130212-01.en.html
+
+* Using GNU Parallel with s3cmd (Japanese).
+ http://blog.suz-lab.com/2013/02/s3cmd-gnu-paralells3.html
+
+* Intro to GNU Parallel (Chinese).
+ http://guiquanz.github.com/2013/02/12/gnu-parallel-intro/
+
+* Bug fixes and man page updates.
+
+
+20130122
+
+* --sshdelay Delay starting next ssh by secs seconds. GNU parallel
+  will pause secs seconds after starting each ssh. secs can be less
+  than 1 second.
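+  An illustrative sketch (server1 and server2 are placeholders):
+    parallel --sshdelay 0.2 -S server1,server2 echo ::: 1 2 3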
+
+* Official OpenBSD port:
+ http://ftp.openbsd.org/ports/sysutils/parallel/
+
+* Official DragonFlyBSD package:
+ http://www.mirrorservice.org/sites/ftp.dragonflybsd.org/packages/amd64/DragonFly-3.0.0/stable/parallel/
+
+* Post about niceload in Hungarian:
+ http://commandline.blog.hu/2013/01/02/niceload
+
+* 自炊スキャンデータをKobo Gloに最適化
+ http://interstadial.wordpress.com/2013/01/20/
+
+* GNU parallel+ssh で複数のリモートホストに複数のコマンドを実行させる
+ http://oshiire.to/archives/1686
+
+* Bug fixes and man page updates.
+
+
+20121222
+
+* --shebang-wrap makes it possible to parallelize scripts by putting
+  parallel in the shebang line (the first #! line in scripts).
+
+* --delay puts in a delay after starting each job. Useful to avoid
+  race conditions and thundering herd problems.
+
+* --results now uses / as separator instead of TAB and thus creates
+ subdirs. Thanks to Drew Frank.
+
+* parallel: OpenBSD CPU detection. Thanks to Michał Markowski.
+
+* niceload --sensor now accepts negative numbers to signify that a
+  lower sensor value means the limit is lower.
+
+* niceload --prg suspends a named program that is already running.
+
+* niceload: multiple --pid options suspend process ids that are
+  already running.
+
+* Convertir des images avec find et imagemagick
+ http://roger.b.over-blog.com/article-convertir-des-images-avec-find-et-imagemagick-113416637.html
+
+* Bug fixes and man page updates.
+
+
+20121122
+
+* niceload --sensor makes it possible to use a program to control
+  whether a program should be suspended.
+
+* GNU Parallel was part of the justification for Nordic Free Software
+ Award 2012
+ https://fsfe.org/news/2012/news-20121112-01.html
+
+* Dateien schneller packen und konvertieren mit GNU parallel
+ http://www.linux-community.de/Archiv/Tipp-der-Woche/Dateien-schneller-packen-und-konvertieren-mit-GNU-parallel
+
+* Processing images from Lunar Reconnaissance Orbiter Narrow Angle
+ Camera http://lunokhod.org/?p=774
+
+* GNU Parallel is used in types2: Type and Hapax Accumulation Curves
+ http://www.cs.helsinki.fi/u/josuomel/types2/
+
+* GNU Parallel used in: A Hybrid Factor Analysis and Probabilistic
+ PCA-based system for Dictionary Learning and Encoding for Robust
+ Speaker Recognition
+ http://www.superlectures.com/odyssey2012/lecture.php?id=8&lang=en
+ http://www.cse.iitm.ac.in/~mrsri/ppca.pdf
+
+* Mapreduce avec parallel, cat et une redirection
+ http://datacratic.com/site/blog/mapreduce-avec-parallel-cat-et-une-redirection
+
+* GNU Parallel for Large Batches of Small Jobs
+ https://support.scinet.utoronto.ca/wiki/images/7/7b/Tech-talk-gnu-parallel.pdf
+
+* GNU Parallel used in black belt tip (8:50-10:12)
+ https://aws.amazon.com/apac/awspodcast/episode20/
+
+* Bug fixes and man page updates.
+
+
+20121022
+
+* --results structures output files so they can be imported using
+ Pandas (http://pandas.pydata.org/). Useful when doing simulations to
+ select output from some arguments.
+
+* Use --env to transfer environment variables through ssh to remote
+ machines.
+
+* GNU Parallel is accepted for Ubuntu Quantal.
+ http://packages.ubuntu.com/quantal/parallel
+
+* GNU Parallel was requested for OpenSUSE (go vote!)
+ https://features.opensuse.org/314461
+
+* Kneth's Korner: Map/Reduce and GNU Parallel
+ http://kenneth.geisshirt.dk/2012/10/mapreduce-and-gnu-parallel.html
+
+* Blog post in Japanese by Siguniang.
+ https://siguniang.wordpress.com/2012/09/09/notes-on-gnu-parallel-the-command-line-power-tool/
+ (Siguniang mentions the logo is called 'The Cafe Wall Illusion')
+ http://www.michaelbach.de/ot/ang_cafewall/index.html
+
+* GNU Parallel, where have you been all my life?
+ http://ssdigit.nothingisreal.com/2012/09/gnu-parallel-where-have-you-been-all-my.html
+
+* Reference-free ddRADseq analysis tools uses GNU Parallel.
+ https://github.com/brantp/rtd#readme
+
+* When bash just isn’t quick enough.
+ https://soimasysadmin.wordpress.com/2012/08/15/gnu-parallel-when-bash-just-isnt-quick-enough/
+
+* Using GNU Parallel to Package Multiple Jobs in a Single PBS Job.
+ http://www.nas.nasa.gov/hecc/support/kb/Using-GNU-Parallel-to-Package-Multiple-Jobs-in-a-Single-PBS-Job_303.html
+
+* GNU Parallel Notes.
+ http://xed.ch/help/parallel.html
+
+* Bug fixes and man page updates.
+
+
+20120822
+
+* Due to a bugfix the main flow has changed, making this release
+  beta quality.
+
+* Parallel Proxy Scraper & Checker Tool
+ https://www.youtube.com/watch?v=iXCeR_XsP6o
+
+* Bug fixes and man page updates.
+
+
+20120722
+
+* GNU Parallel was presented at Chiang Mai Bar Camp
+ http://barcampchiangmai.org/
+ See write up here:
+ http://www.cnx-software.com/2012/07/02/use-gnu-parallel-to-speed-up-script-execution-on-multiple-cores-andor-machines/
+
+* I don't always max out all my CPUs, but when I do, I do it with GNU
+ Parallel.
+ http://memegenerator.net/instance/22638454
+
+* Bug fixes and man page updates.
+
+
+20120622
+
+* '-L n --pipe' will use records of n lines. This is useful when
+  processing data with records of a fixed number of lines
+  (e.g. fastq).
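+  An illustrative sketch ('reads.fastq' is a placeholder):
+    cat reads.fastq | parallel --pipe -L 4 wc -l
+  splits the stream only at 4-line (one fastq record) boundaries.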
+
+* --filter-hosts will remove down hosts. For each remote host: check
+  that login through ssh works. If not: do not use this host.
+  Currently you cannot put --filter-hosts in a profile, $PARALLEL,
+  /etc/parallel/config or similar. This is because GNU Parallel uses
+  GNU Parallel to compute this, so you would get an infinite loop.
+  This will likely be fixed in a later release.
+
+* --pipe now uses fork() instead of busy wait. The performance should
+ be better on computers with >10 cores while remaining the same on
+ computers with few cores.
+
+* GNU Parallel will be represented at Chiang Mai Bar Camp.
+ http://barcampchiangmai.org/
+
+* Indexing Big Data on Amazon AWS: The Screencast
+ (Check out his T-shirt at 18:40-21:00).
+ http://www.opensourceconnections.com/2012/06/06/indexing-big-data-on-amazon-aws-screencast/
+
+* biotoolbox uses GNU Parallel.
+ https://code.google.com/p/biotoolbox/wiki/Pod_novo_wrapper
+
+* Spiceweasel uses GNU Parallel.
+ https://github.com/mattray/spiceweasel
+
+* GNU Parallel part of The Administrators Challenge.
+ http://challenge.twistedrack.com/questions/qs3.php
+
+* Finding Oldest Firefox Code using GNU Parallel.
+ http://gregoryszorc.com/blog/2012/06/18/finding-oldest-firefox-code/
+
+* Bug fixes (quite a few for remote job running) and man page updates.
+
+
+20120522
+
+* Timings in --joblog now with milliseconds.
+
+* Thesis using GNU Parallel (correctly cited; hooray!).
+ http://scholarworks.sjsu.edu/etd_projects/222/
+
+* Package for AIX.
+ http://www.perzl.org/aix/index.php?n=Main.Parallel
+
+* Prokka uses GNU Parallel.
+ http://bioinformatics.net.au/prokka-manual.html
+
+* GNU Parallel was presented at MUUG.
+ http://muug.mb.ca/blog/meetings/2012/05/09/may-8th-2012-rtfm-gnu-parallel1/
+
+* Blog post: Manipulating many or large files with GNU parallel.
+ http://compbiously.blogspot.com/2012/05/manipulating-many-or-large-files-with.html
+
+* Blog post: Running commands in parallel using bash.
+ http://blog.michaelboman.org/2012/04/running-commands-in-parallel-using-bash.html
+
+* Blog post in Hungarian.
+ http://commandline.blog.hu/2012/05/03/gnu_parallel
+
+* Popularity of GNU Parallel in Debian explodes.
+ http://qa.debian.org/popcon-graph.php?packages=parallel
+
+* Bug fixes and man page updates.
+
+
+20120422
+
+* A race condition bug caused restructuring of a central piece of the
+ code. Thus this release of GNU Parallel should be considered beta
+ quality.
+
+* sql --dbsize will now list sizes of all databases if given no
+ database.
+
+* GNU Parallel is now in Debian Unstable.
+ apt-get install parallel
+
+* Article: Computational and experimental analyses of
+ retrotransposon-associated minisatellite DNAs in the soybean genome.
+ http://www.biomedcentral.com/content/supplementary/1471-2105-13-s2-s13-s1.pdf
+
+* Blog post: GNU parallel - the best thing since sliced bread.
+ https://arrayal.wordpress.com/2012/03/27/gnu-parallel-the-best-thing-since-sliced-bread/
+
+* Blog post: GNU Parallel makes everything faster.
+ http://compbiously.blogspot.com/2012/03/gnu-parallel-makes-everything-faster.html
+
+* Blog post (German): Howto: GNU parallel.
+ http://d24m.de/2012/04/05/howto-gnu-parallel/
+
+* Blog post: Running in Parallel
+ http://interactivity.ifactory.com/2012/04/running-in-parallel/
+
+* 20000 watched the first intro video.
+ http://pi.dk/1
+
+* The first 5 have received their GNU Parallel merchandise
+ https://www.gnu.org/software/parallel/merchandise.html
+
+* Bug fixes and man page updates.
+
+
+20120322
+
+* Parallel Process Database Dumps.
+ http://blog.mattoates.co.uk/2012/02/parallel-process-database-dumps.html
+
+* Using GNU Parallel to process images from Mars.
+ http://lunokhod.org/?p=468
+
+* Using GNU Parallel with bzgrep.
+ http://filip.rembialkowski.net/did-you-know-gnu-parallel/
+
+* Bug fixes and man page updates.
+
+
+20120222
+
+* --workdir . will use the current working dir. If the current working
+ dir is beneath your home dir, the value . is treated as the relative
+ path to your home dir. This means that if your home dir is different
+ on remote computers (e.g. if your login is different) the relative
+ path will still be relative to your home dir.
+
+* A texinfo file is now generated from the POD file using pod2texi.
+
+* The bioinformatics article "Fast large-scale clustering of protein
+ structures using Gauss integrals" mentions GNU Parallel. They forgot
+ to add the reference. Please remember --bibtex if you use GNU
+ Parallel in an article.
+ http://bioinformatics.oxfordjournals.org/content/28/4/510.short
+
+* The first supporter received his merchandise. Did you order yours?
+ https://www.gnu.org/software/parallel/merchandise.html
+
+* How to use GNU Parallel with samtools.
+ http://zvfak.blogspot.com/2012/02/samtools-in-parallel.html
+
+* How to use GNU Parallel for vacuuming PostgreSQL tables.
+ http://blog.timmattison.com/archives/2012/01/24/mini-hack-parallel-vacuuming-in-postgresql/
+
+* Converting e-books with Calibre and GNU Parallel.
+ http://www.linkhal.de/blog/2012/01/converting-massive-e-book-collections-with-calibre-and-gnu-parrallel/
+
+* Using GNU Parallel for tailing logfiles from multiple servers.
+ http://silviud.blogspot.com/2012/02/shell-parallel-processing.html
+
+* Bug fixes and man page updates.
+
+
+20120122
+
+* --header : uses the first input line as column names and you can
+ then use {colname} as a replacement string in the command. This also
+ works with multiple :::'s.
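+  For example:
+    parallel --header : echo {animal} ::: animal dog cat
+  prints 'dog' and 'cat'.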
+
+* --header <regexp> matches a header as a regular expression and
+ repeats the header for each block with --pipe.
+
+* --resume resumes from the last unfinished job. Useful if you stop
+ GNU Parallel and restart it later with the same arguments.
+
+* niceload now propagates exit status correctly.
+
+* Show your support for GNU Parallel. For a limited time you can get a
+ GNU Parallel T-shirt+mug+pen+100 postcards at 20 EUR. See
+ https://www.gnu.org/software/parallel/merchandise.html
+
+* Options -g -B -T -U -W -Y are retired as warned 6 months ago.
+
+* GNU Parallel referenced in article on Holographic Grid Cloud. Thanks
+ to Stefano Gallozzi. http://arxiv.org/pdf/1112.6128
+
+* Article in IEEE Software on GNU Parallel. Thanks to Diomidis
+ Spinellis. http://www.spinellis.gr/blog/20110911/
+
+* An article on Narwhal which uses GNU Parallel. They forgot to add
+ the reference. Please remember --bibtex if you use GNU Parallel in
+ an article.
+ http://bioinformatics.oxfordjournals.org/content/early/2011/11/08/bioinformatics.btr613.full.pdf
+
+* Blog post on using GNU Parallel to speed up BLAST queries:
+ http://blog.mckuhn.de/2012/01/embarrassingly-parallel-blast-search.html
+
+* Video showing Wake-on-LAN with GNU Parallel.
+ https://www.youtube.com/watch?v=0mB-yIyKFLQ
+
+* Using GNU Parallel with Solr.
+ http://sujitpal.blogspot.com/2011/12/solr-report-generation-with-python.html
+
+* First job ad including GNU Parallel:
+ http://seeker.dice.com/jobsearch/servlet/JobSearch?op=101&dockey=xml/7/6/76f858de083a094f74b1a5d3ba53ffc5@endecaindex
+
+* Bug fixes and man page updates.
+
+
+20111222
+
+* --timeout will now kill grandchildren.
+
+* Interview in Hacker Public Radio.
+ http://hackerpublicradio.org/eps.php?id=0860
+
+* Blog post in Chinese.
+ http://blog.sina.com.cn/s/blog_3f7652740100y0ju.html
+
+* Bug fixes and man page updates.
+
+
+20111122
+
+* GNU Parallel package for Fedora.
+ https://admin.fedoraproject.org/pkgdb/acls/name/parallel
+
+* Using GNU Parallel with Mozilla cross referencing.
+ https://blog.mozilla.com/it/2011/11/15/mxr-improvements/
+
+* Neat flash presentation of GNU Parallel.
+ http://prezi.com/0nq74b2elgtx/about-gnu-parallel/
+
+* How to create tar files of dirs in parallel.
+ https://joneslee85.wordpress.com/2011/11/22/shell-tips-how-to-recursively-archive-many-folders/
+
+* Article in Admin Network&Security by Ben Martin.
+ http://www.admin-magazine.com/HPC/Articles/GNU-Parallel
+
+* Blog post in Polish.
+ http://blog.tiger.com.pl/2011/01/05/gnu-parallel/
+
+* Blog post on using GNU Parallel of Mac OS X.
+ http://www.0xcb0.com/2011/10/19/running-parallel-bash-tasks-on-os-x/
+
+* Blog post in English by researcher.
+ http://zandyware.wordpress.com/2011/10/15/gnu-parallell-make-best-use-of-your-multicore-computer/
+
+* Blog entry in Chinese.
+ http://blog.csdn.net/tossense/article/details/6889026
+
+* Blog entry in Japanese on managing many servers.
+ http://hiroakis.com/blog/2011/11/13/gnu-parallel/
+
+* GNU Parallel is now part of ppbs.
+ http://physiology.med.cornell.edu/faculty/mason/lab/ppbs/introduction/introduction.html
+
+* How to use GNU Parallel with MCX.
+ http://mcx.sourceforge.net/cgi-bin/index.cgi?MMC/Doc/MMCCluster
+
+* Bug fixes and man page updates.
+
+
+20111022
+
+* --tag prepends lines with the argument(s). If used with --(n)onall
+ the line will be prepended with the sshlogin instead.
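+  For example:
+    parallel --tag echo foo ::: a b
+  prints 'a<TAB>foo' and 'b<TAB>foo'.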
+
+* --shellquote does not run the command but quotes it using \'s.
+ Useful for making quoted composed commands.
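+  An illustrative sketch:
+  parallel --shellquote echo ::: 'a b'
+  prints the quoted command (e.g. echo a\ b) instead of running it.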
+
+* --profile can now be repeated merging multiple profiles.
+
+* --bibtex now gives the BibTeX entry.
+ Thanks to Aleksandr Levchuk for testing.
+
+* Makefile for simple .deb package.
+ cd packager/debian; make
+
+* sql: --list-databases lists the databases.
+
+* GNU Parallel officially in Fedora 16.
+ http://www.spinics.net/lists/fedora-package-announce/msg67130.html
+
+* Blog post on recompressing FLAC audio.
+ http://blog.oxplot.com/2011/08/reflac-flac-cleanser.html
+
+* The most complex use of ::: I have seen so far.
+ http://agentzlerich.blogspot.com/2011/09/following-up-on-previous-failures-ive.html
+
+* Example of using GNU Parallel with sudo.
+ http://loopkid.net/articles/2011/10/07/recursive-chmod-and-chown-on-files
+
+* Use wget and gnu parallel to download mp3s at rocket speed.
+ http://sdbrain.posterous.com/use-wget-and-gnu-parallel-to-download-mp3s-at
+
+* First CV referring to GNU Parallel.
+ http://www.peopleperhour.com/freelancers/stephen_/internet_researcher_and_protocol_develop/140602
+
+* Cartwright labs usage of GNU Parallel.
+ http://cartwrightlab.wikispaces.com/The+Most+Bang+for+Your+Buck
+
+* Bug fixes and man page updates.
+
+
+20110822
+
+* --timeout implemented so that slow commands can be killed.
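+  A minimal sketch: parallel --timeout 3 'sleep {}; echo {}' ::: 1 10
+  kills the 10-second job after 3 seconds, so only '1' is printed.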
+
+* CPU detection improved for Mac OSX. Thanks to Wayne E. Seguin.
+
+* Example of a parallel webcrawler in the man page.
+
+* Backing up PostgreSQL with GNU Parallel. Thanks to Stephane Wirtel.
+ http://wirtel.be/2011/07/15/rsync_parallel/
+
+* Blog post in Japanese.
+ http://dminor11th.blogspot.com/2011/08/gnu-parallel.html
+
+* Blog post about optimizing JPEGs. Thanks to Thomas Jost.
+ http://schnouki.net/2011/07/22/optimizing-jpeg-pictures/
+
+* Bug fixes and man page updates.
+
+
+20110722
+
+* niceload: --hard will suspend a program if a limit is reached - as
+ opposed to just slowing the program down.
+
+* niceload: --soft will slow the program down - as opposed to
+ suspending the program completely.
+
+* niceload: --run-io will slow down a program if disk I/O goes above a
+ certain limit.
+
+* niceload: --run-load will slow down a program if the load average goes
+ above a certain limit.
+
+* niceload: --run-mem will slow down a program if free memory goes
+ below a certain limit.
+
+* niceload: --run-noswap will slow down a program if the computer is
+ swapping.
+
+* niceload: --start-io, --start-load, --start-mem, --start-noswap will
+ defer starting a program until the system is below the limit.
+
+* --io, --load, --mem, and --noswap set both --run-* and --start-*.
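+  An illustrative combination (limit and job name made up):
+  niceload --run-mem 500M bigjob
+  slows bigjob down whenever free memory drops below 500 MB.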
+
+* niceload got a major rewrite and is now object oriented.
+
+* GNU Parallel was presented at Nordic Perl Workshop 2011.
+ http://conferences.yapceurope.org/npw2011/talk/3416
+
+* Blog post about zcat and GNU Parallel. Thanks to Dr. John.
+ http://drjohnstechtalk.com/blog/2011/06/gnu-parallel-really-helps-with-zcat/
+
+* 2 blog posts in Japanese. Thanks to Negima.
+ http://d.hatena.ne.jp/negima1976/20110607/1307412660
+ http://d.hatena.ne.jp/negima1976/20110628/1309252494
+
+* Blog post for bioinformatics. Thanks to Chris Miller.
+ http://chrisamiller.com/science/2010/05/26/use-parallel-for-easy-multi-processor-execution/
+
+* Bug fixes and man page updates.
+
+
+20110622
+
+* --onall will run all the jobs on all the computers. This is useful
+  for system administrators with multiple servers.
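+  For example (host names made up):
+  parallel --onall -S server1,server2 echo ::: a b
+  runs 'echo a' and 'echo b' on both servers.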
+
+* --nonall runs like --onall but reads no arguments from stdin
+  (standard input), so it is possible to do:
+ parallel --nonall -S computer1,computer2 uptime
+
+* Context replace now works with multiple input sources:
+ parallel -X echo {1}-{2} ::: 1 2 3 ::: a b c
+
+* --noswap does not start a job on a server that is swapping.
+
+* Article: Parallel processing without Hadoop!
+ http://www.solutionhacker.com/parallel-processing-without-hadoop/
+
+* Article in Linux Magazine (Spanish). Thanks to Ben Martin.
+ http://www.linux-magazine.es/issue/67/
+
+* First blog post in Korean. Thanks to 민병국.
+ http://blog.daum.net/_blog/BlogTypeView.do?blogid=02RQ3&articleno=13380895&admin=#
+
+* Blog post in Japanese. Thanks to Naoya Nakazawa.
+ http://www.sssg.org/blogs/naoya/archives/2003
+
+* Blog post in English. Thanks to Dagon.
+ http://hekate.homeip.net/2011/05/29/
+
+* Bug fixes and man page updates.
+
+
+20110522
+
+* Multiple ::: can be put on the command line and will be treated
+ similar to contents from multiple -a's.
+
+* ::: and :::: can now be mixed on the command line. Use {1} .. {n} to
+ refer to inputs from the different input sources.
+
+* --xapply is now needed to get xapply's behaviour of reading one line
+ from each of the input sources.
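+  Illustration:
+  parallel --xapply echo {1}-{2} ::: 1 2 3 ::: a b c
+  prints 1-a 2-b 3-c instead of all 9 combinations.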
+
+* Multiple input sources will cause all combinations of arguments from
+ the sources to be generated. E.g. 'parallel echo {1}+{2} ::: 1 2 :::
+ a b c' will print 1+a 1+b 1+c 2+a 2+b 2+c. This can often replace
+ nested loops.
+
+* Implemented {//} for the input line with the basename removed (dirname).
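+  Example: parallel echo {//} ::: /tmp/sub/file.txt
+  prints /tmp/sub.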
+
+* New video showing the new ::: and :::: input sources.
+ http://tinyogg.com/watch/iOdhU/ http://www.youtube.com/watch?v=fOX1EyHkQwc
+
+* GNU Parallel now has a logo.
+ http://www.gnu.org/software/parallel/logo.png
+
+* Article about GNU SQL in USENIX Magazine ;login: (print)
+ http://www.usenix.org/publications/login/2011-04/
+
+* Using GNU Parallel with EC2. Thanks to Kevin Wu.
+ http://blog.kevinformatics.com/post/4970574713/interested-in-your-experience-using-gnu-parallel-in
+
+* Review with idea for {..} and {...} in Japanese. Thanks to ichii386.
+ http://d.hatena.ne.jp/ichii386/20110426
+
+* Upgrade GNU Parallel using Macports. Thanks to Phil Hollenback.
+ http://www.hollenback.net/index.php/MacportsParallel
+
+* Robert from Echo One discusses using processes instead of threads:
+ http://rrees.wordpress.com/2011/04/25/many-cores-many-threads/
+
+* How to run GNU Parallel on a SLURM cluster. Thanks to Kevin Stock.
+ http://www.kevinstock.org/2011/04/using-the-ri-cluster/
+
+* Short article about using GNU Parallel with lame:
+ http://loopkid.net/articles/2011/04/30/accelerate-lame-mp3-conversion
+
+* Using GNU Parallel to run tail -f in Japanese. Thanks to Clouder.
+ http://blog.clouder.jp/archives/001140.html
+
+* BBC Research & Development uses GNU Parallel:
+ http://www.bbc.co.uk/blogs/researchanddevelopment/2010/11/prototyping-weeknotes-41-26112.shtml
+
+* Short article about using GNU Parallel on RHEL. Thanks to Rishi Deshpande.
+ http://nuclear-imaging.info/site_content/2011/05/11/gnu-parallel/
+
+* Using GNU Parallel for FLAC->MP3 conversion. Thanks to Derek Marcotte.
+ http://derek.chezmarcotte.ca/?p=286
+
+* Bug fixes and man page updates.
+
+
+20110422
+
+* {#} now works as an alias for $PARALLEL_SEQ.
+
+* --eta now estimates way more accurately.
+
+* CPU detection code for AIX thanks to Christian Netrwal.
+
+* --joblog contains exitcode.
+
+* Thanks to Ævar Arnfjörð Bjarmason for reading my code.
+
+* GNU Parallel was presented at:
+ - LUGA, Augsburg, 2011-03-26, http://www.luga.de/Aktionen/LIT-2011/Programm
+ - OSAA.dk, Aarhus, 2011-04-12
+
+* Blog entry in Japanese. Thanks to Riywo.
+ http://blog.riywo.com/2011/04/19/022802
+
+* Example of how to use GNU Parallel on PBS clusters. Thanks to Andrew
+ J Dolgert: http://web0.tc.cornell.edu/wiki/index.php?title=Gnu_Parallel
+
+* First example of using GNU Parallel in Hebrew. Thanks to omry.
+ http://nd.gd/jk
+
+* Intro video got past 10000 hits.
+ http://www.youtube.com/watch?v=OpaiGYxkSuQ
+
+* Bug fixes and man page updates.
+
+
+20110322
+
+* --tollef makes GNU Parallel switch compatible with Tollef's
+  parallel. This will cause -l to mean --load, and the argument
+  separator will be -- instead of :::
+
+* --gnu will force GNU Parallel to behave like GNU Parallel even if
+ --tollef is set.
+
+* Site wide config file: /etc/parallel/config
+
+ This should solve the issue with some packagers renaming GNU
+ Parallel to gparallel to avoid the naming conflict.
+
+ By putting --tollef in the site wide config file you can deinstall
+ Tollef's parallel and install GNU Parallel instead without any
+ change for users or scripts. This is useful for packagers that
+ currently rename GNU Parallel or simply do not distribute GNU
+ Parallel because the command name conflicts with Tollef's parallel.
+
+* -L 0, -n 0, and -N 0 implemented. They will read one argument,
+ but insert 0 arguments on the command line. Useful if you just want
+ to run the same command with the same arguments a number of times.
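+  For instance: parallel -N0 echo hello ::: 1 2 3
+  prints 'hello' three times, ignoring the argument values.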
+
+* GNU Parallel is now in Fink. Thanks to Jesse Alama.
+ http://pdb.finkproject.org/pdb/package.php/parallel
+
+* Opscode Cookbook for Chef. Thanks to Joshua Timberman.
+ https://github.com/opscode/cookbooks/tree/master/gnu_parallel
+
+* Man page examples translated into Japanese. Thanks to Koshigoe.
+ http://w.koshigoe.jp/study/?%5Bsystem%5D+GNU+parallel+%BB%C8%CD%D1%CE%E3#l13
+
+* GNU Parallel will be presented at:
+ - LUGA, Augsburg, 2011-03-26, http://www.luga.de/Aktionen/LIT-2011/Programm
+ - OSAA.dk, Aarhus, 2011-04-12
+
+* Video of presentation from FSCONS 2010-11-07. The presenter was
+  _really_ hoarse that day (something to do with loads of alcohol the
+ night before). http://vimeo.com/20838834
+
+* Review with examples in German. Thanks to M. Nieberg.
+ http://kenntwas.de/2011/linux/gnu-parallel/
+
+* A review of the --pipe option in German. Thanks to Mathias Huber.
+ http://www.linux-magazin.de/NEWS/GNU-Parallel-20110205-stueckelt-Daten
+
+* A small example of grepping maillogs. Thanks to François Maillet.
+ http://www.francoismaillet.com/blog/?p=399
+
+* Using GNU Parallel instead of xargs. Thanks to James Cuff.
+ http://blog.jcuff.net/2011/02/on-train-ride-in.html
+
+* Bug fixes and man page updates.
+
+
+20110205
+
+* --pipe splits piped data into blocks. Each block is piped to a
+ program for processing. The piping and the programs will be run in
+ parallel. Useful if the data you want to process is data for a
+ program and not arguments.
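+  A typical sketch (file name made up):
+  cat bigfile | parallel --pipe wc -c
+  counts the bytes of each block in parallel.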
+
+* --blocksize sets the blocksize in bytes for --pipe. The blocksize is
+  approximate. It can deviate by as much as the size of one record.
+ Default is 1M.
+
+* --recstart sets the string matching the start of a
+ record. Default is "".
+
+* --recend sets the string matching the end of a
+ record. Default is '\n'. To specify none use --recend "".
+
+ If both --recstart and --recend are set, the end of a record must be
+ followed immediately by a start of a record. This is useful if
+ either recend or recstart can occur in the middle of a record.
+
+* --remove-rec-sep removes the string matched by --recstart and
+ --recend.
+
+* --regexp will make GNU Parallel treat --recstart and --recend as
+ regular expressions.
+
+* --output-as-files will put the output of the programs into files;
+  instead of printing the output, GNU Parallel will print the names
+  of these files.
+
+* -N, if used with --pipe, sets the number of records to read.
+
+* New video showing --pipe at
+ http://www.youtube.com/watch?v=1ntxT-47VPA
+
+* GNU Parallel was presented at FOSDEM.
+
+* Article in USENIX Magazine ;login: (print)
+ http://www.usenix.org/publications/login/2011-02/
+
+* GNU Parallel is now on ohloh.net. Thanks to Wim Muskee.
+ https://www.ohloh.net/p/gnu-parallel
+
+* Advanced recursive example. Thanks to Ruarí Ødegaard.
+ http://my.opera.com/ruario/blog/2011/01/24/editing-debian-packages-more-fun-with-gnu
+
+* Small example on using GNU Parallel through 0install (German).
+ http://forum.ubuntuusers.de/topic/gnu-parallel-mit-zero-install-kurzinfo-kein-wi/
+
+* Bug fixes and man page updates.
+
+
+20110122
+
+* --joblog makes a simple log of completed jobs.
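+  Example (log file name made up):
+  parallel --joblog /tmp/log echo ::: a b c
+  writes one line per completed job to /tmp/log.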
+
+* -X now spreads arguments between job slots when reaching the last
+  argument. Use -j1 to avoid this.
+
+* People on the email list have voted -j+0 to be the new default
+ instead of -j9.
+
+* First review in Polish. Thanks to Patryk Krawaczyński.
+ http://nfsec.pl/root/2458
+
+* Review in Spanish (in print).
+ https://www.linux-magazine.es/issue/67/moreinfo.html
+
+* Review in English. Thanks to Brian Gough.
+ http://blogs.fsfe.org/bjg/2011/01/gnu-parallel-a-map-operator-for-the-command-line/
+
+* Review in French. Thanks to Denis Dordoigne.
+ http://linuxfr.org/2010/12/29/27715.html
+
+* Review in Spanish.
+ http://gufete.net/index.php?entry=entry110116-200022
+
+* Article with advanced recursive example. Thanks to Ruarí Ødegaard
+ http://my.opera.com/ruario/blog/2011/01/18/fun-with-gnu-parallel
+
+* Use case for memcache.
+ http://www.dctrwatson.com/2010/12/how-to-dump-memcache-keyvalue-pairs-fast/
+
+* Bug fixes and man page updates.
+
+
+20101222
+
+* GNU niceload is now part of GNU Parallel. GNU niceload slows down a
+ program if the load average is above a certain limit.
+
+* Implemented --tmpdir to buffer standard output and standard error in
+ a different place.
+
+* Implemented --load to wait until the load is below a limit before
+ starting another job on that computer.
+
+* Implemented --nice to set the niceness of jobs running both locally
+  and remotely.
+
+* Implemented --dry-run to print the job without running it.
+
+* Implemented --tty as the old default of assigning a tty to the first
+ job causes problems.
+
+* Review with focus on clusters. Thanks to Taylor Gillespie.
+ http://www.unixpronews.com/unixpronews-49-20101019GNUParallelSpeedUpProcessingWithMulticoresClusters.html
+
+* Review with focus on protein similarity.
+ http://kevinformatics.tumblr.com/post/2142473893/cluster-like-computing-using-gnu-parallel
+
+* Review in Spanish.
+ http://gr3p.com/2010/12/gnu-parallel-acelera-tus-scripts-en-linux
+
+* Quite a few bug fixes and man page updates.
+
+
+20101202
+
+* Implemented {/} for the input line with the path removed (basename).
+
+* Implemented {/.} for the input line with extension and path removed
+ (basename).
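+  Example: parallel echo {/.} ::: /tmp/sub/foo.tar
+  prints foo.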
+
+* Output from --progress is now sent to standard error instead of
+ standard output.
+
+* --eta was broken and counted down from 0. Now fixed.
+
+* Standard output and standard error are flushed after every job so if
+ standard output does not end with a newline it will still be grouped
+ with the rest of standard output.
+
+* --command, -c, --file, and -f are now removed as options. They were
+ never used in practice.
+
+* GetOptionsFromArray rewritten to work with old Perl libraries.
+
+* The file COPYING now contains the GNU General Public License version 3.
+
+* Major rewrite to make the code more object oriented and easier to
+ maintain in the future.
+
+
+20101113
+
+* Using -j myfile the number of jobs can be changed while GNU Parallel
+ is running simply by changing the content of myfile.
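+  A sketch (file name made up):
+  echo 8 > /tmp/jobs
+  parallel -j /tmp/jobs gzip ::: *.log
+  Writing a new number to /tmp/jobs while it runs changes the number
+  of job slots on the fly.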
+
+* Implemented --profile to use different .parallel/config for
+ different situations.
+
+* Ugly newlines in $PARALLEL and .parallel/config are no longer
+ needed. Instead you have to use \ in front of special shell
+ characters.
+
+* --workdir puts the files transferred to remote machines in a
+ specified directory.
+
+* $PARALLEL_PID is set to the process id of GNU Parallel.
+
+* $PARALLEL_SEQ is set to the sequence number of the job.
+
+* -v now only shows the command to run. Use -v -v to show the
+ ssh/rsync wrapping.
+
+* Slow spawning error is now only a warning.
+
+* If stdin is a tty and input is read from stdin you now get a
+ warning.
+
+* GNU sql: \n and \x0a in arguments are replaced with newline.
+
+* Patch for Debian package and spelling mistakes. Thanks to Rogério
+ Brito <rbrito at ime dot usp dot br>
+
+* Mac OS X Homebrew package. Thanks to Jonathan Palardy <jonathan dot
+ palardy at gmail dot com>
+
+* FreeBSD port. Thanks to Chris Howey <howeyc at gmail dot com>
+
+* Pardus package. Thanks to Fethican Coşkuner
+ <fethicanc at gmail dot com>
+
+* First review in Chinese. Thanks to 曾義峰:
+ http://antbsd.twbbs.org/~ant/wordpress/?p=2876
+
+* First review in print:
+ http://www.linux-magazine.com/Issues/2010 Nov 2010
+
+* First review in Spanish:
+ http://www.muylinux.com/2010/10/18/gnu-parallel-computacion-paralela-a-golpe-de-comando
+
+* First review in Dutch thanks to Koen Vervloesem <koen at vervloesem
+ dot eu>:
+ http://techworld.nl/technologie/33493/gebruik-al-je-processorkernen-met-gnu-parallel.html
+
+* Blog review thanks to R. Tyler Croy <tyler at monkeypox dot org>:
+ http://unethicalblogger.com/posts/2010/11/gnuparallel_changed_my_life
+
+* 5000 views of the intro video:
+ http://www.youtube.com/watch?v=OpaiGYxkSuQ
+
+* As usual a bunch of bugfixes and more usage examples in the man
+ page.
+
+* GNU Parallel was presented at FSCONS 2010-11-07:
+ http://www.fscons.org/fs/gnu-parallel Hopefully the
+ video will be online soon.
+
+
+20100922
+
+* See GNU Parallel live at FSCONS 2010-11-07:
+ http://www.fscons.org/fs/gnu-parallel
+
+* Untested Debian and xUbuntu packages available through OpenSUSE
+ build service:
+ https://build.opensuse.org/package/show?package=parallel&project=home%3Atange
+
+* Using --retries a job will be retried on another computer if it
+ fails. This is useful if some jobs fail for no apparent reason (such
+ as network failure).
+
+* BSD xargs -o (open /dev/tty) is now default for the job running in
+  the foreground. Useful for interactive commands like:
+ ls | parallel -Xuj1 vi
+
+* GNU sql now supports SQLite.
+
+* Renamed .dburl.aliases to .sql/aliases and /etc/sql/aliases.
+
+* GNU sql now supports --list-tables.
+
+* Alias for DBURL can contain '?query' part with %-quoting.
+
+
+20100906
+
+* Using --shebang GNU Parallel can be used as the parser for a script.
+  E.g.: #!/usr/bin/parallel --shebang traceroute (followed by lines of
+ hosts)
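+  A minimal sketch of such a script:
+  #!/usr/bin/parallel --shebang traceroute
+  qubes-os.org
+  gnu.org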
+
+* First community-generated bugfixes.
+
+* Alt Linux package of GNU Parallel. Thanks to Michael Shigorin <mike
+ at altlinux dot org>
+
+* Sunfreeware package of GNU Parallel. Thanks to Steven M. Christensen
+ <sunfreeware at gmail.com>
+
+* Untested CentOS, Fedora, Mandriva, RedHat, and SUSE packages
+ available through OpenSUSE build service:
+ https://build.opensuse.org/package/show?package=parallel&project=home%3Atange
+
+* Review of GNU Parallel. Thanks to Andrew McFague
+  <amcfague at wgen dot net>.
+ http://www.andrew-mcfague.com/linux/utilities-linux/
+ commands-every-serious-nix-user-should-know/#parallel
+
+* First 1000 views of the intro video.
+
+* sql - a small script to access SQL databases from the command line;
+  a handy companion to parallel --colsep.
+
+
+20100822
+
+* Counting semaphore functionality: start a job in the background. If
+ N jobs are already running, wait for one to complete. Examples:
+ sem 'sleep 2; echo foo'; sem 'sleep 1; echo bar'; sem --wait
+ sem -j2 'sleep 2; echo foo'; sem -j2 'sleep 1; echo bar'; sem --wait
+
+* With --colsep a table can be used as input. Example:
+ cat tab_sep_table | parallel --colsep '\t' echo col1 {1} col2 {2}
+
+* --trim can remove white space around arguments.
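+  For example: parallel --trim lr echo '<{}>' ::: '  a  '
+  prints <a>.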
+
+* --sshloginfile '..' means use ~/.parallel/sshloginfile
+
+* Zero install package. Thanks to Tim Cuthbertson <tim3d dot junk at
+ gmail dot com>
+
+* OpenSUSE package. Thanks to Markus Ammer <mkmm at gmx-topmail dot
+ de>
+
+* NixOS package. Thanks to Ludovic Courtès <ludo at gnu dot org>
+
+* Web review http://oentend.blogspot.com/2010/08/gnu-parallel.html
+ Thanks to Pavel Nuzhdin <pnzhdin at gmail dot com>
+
+* Web review http://psung.blogspot.com/2010/08/gnu-parallel.html
+ Thanks to Phil Sung <psung at alum dot mit dot edu>
+
+
+20100722
+
+* Arguments can now be given on the command line. Example:
+ parallel bzip2 ::: *.txt
+
+* xapply like functionality. Example:
+ parallel diff {1} {2} :::: filelist1 filelist2
+ parallel diff {1} {2} ">"{1.}-{2.}.diff :::: filelist1 filelist2
+
+* Arch Linux package. Thanks to Peter Simons <simons at cryp dot to>
+
+* Mandriva package. Thanks to Sandro Cazzaniga <kharec at mandriva dot
+ org>
+
+* -L -l -n -s now imply -X unless -m is set.
+
+
+20100620
+
+* New video showing the new options.
+ http://www.youtube.com/watch?v=OpaiGYxkSuQ or at
+ http://tinyogg.com/watch/TORaR/ and http://tinyogg.com/watch/hfxKj/
+
+* 100% option compatibility with xargs. All options for xargs can now
+  be used in GNU Parallel - even the more exotic ones.
+
+* --basefile for transferring basedata. When running jobs on remote
+  computers --basefile will transfer files before the first job is
+ run. It can be used to transfer data that remains the same for each
+ job such as scripts or lookup tables.
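+  A sketch (file and host names made up):
+  parallel --basefile lookup.tbl -S server1 'grep {} lookup.tbl' ::: foo
+  transfers lookup.tbl to server1 once before the jobs run.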
+
+* --progress shows progress. To see how many jobs are running on each
+  server, use --progress. It can be turned on even after GNU Parallel
+ is started.
+
+* --eta shows estimated time left in seconds.
+
+* --halt-on-error stops if an error occurs. GNU Parallel defaults to
+  running all jobs - even if some of them fail. With --halt-on-error
+ GNU Parallel can ignore errors, wait for the currently running jobs
+ to finish, or stop immediately when an error occurs.
+
+
+20100601
+
+* GNU Parallel now has support for running jobs on remote machines.
+
+* Intro video http://www.youtube.com/watch?v=LlXDtd_pRaY
+ http://openvideo.dailymotion.com/video/xdmqwz_gnu-parallel-script-processing-and_tech
+
+
+20100424
+
+* GNU Parallel is now official GNU software.
diff --git a/README b/README
new file mode 100644
index 0000000..b7f3738
--- /dev/null
+++ b/README
@@ -0,0 +1,146 @@
+
+ GNU Parallel
+
+ https://www.gnu.org/software/parallel/
+
+
+= Presentation of GNU Parallel =
+
+GNU Parallel is a shell tool for executing jobs in parallel using one
+or more computers. A job can be a single command or a small script
+that has to be run for each of the lines in the input. The typical
+input is a list of files, a list of hosts, a list of users, a list of
+URLs, or a list of tables. A job can also be a command that reads from
+a pipe. GNU Parallel can then split the input and pipe it into
+commands in parallel.
+
+If you use xargs and tee today you will find GNU Parallel very easy to
+use as GNU Parallel is written to have the same options as xargs. If
+you write loops in shell, you will find GNU Parallel may be able to
+replace most of the loops and make them run faster by running several
+jobs in parallel.
+
+GNU Parallel makes sure output from the commands is the same output as
+you would get had you run the commands sequentially. This makes it
+possible to use output from GNU Parallel as input for other programs.
+
+See the cheat sheet for examples that cover most of the daily usage:
+www.gnu.org/s/parallel/parallel_cheat.pdf
+
+
+= 10 seconds installation =
+
+For security reasons it is recommended you use your package manager to
+install. But if you cannot do that then you can use this 10 seconds
+installation.
+
+The 10 seconds installation will try to do a full installation; if
+that fails, a personal installation; if that fails, a minimal
+installation.
+
+ $ (wget -O - pi.dk/3 || lynx -source pi.dk/3 || curl pi.dk/3/ || \
+ fetch -o - http://pi.dk/3 ) > install.sh
+ $ sha1sum install.sh | grep 883c667e01eed62f975ad28b6d50e22a
+ 12345678 883c667e 01eed62f 975ad28b 6d50e22a
+ $ md5sum install.sh | grep cc21b4c943fd03e93ae1ae49e28573c0
+ cc21b4c9 43fd03e9 3ae1ae49 e28573c0
+ $ sha512sum install.sh | grep da012ec113b49a54e705f86d51e784ebced224fdf
+ 79945d9d 250b42a4 2067bb00 99da012e c113b49a 54e705f8 6d51e784 ebced224
+ fdff3f52 ca588d64 e75f6033 61bd543f d631f592 2f87ceb2 ab034149 6df84a35
+ $ bash install.sh
+
+This will literally install faster than reading the rest of this
+document.
+
+
+= Full installation =
+
+Full installation of GNU Parallel is as simple as:
+
+ wget https://ftpmirror.gnu.org/parallel/parallel-20240222.tar.bz2
+ wget https://ftpmirror.gnu.org/parallel/parallel-20240222.tar.bz2.sig
+ gpg parallel-20240222.tar.bz2.sig
+ bzip2 -dc parallel-20240222.tar.bz2 | tar xvf -
+ cd parallel-20240222
+ ./configure && make && sudo make install
+
+
+= Personal installation =
+
+If you are not root you can add ~/bin to your path and install in
+~/bin and ~/share:
+
+ wget https://ftpmirror.gnu.org/parallel/parallel-20240222.tar.bz2
+ wget https://ftpmirror.gnu.org/parallel/parallel-20240222.tar.bz2.sig
+ gpg parallel-20240222.tar.bz2.sig
+ bzip2 -dc parallel-20240222.tar.bz2 | tar xvf -
+ cd parallel-20240222
+ ./configure --prefix=$HOME && make && make install
+
+Or if your system lacks 'make' you can simply copy src/parallel,
+src/sem, src/niceload, and src/sql to a dir in your path.
+
+
+= Minimal installation =
+
+If you just need parallel and do not have 'make' installed (maybe the
+system is old or Microsoft Windows):
+
+ wget https://git.savannah.gnu.org/cgit/parallel.git/plain/src/parallel
+ chmod 755 parallel
+ cp parallel sem
+ mv parallel sem dir-in-your-$PATH/bin/
+
+
+= Installation on compute servers =
+
+If you are developing your script to run on a remote server that does
+not have GNU Parallel installed, but you have it installed on your
+development machine, then you can use `parallel --embed`.
+
+ parallel --embed > newscript.sh
+
+Just edit the last lines of newscript.sh and copy it to the compute
+server.
+
+
+= Test the installation =
+
+After this you should be able to do:
+
+ parallel -j0 ping -nc 3 ::: qubes-os.org gnu.org freenetproject.org
+
+This will send 3 ping packets to 3 different hosts in parallel and print
+the output when they complete.
+
+Watch the intro video for a quick introduction:
+https://www.youtube.com/playlist?list=PL284C9FF2488BC6D1
+
+Walk through the tutorial (man parallel_tutorial). Your command line
+will love you for it.
+
+When using programs that use GNU Parallel to process data for
+publication please cite:
+
+ Tange, O. (2024, February 22). GNU Parallel 20240222 ('Навальный').
+ Zenodo. https://doi.org/10.5281/zenodo.10719803
+
+Copyright (C) 2007-2022 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+
+= New versions =
+
+New versions will be released at: https://ftp.gnu.org/gnu/parallel/
+
+
+= Dependencies =
+
+GNU Parallel should work with a normal full Perl installation. However,
+if your system has split up Perl into multiple packages then these are
+the important ones:
+
+ opkg install perlbase-getopt perlbase-ipc procps-ng-ps perlbase-mime
+
+SPDX-FileCopyrightText: 2021-2022 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..8d763e6
--- /dev/null
+++ b/TODO
@@ -0,0 +1,2 @@
+screencast
+perl
diff --git a/aclocal.m4 b/aclocal.m4
new file mode 100644
index 0000000..bd8a03c
--- /dev/null
+++ b/aclocal.m4
@@ -0,0 +1,786 @@
+# generated automatically by aclocal 1.16.5 -*- Autoconf -*-
+
+# Copyright (C) 1996-2021 Free Software Foundation, Inc.
+
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
+m4_ifndef([AC_AUTOCONF_VERSION],
+ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],,
+[m4_warning([this file was generated for autoconf 2.71.
+You have another version of autoconf. It may work, but is not guaranteed to.
+If you have problems, you may need to regenerate the build system entirely.
+To do so, use the procedure documented by the package, typically 'autoreconf'.])])
+
+# Copyright (C) 2002-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_AUTOMAKE_VERSION(VERSION)
+# ----------------------------
+# Automake X.Y traces this macro to ensure aclocal.m4 has been
+# generated from the m4 files accompanying Automake X.Y.
+# (This private macro should not be called outside this file.)
+AC_DEFUN([AM_AUTOMAKE_VERSION],
+[am__api_version='1.16'
+dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
+dnl require some minimum version. Point them to the right macro.
+m4_if([$1], [1.16.5], [],
+ [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
+])
+
+# _AM_AUTOCONF_VERSION(VERSION)
+# -----------------------------
+# aclocal traces this macro to find the Autoconf version.
+# This is a private macro too. Using m4_define simplifies
+# the logic in aclocal, which can simply ignore this definition.
+m4_define([_AM_AUTOCONF_VERSION], [])
+
+# AM_SET_CURRENT_AUTOMAKE_VERSION
+# -------------------------------
+# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
+# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
+AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
+[AM_AUTOMAKE_VERSION([1.16.5])dnl
+m4_ifndef([AC_AUTOCONF_VERSION],
+ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
+
+# AM_AUX_DIR_EXPAND -*- Autoconf -*-
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
+# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to
+# '$srcdir', '$srcdir/..', or '$srcdir/../..'.
+#
+# Of course, Automake must honor this variable whenever it calls a
+# tool from the auxiliary directory. The problem is that $srcdir (and
+# therefore $ac_aux_dir as well) can be either absolute or relative,
+# depending on how configure is run. This is pretty annoying, since
+# it makes $ac_aux_dir quite unusable in subdirectories: in the top
+# source directory, any form will work fine, but in subdirectories a
+# relative path needs to be adjusted first.
+#
+# $ac_aux_dir/missing
+# fails when called from a subdirectory if $ac_aux_dir is relative
+# $top_srcdir/$ac_aux_dir/missing
+# fails if $ac_aux_dir is absolute,
+# fails when called from a subdirectory in a VPATH build with
+# a relative $ac_aux_dir
+#
+# The reason of the latter failure is that $top_srcdir and $ac_aux_dir
+# are both prefixed by $srcdir. In an in-source build this is usually
+# harmless because $srcdir is '.', but things will break when you
+# start a VPATH build or use an absolute $srcdir.
+#
+# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
+# iff we strip the leading $srcdir from $ac_aux_dir. That would be:
+# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"`
+# and then we would define $MISSING as
+# MISSING="\${SHELL} $am_aux_dir/missing"
+# This will work as long as MISSING is not called from configure, because
+# unfortunately $(top_srcdir) has no meaning in configure.
+# However there are other variables, like CC, which are often used in
+# configure, and could therefore not use this "fixed" $ac_aux_dir.
+#
+# Another solution, used here, is to always expand $ac_aux_dir to an
+# absolute PATH. The drawback is that using absolute paths prevents a
+# configured tree from being moved without reconfiguration.
+
+AC_DEFUN([AM_AUX_DIR_EXPAND],
+[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
+# Expand $ac_aux_dir to an absolute path.
+am_aux_dir=`cd "$ac_aux_dir" && pwd`
+])
+
+# AM_CONDITIONAL -*- Autoconf -*-
+
+# Copyright (C) 1997-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_CONDITIONAL(NAME, SHELL-CONDITION)
+# -------------------------------------
+# Define a conditional.
+AC_DEFUN([AM_CONDITIONAL],
+[AC_PREREQ([2.52])dnl
+ m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])],
+ [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
+AC_SUBST([$1_TRUE])dnl
+AC_SUBST([$1_FALSE])dnl
+_AM_SUBST_NOTMAKE([$1_TRUE])dnl
+_AM_SUBST_NOTMAKE([$1_FALSE])dnl
+m4_define([_AM_COND_VALUE_$1], [$2])dnl
+if $2; then
+ $1_TRUE=
+ $1_FALSE='#'
+else
+ $1_TRUE='#'
+ $1_FALSE=
+fi
+AC_CONFIG_COMMANDS_PRE(
+[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then
+ AC_MSG_ERROR([[conditional "$1" was never defined.
+Usually this means the macro was only invoked conditionally.]])
+fi])])
+
+# Do all the work for Automake. -*- Autoconf -*-
+
+# Copyright (C) 1996-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This macro actually does too much. Some checks are only needed if
+# your package does certain things. But this isn't really a big deal.
+
+dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O.
+m4_define([AC_PROG_CC],
+m4_defn([AC_PROG_CC])
+[_AM_PROG_CC_C_O
+])
+
+# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
+# AM_INIT_AUTOMAKE([OPTIONS])
+# -----------------------------------------------
+# The call with PACKAGE and VERSION arguments is the old style
+# call (pre autoconf-2.50), which is being phased out. PACKAGE
+# and VERSION should now be passed to AC_INIT and removed from
+# the call to AM_INIT_AUTOMAKE.
+# We support both call styles for the transition. After
+# the next Automake release, Autoconf can make the AC_INIT
+# arguments mandatory, and then we can depend on a new Autoconf
+# release and drop the old call support.
+AC_DEFUN([AM_INIT_AUTOMAKE],
+[AC_PREREQ([2.65])dnl
+m4_ifdef([_$0_ALREADY_INIT],
+ [m4_fatal([$0 expanded multiple times
+]m4_defn([_$0_ALREADY_INIT]))],
+ [m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl
+dnl Autoconf wants to disallow AM_ names. We explicitly allow
+dnl the ones we care about.
+m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
+AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl
+AC_REQUIRE([AC_PROG_INSTALL])dnl
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+AC_SUBST([CYGPATH_W])
+
+# Define the identity of the package.
+dnl Distinguish between old-style and new-style calls.
+m4_ifval([$2],
+[AC_DIAGNOSE([obsolete],
+ [$0: two- and three-arguments forms are deprecated.])
+m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
+ AC_SUBST([PACKAGE], [$1])dnl
+ AC_SUBST([VERSION], [$2])],
+[_AM_SET_OPTIONS([$1])dnl
+dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT.
+m4_if(
+ m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]),
+ [ok:ok],,
+ [m4_fatal([AC_INIT should be called with package and version arguments])])dnl
+ AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
+ AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
+
+_AM_IF_OPTION([no-define],,
+[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package])
+ AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl
+
+# Some tools Automake needs.
+AC_REQUIRE([AM_SANITY_CHECK])dnl
+AC_REQUIRE([AC_ARG_PROGRAM])dnl
+AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}])
+AM_MISSING_PROG([AUTOCONF], [autoconf])
+AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}])
+AM_MISSING_PROG([AUTOHEADER], [autoheader])
+AM_MISSING_PROG([MAKEINFO], [makeinfo])
+AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
+AC_REQUIRE([AC_PROG_MKDIR_P])dnl
+# For better backward compatibility. To be removed once Automake 1.9.x
+# dies out for good. For more background, see:
+# <https://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <https://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
+# We need awk for the "check" target (and possibly the TAP driver). The
+# system "awk" is bad on some platforms.
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([AC_PROG_MAKE_SET])dnl
+AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
+ [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])],
+ [_AM_PROG_TAR([v7])])])
+_AM_IF_OPTION([no-dependencies],,
+[AC_PROVIDE_IFELSE([AC_PROG_CC],
+ [_AM_DEPENDENCIES([CC])],
+ [m4_define([AC_PROG_CC],
+ m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_CXX],
+ [_AM_DEPENDENCIES([CXX])],
+ [m4_define([AC_PROG_CXX],
+ m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJC],
+ [_AM_DEPENDENCIES([OBJC])],
+ [m4_define([AC_PROG_OBJC],
+ m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
+ [_AM_DEPENDENCIES([OBJCXX])],
+ [m4_define([AC_PROG_OBJCXX],
+ m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl
+])
+# Variables for tags utilities; see am/tags.am
+if test -z "$CTAGS"; then
+ CTAGS=ctags
+fi
+AC_SUBST([CTAGS])
+if test -z "$ETAGS"; then
+ ETAGS=etags
+fi
+AC_SUBST([ETAGS])
+if test -z "$CSCOPE"; then
+ CSCOPE=cscope
+fi
+AC_SUBST([CSCOPE])
+
+AC_REQUIRE([AM_SILENT_RULES])dnl
+dnl The testsuite driver may need to know about EXEEXT, so add the
+dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This
+dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below.
+AC_CONFIG_COMMANDS_PRE(dnl
+[m4_provide_if([_AM_COMPILER_EXEEXT],
+ [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <https://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ AC_MSG_ERROR([Your 'rm' program is bad, sorry.])
+ fi
+fi
+dnl The trailing newline in this macro's definition is deliberate, for
+dnl backward compatibility and to allow trailing 'dnl'-style comments
+dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841.
+])
+
+dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
+dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
+dnl mangled by Autoconf and run in a shell conditional statement.
+m4_define([_AC_COMPILER_EXEEXT],
+m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
+
+# When config.status generates a header, we must update the stamp-h file.
+# This file resides in the same directory as the config header
+# that is generated. The stamp files are numbered to have different names.
+
+# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the
+# loop where config.status creates the headers, so we can generate
+# our stamp files there.
+AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK],
+[# Compute $1's index in $config_headers.
+_am_arg=$1
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_INSTALL_SH
+# ------------------
+# Define $install_sh.
+AC_DEFUN([AM_PROG_INSTALL_SH],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+if test x"${install_sh+set}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+AC_SUBST([install_sh])])
+
+# Copyright (C) 2003-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# Check whether the underlying file-system supports filenames
+# with a leading dot. For instance MS-DOS doesn't.
+AC_DEFUN([AM_SET_LEADING_DOT],
+[rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+AC_SUBST([am__leading_dot])])
+
+# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
+
+# Copyright (C) 1997-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_MISSING_PROG(NAME, PROGRAM)
+# ------------------------------
+AC_DEFUN([AM_MISSING_PROG],
+[AC_REQUIRE([AM_MISSING_HAS_RUN])
+$1=${$1-"${am_missing_run}$2"}
+AC_SUBST($1)])
+
+# AM_MISSING_HAS_RUN
+# ------------------
+# Define MISSING if not defined so far and test if it is modern enough.
+# If it is, set am_missing_run to use it, otherwise, to nothing.
+AC_DEFUN([AM_MISSING_HAS_RUN],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([missing])dnl
+if test x"${MISSING+set}" != xset; then
+ MISSING="\${SHELL} '$am_aux_dir/missing'"
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --is-lightweight"; then
+ am_missing_run="$MISSING "
+else
+ am_missing_run=
+ AC_MSG_WARN(['missing' script is too old or missing])
+fi
+])
+
+# Helper functions for option handling. -*- Autoconf -*-
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_MANGLE_OPTION(NAME)
+# -----------------------
+AC_DEFUN([_AM_MANGLE_OPTION],
+[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])])
+
+# _AM_SET_OPTION(NAME)
+# --------------------
+# Set option NAME. Presently that only means defining a flag for this option.
+AC_DEFUN([_AM_SET_OPTION],
+[m4_define(_AM_MANGLE_OPTION([$1]), [1])])
+
+# _AM_SET_OPTIONS(OPTIONS)
+# ------------------------
+# OPTIONS is a space-separated list of Automake options.
+AC_DEFUN([_AM_SET_OPTIONS],
+[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])])
+
+# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET])
+# -------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+AC_DEFUN([_AM_IF_OPTION],
+[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
+
+# Check to make sure that the build environment is sane. -*- Autoconf -*-
+
+# Copyright (C) 1996-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_SANITY_CHECK
+# ---------------
+AC_DEFUN([AM_SANITY_CHECK],
+[AC_MSG_CHECKING([whether build environment is sane])
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[[\\\"\#\$\&\'\`$am_lf]]*)
+ AC_MSG_ERROR([unsafe absolute working directory name]);;
+esac
+case $srcdir in
+ *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*)
+ AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);;
+esac
+
+# Do 'set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ am_has_slept=no
+ for am_try in 1 2; do
+ echo "timestamp, slept: $am_has_slept" > conftest.file
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$[*]" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ if test "$[*]" != "X $srcdir/configure conftest.file" \
+ && test "$[*]" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
+ alias in your environment])
+ fi
+ if test "$[2]" = conftest.file || test $am_try -eq 2; then
+ break
+ fi
+ # Just in case.
+ sleep 1
+ am_has_slept=yes
+ done
+ test "$[2]" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ AC_MSG_ERROR([newly created file is older than distributed files!
+Check your system clock])
+fi
+AC_MSG_RESULT([yes])
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+ ( sleep 1 ) &
+ am_sleep_pid=$!
+fi
+AC_CONFIG_COMMANDS_PRE(
+ [AC_MSG_CHECKING([that generated files are newer than configure])
+ if test -n "$am_sleep_pid"; then
+ # Hide warnings about reused PIDs.
+ wait $am_sleep_pid 2>/dev/null
+ fi
+ AC_MSG_RESULT([done])])
+rm -f conftest.file
+])
+
+# Copyright (C) 2009-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_SILENT_RULES([DEFAULT])
+# --------------------------
+# Enable less verbose build rules; with the default set to DEFAULT
+# ("yes" being less verbose, "no" or empty being verbose).
+AC_DEFUN([AM_SILENT_RULES],
+[AC_ARG_ENABLE([silent-rules], [dnl
+AS_HELP_STRING(
+ [--enable-silent-rules],
+ [less verbose build output (undo: "make V=1")])
+AS_HELP_STRING(
+ [--disable-silent-rules],
+ [verbose build output (undo: "make V=0")])dnl
+])
+case $enable_silent_rules in @%:@ (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);;
+esac
+dnl
+dnl A few 'make' implementations (e.g., NonStop OS and NextStep)
+dnl do not support nested variable expansions.
+dnl See automake bug#9928 and bug#10237.
+am_make=${MAKE-make}
+AC_CACHE_CHECK([whether $am_make supports nested variables],
+ [am_cv_make_support_nested_variables],
+ [if AS_ECHO([['TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+ @$(TRUE)
+.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi])
+if test $am_cv_make_support_nested_variables = yes; then
+ dnl Using '$V' instead of '$(V)' breaks IRIX make.
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AC_SUBST([AM_V])dnl
+AM_SUBST_NOTMAKE([AM_V])dnl
+AC_SUBST([AM_DEFAULT_V])dnl
+AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl
+AC_SUBST([AM_DEFAULT_VERBOSITY])dnl
+AM_BACKSLASH='\'
+AC_SUBST([AM_BACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
+])
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_INSTALL_STRIP
+# ---------------------
+# One issue with vendor 'install' (even GNU) is that you can't
+# specify the program used to strip binaries. This is especially
+# annoying in cross-compiling environments, where the build's strip
+# is unlikely to handle the host's binaries.
+# Fortunately install-sh will honor a STRIPPROG variable, so we
+# always use install-sh in "make install-strip", and initialize
+# STRIPPROG with the value of the STRIP variable (set by the user).
+AC_DEFUN([AM_PROG_INSTALL_STRIP],
+[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+# Installed binaries are usually stripped using 'strip' when the user
+# run "make install-strip". However 'strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the 'STRIP' environment variable to overrule this program.
+dnl Don't test for $cross_compiling = yes, because it might be 'maybe'.
+if test "$cross_compiling" != no; then
+ AC_CHECK_TOOL([STRIP], [strip], :)
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+AC_SUBST([INSTALL_STRIP_PROGRAM])])
+
+# Copyright (C) 2006-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_SUBST_NOTMAKE(VARIABLE)
+# ---------------------------
+# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
+# This macro is traced by Automake.
+AC_DEFUN([_AM_SUBST_NOTMAKE])
+
+# AM_SUBST_NOTMAKE(VARIABLE)
+# --------------------------
+# Public sister of _AM_SUBST_NOTMAKE.
+AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
+
+# Check how to create a tarball. -*- Autoconf -*-
+
+# Copyright (C) 2004-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_PROG_TAR(FORMAT)
+# --------------------
+# Check how to create a tarball in format FORMAT.
+# FORMAT should be one of 'v7', 'ustar', or 'pax'.
+#
+# Substitute a variable $(am__tar) that is a command
+# writing to stdout a FORMAT-tarball containing the directory
+# $tardir.
+# tardir=directory && $(am__tar) > result.tar
+#
+# Substitute a variable $(am__untar) that extracts such
+# a tarball read from stdin.
+# $(am__untar) < result.tar
+#
+AC_DEFUN([_AM_PROG_TAR],
+[# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AC_SUBST([AMTAR], ['$${TAR-tar}'])
+
+# We'll loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
+
+m4_if([$1], [v7],
+ [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
+
+ [m4_case([$1],
+ [ustar],
+ [# The POSIX 1988 'ustar' format is defined with fixed-size fields.
+ # There is notably a 21 bits limit for the UID and the GID. In fact,
+ # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343
+ # and bug#13588).
+ am_max_uid=2097151 # 2^21 - 1
+ am_max_gid=$am_max_uid
+ # The $UID and $GID variables are not portable, so we need to resort
+ # to the POSIX-mandated id(1) utility. Errors in the 'id' calls
+ # below are definitely unexpected, so allow the users to see them
+ # (that is, avoid stderr redirection).
+ am_uid=`id -u || echo unknown`
+ am_gid=`id -g || echo unknown`
+ AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format])
+ if test $am_uid -le $am_max_uid; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ _am_tools=none
+ fi
+ AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format])
+ if test $am_gid -le $am_max_gid; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ _am_tools=none
+ fi],
+
+ [pax],
+ [],
+
+ [m4_fatal([Unknown tar format])])
+
+ AC_MSG_CHECKING([how to create a $1 tar archive])
+
+ # Go ahead even if we have the value already cached. We do so because we
+ # need to set the values for the 'am__tar' and 'am__untar' variables.
+ _am_tools=${am_cv_prog_tar_$1-$_am_tools}
+
+ for _am_tool in $_am_tools; do
+ case $_am_tool in
+ gnutar)
+ for _am_tar in tar gnutar gtar; do
+ AM_RUN_LOG([$_am_tar --version]) && break
+ done
+ am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
+ am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
+ am__untar="$_am_tar -xf -"
+ ;;
+ plaintar)
+ # Must skip GNU tar: if it does not support --format= it doesn't create
+ # ustar tarball either.
+ (tar --version) >/dev/null 2>&1 && continue
+ am__tar='tar chf - "$$tardir"'
+ am__tar_='tar chf - "$tardir"'
+ am__untar='tar xf -'
+ ;;
+ pax)
+ am__tar='pax -L -x $1 -w "$$tardir"'
+ am__tar_='pax -L -x $1 -w "$tardir"'
+ am__untar='pax -r'
+ ;;
+ cpio)
+ am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
+ am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
+ am__untar='cpio -i -H $1 -d'
+ ;;
+ none)
+ am__tar=false
+ am__tar_=false
+ am__untar=false
+ ;;
+ esac
+
+ # If the value was cached, stop now. We just wanted to have am__tar
+ # and am__untar set.
+ test -n "${am_cv_prog_tar_$1}" && break
+
+ # tar/untar a dummy directory, and stop if the command works.
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ echo GrepMe > conftest.dir/file
+ AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+ rm -rf conftest.dir
+ if test -s conftest.tar; then
+ AM_RUN_LOG([$am__untar <conftest.tar])
+ AM_RUN_LOG([cat conftest.dir/file])
+ grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+ fi
+ done
+ rm -rf conftest.dir
+
+ AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+ AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+
+AC_SUBST([am__tar])
+AC_SUBST([am__untar])
+]) # _AM_PROG_TAR
+
diff --git a/config.h.in b/config.h.in
new file mode 100644
index 0000000..dd3b02c
--- /dev/null
+++ b/config.h.in
@@ -0,0 +1,25 @@
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the home page for this package. */
+#undef PACKAGE_URL
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Version number of package */
+#undef VERSION
diff --git a/configure b/configure
new file mode 100755
index 0000000..89e93f5
--- /dev/null
+++ b/configure
@@ -0,0 +1,3846 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.71 for parallel 20240222.
+#
+# Report bugs to <bug-parallel@gnu.org>.
+#
+#
+# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,
+# Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+as_nop=:
+if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
+then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else $as_nop
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+
+# Reset variables that may have inherited troublesome values from
+# the environment.
+
+# IFS needs to be set, to space, tab, and newline, in precisely that order.
+# (If _AS_PATH_WALK were called with IFS unset, it would have the
+# side effect of setting IFS to empty, thus disabling word splitting.)
+# Quoting is to prevent editors from complaining about space-tab.
+as_nl='
+'
+export as_nl
+IFS=" "" $as_nl"
+
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# Ensure predictable behavior from utilities with locale-dependent output.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# We cannot yet rely on "unset" to work, but we need these variables
+# to be unset--not just set to an empty or harmless value--now, to
+# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct
+# also avoids known problems related to "unset" and subshell syntax
+# in other old shells (e.g. bash 2.01 and pdksh 5.2.14).
+for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH
+do eval test \${$as_var+y} \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+
+# Ensure that fds 0, 1, and 2 are open.
+if (exec 3>&0) 2>/dev/null; then :; else exec 0</dev/null; fi
+if (exec 3>&1) 2>/dev/null; then :; else exec 1>/dev/null; fi
+if (exec 3>&2) ; then :; else exec 2>/dev/null; fi
+
+# The user is always right.
+if ${PATH_SEPARATOR+false} :; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+ test -r "$as_dir$0" && as_myself=$as_dir$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+
+# Use a proper internal environment variable to ensure we don't fall
+ # into an infinite loop, continuously re-executing ourselves.
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+ _as_can_reexec=no; export _as_can_reexec;
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
+ fi
+ # We don't want this to propagate to other subprocesses.
+ { _as_can_reexec=; unset _as_can_reexec;}
+if test "x$CONFIG_SHELL" = x; then
+ as_bourne_compatible="as_nop=:
+if test \${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
+then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '\${1+\"\$@\"}'='\"\$@\"'
+ setopt NO_GLOB_SUBST
+else \$as_nop
+ case \`(set -o) 2>/dev/null\` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+"
+ as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" )
+then :
+
+else \$as_nop
+ exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1
+blah=\$(echo \$(echo blah))
+test x\"\$blah\" = xblah || exit 1
+test -x / || exit 1"
+ as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+ as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+ eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+ test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1"
+ if (eval "$as_required") 2>/dev/null
+then :
+ as_have_required=yes
+else $as_nop
+ as_have_required=no
+fi
+ if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null
+then :
+
+else $as_nop
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+ as_found=:
+ case $as_dir in #(
+ /*)
+ for as_base in sh bash ksh sh5; do
+ # Try only shells that exist, to save several forks.
+ as_shell=$as_dir$as_base
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ as_run=a "$as_shell" -c "$as_bourne_compatible""$as_required" 2>/dev/null
+then :
+ CONFIG_SHELL=$as_shell as_have_required=yes
+ if as_run=a "$as_shell" -c "$as_bourne_compatible""$as_suggested" 2>/dev/null
+then :
+ break 2
+fi
+fi
+ done;;
+ esac
+ as_found=false
+done
+IFS=$as_save_IFS
+if $as_found
+then :
+
+else $as_nop
+ if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+ as_run=a "$SHELL" -c "$as_bourne_compatible""$as_required" 2>/dev/null
+then :
+ CONFIG_SHELL=$SHELL as_have_required=yes
+fi
+fi
+
+
+ if test "x$CONFIG_SHELL" != x
+then :
+ export CONFIG_SHELL
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
+fi
+
+ if test x$as_have_required = xno
+then :
+ printf "%s\n" "$0: This script requires a shell more modern than all"
+ printf "%s\n" "$0: the shells that I found on your system."
+ if test ${ZSH_VERSION+y} ; then
+ printf "%s\n" "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+ printf "%s\n" "$0: be upgraded to zsh 4.3.4 or later."
+ else
+ printf "%s\n" "$0: Please tell bug-autoconf@gnu.org and
+$0: bug-parallel@gnu.org about your system, including any
+$0: error possibly output before this message. Then install
+$0: a modern shell, or manually run the script under such a
+$0: shell if you do have one."
+ fi
+ exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
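Setting $? by returning from a function avoids the fork a subshell would cost; a quick illustration, not part of the generated script:

    as_fn_set_status 3
    echo "status was $?"    # prints: status was 3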
+# as_fn_nop
+# ---------
+# Do nothing but, unlike ":", preserve the value of $?.
+as_fn_nop ()
+{
+ return $?
+}
+as_nop=as_fn_nop
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null
+then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else $as_nop
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
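On shells with +=, repeated appends grow the string in roughly linear time instead of re-copying it on every iteration. An illustrative loop with hypothetical names:

    flags=
    for f in -a -b -c; do
      as_fn_append flags " $f"    # same result with either implementation
    done
    echo "$flags"                 # -> ' -a -b -c'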
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null
+then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else $as_nop
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
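Because the fallback runs expr, callers must pass operands that both $(( )) and expr accept, which is why * has to arrive escaped. For example:

    as_fn_arith 6 \* 7    # expr sees the words 6 * 7; $(( )) sees 6 * 7
    echo "$as_val"        # prints 42 with either implementation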
+# as_fn_nop
+# ---------
+# Do nothing but, unlike ":", preserve the value of $?.
+as_fn_nop ()
+{
+ return $?
+}
+as_nop=as_fn_nop
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ printf "%s\n" "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
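A typical call passes the current line number and fd 5, which this script points at config.log; the message below is illustrative, the signature is the one documented above:

    # Log to config.log (fd 5) and stderr, then exit 77.
    as_fn_error 77 "cannot find a usable tar" "$LINENO" 5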
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+ as_lineno_1=$LINENO as_lineno_1a=$LINENO
+ as_lineno_2=$LINENO as_lineno_2a=$LINENO
+ eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+ test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+ # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { printf "%s\n" "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+ # already done that, so ensure we don't try to do so again and fall
+ # in an infinite loop. This has already happened in practice.
+ _as_can_reexec=no; export _as_can_reexec
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+# Determine whether it's possible to make 'echo' print without a newline.
+# These variables are no longer used directly by Autoconf, but are AC_SUBSTed
+# for compatibility with existing Makefiles.
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
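The three variables let legacy Makefiles print a prompt without a trailing newline on either echo dialect; the classic idiom (kept only for backward compatibility, new code uses printf) looks like:

    echo $ECHO_N "checking for widgets...$ECHO_C"    # illustrative text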
+# For backward compatibility with old third-party macros, we provide
+# the shell variables $as_echo and $as_echo_n. New code should use
+# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively.
+as_echo='printf %s\n'
+as_echo_n='printf %s'
+
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
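Whatever the probe settles on, later code (config.status in particular) invokes $as_ln_s uniformly; a sketch with an illustrative target:

    $as_ln_s "$srcdir/missing" missing    # symlink, hard link, or cp -pR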
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
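These sed programs are used further down to turn arbitrary strings into identifiers, piping through the unquoted variable so the leading eval re-parses the embedded quotes. For instance, a header name becomes a CPP symbol:

    # 'sys/socket.h' -> 'SYS_SOCKET_H' (illustrative input)
    printf "%s\n" "sys/socket.h" | $as_tr_cpp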
+
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME='parallel'
+PACKAGE_TARNAME='parallel'
+PACKAGE_VERSION='20240222'
+PACKAGE_STRING='parallel 20240222'
+PACKAGE_BUGREPORT='bug-parallel@gnu.org'
+PACKAGE_URL=''
+
+ac_subst_vars='LTLIBOBJS
+LIBOBJS
+LN_S
+DOCUMENTATION_FALSE
+DOCUMENTATION_TRUE
+AM_BACKSLASH
+AM_DEFAULT_VERBOSITY
+AM_DEFAULT_V
+AM_V
+CSCOPE
+ETAGS
+CTAGS
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+runstatedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_silent_rules
+enable_documentation
+'
+ ac_precious_vars='build_alias
+host_alias
+target_alias'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+runstatedir='${localstatedir}/run'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *=) ac_optarg= ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: \`$ac_useropt'"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: \`$ac_useropt'"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=\$ac_optarg ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -runstatedir | --runstatedir | --runstatedi | --runstated \
+ | --runstate | --runstat | --runsta | --runst | --runs \
+ | --run | --ru | --r)
+ ac_prev=runstatedir ;;
+ -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
+ | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
+ | --run=* | --ru=* | --r=*)
+ runstatedir=$ac_optarg ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: \`$ac_useropt'"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: \`$ac_useropt'"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ case $ac_envvar in #(
+ '' | [0-9]* | *[!_$as_cr_alnum]* )
+ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+ esac
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ printf "%s\n" "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ printf "%s\n" "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ as_fn_error $? "missing argument to $ac_option"
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+ case $enable_option_checking in
+ no) ;;
+ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+ *) printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+ esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir runstatedir
+do
+ eval ac_val=\$$ac_var
+ # Remove trailing slashes.
+ case $ac_val in
+ */ )
+ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+ eval $ac_var=\$ac_val;;
+ esac
+ # Be sure to have absolute directory names.
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ as_fn_error $? "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ as_fn_error $? "pwd does not report name of working directory"
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_myself" : 'X\(//\)[^/]' \| \
+ X"$as_myself" : 'X\(//\)$' \| \
+ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$as_myself" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures parallel 20240222 to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking ...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/parallel]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+
+Program names:
+ --program-prefix=PREFIX prepend PREFIX to installed program names
+ --program-suffix=SUFFIX append SUFFIX to installed program names
+ --program-transform-name=PROGRAM run sed PROGRAM on installed program names
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+ case $ac_init_help in
+ short | recursive ) echo "Configuration of parallel 20240222:";;
+ esac
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-option-checking ignore unrecognized --enable/--with options
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-silent-rules less verbose build output (undo: "make V=1")
+ --disable-silent-rules verbose build output (undo: "make V=0")
+ --disable-documentation Omit building and installing the documentation.
+ (default=no)
+
+Report bugs to <bug-parallel@gnu.org>.
+_ACEOF
+ac_status=$?
+fi
+
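Tying the option parser and the help text together, a representative invocation for this package (paths illustrative) would be:

    # Install under $HOME and skip building the man/HTML documentation.
    ./configure --prefix="$HOME" --disable-documentation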
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" ||
+ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+ continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+ # Check for configure.gnu first; this name is used for a wrapper for
+ # Metaconfig's "Configure" on case-insensitive file systems.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ printf "%s\n" "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+parallel configure 20240222
+generated by GNU Autoconf 2.71
+
+Copyright (C) 2021 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+ac_configure_args_raw=
+for ac_arg
+do
+ case $ac_arg in
+ *\'*)
+ ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ as_fn_append ac_configure_args_raw " '$ac_arg'"
+done
+
+case $ac_configure_args_raw in
+ *$as_nl*)
+ ac_safe_unquote= ;;
+ *)
+    ac_unsafe_z='|&;<>()$`\\"*?[ ''	' # This string ends in space, tab.
+ ac_unsafe_a="$ac_unsafe_z#~"
+ ac_safe_unquote="s/ '\\([^$ac_unsafe_a][^$ac_unsafe_z]*\\)'/ \\1/g"
+ ac_configure_args_raw=` printf "%s\n" "$ac_configure_args_raw" | sed "$ac_safe_unquote"`;;
+esac
+
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by parallel $as_me 20240222, which was
+generated by GNU Autoconf 2.71. Invocation command line was
+
+ $ $0$ac_configure_args_raw
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+ printf "%s\n" "PATH: $as_dir"
+ done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+ 2)
+ as_fn_append ac_configure_args1 " '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ as_fn_append ac_configure_args " '$ac_arg'"
+ ;;
+ esac
+ done
+done
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+ # Sanitize IFS.
+ IFS=" "" $as_nl"
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ printf "%s\n" "## ---------------- ##
+## Cache variables. ##
+## ---------------- ##"
+ echo
+ # The following way of writing the cache mishandles newlines in values,
+(
+ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+ (set) 2>&1 |
+ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+ ;; #(
+ *)
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+)
+ echo
+
+ printf "%s\n" "## ----------------- ##
+## Output variables. ##
+## ----------------- ##"
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ printf "%s\n" "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ printf "%s\n" "## ------------------- ##
+## File substitutions. ##
+## ------------------- ##"
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ printf "%s\n" "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ printf "%s\n" "## ----------- ##
+## confdefs.h. ##
+## ----------- ##"
+ echo
+ cat confdefs.h
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ printf "%s\n" "$as_me: caught signal $ac_signal"
+ printf "%s\n" "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core core.conftest.* &&
+ rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+printf "%s\n" "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+
+printf "%s\n" "#define PACKAGE_NAME \"$PACKAGE_NAME\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_TARNAME \"$PACKAGE_TARNAME\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_VERSION \"$PACKAGE_VERSION\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_STRING \"$PACKAGE_STRING\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_BUGREPORT \"$PACKAGE_BUGREPORT\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_URL \"$PACKAGE_URL\"" >>confdefs.h
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+if test -n "$CONFIG_SITE"; then
+ ac_site_files="$CONFIG_SITE"
+elif test "x$prefix" != xNONE; then
+ ac_site_files="$prefix/share/config.site $prefix/etc/config.site"
+else
+ ac_site_files="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
+fi
+
+for ac_site_file in $ac_site_files
+do
+ case $ac_site_file in #(
+ */*) :
+ ;; #(
+ *) :
+ ac_site_file=./$ac_site_file ;;
+esac
+ if test -f "$ac_site_file" && test -r "$ac_site_file"; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+printf "%s\n" "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file" \
+ || { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special files
+ # actually), so we avoid doing that. DJGPP emulates it as a regular file.
+ if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+printf "%s\n" "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+printf "%s\n" "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
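Since cache_file defaults to /dev/null, caching is opt-in; passing -C makes repeated runs reuse earlier test results:

    ./configure -C    # first run writes config.cache
    ./configure -C    # later runs answer '(cached)' and finish faster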
+
+# Auxiliary files required by this configure script.
+ac_aux_files="missing install-sh"
+
+# Locations in which to look for auxiliary files.
+ac_aux_dir_candidates="${srcdir}${PATH_SEPARATOR}${srcdir}/..${PATH_SEPARATOR}${srcdir}/../.."
+
+# Search for a directory containing all of the required auxiliary files,
+# $ac_aux_files, from the $PATH-style list $ac_aux_dir_candidates.
+# If we don't find one directory that contains all the files we need,
+# we report the set of missing files from the *first* directory in
+# $ac_aux_dir_candidates and give up.
+ac_missing_aux_files=""
+ac_first_candidate=:
+printf "%s\n" "$as_me:${as_lineno-$LINENO}: looking for aux files: $ac_aux_files" >&5
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in $ac_aux_dir_candidates
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+ as_found=:
+
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: trying $as_dir" >&5
+ ac_aux_dir_found=yes
+ ac_install_sh=
+ for ac_aux in $ac_aux_files
+ do
+ # As a special case, if "install-sh" is required, that requirement
+ # can be satisfied by any of "install-sh", "install.sh", or "shtool",
+ # and $ac_install_sh is set appropriately for whichever one is found.
+ if test x"$ac_aux" = x"install-sh"
+ then
+ if test -f "${as_dir}install-sh"; then
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install-sh found" >&5
+ ac_install_sh="${as_dir}install-sh -c"
+ elif test -f "${as_dir}install.sh"; then
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install.sh found" >&5
+ ac_install_sh="${as_dir}install.sh -c"
+ elif test -f "${as_dir}shtool"; then
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}shtool found" >&5
+ ac_install_sh="${as_dir}shtool install -c"
+ else
+ ac_aux_dir_found=no
+ if $ac_first_candidate; then
+ ac_missing_aux_files="${ac_missing_aux_files} install-sh"
+ else
+ break
+ fi
+ fi
+ else
+ if test -f "${as_dir}${ac_aux}"; then
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}${ac_aux} found" >&5
+ else
+ ac_aux_dir_found=no
+ if $ac_first_candidate; then
+ ac_missing_aux_files="${ac_missing_aux_files} ${ac_aux}"
+ else
+ break
+ fi
+ fi
+ fi
+ done
+ if test "$ac_aux_dir_found" = yes; then
+ ac_aux_dir="$as_dir"
+ break
+ fi
+ ac_first_candidate=false
+
+ as_found=false
+done
+IFS=$as_save_IFS
+if $as_found
+then :
+
+else $as_nop
+ as_fn_error $? "cannot find required auxiliary files:$ac_missing_aux_files" "$LINENO" 5
+fi
+
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+if test -f "${ac_aux_dir}config.guess"; then
+ ac_config_guess="$SHELL ${ac_aux_dir}config.guess"
+fi
+if test -f "${ac_aux_dir}config.sub"; then
+ ac_config_sub="$SHELL ${ac_aux_dir}config.sub"
+fi
+if test -f "$ac_aux_dir/configure"; then
+ ac_configure="$SHELL ${ac_aux_dir}configure"
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val=\$ac_cv_env_${ac_var}_value
+ eval ac_new_val=\$ac_env_${ac_var}_value
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+printf "%s\n" "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+printf "%s\n" "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ # differences in whitespace do not lead to failure.
+ ac_old_val_w=`echo x $ac_old_val`
+ ac_new_val_w=`echo x $ac_new_val`
+ if test "$ac_old_val_w" != "$ac_new_val_w"; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+printf "%s\n" "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ ac_cache_corrupted=:
+ else
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+printf "%s\n" "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+ eval $ac_var=\$ac_old_val
+ fi
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5
+printf "%s\n" "$as_me: former value: \`$ac_old_val'" >&2;}
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5
+printf "%s\n" "$as_me: current value: \`$ac_new_val'" >&2;}
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *\'*) ac_arg=$ac_var=`printf "%s\n" "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+printf "%s\n" "$as_me: error: changes in the environment can compromise the build" >&2;}
+ as_fn_error $? "run \`${MAKE-make} distclean' and/or \`rm $cache_file'
+ and start over" "$LINENO" 5
+fi
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+am__api_version='1.16'
+
+
+
+ # Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
+printf %s "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if test ${ac_cv_path_install+y}
+then :
+ printf %s "(cached) " >&6
+else $as_nop
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+  # Account for the fact that we put trailing slashes in our PATH walk.
+case $as_dir in #((
+ ./ | /[cC]/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext"; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ rm -rf conftest.one conftest.two conftest.dir
+ echo one > conftest.one
+ echo two > conftest.two
+ mkdir conftest.dir
+ if "$as_dir$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir/" &&
+ test -s conftest.one && test -s conftest.two &&
+ test -s conftest.dir/conftest.one &&
+ test -s conftest.dir/conftest.two
+ then
+ ac_cv_path_install="$as_dir$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+
+ done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+ if test ${ac_cv_path_install+y}; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+printf "%s\n" "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
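+# These reach the Makefiles through @INSTALL@-style substitutions. A sketch
+# of how a generated rule might use them (the paths are illustrative only):
+#
+#     $(INSTALL_DATA) parallel.1 $(DESTDIR)$(mandir)/man1/parallel.1
+#
+# INSTALL_DATA carries "-m 644" so data files are not installed executable.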
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
+printf %s "checking whether build environment is sane... " >&6; }
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[\\\"\#\$\&\'\`$am_lf]*)
+ as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;;
+esac
+case $srcdir in
+ *[\\\"\#\$\&\'\`$am_lf\ \ ]*)
+ as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;;
+esac
+
+# Do 'set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ am_has_slept=no
+ for am_try in 1 2; do
+ echo "timestamp, slept: $am_has_slept" > conftest.file
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ if test "$*" != "X $srcdir/configure conftest.file" \
+ && test "$*" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ as_fn_error $? "ls -t appears to fail. Make sure there is not a broken
+ alias in your environment" "$LINENO" 5
+ fi
+ if test "$2" = conftest.file || test $am_try -eq 2; then
+ break
+ fi
+ # Just in case.
+ sleep 1
+ am_has_slept=yes
+ done
+ test "$2" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ as_fn_error $? "newly created file is older than distributed files!
+Check your system clock" "$LINENO" 5
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+printf "%s\n" "yes" >&6; }
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+ ( sleep 1 ) &
+ am_sleep_pid=$!
+fi
+
+rm -f conftest.file
+
+test "$program_prefix" != NONE &&
+ program_transform_name="s&^&$program_prefix&;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+ program_transform_name="s&\$&$program_suffix&;$program_transform_name"
+# Double any \ or $.
+# By default was `s,x,x', remove it if useless.
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
+program_transform_name=`printf "%s\n" "$program_transform_name" | sed "$ac_script"`
+
+
+# Expand $ac_aux_dir to an absolute path.
+am_aux_dir=`cd "$ac_aux_dir" && pwd`
+
+
+ if test x"${MISSING+set}" != xset; then
+ MISSING="\${SHELL} '$am_aux_dir/missing'"
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --is-lightweight"; then
+ am_missing_run="$MISSING "
+else
+ am_missing_run=
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
+printf "%s\n" "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
+fi
+
+if test x"${install_sh+set}" != xset; then
+ case $am_aux_dir in
+  *\ * | *\	*)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+
+# Installed binaries are usually stripped using 'strip' when the user
+# run "make install-strip". However 'strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the 'STRIP' environment variable to overrule this program.
+if test "$cross_compiling" != no; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_STRIP+y}
+then :
+ printf %s "(cached) " >&6
+else $as_nop
+ if test -n "$STRIP"; then
+ ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+ ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+printf "%s\n" "$STRIP" >&6; }
+else
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+ ac_ct_STRIP=$STRIP
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_STRIP+y}
+then :
+ printf %s "(cached) " >&6
+else $as_nop
+ if test -n "$ac_ct_STRIP"; then
+ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_STRIP="strip"
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+printf "%s\n" "$ac_ct_STRIP" >&6; }
+else
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+ if test "x$ac_ct_STRIP" = x; then
+ STRIP=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ STRIP=$ac_ct_STRIP
+ fi
+else
+ STRIP="$ac_cv_prog_STRIP"
+fi
+
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+
+
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a race-free mkdir -p" >&5
+printf %s "checking for a race-free mkdir -p... " >&6; }
+if test -z "$MKDIR_P"; then
+ if test ${ac_cv_path_mkdir+y}
+then :
+ printf %s "(cached) " >&6
+else $as_nop
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+ for ac_prog in mkdir gmkdir; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext" || continue
+ case `"$as_dir$ac_prog$ac_exec_ext" --version 2>&1` in #(
+ 'mkdir ('*'coreutils) '* | \
+ 'BusyBox '* | \
+ 'mkdir (fileutils) '4.1*)
+ ac_cv_path_mkdir=$as_dir$ac_prog$ac_exec_ext
+ break 3;;
+ esac
+ done
+ done
+ done
+IFS=$as_save_IFS
+
+fi
+
+ test -d ./--version && rmdir ./--version
+ if test ${ac_cv_path_mkdir+y}; then
+ MKDIR_P="$ac_cv_path_mkdir -p"
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for MKDIR_P within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ MKDIR_P="$ac_install_sh -d"
+ fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
+printf "%s\n" "$MKDIR_P" >&6; }
+
+for ac_prog in gawk mawk nawk awk
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_AWK+y}
+then :
+ printf %s "(cached) " >&6
+else $as_nop
+ if test -n "$AWK"; then
+ ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+ ac_cv_prog_AWK="$ac_prog"
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
+printf "%s\n" "$AWK" >&6; }
+else
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+ test -n "$AWK" && break
+done
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+printf %s "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+set x ${MAKE-make}
+ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+if eval test \${ac_cv_prog_make_${ac_make}_set+y}
+then :
+ printf %s "(cached) " >&6
+else $as_nop
+ cat >conftest.make <<\_ACEOF
+SHELL = /bin/sh
+all:
+ @echo '@@@%%%=$(MAKE)=@@@%%%'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
+case `${MAKE-make} -f conftest.make 2>/dev/null` in
+ *@@@%%%=?*=@@@%%%*)
+ eval ac_cv_prog_make_${ac_make}_set=yes;;
+ *)
+ eval ac_cv_prog_make_${ac_make}_set=no;;
+esac
+rm -f conftest.make
+fi
+if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+printf "%s\n" "yes" >&6; }
+ SET_MAKE=
+else
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+ SET_MAKE="MAKE=${MAKE-make}"
+fi
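+# Makefile.in files pick this up as @SET_MAKE@, so recursion works even
+# under a make that does not define $(MAKE). A sketch of the usual pattern:
+#
+#     @SET_MAKE@
+#     all-recursive:
+#             cd src && $(MAKE) all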
+
+rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+
+# Check whether --enable-silent-rules was given.
+if test ${enable_silent_rules+y}
+then :
+ enableval=$enable_silent_rules;
+fi
+
+case $enable_silent_rules in # (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=1;;
+esac
+am_make=${MAKE-make}
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+printf %s "checking whether $am_make supports nested variables... " >&6; }
+if test ${am_cv_make_support_nested_variables+y}
+then :
+ printf %s "(cached) " >&6
+else $as_nop
+ if printf "%s\n" 'TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+ @$(TRUE)
+.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+printf "%s\n" "$am_cv_make_support_nested_variables" >&6; }
+if test $am_cv_make_support_nested_variables = yes; then
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AM_BACKSLASH='\'
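+# Usage sketch for the silent-rules machinery above: with a make that
+# supports nested variables, verbosity is chosen per invocation,
+#
+#     make            # terse output when AM_DEFAULT_VERBOSITY=0
+#     make V=1        # echo the full command lines
+#
+# which is exactly the $(BAR$(V)) indirection the probe exercised.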
+
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ am__isrc=' -I$(srcdir)'
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+
+
+# Define the identity of the package.
+ PACKAGE='parallel'
+ VERSION='20240222'
+
+
+printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h
+
+
+printf "%s\n" "#define VERSION \"$VERSION\"" >>confdefs.h
+
+# Some tools Automake needs.
+
+ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
+
+
+AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
+
+
+AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
+
+
+AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
+
+
+MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+
+# For better backward compatibility. To be removed once Automake 1.9.x
+# dies out for good. For more background, see:
+# <https://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <https://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+mkdir_p='$(MKDIR_P)'
+
+# We need awk for the "check" target (and possibly the TAP driver). The
+# system "awk" is bad on some platforms.
+# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AMTAR='$${TAR-tar}'
+
+
+# We'll loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar pax cpio none'
+
+am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
+
+
+
+
+
+# Variables for tags utilities; see am/tags.am
+if test -z "$CTAGS"; then
+ CTAGS=ctags
+fi
+
+if test -z "$ETAGS"; then
+ ETAGS=etags
+fi
+
+if test -z "$CSCOPE"; then
+ CSCOPE=cscope
+fi
+
+
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and does not conform to
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <https://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5
+ fi
+fi
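+# The escape hatch described above, as a one-shot invocation (a sketch):
+#
+#     ACCEPT_INFERIOR_RM_PROGRAM=yes ./configure
+#
+# This only silences the probe; rules running "rm -f" without operands may
+# still fail on such a system.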
+
+ac_config_headers="$ac_config_headers config.h"
+
+ac_config_files="$ac_config_files Makefile src/Makefile"
+
+
+# Check whether --enable-documentation was given.
+if test ${enable_documentation+y}
+then :
+ enableval=$enable_documentation;
+else $as_nop
+ enable_documentation=yes
+fi
+
+ if test x$enable_documentation = xyes; then
+ DOCUMENTATION_TRUE=
+ DOCUMENTATION_FALSE='#'
+else
+ DOCUMENTATION_TRUE='#'
+ DOCUMENTATION_FALSE=
+fi
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5
+printf %s "checking whether ln -s works... " >&6; }
+LN_S=$as_ln_s
+if test "$LN_S" = "ln -s"; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+printf "%s\n" "yes" >&6; }
+else
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5
+printf "%s\n" "no, using $LN_S" >&6; }
+fi
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes: double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \.
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test ${\1+y} || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ if test "x$cache_file" != "x/dev/null"; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+printf "%s\n" "$as_me: updating cache $cache_file" >&6;}
+ if test ! -f "$cache_file" || test -h "$cache_file"; then
+ cat confcache >"$cache_file"
+ else
+ case $cache_file in #(
+ */* | ?:*)
+ mv -f confcache "$cache_file"$$ &&
+ mv -f "$cache_file"$$ "$cache_file" ;; #(
+ *)
+ mv -f confcache "$cache_file" ;;
+ esac
+ fi
+ fi
+ else
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+printf "%s\n" "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+U=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`printf "%s\n" "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5
+printf %s "checking that generated files are newer than configure... " >&6; }
+ if test -n "$am_sleep_pid"; then
+ # Hide warnings about reused PIDs.
+ wait $am_sleep_pid 2>/dev/null
+ fi
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: done" >&5
+printf "%s\n" "done" >&6; }
+
+if test -z "${DOCUMENTATION_TRUE}" && test -z "${DOCUMENTATION_FALSE}"; then
+ as_fn_error $? "conditional \"DOCUMENTATION\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+
+: "${CONFIG_STATUS=./config.status}"
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+printf "%s\n" "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+as_nop=:
+if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
+then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else $as_nop
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+
+# Reset variables that may have inherited troublesome values from
+# the environment.
+
+# IFS needs to be set, to space, tab, and newline, in precisely that order.
+# (If _AS_PATH_WALK were called with IFS unset, it would have the
+# side effect of setting IFS to empty, thus disabling word splitting.)
+# Quoting is to prevent editors from complaining about space-tab.
+as_nl='
+'
+export as_nl
+IFS=" "" $as_nl"
+
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# Ensure predictable behavior from utilities with locale-dependent output.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# We cannot yet rely on "unset" to work, but we need these variables
+# to be unset--not just set to an empty or harmless value--now, to
+# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct
+# also avoids known problems related to "unset" and subshell syntax
+# in other old shells (e.g. bash 2.01 and pdksh 5.2.14).
+for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH
+do eval test \${$as_var+y} \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+
+# Ensure that fds 0, 1, and 2 are open.
+if (exec 3>&0) 2>/dev/null; then :; else exec 0</dev/null; fi
+if (exec 3>&1) 2>/dev/null; then :; else exec 1>/dev/null; fi
+if (exec 3>&2) ; then :; else exec 2>/dev/null; fi
+
+# The user is always right.
+if ${PATH_SEPARATOR+false} :; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ case $as_dir in #(((
+ '') as_dir=./ ;;
+ */) ;;
+ *) as_dir=$as_dir/ ;;
+ esac
+ test -r "$as_dir$0" && as_myself=$as_dir$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ printf "%s\n" "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null
+then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else $as_nop
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
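+# Illustration: both definitions behave identically.
+#
+#     acc=foo
+#     as_fn_append acc bar      # acc is now "foobar"
+#
+# The += form merely lets capable shells extend the string in place rather
+# than re-copying it on every append.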
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null
+then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else $as_nop
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
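+# Illustration:
+#
+#     as_fn_arith 2 + 3
+#     printf "%s\n" "$as_val"   # prints 5
+#
+# In the expr fallback, "|| test $? -eq 1" matters: expr exits 1 when the
+# result is 0 or null, and that exit must not read as a failure here.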
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+# Determine whether it's possible to make 'echo' print without a newline.
+# These variables are no longer used directly by Autoconf, but are AC_SUBSTed
+# for compatibility with existing Makefiles.
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+# For backward compatibility with old third-party macros, we provide
+# the shell variables $as_echo and $as_echo_n. New code should use
+# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively.
+as_echo='printf %s\n'
+as_echo_n='printf %s'
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by parallel $as_me 20240222, which was
+generated by GNU Autoconf 2.71. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration. Unless the files
+and actions are specified as TAGs, all are instantiated by default.
+
+Usage: $0 [OPTION]... [TAG]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ --config print configuration, then exit
+ -q, --quiet, --silent
+ do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Report bugs to <bug-parallel@gnu.org>."
+
+_ACEOF
+ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"`
+ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\''/g"`
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config='$ac_cs_config_escaped'
+ac_cs_version="\\
+parallel config.status 20240222
+configured by $0, generated by GNU Autoconf 2.71,
+ with options \\"\$ac_cs_config\\"
+
+Copyright (C) 2021 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+MKDIR_P='$MKDIR_P'
+AWK='$AWK'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=?*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ --*=)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ printf "%s\n" "$ac_cs_version"; exit ;;
+ --config | --confi | --conf | --con | --co | --c )
+ printf "%s\n" "$ac_cs_config"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ '') as_fn_error $? "missing file argument" ;;
+ esac
+ as_fn_append CONFIG_FILES " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ as_fn_error $? "ambiguous option: \`$1'
+Try \`$0 --help' for more information.";;
+ --help | --hel | -h )
+ printf "%s\n" "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
+
+ *) as_fn_append ac_config_targets " $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ shift
+ \printf "%s\n" "running CONFIG_SHELL=$SHELL \$*" >&6
+ CONFIG_SHELL='$SHELL'
+ export CONFIG_SHELL
+ exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ printf "%s\n" "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;;
+
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files
+ test ${CONFIG_HEADERS+y} || CONFIG_HEADERS=$config_headers
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp= ac_tmp=
+ trap 'exit_status=$?
+ : "${ac_tmp:=$tmp}"
+ { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
+' 0
+ trap 'as_fn_exit 1' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+ eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+ echo "cat >conf$$subs.awk <<_ACEOF" &&
+ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+ echo "_ACEOF"
+} >conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ . ./conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+
+ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+ if test $ac_delim_n = $ac_delim_num; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+ N
+ s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
+
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{
+h
+s///
+s/^/:/
+s/[ ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
+s/:*$//
+x
+s/\(=[ ]*\).*/\1/
+G
+s/\n//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+ ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_tt"; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any. Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[ ]*#[ ]*define[ ][ ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ for (key in D) D_is_set[key] = 1
+ FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+ line = \$ 0
+ split(line, arg, " ")
+ if (arg[1] == "#") {
+ defundef = arg[2]
+ mac1 = arg[3]
+ } else {
+ defundef = substr(arg[1], 2)
+ mac1 = arg[2]
+ }
+ split(mac1, mac2, "(") #)
+ macro = mac2[1]
+ prefix = substr(line, 1, index(line, defundef) - 1)
+ if (D_is_set[macro]) {
+ # Preserve the white space surrounding the "#".
+ print prefix "define", macro P[macro] D[macro]
+ next
+ } else {
+ # Replace #undef with comments. This is necessary, for example,
+ # in the case of _POSIX_SOURCE, which is predefined and required
+ # on some systems where configure will not decide to define it.
+ if (defundef == "undef") {
+ print "/*", prefix defundef, macro, "*/"
+ next
+ }
+ }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS "
+shift
+for ac_tag
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$ac_tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+ esac
+ case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ as_fn_append ac_file_inputs " '$ac_f'"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+  # use $as_me); people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input='Generated from '`
+ printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+printf "%s\n" "$as_me: creating $ac_file" >&6;}
+ fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`printf "%s\n" "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$ac_tmp/stdin" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ as_dir="$ac_dir"; as_fn_mkdir_p
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+ ac_MKDIR_P=$MKDIR_P
+ case $MKDIR_P in
+ [\\/$]* | ?:[\\/]* ) ;;
+ */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \
+ "$ac_tmp/out"`; test -z "$ac_out"; } &&
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&5
+printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&2;}
+
+ rm -f "$ac_tmp/stdin"
+ case $ac_file in
+ -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+ *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
+ esac \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ if test x"$ac_file" != x-; then
+ {
+ printf "%s\n" "/* $configure_input */" >&1 \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+ } >"$ac_tmp/config.h" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+printf "%s\n" "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f "$ac_file"
+ mv "$ac_tmp/config.h" "$ac_file" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ fi
+ else
+ printf "%s\n" "/* $configure_input */" >&1 \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
+ || as_fn_error $? "could not create -" "$LINENO" 5
+ fi
+# Compute "$ac_file"'s index in $config_headers.
+_am_arg="$ac_file"
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
+$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$_am_arg" : 'X\(//\)[^/]' \| \
+ X"$_am_arg" : 'X\(//\)$' \| \
+ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$_am_arg" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`/stamp-h$_am_stamp_count
+ ;;
+
+
+ esac
+
+done # for ac_tag
+
+
+as_fn_exit 0
+_ACEOF
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
+
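The config.status script assembled above is itself re-runnable. Judging from
the ac_cs_usage text it embeds, plausible invocations on a configured tree
are:

    ./config.status                  # regenerate all registered files/headers
    ./config.status src/Makefile     # regenerate a single tagged file
    ./config.status --recheck        # rerun configure with the original args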
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..40ee871
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,14 @@
+AC_INIT([parallel],[20240222],[bug-parallel@gnu.org])
+AM_INIT_AUTOMAKE([-Wall -Werror foreign])
+AC_CONFIG_HEADERS([config.h])
+AC_CONFIG_FILES([
+ Makefile
+ src/Makefile
+])
+
+AC_ARG_ENABLE(documentation,
+  AS_HELP_STRING([--disable-documentation],[Omit building and installing the documentation (default: documentation is built)]),,
+ [enable_documentation=yes])
+AM_CONDITIONAL([DOCUMENTATION], [test x$enable_documentation = xyes])
+AC_PROG_LN_S
+AC_OUTPUT
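A plausible bootstrap sequence for this configure.ac, assuming the stock
Autotools workflow (the option name comes from the AC_ARG_ENABLE above):

    autoreconf --install
    ./configure --disable-documentation    # or plain ./configure
    make && make install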
diff --git a/install-sh b/install-sh
new file mode 100755
index 0000000..ec298b5
--- /dev/null
+++ b/install-sh
@@ -0,0 +1,541 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2020-11-14.01; # UTC
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# 'make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
+
+tab=' '
+nl='
+'
+IFS=" $tab$nl"
+
+# Set DOITPROG to "echo" to test this script.
+
+doit=${DOITPROG-}
+doit_exec=${doit:-exec}
+
+# Put in absolute file names if you don't have them in your path;
+# or use environment vars.
+
+chgrpprog=${CHGRPPROG-chgrp}
+chmodprog=${CHMODPROG-chmod}
+chownprog=${CHOWNPROG-chown}
+cmpprog=${CMPPROG-cmp}
+cpprog=${CPPROG-cp}
+mkdirprog=${MKDIRPROG-mkdir}
+mvprog=${MVPROG-mv}
+rmprog=${RMPROG-rm}
+stripprog=${STRIPPROG-strip}
+
+posix_mkdir=
+
+# Desired mode of installed file.
+mode=0755
+
+# Create dirs (including intermediate dirs) using mode 755.
+# This is like GNU 'install' as of coreutils 8.32 (2020).
+mkdir_umask=22
+
+backupsuffix=
+chgrpcmd=
+chmodcmd=$chmodprog
+chowncmd=
+mvcmd=$mvprog
+rmcmd="$rmprog -f"
+stripcmd=
+
+src=
+dst=
+dir_arg=
+dst_arg=
+
+copy_on_change=false
+is_target_a_directory=possibly
+
+usage="\
+Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+ or: $0 [OPTION]... SRCFILES... DIRECTORY
+ or: $0 [OPTION]... -t DIRECTORY SRCFILES...
+ or: $0 [OPTION]... -d DIRECTORIES...
+
+In the 1st form, copy SRCFILE to DSTFILE.
+In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
+In the 4th, create DIRECTORIES.
+
+Options:
+ --help display this help and exit.
+ --version display version info and exit.
+
+ -c (ignored)
+ -C install only if different (preserve data modification time)
+ -d create directories instead of installing files.
+ -g GROUP $chgrpprog installed files to GROUP.
+ -m MODE $chmodprog installed files to MODE.
+ -o USER $chownprog installed files to USER.
+ -p pass -p to $cpprog.
+ -s $stripprog installed files.
+ -S SUFFIX attempt to back up existing files, with suffix SUFFIX.
+ -t DIRECTORY install into DIRECTORY.
+ -T report an error if DSTFILE is a directory.
+
+Environment variables override the default commands:
+ CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
+ RMPROG STRIPPROG
+
+By default, rm is invoked with -f; when overridden with RMPROG,
+it's up to you to specify -f if you want it.
+
+If -S is not specified, no backups are attempted.
+
+Email bug reports to bug-automake@gnu.org.
+Automake home page: https://www.gnu.org/software/automake/
+"
+
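+# Example invocation (illustrative paths, not part of the script):
+#   ./install-sh -c -m 644 parallel.1 /usr/local/share/man/man1
+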
+while test $# -ne 0; do
+ case $1 in
+ -c) ;;
+
+ -C) copy_on_change=true;;
+
+ -d) dir_arg=true;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift;;
+
+ --help) echo "$usage"; exit $?;;
+
+ -m) mode=$2
+ case $mode in
+ *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*)
+ echo "$0: invalid mode: $mode" >&2
+ exit 1;;
+ esac
+ shift;;
+
+ -o) chowncmd="$chownprog $2"
+ shift;;
+
+ -p) cpprog="$cpprog -p";;
+
+ -s) stripcmd=$stripprog;;
+
+ -S) backupsuffix="$2"
+ shift;;
+
+ -t)
+ is_target_a_directory=always
+ dst_arg=$2
+ # Protect names problematic for 'test' and other utilities.
+ case $dst_arg in
+ -* | [=\(\)!]) dst_arg=./$dst_arg;;
+ esac
+ shift;;
+
+ -T) is_target_a_directory=never;;
+
+ --version) echo "$0 $scriptversion"; exit $?;;
+
+ --) shift
+ break;;
+
+ -*) echo "$0: invalid option: $1" >&2
+ exit 1;;
+
+ *) break;;
+ esac
+ shift
+done
+
+# We allow the use of options -d and -T together, by making -d
+# take precedence; this is for compatibility with GNU install.
+
+if test -n "$dir_arg"; then
+ if test -n "$dst_arg"; then
+ echo "$0: target directory not allowed when installing a directory." >&2
+ exit 1
+ fi
+fi
+
+if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
+ # When -d is used, all remaining arguments are directories to create.
+ # When -t is used, the destination is already specified.
+ # Otherwise, the last argument is the destination. Remove it from $@.
+ for arg
+ do
+ if test -n "$dst_arg"; then
+ # $@ is not empty: it contains at least $arg.
+ set fnord "$@" "$dst_arg"
+ shift # fnord
+ fi
+ shift # arg
+ dst_arg=$arg
+ # Protect names problematic for 'test' and other utilities.
+ case $dst_arg in
+ -* | [=\(\)!]) dst_arg=./$dst_arg;;
+ esac
+ done
+fi
+
+if test $# -eq 0; then
+ if test -z "$dir_arg"; then
+ echo "$0: no input file specified." >&2
+ exit 1
+ fi
+ # It's OK to call 'install-sh -d' without argument.
+ # This can happen when creating conditional directories.
+ exit 0
+fi
+
+if test -z "$dir_arg"; then
+ if test $# -gt 1 || test "$is_target_a_directory" = always; then
+ if test ! -d "$dst_arg"; then
+ echo "$0: $dst_arg: Is not a directory." >&2
+ exit 1
+ fi
+ fi
+fi
+
+if test -z "$dir_arg"; then
+ do_exit='(exit $ret); exit $ret'
+ trap "ret=129; $do_exit" 1
+ trap "ret=130; $do_exit" 2
+ trap "ret=141; $do_exit" 13
+ trap "ret=143; $do_exit" 15
+
+ # Set umask so as not to create temps with too-generous modes.
+ # However, 'strip' requires both read and write access to temps.
+ case $mode in
+ # Optimize common cases.
+ *644) cp_umask=133;;
+ *755) cp_umask=22;;
+
+ *[0-7])
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw='% 200'
+ fi
+ cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
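+      # Worked example: mode=640 with no strip gives
+      # cp_umask=`expr '(' 777 - 640 % 1000 ')'` = 137, so the
+      # temporary copy is created rw-r----- (640).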
+ *)
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw=,u+rw
+ fi
+ cp_umask=$mode$u_plus_rw;;
+ esac
+fi
+
+for src
+do
+ # Protect names problematic for 'test' and other utilities.
+ case $src in
+ -* | [=\(\)!]) src=./$src;;
+ esac
+
+ if test -n "$dir_arg"; then
+ dst=$src
+ dstdir=$dst
+ test -d "$dstdir"
+ dstdir_status=$?
+ # Don't chown directories that already exist.
+ if test $dstdir_status = 0; then
+ chowncmd=""
+ fi
+ else
+
+ # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
+ # might cause directories to be created, which would be especially bad
+ # if $src (and thus $dsttmp) contains '*'.
+ if test ! -f "$src" && test ! -d "$src"; then
+ echo "$0: $src does not exist." >&2
+ exit 1
+ fi
+
+ if test -z "$dst_arg"; then
+ echo "$0: no destination specified." >&2
+ exit 1
+ fi
+ dst=$dst_arg
+
+ # If destination is a directory, append the input filename.
+ if test -d "$dst"; then
+ if test "$is_target_a_directory" = never; then
+ echo "$0: $dst_arg: Is a directory" >&2
+ exit 1
+ fi
+ dstdir=$dst
+ dstbase=`basename "$src"`
+ case $dst in
+ */) dst=$dst$dstbase;;
+ *) dst=$dst/$dstbase;;
+ esac
+ dstdir_status=0
+ else
+ dstdir=`dirname "$dst"`
+ test -d "$dstdir"
+ dstdir_status=$?
+ fi
+ fi
+
+ case $dstdir in
+ */) dstdirslash=$dstdir;;
+ *) dstdirslash=$dstdir/;;
+ esac
+
+ obsolete_mkdir_used=false
+
+ if test $dstdir_status != 0; then
+ case $posix_mkdir in
+ '')
+ # With -d, create the new directory with the user-specified mode.
+ # Otherwise, rely on $mkdir_umask.
+ if test -n "$dir_arg"; then
+ mkdir_mode=-m$mode
+ else
+ mkdir_mode=
+ fi
+
+ posix_mkdir=false
+      # The $RANDOM variable is not portable (e.g., dash); use it here
+      # anyway, when available, just to lower the chance of a collision.
+ tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
+
+ trap '
+ ret=$?
+ rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null
+ exit $ret
+ ' 0
+
+      # Because "mkdir -p" follows existing symlinks and we likely work
+      # directly in world-writable /tmp, make sure that the '$tmpdir'
+      # directory is successfully created before we actually test
+      # 'mkdir -p'.
+ if (umask $mkdir_umask &&
+ $mkdirprog $mkdir_mode "$tmpdir" &&
+ exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1
+ then
+ if test -z "$dir_arg" || {
+ # Check for POSIX incompatibilities with -m.
+ # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
+ # other-writable bit of parent directory when it shouldn't.
+ # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
+ test_tmpdir="$tmpdir/a"
+ ls_ld_tmpdir=`ls -ld "$test_tmpdir"`
+ case $ls_ld_tmpdir in
+ d????-?r-*) different_mode=700;;
+ d????-?--*) different_mode=755;;
+ *) false;;
+ esac &&
+ $mkdirprog -m$different_mode -p -- "$test_tmpdir" && {
+ ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"`
+ test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
+ }
+ }
+ then posix_mkdir=:
+ fi
+ rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir"
+ else
+ # Remove any dirs left behind by ancient mkdir implementations.
+ rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null
+ fi
+ trap '' 0;;
+ esac
+
+ if
+ $posix_mkdir && (
+ umask $mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
+ )
+ then :
+ else
+
+ # mkdir does not conform to POSIX,
+      # or it failed, possibly due to a race condition.  Create the
+ # directory the slow way, step by step, checking for races as we go.
+
+ case $dstdir in
+ /*) prefix='/';;
+ [-=\(\)!]*) prefix='./';;
+ *) prefix='';;
+ esac
+
+ oIFS=$IFS
+ IFS=/
+ set -f
+ set fnord $dstdir
+ shift
+ set +f
+ IFS=$oIFS
+
+ prefixes=
+
+ for d
+ do
+ test X"$d" = X && continue
+
+ prefix=$prefix$d
+ if test -d "$prefix"; then
+ prefixes=
+ else
+ if $posix_mkdir; then
+ (umask $mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
+ # Don't fail if two instances are running concurrently.
+ test -d "$prefix" || exit 1
+ else
+ case $prefix in
+ *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) qprefix=$prefix;;
+ esac
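+            # $qprefix is $prefix with embedded single quotes escaped,
+            # so the later eval over $prefixes parses each directory
+            # name as a single word.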
+ prefixes="$prefixes '$qprefix'"
+ fi
+ fi
+ prefix=$prefix/
+ done
+
+ if test -n "$prefixes"; then
+ # Don't fail if two instances are running concurrently.
+ (umask $mkdir_umask &&
+ eval "\$doit_exec \$mkdirprog $prefixes") ||
+ test -d "$dstdir" || exit 1
+ obsolete_mkdir_used=true
+ fi
+ fi
+ fi
+
+ if test -n "$dir_arg"; then
+ { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
+ { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
+ test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
+ else
+
+ # Make a couple of temp file names in the proper directory.
+ dsttmp=${dstdirslash}_inst.$$_
+ rmtmp=${dstdirslash}_rm.$$_
+
+ # Trap to clean up those temp files at exit.
+ trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
+
+ # Copy the file name to the temp name.
+ (umask $cp_umask &&
+ { test -z "$stripcmd" || {
+ # Create $dsttmp read-write so that cp doesn't create it read-only,
+ # which would cause strip to fail.
+ if test -z "$doit"; then
+ : >"$dsttmp" # No need to fork-exec 'touch'.
+ else
+ $doit touch "$dsttmp"
+ fi
+ }
+ } &&
+ $doit_exec $cpprog "$src" "$dsttmp") &&
+
+ # and set any options; do chmod last to preserve setuid bits.
+ #
+ # If any of these fail, we abort the whole thing. If we want to
+ # ignore errors from any of these, just make sure not to ignore
+ # errors from the above "$doit $cpprog $src $dsttmp" command.
+ #
+ { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
+ { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
+ { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
+
+ # If -C, don't bother to copy if it wouldn't change the file.
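+  # After 'set X $old', fields $2/$4/$5/$6 hold the mode, owner, group
+  # and size columns from 'ls -dlL'; if those match and cmp finds no
+  # content difference, the freshly copied temp file is removed instead.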
+ if $copy_on_change &&
+ old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
+ new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
+ set -f &&
+ set X $old && old=:$2:$4:$5:$6 &&
+ set X $new && new=:$2:$4:$5:$6 &&
+ set +f &&
+ test "$old" = "$new" &&
+ $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
+ then
+ rm -f "$dsttmp"
+ else
+ # If $backupsuffix is set, and the file being installed
+ # already exists, attempt a backup. Don't worry if it fails,
+ # e.g., if mv doesn't support -f.
+ if test -n "$backupsuffix" && test -f "$dst"; then
+ $doit $mvcmd -f "$dst" "$dst$backupsuffix" 2>/dev/null
+ fi
+
+ # Rename the file to the real destination.
+ $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
+
+ # The rename failed, perhaps because mv can't rename something else
+ # to itself, or perhaps because mv is so ancient that it does not
+ # support -f.
+ {
+ # Now remove or move aside any old file at destination location.
+ # We try this two ways since rm can't unlink itself on some
+ # systems and the destination file might be busy for other
+ # reasons. In this case, the final cleanup might fail but the new
+ # file should still install successfully.
+ {
+ test ! -f "$dst" ||
+ $doit $rmcmd "$dst" 2>/dev/null ||
+ { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
+ { $doit $rmcmd "$rmtmp" 2>/dev/null; :; }
+ } ||
+ { echo "$0: cannot unlink or rename $dst" >&2
+ (exit 1); exit 1
+ }
+ } &&
+
+ # Now rename the file to the real destination.
+ $doit $mvcmd "$dsttmp" "$dst"
+ }
+ fi || exit 1
+
+ trap '' 0
+ fi
+done
+
+# Local variables:
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC0"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/missing b/missing
new file mode 100755
index 0000000..1fe1611
--- /dev/null
+++ b/missing
@@ -0,0 +1,215 @@
+#! /bin/sh
+# Common wrapper for a few potentially missing GNU programs.
+
+scriptversion=2018-03-07.03; # UTC
+
+# Copyright (C) 1996-2021 Free Software Foundation, Inc.
+# Originally written by François Pinard <pinard@iro.umontreal.ca>, 1996.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+if test $# -eq 0; then
+ echo 1>&2 "Try '$0 --help' for more information"
+ exit 1
+fi
+
+case $1 in
+
+ --is-lightweight)
+ # Used by our autoconf macros to check whether the available missing
+ # script is modern enough.
+ exit 0
+ ;;
+
+ --run)
+ # Back-compat with the calling convention used by older automake.
+ shift
+ ;;
+
+ -h|--h|--he|--hel|--help)
+ echo "\
+$0 [OPTION]... PROGRAM [ARGUMENT]...
+
+Run 'PROGRAM [ARGUMENT]...', returning proper advice when this fails due
+to PROGRAM being missing or too old.
+
+Options:
+ -h, --help display this help and exit
+ -v, --version output version information and exit
+
+Supported PROGRAM values:
+ aclocal autoconf autoheader autom4te automake makeinfo
+ bison yacc flex lex help2man
+
+Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and
+'g' are ignored when checking the name.
+
+Send bug reports to <bug-automake@gnu.org>."
+ exit $?
+ ;;
+
+ -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
+ echo "missing $scriptversion (GNU Automake)"
+ exit $?
+ ;;
+
+ -*)
+ echo 1>&2 "$0: unknown '$1' option"
+ echo 1>&2 "Try '$0 --help' for more information"
+ exit 1
+ ;;
+
+esac
+
+# Run the given program, remember its exit status.
+"$@"; st=$?
+
+# If it succeeded, we are done.
+test $st -eq 0 && exit 0
+
+# Also exit now if it failed (or wasn't found) and '--version' was
+# passed; such an option is most likely passed to detect whether the
+# program is present and works.
+case $2 in --version|--help) exit $st;; esac
+
+# Exit code 63 means version mismatch. This often happens when the user
+# tries to use an ancient version of a tool on a file that requires a
+# minimum version.
+if test $st -eq 63; then
+ msg="probably too old"
+elif test $st -eq 127; then
+ # Program was missing.
+ msg="missing on your system"
+else
+ # Program was found and executed, but failed. Give up.
+ exit $st
+fi
+
+perl_URL=https://www.perl.org/
+flex_URL=https://github.com/westes/flex
+gnu_software_URL=https://www.gnu.org/software
+
+program_details ()
+{
+ case $1 in
+ aclocal|automake)
+ echo "The '$1' program is part of the GNU Automake package:"
+ echo "<$gnu_software_URL/automake>"
+ echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:"
+ echo "<$gnu_software_URL/autoconf>"
+ echo "<$gnu_software_URL/m4/>"
+ echo "<$perl_URL>"
+ ;;
+ autoconf|autom4te|autoheader)
+ echo "The '$1' program is part of the GNU Autoconf package:"
+ echo "<$gnu_software_URL/autoconf/>"
+ echo "It also requires GNU m4 and Perl in order to run:"
+ echo "<$gnu_software_URL/m4/>"
+ echo "<$perl_URL>"
+ ;;
+ esac
+}
+
+give_advice ()
+{
+ # Normalize program name to check for.
+ normalized_program=`echo "$1" | sed '
+ s/^gnu-//; t
+ s/^gnu//; t
+ s/^g//; t'`
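+  # e.g. 'gmake' becomes 'make', 'gnu-automake' becomes 'automake'.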
+
+ printf '%s\n' "'$1' is $msg."
+
+ configure_deps="'configure.ac' or m4 files included by 'configure.ac'"
+ case $normalized_program in
+ autoconf*)
+ echo "You should only need it if you modified 'configure.ac',"
+ echo "or m4 files included by it."
+ program_details 'autoconf'
+ ;;
+ autoheader*)
+ echo "You should only need it if you modified 'acconfig.h' or"
+ echo "$configure_deps."
+ program_details 'autoheader'
+ ;;
+ automake*)
+ echo "You should only need it if you modified 'Makefile.am' or"
+ echo "$configure_deps."
+ program_details 'automake'
+ ;;
+ aclocal*)
+ echo "You should only need it if you modified 'acinclude.m4' or"
+ echo "$configure_deps."
+ program_details 'aclocal'
+ ;;
+ autom4te*)
+ echo "You might have modified some maintainer files that require"
+ echo "the 'autom4te' program to be rebuilt."
+ program_details 'autom4te'
+ ;;
+ bison*|yacc*)
+ echo "You should only need it if you modified a '.y' file."
+ echo "You may want to install the GNU Bison package:"
+ echo "<$gnu_software_URL/bison/>"
+ ;;
+ lex*|flex*)
+ echo "You should only need it if you modified a '.l' file."
+ echo "You may want to install the Fast Lexical Analyzer package:"
+ echo "<$flex_URL>"
+ ;;
+ help2man*)
+ echo "You should only need it if you modified a dependency" \
+ "of a man page."
+ echo "You may want to install the GNU Help2man package:"
+ echo "<$gnu_software_URL/help2man/>"
+ ;;
+ makeinfo*)
+ echo "You should only need it if you modified a '.texi' file, or"
+      echo "any other file that indirectly affects the content of the manual."
+ echo "You might want to install the Texinfo package:"
+ echo "<$gnu_software_URL/texinfo/>"
+ echo "The spurious makeinfo call might also be the consequence of"
+ echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might"
+ echo "want to install GNU make:"
+ echo "<$gnu_software_URL/make/>"
+ ;;
+ *)
+ echo "You might have modified some files without having the proper"
+      echo "tools for further handling them.  Check the 'README' file; it"
+ echo "often tells you about the needed prerequisites for installing"
+ echo "this package. You may also peek at any GNU archive site, in"
+ echo "case some other package contains this missing '$1' program."
+ ;;
+ esac
+}
+
+give_advice "$1" | sed -e '1s/^/WARNING: /' \
+ -e '2,$s/^/ /' >&2
+
+# Propagate the correct exit status (expected to be 127 for a program
+# not found, 63 for a program that failed due to version mismatch).
+exit $st
+
+# Local variables:
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC0"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 0000000..c1facd5
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,405 @@
+# SPDX-FileCopyrightText: 2002-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+bin_SCRIPTS = parallel sql niceload parcat parset parsort \
+ env_parallel env_parallel.ash env_parallel.bash \
+ env_parallel.csh env_parallel.dash env_parallel.fish \
+ env_parallel.ksh env_parallel.mksh env_parallel.pdksh \
+ env_parallel.sh env_parallel.tcsh env_parallel.zsh
+
+POD_FILES = parcat.pod parset.pod sem.pod parallel.pod \
+ env_parallel.pod niceload.pod parallel_examples.pod \
+ parallel_tutorial.pod parallel_book.pod parallel_design.pod \
+ parallel_alternatives.pod
+
+MAN_FILES = parallel.1 env_parallel.1 sem.1 sql.1 niceload.1 \
+ parallel_examples.7 parallel_tutorial.7 parallel_book.7 \
+ parallel_design.7 parallel_alternatives.7 parcat.1 parset.1 \
+ parsort.1
+
+PDF_MAN = parallel.pdf env_parallel.pdf sem.pdf sql.pdf \
+ niceload.pdf parallel_examples.pdf parallel_tutorial.pdf \
+ parallel_book.pdf parallel_design.pdf \
+ parallel_alternatives.pdf parcat.pdf parset.pdf parsort.pdf
+
+PDF_OTHER = parallel_cheat_bw.pdf parallel_options_map.pdf
+
+PDF_FILES = $(PDF_MAN) $(PDF_OTHER)
+
+RST_FILES = parallel.rst env_parallel.rst sem.rst sql.rst \
+ niceload.rst parallel_examples.rst parallel_tutorial.rst \
+ parallel_book.rst parallel_design.rst \
+ parallel_alternatives.rst parcat.rst parset.rst parsort.rst
+
+TEXI_FILES = parallel.texi env_parallel.texi sem.texi sql.texi \
+ niceload.texi parallel_examples.texi parallel_tutorial.texi \
+ parallel_book.texi parallel_design.texi \
+ parallel_alternatives.texi parcat.texi parset.texi \
+ parsort.texi
+
+HTML_FILES = parallel.html env_parallel.html sem.html sql.html \
+ niceload.html parallel_examples.html parallel_tutorial.html \
+ parallel_book.html parallel_design.html \
+ parallel_alternatives.html parcat.html parset.html \
+ parsort.html
+
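+# 'sem' is installed as a symlink to parallel; when invoked as sem,
+# parallel behaves as 'parallel --semaphore'.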
+install-exec-hook:
+ rm "$(DESTDIR)$(bindir)"/sem || true
+ $(LN_S) parallel "$(DESTDIR)$(bindir)"/sem
+
+if DOCUMENTATION
+man_MANS = $(MAN_FILES)
+doc_DATA = $(HTML_FILES) $(TEXI_FILES) $(RST_FILES) $(PDF_FILES)
+endif
+
+DISTCLEANFILES = $(MAN_FILES) $(HTML_FILES) $(TEXI_FILES) \
+ $(RST_FILES) $(PDF_FILES)
+
+
+EXTRA_DIST = $(bin_SCRIPTS) sem $(POD_FILES) \
+ parallel_cheat_bw.fodt pod2graph $(DISTCLEANFILES)
+
+web: sphinx
+ true
+
+sphinx: *.rst
+	cd sphinx && make
+
+# Build a documentation file if the tool needed to build it exists.
+# Otherwise, fall back to the distributed version.
+parallel.1: parallel.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/parallel.pod > "$(srcdir)"/parallel.1n \
+ && mv "$(srcdir)"/parallel.1n "$(srcdir)"/parallel.1 \
+ || echo "Warning: pod2man not found. Using old parallel.1"
+
+env_parallel.1: env_parallel.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/env_parallel.pod > "$(srcdir)"/env_parallel.1n \
+ && mv "$(srcdir)"/env_parallel.1n "$(srcdir)"/env_parallel.1 \
+ || echo "Warning: pod2man not found. Using old env_parallel.1"
+
+parallel_examples.7: parallel_examples.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_examples.pod > "$(srcdir)"/parallel_examples.7n \
+ && mv "$(srcdir)"/parallel_examples.7n "$(srcdir)"/parallel_examples.7 \
+ || echo "Warning: pod2man not found. Using old parallel_examples.7"
+
+parallel_tutorial.7: parallel_tutorial.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_tutorial.pod > "$(srcdir)"/parallel_tutorial.7n \
+ && mv "$(srcdir)"/parallel_tutorial.7n "$(srcdir)"/parallel_tutorial.7 \
+ || echo "Warning: pod2man not found. Using old parallel_tutorial.7"
+
+parallel_book.7: parallel_book.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_book.pod > "$(srcdir)"/parallel_book.7n \
+ && mv "$(srcdir)"/parallel_book.7n "$(srcdir)"/parallel_book.7 \
+ || echo "Warning: pod2man not found. Using old parallel_book.7"
+
+parallel_design.7: parallel_design.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_design.pod > "$(srcdir)"/parallel_design.7n \
+ && mv "$(srcdir)"/parallel_design.7n "$(srcdir)"/parallel_design.7 \
+ || echo "Warning: pod2man not found. Using old parallel_design.7"
+
+parallel_alternatives.7: parallel_alternatives.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_alternatives.pod > "$(srcdir)"/parallel_alternatives.7n \
+ && mv "$(srcdir)"/parallel_alternatives.7n "$(srcdir)"/parallel_alternatives.7 \
+ || echo "Warning: pod2man not found. Using old parallel_alternatives.7"
+
+sem.1: sem.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/sem.pod > "$(srcdir)"/sem.1n \
+ && mv "$(srcdir)"/sem.1n "$(srcdir)"/sem.1 \
+ || echo "Warning: pod2man not found. Using old sem.1"
+
+sql.1: sql
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/sql > "$(srcdir)"/sql.1n \
+ && mv "$(srcdir)"/sql.1n "$(srcdir)"/sql.1 \
+ || echo "Warning: pod2man not found. Using old sql.1"
+
+niceload.1: niceload.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/niceload.pod > "$(srcdir)"/niceload.1n \
+ && mv "$(srcdir)"/niceload.1n "$(srcdir)"/niceload.1 \
+ || echo "Warning: pod2man not found. Using old niceload.1"
+
+parcat.1: parcat.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/parcat.pod > "$(srcdir)"/parcat.1n \
+ && mv "$(srcdir)"/parcat.1n "$(srcdir)"/parcat.1 \
+ || echo "Warning: pod2man not found. Using old parcat.1"
+
+parset.1: parset.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/parset.pod > "$(srcdir)"/parset.1n \
+ && mv "$(srcdir)"/parset.1n "$(srcdir)"/parset.1 \
+ || echo "Warning: pod2man not found. Using old parset.1"
+
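+# parsort and sql carry their POD inside the executable script, so the
+# man page is generated from the script itself rather than a .pod file.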
+parsort.1: parsort
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/parsort > "$(srcdir)"/parsort.1n \
+ && mv "$(srcdir)"/parsort.1n "$(srcdir)"/parsort.1 \
+ || echo "Warning: pod2man not found. Using old parsort.1"
+
+parallel.html: parallel.pod
+ pod2html --title "GNU Parallel" "$(srcdir)"/parallel.pod > "$(srcdir)"/parallel.htmln \
+ && mv "$(srcdir)"/parallel.htmln "$(srcdir)"/parallel.html \
+ || echo "Warning: pod2html not found. Using old parallel.html"
+ rm -f "$(srcdir)"/pod2htm*
+
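+# pod2html leaves pod2htm* cache files in the working directory, so
+# concurrent pod2html runs would clobber each other; the artificial
+# dependencies in the rules below serialize the HTML builds.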
+# Depending on parallel.html to avoid stupid pod2html race condition
+env_parallel.html: env_parallel.pod parallel.html
+ pod2html --title "GNU Parallel with environment" "$(srcdir)"/env_parallel.pod > "$(srcdir)"/env_parallel.htmln \
+ && mv "$(srcdir)"/env_parallel.htmln "$(srcdir)"/env_parallel.html \
+ || echo "Warning: pod2html not found. Using old env_parallel.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on env_parallel.html to avoid stupid pod2html race condition
+parallel_examples.html: parallel_examples.pod env_parallel.html
+ pod2html --title "GNU Parallel examples" "$(srcdir)"/parallel_examples.pod > "$(srcdir)"/parallel_examples.htmln \
+ && mv "$(srcdir)"/parallel_examples.htmln "$(srcdir)"/parallel_examples.html \
+ || echo "Warning: pod2html not found. Using old parallel_examples.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_examples.html to avoid stupid pod2html race condition
+parallel_tutorial.html: parallel_tutorial.pod parallel_examples.html
+ pod2html --title "GNU Parallel tutorial" "$(srcdir)"/parallel_tutorial.pod > "$(srcdir)"/parallel_tutorial.htmln \
+ && mv "$(srcdir)"/parallel_tutorial.htmln "$(srcdir)"/parallel_tutorial.html \
+ || echo "Warning: pod2html not found. Using old parallel_tutorial.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_tutorial.html to avoid stupid pod2html race condition
+parallel_book.html: parallel_book.pod parallel_tutorial.html
+ pod2html --title "GNU Parallel book" "$(srcdir)"/parallel_book.pod > "$(srcdir)"/parallel_book.htmln \
+ && mv "$(srcdir)"/parallel_book.htmln "$(srcdir)"/parallel_book.html \
+ || echo "Warning: pod2html not found. Using old parallel_book.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_book.html to avoid stupid pod2html race condition
+parallel_design.html: parallel_design.pod parallel_book.html
+ pod2html --title "GNU Parallel design" "$(srcdir)"/parallel_design.pod > "$(srcdir)"/parallel_design.htmln \
+ && mv "$(srcdir)"/parallel_design.htmln "$(srcdir)"/parallel_design.html \
+ || echo "Warning: pod2html not found. Using old parallel_design.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_design.html to avoid stupid pod2html race condition
+parallel_alternatives.html: parallel_alternatives.pod parallel_design.html
+ pod2html --title "GNU Parallel alternatives" "$(srcdir)"/parallel_alternatives.pod > "$(srcdir)"/parallel_alternatives.htmln \
+ && mv "$(srcdir)"/parallel_alternatives.htmln "$(srcdir)"/parallel_alternatives.html \
+ || echo "Warning: pod2html not found. Using old parallel_alternatives.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_alternatives.html to avoid stupid pod2html race condition
+sem.html: sem.pod parallel_alternatives.html
+ pod2html --title "sem (GNU Parallel)" "$(srcdir)"/sem.pod > "$(srcdir)"/sem.htmln \
+ && mv "$(srcdir)"/sem.htmln "$(srcdir)"/sem.html \
+ || echo "Warning: pod2html not found. Using old sem.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on sem.html to avoid stupid pod2html race condition
+sql.html: sql sem.html
+ pod2html --title "GNU SQL" "$(srcdir)"/sql > "$(srcdir)"/sql.htmln \
+ && mv "$(srcdir)"/sql.htmln "$(srcdir)"/sql.html \
+ || echo "Warning: pod2html not found. Using old sql.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on sql.html to avoid stupid pod2html race condition
+niceload.html: niceload.pod sql.html
+ pod2html --title "GNU niceload" "$(srcdir)"/niceload.pod > "$(srcdir)"/niceload.htmln \
+ && mv "$(srcdir)"/niceload.htmln "$(srcdir)"/niceload.html \
+ || echo "Warning: pod2html not found. Using old niceload.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on niceload.html to avoid stupid pod2html race condition
+parcat.html: parcat.pod niceload.html
+ pod2html --title "GNU parcat" "$(srcdir)"/parcat.pod > "$(srcdir)"/parcat.htmln \
+ && mv "$(srcdir)"/parcat.htmln "$(srcdir)"/parcat.html \
+ || echo "Warning: pod2html not found. Using old parcat.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parcat.html to avoid stupid pod2html race condition
+parset.html: parset.pod parcat.html
+ pod2html --title "GNU parset" "$(srcdir)"/parset.pod > "$(srcdir)"/parset.htmln \
+ && mv "$(srcdir)"/parset.htmln "$(srcdir)"/parset.html \
+ || echo "Warning: pod2html not found. Using old parset.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parset.html to avoid stupid pod2html race condition
+parsort.html: parsort parset.html
+ pod2html --title "GNU parsort" "$(srcdir)"/parsort > "$(srcdir)"/parsort.htmln \
+ && mv "$(srcdir)"/parsort.htmln "$(srcdir)"/parsort.html \
+ || echo "Warning: pod2html not found. Using old parsort.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+parallel.texi: parallel.pod
+ pod2texi --output="$(srcdir)"/parallel.texi "$(srcdir)"/parallel.pod \
+ || echo "Warning: pod2texi not found. Using old parallel.texi"
+
+env_parallel.texi: env_parallel.pod
+ pod2texi --output="$(srcdir)"/env_parallel.texi "$(srcdir)"/env_parallel.pod \
+ || echo "Warning: pod2texi not found. Using old env_parallel.texi"
+
+parallel_examples.texi: parallel_examples.pod
+ pod2texi --output="$(srcdir)"/parallel_examples.texi "$(srcdir)"/parallel_examples.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_examples.texi"
+
+parallel_tutorial.texi: parallel_tutorial.pod
+ pod2texi --output="$(srcdir)"/parallel_tutorial.texi "$(srcdir)"/parallel_tutorial.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_tutorial.texi"
+
+parallel_book.texi: parallel_book.pod
+ pod2texi --output="$(srcdir)"/parallel_book.texi "$(srcdir)"/parallel_book.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_book.texi"
+
+parallel_design.texi: parallel_design.pod
+ pod2texi --output="$(srcdir)"/parallel_design.texi "$(srcdir)"/parallel_design.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_design.texi"
+
+parallel_alternatives.texi: parallel_alternatives.pod
+ pod2texi --output="$(srcdir)"/parallel_alternatives.texi "$(srcdir)"/parallel_alternatives.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_alternatives.texi"
+
+sem.texi: sem.pod
+ pod2texi --output="$(srcdir)"/sem.texi "$(srcdir)"/sem.pod \
+ || echo "Warning: pod2texi not found. Using old sem.texi"
+
+sql.texi: sql
+ pod2texi --output="$(srcdir)"/sql.texi "$(srcdir)"/sql \
+ || echo "Warning: pod2texi not found. Using old sql.texi"
+
+niceload.texi: niceload.pod
+ pod2texi --output="$(srcdir)"/niceload.texi "$(srcdir)"/niceload.pod \
+ || echo "Warning: pod2texi not found. Using old niceload.texi"
+
+parcat.texi: parcat.pod
+ pod2texi --output="$(srcdir)"/parcat.texi "$(srcdir)"/parcat.pod \
+ || echo "Warning: pod2texi not found. Using old parcat.texi"
+
+parset.texi: parset.pod
+ pod2texi --output="$(srcdir)"/parset.texi "$(srcdir)"/parset.pod \
+ || echo "Warning: pod2texi not found. Using old parset.texi"
+
+parsort.texi: parsort
+ pod2texi --output="$(srcdir)"/parsort.texi "$(srcdir)"/parsort \
+ || echo "Warning: pod2texi not found. Using old parsort.texi"
+
+parallel.rst: parallel.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel.pod > "$(srcdir)"/parallel.rst \
+ || echo "Warning: pod2rst not found. Using old parallel.rst"
+
+env_parallel.rst: env_parallel.pod
+ ./pod2rst-fix < "$(srcdir)"/env_parallel.pod > "$(srcdir)"/env_parallel.rst \
+ || echo "Warning: pod2rst not found. Using old env_parallel.rst"
+
+parallel_examples.rst: parallel_examples.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_examples.pod > "$(srcdir)"/parallel_examples.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_examples.rst"
+
+parallel_tutorial.rst: parallel_tutorial.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_tutorial.pod > "$(srcdir)"/parallel_tutorial.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_tutorial.rst"
+
+parallel_book.rst: parallel_book.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_book.pod > "$(srcdir)"/parallel_book.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_book.rst"
+
+parallel_design.rst: parallel_design.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_design.pod > "$(srcdir)"/parallel_design.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_design.rst"
+
+parallel_alternatives.rst: parallel_alternatives.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_alternatives.pod > "$(srcdir)"/parallel_alternatives.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_alternatives.rst"
+
+sem.rst: sem.pod
+ ./pod2rst-fix < "$(srcdir)"/sem.pod > "$(srcdir)"/sem.rst \
+ || echo "Warning: pod2rst not found. Using old sem.rst"
+
+sql.rst: sql
+ ./pod2rst-fix < "$(srcdir)"/sql > "$(srcdir)"/sql.rst \
+ || echo "Warning: pod2rst not found. Using old sql.rst"
+
+niceload.rst: niceload.pod
+ ./pod2rst-fix < "$(srcdir)"/niceload.pod > "$(srcdir)"/niceload.rst \
+ || echo "Warning: pod2rst not found. Using old niceload.rst"
+
+parcat.rst: parcat.pod
+ ./pod2rst-fix < "$(srcdir)"/parcat.pod > "$(srcdir)"/parcat.rst \
+ || echo "Warning: pod2rst not found. Using old parcat.rst"
+
+parset.rst: parset.pod
+ ./pod2rst-fix < "$(srcdir)"/parset.pod > "$(srcdir)"/parset.rst \
+ || echo "Warning: pod2rst not found. Using old parset.rst"
+
+parsort.rst: parsort
+ ./pod2rst-fix < "$(srcdir)"/parsort > "$(srcdir)"/parsort.rst \
+ || echo "Warning: pod2rst not found. Using old parsort.rst"
+
+parallel.pdf: parallel.pod
+ pod2pdf --output-file "$(srcdir)"/parallel.pdf "$(srcdir)"/parallel.pod --title "GNU Parallel" \
+ || echo "Warning: pod2pdf not found. Using old parallel.pdf"
+
+env_parallel.pdf: env_parallel.pod
+ pod2pdf --output-file "$(srcdir)"/env_parallel.pdf "$(srcdir)"/env_parallel.pod --title "GNU Parallel with environment" \
+ || echo "Warning: pod2pdf not found. Using old env_parallel.pdf"
+
+parallel_examples.pdf: parallel_examples.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_examples.pdf "$(srcdir)"/parallel_examples.pod --title "GNU Parallel Examples" \
+ || echo "Warning: pod2pdf not found. Using old parallel_examples.pdf"
+
+parallel_tutorial.pdf: parallel_tutorial.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_tutorial.pdf "$(srcdir)"/parallel_tutorial.pod --title "GNU Parallel Tutorial" \
+ || echo "Warning: pod2pdf not found. Using old parallel_tutorial.pdf"
+
+parallel_book.pdf: parallel_book.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_book.pdf "$(srcdir)"/parallel_book.pod --title "GNU Parallel Book" \
+ || echo "Warning: pod2pdf not found. Using old parallel_book.pdf"
+
+parallel_design.pdf: parallel_design.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_design.pdf "$(srcdir)"/parallel_design.pod --title "GNU Parallel Design" \
+ || echo "Warning: pod2pdf not found. Using old parallel_design.pdf"
+
+parallel_alternatives.pdf: parallel_alternatives.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_alternatives.pdf "$(srcdir)"/parallel_alternatives.pod --title "GNU Parallel alternatives" \
+ || echo "Warning: pod2pdf not found. Using old parallel_alternatives.pdf"
+
+sem.pdf: sem.pod
+ pod2pdf --output-file "$(srcdir)"/sem.pdf "$(srcdir)"/sem.pod --title "GNU sem" \
+ || echo "Warning: pod2pdf not found. Using old sem.pdf"
+
+sql.pdf: sql
+ pod2pdf --output-file "$(srcdir)"/sql.pdf "$(srcdir)"/sql --title "GNU SQL" \
+ || echo "Warning: pod2pdf not found. Using old sql.pdf"
+
+niceload.pdf: niceload.pod
+ pod2pdf --output-file "$(srcdir)"/niceload.pdf "$(srcdir)"/niceload.pod --title "GNU niceload" \
+ || echo "Warning: pod2pdf not found. Using old niceload.pdf"
+
+parcat.pdf: parcat.pod
+ pod2pdf --output-file "$(srcdir)"/parcat.pdf "$(srcdir)"/parcat.pod --title "GNU parcat" \
+ || echo "Warning: pod2pdf not found. Using old parcat.pdf"
+
+parset.pdf: parset.pod
+ pod2pdf --output-file "$(srcdir)"/parset.pdf "$(srcdir)"/parset.pod --title "GNU parset" \
+ || echo "Warning: pod2pdf not found. Using old parset.pdf"
+
+parsort.pdf: parsort
+ pod2pdf --output-file "$(srcdir)"/parsort.pdf "$(srcdir)"/parsort --title "GNU parsort" \
+ || echo "Warning: pod2pdf not found. Using old parsort.pdf"
+
+parallel_cheat_bw.pdf: parallel_cheat_bw.fodt
+ libreoffice --headless --convert-to pdf parallel_cheat_bw.fodt \
+ || echo "Warning: libreoffice failed. Using old parallel_cheat_bw.pdf"
+
+parallel_options_map.pdf: parallel.pod pod2graph
+ ./pod2graph parallel.pod > parallel_options_map.pdf \
+ || echo "Warning: pod2graph failed. Using old parallel_options_map.pdf"
+
+sem: parallel
+ ln -fs parallel sem
+
diff --git a/src/Makefile.in b/src/Makefile.in
new file mode 100644
index 0000000..0df167f
--- /dev/null
+++ b/src/Makefile.in
@@ -0,0 +1,975 @@
+# Makefile.in generated by automake 1.16.5 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2021 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-FileCopyrightText: 2002-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+subdir = src
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" \
+ "$(DESTDIR)$(man7dir)" "$(DESTDIR)$(docdir)"
+SCRIPTS = $(bin_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+man1dir = $(mandir)/man1
+man7dir = $(mandir)/man7
+NROFF = nroff
+MANS = $(man_MANS)
+DATA = $(doc_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CSCOPE = @CSCOPE@
+CTAGS = @CTAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+ETAGS = @ETAGS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+am__leading_dot = @am__leading_dot@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build_alias = @build_alias@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host_alias = @host_alias@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+bin_SCRIPTS = parallel sql niceload parcat parset parsort \
+ env_parallel env_parallel.ash env_parallel.bash \
+ env_parallel.csh env_parallel.dash env_parallel.fish \
+ env_parallel.ksh env_parallel.mksh env_parallel.pdksh \
+ env_parallel.sh env_parallel.tcsh env_parallel.zsh
+
+POD_FILES = parcat.pod parset.pod sem.pod parallel.pod \
+ env_parallel.pod niceload.pod parallel_examples.pod \
+ parallel_tutorial.pod parallel_book.pod parallel_design.pod \
+ parallel_alternatives.pod
+
+MAN_FILES = parallel.1 env_parallel.1 sem.1 sql.1 niceload.1 \
+ parallel_examples.7 parallel_tutorial.7 parallel_book.7 \
+ parallel_design.7 parallel_alternatives.7 parcat.1 parset.1 \
+ parsort.1
+
+PDF_MAN = parallel.pdf env_parallel.pdf sem.pdf sql.pdf \
+ niceload.pdf parallel_examples.pdf parallel_tutorial.pdf \
+ parallel_book.pdf parallel_design.pdf \
+ parallel_alternatives.pdf parcat.pdf parset.pdf parsort.pdf
+
+PDF_OTHER = parallel_cheat_bw.pdf parallel_options_map.pdf
+PDF_FILES = $(PDF_MAN) $(PDF_OTHER)
+RST_FILES = parallel.rst env_parallel.rst sem.rst sql.rst \
+ niceload.rst parallel_examples.rst parallel_tutorial.rst \
+ parallel_book.rst parallel_design.rst \
+ parallel_alternatives.rst parcat.rst parset.rst parsort.rst
+
+TEXI_FILES = parallel.texi env_parallel.texi sem.texi sql.texi \
+ niceload.texi parallel_examples.texi parallel_tutorial.texi \
+ parallel_book.texi parallel_design.texi \
+ parallel_alternatives.texi parcat.texi parset.texi \
+ parsort.texi
+
+HTML_FILES = parallel.html env_parallel.html sem.html sql.html \
+ niceload.html parallel_examples.html parallel_tutorial.html \
+ parallel_book.html parallel_design.html \
+ parallel_alternatives.html parcat.html parset.html \
+ parsort.html
+
+@DOCUMENTATION_TRUE@man_MANS = $(MAN_FILES)
+@DOCUMENTATION_TRUE@doc_DATA = $(HTML_FILES) $(TEXI_FILES) $(RST_FILES) $(PDF_FILES)
+DISTCLEANFILES = $(MAN_FILES) $(HTML_FILES) $(TEXI_FILES) \
+ $(RST_FILES) $(PDF_FILES)
+
+EXTRA_DIST = $(bin_SCRIPTS) sem $(POD_FILES) \
+ parallel_cheat_bw.fodt pod2graph $(DISTCLEANFILES)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-binSCRIPTS: $(bin_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(bin_SCRIPTS)'; test -n "$(bindir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-binSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(bin_SCRIPTS)'; test -n "$(bindir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(bindir)'; $(am__uninstall_files_from_dir)
+install-man1: $(man_MANS)
+ @$(NORMAL_INSTALL)
+ @list1=''; \
+ list2='$(man_MANS)'; \
+ test -n "$(man1dir)" \
+ && test -n "`echo $$list1$$list2`" \
+ || exit 0; \
+ echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \
+ { for i in $$list1; do echo "$$i"; done; \
+ if test -n "$$list2"; then \
+ for i in $$list2; do echo "$$i"; done \
+ | sed -n '/\.1[a-z]*$$/p'; \
+ fi; \
+ } | while read p; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; echo "$$p"; \
+ done | \
+ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \
+ sed 'N;N;s,\n, ,g' | { \
+ list=; while read file base inst; do \
+ if test "$$base" = "$$inst"; then list="$$list $$file"; else \
+ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \
+ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \
+ fi; \
+ done; \
+ for i in $$list; do echo "$$i"; done | $(am__base_list) | \
+ while read files; do \
+ test -z "$$files" || { \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \
+ done; }
+
+uninstall-man1:
+ @$(NORMAL_UNINSTALL)
+ @list=''; test -n "$(man1dir)" || exit 0; \
+ files=`{ for i in $$list; do echo "$$i"; done; \
+ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.1[a-z]*$$/p'; \
+ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \
+ dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir)
+install-man7: $(man_MANS)
+ @$(NORMAL_INSTALL)
+ @list1=''; \
+ list2='$(man_MANS)'; \
+ test -n "$(man7dir)" \
+ && test -n "`echo $$list1$$list2`" \
+ || exit 0; \
+ echo " $(MKDIR_P) '$(DESTDIR)$(man7dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(man7dir)" || exit 1; \
+ { for i in $$list1; do echo "$$i"; done; \
+ if test -n "$$list2"; then \
+ for i in $$list2; do echo "$$i"; done \
+ | sed -n '/\.7[a-z]*$$/p'; \
+ fi; \
+ } | while read p; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; echo "$$p"; \
+ done | \
+ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \
+ sed 'N;N;s,\n, ,g' | { \
+ list=; while read file base inst; do \
+ if test "$$base" = "$$inst"; then list="$$list $$file"; else \
+ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \
+ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst" || exit $$?; \
+ fi; \
+ done; \
+ for i in $$list; do echo "$$i"; done | $(am__base_list) | \
+ while read files; do \
+ test -z "$$files" || { \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man7dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(man7dir)" || exit $$?; }; \
+ done; }
+
+uninstall-man7:
+ @$(NORMAL_UNINSTALL)
+ @list=''; test -n "$(man7dir)" || exit 0; \
+ files=`{ for i in $$list; do echo "$$i"; done; \
+ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.7[a-z]*$$/p'; \
+ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \
+ dir='$(DESTDIR)$(man7dir)'; $(am__uninstall_files_from_dir)
+install-docDATA: $(doc_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(doc_DATA)'; test -n "$(docdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(docdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(docdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(docdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(docdir)" || exit $$?; \
+ done
+
+uninstall-docDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(doc_DATA)'; test -n "$(docdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(docdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+distdir: $(BUILT_SOURCES)
+ $(MAKE) $(AM_MAKEFLAGS) distdir-am
+
+distdir-am: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(MANS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" "$(DESTDIR)$(docdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+ -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-docDATA install-man
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binSCRIPTS
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-exec-hook
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man: install-man1 install-man7
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binSCRIPTS uninstall-docDATA uninstall-man
+
+uninstall-man: uninstall-man1 uninstall-man7
+
+.MAKE: install-am install-exec-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-binSCRIPTS \
+ install-data install-data-am install-docDATA install-dvi \
+ install-dvi-am install-exec install-exec-am install-exec-hook \
+ install-html install-html-am install-info install-info-am \
+ install-man install-man1 install-man7 install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-binSCRIPTS uninstall-docDATA uninstall-man \
+ uninstall-man1 uninstall-man7
+
+.PRECIOUS: Makefile
+
+
+install-exec-hook:
+ rm "$(DESTDIR)$(bindir)"/sem || true
+ $(LN_S) parallel "$(DESTDIR)$(bindir)"/sem
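+# ('sem' is a symlink to 'parallel'; invoked under that name, parallel
+# runs in semaphore mode, i.e. the equivalent of 'parallel --semaphore'.)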
+
+web: sphinx
+ true
+
+sphinx: *.rst
+	cd sphinx && $(MAKE)
+
+# Build documentation file if the tool to build exists.
+# Otherwise: Use the distributed version
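+#
+# Every rule below follows the same fallback shape (a sketch):
+#
+#   target: source
+#	converter source > target.tmp && mv target.tmp target \
+#	|| echo "Warning: converter not found. Using old target"
+#
+# so a missing pod2man/pod2html/pod2texi/pod2rst/pod2pdf degrades to a
+# warning and the distributed file is kept, instead of failing the build.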
+parallel.1: parallel.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/parallel.pod > "$(srcdir)"/parallel.1n \
+ && mv "$(srcdir)"/parallel.1n "$(srcdir)"/parallel.1 \
+ || echo "Warning: pod2man not found. Using old parallel.1"
+
+env_parallel.1: env_parallel.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/env_parallel.pod > "$(srcdir)"/env_parallel.1n \
+ && mv "$(srcdir)"/env_parallel.1n "$(srcdir)"/env_parallel.1 \
+ || echo "Warning: pod2man not found. Using old env_parallel.1"
+
+parallel_examples.7: parallel_examples.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_examples.pod > "$(srcdir)"/parallel_examples.7n \
+ && mv "$(srcdir)"/parallel_examples.7n "$(srcdir)"/parallel_examples.7 \
+ || echo "Warning: pod2man not found. Using old parallel_examples.7"
+
+parallel_tutorial.7: parallel_tutorial.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_tutorial.pod > "$(srcdir)"/parallel_tutorial.7n \
+ && mv "$(srcdir)"/parallel_tutorial.7n "$(srcdir)"/parallel_tutorial.7 \
+ || echo "Warning: pod2man not found. Using old parallel_tutorial.7"
+
+parallel_book.7: parallel_book.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_book.pod > "$(srcdir)"/parallel_book.7n \
+ && mv "$(srcdir)"/parallel_book.7n "$(srcdir)"/parallel_book.7 \
+ || echo "Warning: pod2man not found. Using old parallel_book.7"
+
+parallel_design.7: parallel_design.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_design.pod > "$(srcdir)"/parallel_design.7n \
+ && mv "$(srcdir)"/parallel_design.7n "$(srcdir)"/parallel_design.7 \
+ || echo "Warning: pod2man not found. Using old parallel_design.7"
+
+parallel_alternatives.7: parallel_alternatives.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=7 "$(srcdir)"/parallel_alternatives.pod > "$(srcdir)"/parallel_alternatives.7n \
+ && mv "$(srcdir)"/parallel_alternatives.7n "$(srcdir)"/parallel_alternatives.7 \
+ || echo "Warning: pod2man not found. Using old parallel_alternatives.7"
+
+sem.1: sem.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/sem.pod > "$(srcdir)"/sem.1n \
+ && mv "$(srcdir)"/sem.1n "$(srcdir)"/sem.1 \
+ || echo "Warning: pod2man not found. Using old sem.1"
+
+sql.1: sql
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/sql > "$(srcdir)"/sql.1n \
+ && mv "$(srcdir)"/sql.1n "$(srcdir)"/sql.1 \
+ || echo "Warning: pod2man not found. Using old sql.1"
+
+niceload.1: niceload.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/niceload.pod > "$(srcdir)"/niceload.1n \
+ && mv "$(srcdir)"/niceload.1n "$(srcdir)"/niceload.1 \
+ || echo "Warning: pod2man not found. Using old niceload.1"
+
+parcat.1: parcat.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/parcat.pod > "$(srcdir)"/parcat.1n \
+ && mv "$(srcdir)"/parcat.1n "$(srcdir)"/parcat.1 \
+ || echo "Warning: pod2man not found. Using old parcat.1"
+
+parset.1: parset.pod
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/parset.pod > "$(srcdir)"/parset.1n \
+ && mv "$(srcdir)"/parset.1n "$(srcdir)"/parset.1 \
+ || echo "Warning: pod2man not found. Using old parset.1"
+
+parsort.1: parsort
+ pod2man --release='$(PACKAGE_VERSION)' --center='$(PACKAGE_NAME)' \
+ --section=1 "$(srcdir)"/parsort > "$(srcdir)"/parsort.1n \
+ && mv "$(srcdir)"/parsort.1n "$(srcdir)"/parsort.1 \
+ || echo "Warning: pod2man not found. Using old parsort.1"
+
+parallel.html: parallel.pod
+ pod2html --title "GNU Parallel" "$(srcdir)"/parallel.pod > "$(srcdir)"/parallel.htmln \
+ && mv "$(srcdir)"/parallel.htmln "$(srcdir)"/parallel.html \
+ || echo "Warning: pod2html not found. Using old parallel.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel.html to avoid stupid pod2html race condition
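+# (pod2html keeps its cross-reference cache in pod2htm* files in the current
+# directory -- hence the 'rm -f pod2htm*' above -- so concurrent pod2html
+# runs would clobber each other. Chaining the *.html targets through these
+# extra dependencies serializes them under 'make -j'.)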
+env_parallel.html: env_parallel.pod parallel.html
+ pod2html --title "GNU Parallel with environment" "$(srcdir)"/env_parallel.pod > "$(srcdir)"/env_parallel.htmln \
+ && mv "$(srcdir)"/env_parallel.htmln "$(srcdir)"/env_parallel.html \
+ || echo "Warning: pod2html not found. Using old env_parallel.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on env_parallel.html to avoid stupid pod2html race condition
+parallel_examples.html: parallel_examples.pod env_parallel.html
+ pod2html --title "GNU Parallel examples" "$(srcdir)"/parallel_examples.pod > "$(srcdir)"/parallel_examples.htmln \
+ && mv "$(srcdir)"/parallel_examples.htmln "$(srcdir)"/parallel_examples.html \
+ || echo "Warning: pod2html not found. Using old parallel_examples.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_examples.html to avoid stupid pod2html race condition
+parallel_tutorial.html: parallel_tutorial.pod parallel_examples.html
+ pod2html --title "GNU Parallel tutorial" "$(srcdir)"/parallel_tutorial.pod > "$(srcdir)"/parallel_tutorial.htmln \
+ && mv "$(srcdir)"/parallel_tutorial.htmln "$(srcdir)"/parallel_tutorial.html \
+ || echo "Warning: pod2html not found. Using old parallel_tutorial.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_tutorial.html to avoid stupid pod2html race condition
+parallel_book.html: parallel_book.pod parallel_tutorial.html
+ pod2html --title "GNU Parallel book" "$(srcdir)"/parallel_book.pod > "$(srcdir)"/parallel_book.htmln \
+ && mv "$(srcdir)"/parallel_book.htmln "$(srcdir)"/parallel_book.html \
+ || echo "Warning: pod2html not found. Using old parallel_book.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_book.html to avoid stupid pod2html race condition
+parallel_design.html: parallel_design.pod parallel_book.html
+ pod2html --title "GNU Parallel design" "$(srcdir)"/parallel_design.pod > "$(srcdir)"/parallel_design.htmln \
+ && mv "$(srcdir)"/parallel_design.htmln "$(srcdir)"/parallel_design.html \
+ || echo "Warning: pod2html not found. Using old parallel_design.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_design.html to avoid stupid pod2html race condition
+parallel_alternatives.html: parallel_alternatives.pod parallel_design.html
+ pod2html --title "GNU Parallel alternatives" "$(srcdir)"/parallel_alternatives.pod > "$(srcdir)"/parallel_alternatives.htmln \
+ && mv "$(srcdir)"/parallel_alternatives.htmln "$(srcdir)"/parallel_alternatives.html \
+ || echo "Warning: pod2html not found. Using old parallel_alternatives.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parallel_alternatives.html to avoid stupid pod2html race condition
+sem.html: sem.pod parallel_alternatives.html
+ pod2html --title "sem (GNU Parallel)" "$(srcdir)"/sem.pod > "$(srcdir)"/sem.htmln \
+ && mv "$(srcdir)"/sem.htmln "$(srcdir)"/sem.html \
+ || echo "Warning: pod2html not found. Using old sem.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on sem.html to avoid stupid pod2html race condition
+sql.html: sql sem.html
+ pod2html --title "GNU SQL" "$(srcdir)"/sql > "$(srcdir)"/sql.htmln \
+ && mv "$(srcdir)"/sql.htmln "$(srcdir)"/sql.html \
+ || echo "Warning: pod2html not found. Using old sql.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on sql.html to avoid stupid pod2html race condition
+niceload.html: niceload.pod sql.html
+ pod2html --title "GNU niceload" "$(srcdir)"/niceload.pod > "$(srcdir)"/niceload.htmln \
+ && mv "$(srcdir)"/niceload.htmln "$(srcdir)"/niceload.html \
+ || echo "Warning: pod2html not found. Using old niceload.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on niceload.html to avoid stupid pod2html race condition
+parcat.html: parcat.pod niceload.html
+ pod2html --title "GNU parcat" "$(srcdir)"/parcat.pod > "$(srcdir)"/parcat.htmln \
+ && mv "$(srcdir)"/parcat.htmln "$(srcdir)"/parcat.html \
+ || echo "Warning: pod2html not found. Using old parcat.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parcat.html to avoid stupid pod2html race condition
+parset.html: parset.pod parcat.html
+ pod2html --title "GNU parset" "$(srcdir)"/parset.pod > "$(srcdir)"/parset.htmln \
+ && mv "$(srcdir)"/parset.htmln "$(srcdir)"/parset.html \
+ || echo "Warning: pod2html not found. Using old parset.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+# Depending on parset.html to avoid stupid pod2html race condition
+parsort.html: parsort parset.html
+ pod2html --title "GNU parsort" "$(srcdir)"/parsort > "$(srcdir)"/parsort.htmln \
+ && mv "$(srcdir)"/parsort.htmln "$(srcdir)"/parsort.html \
+ || echo "Warning: pod2html not found. Using old parsort.html"
+ rm -f "$(srcdir)"/pod2htm*
+
+parallel.texi: parallel.pod
+ pod2texi --output="$(srcdir)"/parallel.texi "$(srcdir)"/parallel.pod \
+ || echo "Warning: pod2texi not found. Using old parallel.texi"
+
+env_parallel.texi: env_parallel.pod
+ pod2texi --output="$(srcdir)"/env_parallel.texi "$(srcdir)"/env_parallel.pod \
+ || echo "Warning: pod2texi not found. Using old env_parallel.texi"
+
+parallel_examples.texi: parallel_examples.pod
+ pod2texi --output="$(srcdir)"/parallel_examples.texi "$(srcdir)"/parallel_examples.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_examples.texi"
+
+parallel_tutorial.texi: parallel_tutorial.pod
+ pod2texi --output="$(srcdir)"/parallel_tutorial.texi "$(srcdir)"/parallel_tutorial.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_tutorial.texi"
+
+parallel_book.texi: parallel_book.pod
+ pod2texi --output="$(srcdir)"/parallel_book.texi "$(srcdir)"/parallel_book.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_book.texi"
+
+parallel_design.texi: parallel_design.pod
+ pod2texi --output="$(srcdir)"/parallel_design.texi "$(srcdir)"/parallel_design.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_design.texi"
+
+parallel_alternatives.texi: parallel_alternatives.pod
+ pod2texi --output="$(srcdir)"/parallel_alternatives.texi "$(srcdir)"/parallel_alternatives.pod \
+ || echo "Warning: pod2texi not found. Using old parallel_alternatives.texi"
+
+sem.texi: sem.pod
+ pod2texi --output="$(srcdir)"/sem.texi "$(srcdir)"/sem.pod \
+ || echo "Warning: pod2texi not found. Using old sem.texi"
+
+sql.texi: sql
+ pod2texi --output="$(srcdir)"/sql.texi "$(srcdir)"/sql \
+ || echo "Warning: pod2texi not found. Using old sql.texi"
+
+niceload.texi: niceload.pod
+ pod2texi --output="$(srcdir)"/niceload.texi "$(srcdir)"/niceload.pod \
+ || echo "Warning: pod2texi not found. Using old niceload.texi"
+
+parcat.texi: parcat.pod
+ pod2texi --output="$(srcdir)"/parcat.texi "$(srcdir)"/parcat.pod \
+ || echo "Warning: pod2texi not found. Using old parcat.texi"
+
+parset.texi: parset.pod
+ pod2texi --output="$(srcdir)"/parset.texi "$(srcdir)"/parset.pod \
+ || echo "Warning: pod2texi not found. Using old parset.texi"
+
+parsort.texi: parsort
+ pod2texi --output="$(srcdir)"/parsort.texi "$(srcdir)"/parsort \
+ || echo "Warning: pod2texi not found. Using old parsort.texi"
+
+parallel.rst: parallel.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel.pod > "$(srcdir)"/parallel.rst \
+ || echo "Warning: pod2rst not found. Using old parallel.rst"
+
+env_parallel.rst: env_parallel.pod
+ ./pod2rst-fix < "$(srcdir)"/env_parallel.pod > "$(srcdir)"/env_parallel.rst \
+ || echo "Warning: pod2rst not found. Using old env_parallel.rst"
+
+parallel_examples.rst: parallel_examples.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_examples.pod > "$(srcdir)"/parallel_examples.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_examples.rst"
+
+parallel_tutorial.rst: parallel_tutorial.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_tutorial.pod > "$(srcdir)"/parallel_tutorial.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_tutorial.rst"
+
+parallel_book.rst: parallel_book.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_book.pod > "$(srcdir)"/parallel_book.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_book.rst"
+
+parallel_design.rst: parallel_design.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_design.pod > "$(srcdir)"/parallel_design.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_design.rst"
+
+parallel_alternatives.rst: parallel_alternatives.pod
+ ./pod2rst-fix < "$(srcdir)"/parallel_alternatives.pod > "$(srcdir)"/parallel_alternatives.rst \
+ || echo "Warning: pod2rst not found. Using old parallel_alternatives.rst"
+
+sem.rst: sem.pod
+ ./pod2rst-fix < "$(srcdir)"/sem.pod > "$(srcdir)"/sem.rst \
+ || echo "Warning: pod2rst not found. Using old sem.rst"
+
+sql.rst: sql
+ ./pod2rst-fix < "$(srcdir)"/sql > "$(srcdir)"/sql.rst \
+ || echo "Warning: pod2rst not found. Using old sql.rst"
+
+niceload.rst: niceload.pod
+ ./pod2rst-fix < "$(srcdir)"/niceload.pod > "$(srcdir)"/niceload.rst \
+ || echo "Warning: pod2rst not found. Using old niceload.rst"
+
+parcat.rst: parcat.pod
+ ./pod2rst-fix < "$(srcdir)"/parcat.pod > "$(srcdir)"/parcat.rst \
+ || echo "Warning: pod2rst not found. Using old parcat.rst"
+
+parset.rst: parset.pod
+ ./pod2rst-fix < "$(srcdir)"/parset.pod > "$(srcdir)"/parset.rst \
+ || echo "Warning: pod2rst not found. Using old parset.rst"
+
+parsort.rst: parsort
+ ./pod2rst-fix < "$(srcdir)"/parsort > "$(srcdir)"/parsort.rst \
+ || echo "Warning: pod2rst not found. Using old parsort.rst"
+
+parallel.pdf: parallel.pod
+ pod2pdf --output-file "$(srcdir)"/parallel.pdf "$(srcdir)"/parallel.pod --title "GNU Parallel" \
+ || echo "Warning: pod2pdf not found. Using old parallel.pdf"
+
+env_parallel.pdf: env_parallel.pod
+ pod2pdf --output-file "$(srcdir)"/env_parallel.pdf "$(srcdir)"/env_parallel.pod --title "GNU Parallel with environment" \
+ || echo "Warning: pod2pdf not found. Using old env_parallel.pdf"
+
+parallel_examples.pdf: parallel_examples.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_examples.pdf "$(srcdir)"/parallel_examples.pod --title "GNU Parallel Examples" \
+ || echo "Warning: pod2pdf not found. Using old parallel_examples.pdf"
+
+parallel_tutorial.pdf: parallel_tutorial.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_tutorial.pdf "$(srcdir)"/parallel_tutorial.pod --title "GNU Parallel Tutorial" \
+ || echo "Warning: pod2pdf not found. Using old parallel_tutorial.pdf"
+
+parallel_book.pdf: parallel_book.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_book.pdf "$(srcdir)"/parallel_book.pod --title "GNU Parallel Book" \
+ || echo "Warning: pod2pdf not found. Using old parallel_book.pdf"
+
+parallel_design.pdf: parallel_design.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_design.pdf "$(srcdir)"/parallel_design.pod --title "GNU Parallel Design" \
+ || echo "Warning: pod2pdf not found. Using old parallel_design.pdf"
+
+parallel_alternatives.pdf: parallel_alternatives.pod
+ pod2pdf --output-file "$(srcdir)"/parallel_alternatives.pdf "$(srcdir)"/parallel_alternatives.pod --title "GNU Parallel alternatives" \
+ || echo "Warning: pod2pdf not found. Using old parallel_alternatives.pdf"
+
+sem.pdf: sem.pod
+ pod2pdf --output-file "$(srcdir)"/sem.pdf "$(srcdir)"/sem.pod --title "GNU sem" \
+ || echo "Warning: pod2pdf not found. Using old sem.pdf"
+
+sql.pdf: sql
+ pod2pdf --output-file "$(srcdir)"/sql.pdf "$(srcdir)"/sql --title "GNU SQL" \
+ || echo "Warning: pod2pdf not found. Using old sql.pdf"
+
+niceload.pdf: niceload.pod
+ pod2pdf --output-file "$(srcdir)"/niceload.pdf "$(srcdir)"/niceload.pod --title "GNU niceload" \
+ || echo "Warning: pod2pdf not found. Using old niceload.pdf"
+
+parcat.pdf: parcat.pod
+ pod2pdf --output-file "$(srcdir)"/parcat.pdf "$(srcdir)"/parcat.pod --title "GNU parcat" \
+ || echo "Warning: pod2pdf not found. Using old parcat.pdf"
+
+parset.pdf: parset.pod
+ pod2pdf --output-file "$(srcdir)"/parset.pdf "$(srcdir)"/parset.pod --title "GNU parset" \
+ || echo "Warning: pod2pdf not found. Using old parset.pdf"
+
+parsort.pdf: parsort
+ pod2pdf --output-file "$(srcdir)"/parsort.pdf "$(srcdir)"/parsort --title "GNU parsort" \
+ || echo "Warning: pod2pdf not found. Using old parsort.pdf"
+
+parallel_cheat_bw.pdf: parallel_cheat_bw.fodt
+ libreoffice --headless --convert-to pdf parallel_cheat_bw.fodt \
+ || echo "Warning: libreoffice failed. Using old parallel_cheat_bw.pdf"
+
+parallel_options_map.pdf: parallel.pod pod2graph
+ ./pod2graph parallel.pod > parallel_options_map.pdf \
+ || echo "Warning: pod2graph failed. Using old parallel_options_map.pdf"
+
+sem: parallel
+ ln -fs parallel sem
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/env_parallel b/src/env_parallel
new file mode 100755
index 0000000..0f05192
--- /dev/null
+++ b/src/env_parallel
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+grepq() {
+ # grep -q for systems without -q
+ grep >/dev/null 2>/dev/null "$@"
+}
+
+installer() {
+ source="$1"
+ script="$2"
+ into="$3"
+ if grepq $script "$into"; then
+ true already installed
+ else
+ echo $source \`which $script\` >> "$into"
+ fi
+}
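+
+# Example of the call shape used below (a sketch):
+#   installer . env_parallel.bash "$HOME"/.bashrc
+# appends '. `which env_parallel.bash`' to ~/.bashrc unless that file
+# already mentions env_parallel.bash.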
+
+while test $# -gt 0; do
+ key="$1"
+
+ case $key in
+ -i|--install)
+ installer . env_parallel.bash "$HOME"/.bashrc
+ installer . env_parallel.sh "$HOME"/.shrc
+ installer . env_parallel.zsh "$HOME"/.zshenv
+ installer source env_parallel.ksh "$HOME"/.kshrc
+ installer source env_parallel.mksh "$HOME"/.mkshrc
+ echo $SHELL | grepq /pdksh &&
+ installer . env_parallel.pdksh "$HOME"/.profile
+ echo $SHELL | grepq /ash &&
+ installer . env_parallel.ash "$HOME"/.profile
+ echo $SHELL | grepq /dash &&
+ installer . env_parallel.dash "$HOME"/.profile
+ installer source env_parallel.csh "$HOME"/.cshrc
+ installer source env_parallel.tcsh "$HOME"/.tcshrc
+ mkdir -p "$HOME"/.config/fish
+ grepq env_parallel.fish "$HOME"/.config/fish/config.fish ||
+ echo '. (which env_parallel.fish)' >> "$HOME"/.config/fish/config.fish
+ echo 'Installed env_parallel in:'
+ echo " " "$HOME"/.bashrc
+ echo " " "$HOME"/.shrc
+ echo " " "$HOME"/.zshenv
+ echo " " "$HOME"/.config/fish/config.fish
+ echo " " "$HOME"/.kshrc
+ echo " " "$HOME"/.mkshrc
+ echo " " "$HOME"/.profile
+ echo " " "$HOME"/.cshrc
+ echo " " "$HOME"/.tcshrc
+ exit
+ ;;
+ *)
+ echo "Unknown option: $key"
+ ;;
+ esac
+ shift # past argument or value
+done
+
+
+cat <<'_EOS'
+You have called the dummy script "env_parallel".
+
+env_parallel only works if it is a function.
+
+You need to do this and restart your shell:
+
+bash: Put this in $HOME/.bashrc: . env_parallel.bash
+ E.g. by doing: echo '. env_parallel.bash' >> $HOME/.bashrc
+ Supports: variables, aliases, functions, arrays
+
+fish: Put this in $HOME/.config/fish/config.fish: . (which env_parallel.fish)
+ E.g. by doing:
+ echo '. (which env_parallel.fish)' >> $HOME/.config/fish/config.fish
+ Supports: variables, aliases, functions, arrays
+
+ksh: Put this in $HOME/.kshrc: source env_parallel.ksh
+ E.g. by doing: echo 'source env_parallel.ksh' >> $HOME/.kshrc
+ Supports: variables, aliases, functions, arrays
+
+mksh: Put this in $HOME/.mkshrc: source env_parallel.mksh
+ E.g. by doing: echo 'source env_parallel.mksh' >> $HOME/.mkshrc
+ Supports: variables, aliases, functions, arrays
+
+pdksh: Put this in $HOME/.profile: . env_parallel.pdksh
+ E.g. by doing: echo '. env_parallel.pdksh' >> $HOME/.profile
+ Supports: variables, aliases, functions, arrays
+
+zsh: Put this in $HOME/.zshenv: . env_parallel.zsh
+ E.g. by doing: echo '. env_parallel.zsh' >> $HOME/.zshenv
+ Supports: variables, functions, arrays
+
+ash: Put this in $HOME/.profile: . env_parallel.ash
+ E.g. by doing: echo '. env_parallel.ash' >> $HOME/.profile
+ Supports: variables, aliases
+
+dash: Put this in $HOME/.profile: . env_parallel.dash
+ E.g. by doing: echo '. env_parallel.dash' >> $HOME/.profile
+ Supports: variables, aliases
+
+csh: Put this in $HOME/.cshrc: source `which env_parallel.csh`
+ E.g. by doing: echo 'source `which env_parallel.csh`' >> $HOME/.cshrc
+ Supports: variables, aliases, arrays with no special chars
+
+tcsh: Put this in $HOME/.tcshrc: source `which env_parallel.tcsh`
+ E.g. by doing: echo 'source `which env_parallel.tcsh`' >> $HOME/.tcshrc
+ Supports: variables, aliases, arrays with no special chars
+
+To install in all shells run:
+
+ env_parallel --install
+
+In a script you need to run this before using env_parallel:
+
+bash: . env_parallel.bash
+ksh: source env_parallel.ksh
+mksh: source env_parallel.mksh
+pdksh: source env_parallel.pdksh
+zsh: . env_parallel.zsh
+ash: . env_parallel.ash
+dash: . env_parallel.dash
+
+For details: see man env_parallel
+
+_EOS
diff --git a/src/env_parallel.ash b/src/env_parallel.ash
new file mode 100755
index 0000000..b41f76e
--- /dev/null
+++ b/src/env_parallel.ash
@@ -0,0 +1,636 @@
+#!/usr/bin/env ash
+
+# This file must be sourced in sh/ash/dash/bash/ksh/mksh/zsh:
+#
+# . env_parallel.sh
+# source env_parallel.ash
+# source env_parallel.dash
+# source env_parallel.bash
+# source env_parallel.ksh
+# source env_parallel.mksh
+# source env_parallel.zsh
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck disable=SC2006
+
+env_parallel() {
+ # env_parallel.{sh,ash,dash,bash,ksh,mksh,zsh}
+
+ # Check shell dialect
+ if [ -n "$BASH_VERSION" ]; then
+ _shell_DIALECT=bash
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=_prefix_PARALLEL_ENV_bash
+ elif [ -n "$ZSH_VERSION" ]; then
+ _shell_DIALECT=zsh
+ _eval_needed=true
+ _prefix_PARALLEL_ENV=false
+ elif [ -n "$KSH_VERSION" ]; then
+ _shell_DIALECT=ksh
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ else
+ # Dash/ash - can these be detected better?
+ _shell_DIALECT="sh"
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ fi
+ _names_of_ALIASES() {
+ _names_of_ALIASES_$_shell_DIALECT
+ }
+ _names_of_ALIASES_sh() {
+ # alias fails on Unixware 5
+ for _i in `alias 2>/dev/null | perl -ne 's/^alias //;s/^(\S+)=.*/$1/ && print' 2>/dev/null`; do
+ # Check if this name really is an alias
+ # or just part of a multiline alias definition
+ if alias "$_i" >/dev/null 2>/dev/null; then
+ echo "$_i"
+ fi
+ done
+ }
+ _names_of_ALIASES_bash() {
+ # No aliases will return false. This error should be ignored.
+ # shellcheck disable=SC3044
+ compgen -a || true
+ }
+ _names_of_ALIASES_ksh() {
+ alias | perl -pe 's/=.*//'
+ }
+ _names_of_ALIASES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)aliases}
+ }
+ _bodies_of_ALIASES() {
+ _bodies_of_ALIASES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_ALIASES_sh() {
+ # alias may return:
+ # myalias='definition' (GNU/Linux ash)
+ # alias myalias='definition' (FreeBSD ash)
+ # so remove 'alias ' from first line
+ for _i in "$@"; do
+ echo 'alias '"`alias "$_i" | perl -pe '1..1 and s/^alias //'`"
+ done
+ }
+ _bodies_of_ALIASES_bash() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ # shellcheck disable=SC2046
+ if [ $(alias "$_i" | wc -l) = 1 ] ; then
+ true Alias is a single line. Good.
+ else
+ _warning_PAR "Alias '$_i' contains newline."
+ _warning_PAR "Make sure the command has at least one newline after '$_i'."
+ _warning_PAR "See BUGS in 'man env_parallel'."
+ fi
+ done
+ alias "$@"
+ }
+ _bodies_of_ALIASES_ksh() {
+ alias "$@" | perl -pe 's/^/alias /;
+ sub warning { print STDERR "env_parallel: Warning: @_\n"; }
+ if(/^alias (\S+)=\$.*\\n/) {
+ warning("Alias \"$1\" contains newline.");
+ warning("Make sure the command has at least one newline after \"$1\".");
+ warning("See BUGS in \"man env_parallel\".");
+ }'
+
+ }
+ _bodies_of_ALIASES_zsh() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ echo 'alias '"$(alias "$_i")"
+ done
+ }
+ _names_of_FUNCTIONS() {
+ _names_of_FUNCTIONS_$_shell_DIALECT
+ }
+ _names_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ compgen -A function
+ }
+ _names_of_maybe_FUNCTIONS() {
+ set | perl -ne '/^([A-Z_0-9]+)\s*\(\)\s*\{?$/i and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_sh() {
+ # myfunc is a function
+ # shellcheck disable=SC2046
+ LANG=C type `_names_of_maybe_FUNCTIONS` |
+ perl -ne '/^(\S+) is a function$/ and not $seen{$1}++ and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_ksh() {
+ # shellcheck disable=SC3044
+ typeset +f | perl -pe 's/\(\).*//; s/ .*//;'
+ }
+ _names_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)functions}
+ }
+ _bodies_of_FUNCTIONS() {
+ _bodies_of_FUNCTIONS_$_shell_DIALECT "$@"
+ }
+ _bodies_of_FUNCTIONS_sh() {
+ LANG=C type "$@" | perl -ne '/^(\S+) is a function$/ or print'
+ }
+ _bodies_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _bodies_of_FUNCTIONS_ksh() {
+ functions "$@"
+ }
+ _bodies_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _names_of_VARIABLES() {
+ _names_of_VARIABLES_$_shell_DIALECT
+ }
+ _names_of_VARIABLES_sh() {
+ # This may screw up if variables contain \n and =
+ set | perl -ne 's/^(\S+?)=.*/$1/ and print;'
+ }
+ _names_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ compgen -A variable
+ }
+ _names_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset +p |
+ perl -pe 's/^(type)?set( [-+][a-zA-Z0-9]*)* //; s/(\[\d+\])?=.*//' |
+ uniq
+ }
+ _names_of_VARIABLES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)parameters}
+ }
+ _bodies_of_VARIABLES() {
+ _bodies_of_VARIABLES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_VARIABLES_sh() {
+ # Crappy typeset -p
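+	# Emulate 'typeset -p' for shells that lack it: print NAME=VALUE with
+	# shell metacharacters backslash-escaped and embedded newlines quoted,
+	# so the result can later be eval'ed back (a sketch of the intent).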
+ for _i in "$@"
+ do
+ perl -e 'print @ARGV' "$_i="
+ eval echo "\"\$$_i\"" | perl -e '$/=undef; $a=<>; chop($a); print $a' |
+ perl -pe 's/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\202-\377]/\\$&/go;'"s/'/\\\'/g; s/[\n]/'\\n'/go;";
+ echo
+ done
+ }
+ _bodies_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_zsh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _ignore_HARDCODED() {
+ _ignore_HARDCODED_$_shell_DIALECT
+ }
+ _ignore_HARDCODED_sh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_bash() {
+ # Copying $RANDOM will cause it not to be random
+ # The rest cannot be detected as read-only
+ echo '(RANDOM|_|TIMEOUT|GROUPS|FUNCNAME|DIRSTACK|PIPESTATUS|USERNAME|BASHPID|BASH_[A-Z_]+)'
+ }
+ _ignore_HARDCODED_ksh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_zsh() {
+ # These names cannot be detected
+ echo '([-\?\#\!\$\*\@\_0]|zsh_eval_context|ZSH_EVAL_CONTEXT|LINENO|IFS|commands|functions|options|aliases|EUID|EGID|UID|GID|dis_patchars|patchars|terminfo|galiases|keymaps|parameters|jobdirs|dirstack|functrace|funcsourcetrace|zsh_scheduled_events|dis_aliases|dis_reswords|dis_saliases|modules|reswords|saliases|widgets|userdirs|historywords|nameddirs|termcap|dis_builtins|dis_functions|jobtexts|funcfiletrace|dis_galiases|builtins|history|jobstates|funcstack|run-help)'
+ }
+ _ignore_READONLY() {
+ _ignore_READONLY_$_shell_DIALECT
+ }
+ _parse_READONLY() {
+ # shellcheck disable=SC1078,SC1079,SC2026
+ perl -e '@r = map {
+ chomp;
+ # sh on UnixWare: readonly TIMEOUT
+ # ash: readonly var='val'
+ # ksh: var='val'
+ # mksh: PIPESTATUS[0]
+ s/^(readonly )?([^=\[ ]*?)(\[\d+\])?(=.*|)$/$2/ or
+ # bash: declare -ar BASH_VERSINFO=([0]="4" [1]="4")
+ # zsh: typeset -r var='val'
+ s/^\S+\s+\S+\s+(\S[^=]*)(=.*|$)/$1/;
+ $_ } <>;
+ $vars = join "|",map { quotemeta $_ } @r;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ '
+ }
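+    # Example (a sketch of the transformation):
+    #   printf 'declare -ar BASH_VERSINFO=([0]="5")\n' | _parse_READONLY
+    #   # => (BASH_VERSINFO)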
+ _ignore_READONLY_sh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_bash() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_ksh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_zsh() {
+ # shellcheck disable=SC3044
+ typeset -pr | _parse_READONLY
+ }
+ _remove_bad_NAMES() {
+ # Do not transfer vars and funcs from env_parallel
+ # shellcheck disable=SC2006
+ _ignore_RO="`_ignore_READONLY`"
+ # shellcheck disable=SC2006
+ _ignore_HARD="`_ignore_HARDCODED`"
+ # To avoid depending on grep dialect, use Perl version of:
+ # grep -Ev '^(...)$' |
+ perl -ne '/^(
+ PARALLEL_ENV|
+ PARALLEL_TMP|
+ _alias_NAMES|
+ _bodies_of_ALIASES|
+ _bodies_of_FUNCTIONS|
+ _bodies_of_VARIABLES|
+ _error_PAR|
+ _function_NAMES|
+ _get_ignored_VARS|
+ _grep_REGEXP|
+ _ignore_HARD|
+ _ignore_HARDCODED|
+ _ignore_READONLY|
+ _ignore_RO|
+ _ignore_UNDERSCORE|
+ _list_alias_BODIES|
+ _list_function_BODIES|
+ _list_variable_VALUES|
+ _make_grep_REGEXP|
+ _names_of_ALIASES|
+ _names_of_FUNCTIONS|
+ _names_of_VARIABLES|
+ _names_of_maybe_FUNCTIONS|
+ _parallel_exit_CODE|
+ _prefix_PARALLEL_ENV|
+ _prefix_PARALLEL_ENV_bash|
+ _remove_bad_NAMES|
+ _remove_readonly|
+ _variable_NAMES|
+ _warning_PAR|
+ _which_PAR)$/x and next;
+ # Filter names matching --env
+ /^'"$_grep_REGEXP"'$/ or next;
+ /^'"$_ignore_UNDERSCORE"'$/ and next;
+ # Remove readonly variables
+ /^'"$_ignore_RO"'$/ and next;
+ /^'"$_ignore_HARD"'$/ and next;
+ print;'
+ }
+ _prefix_PARALLEL_ENV_bash() {
+ # shellcheck disable=SC3044
+ shopt 2>/dev/null |
+ perl -pe 's:\s+off:;: and s/^/shopt -u /;
+ s:\s+on:;: and s/^/shopt -s /;
+ s:;$:&>/dev/null;:';
+ echo 'shopt -s expand_aliases &>/dev/null';
+ }
+
+ _get_ignored_VARS() {
+ perl -e '
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ }
+ }
+ if($ENV{PARALLEL_IGNORED_NAMES}) {
+ push @ignored_vars, split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ chomp @ignored_vars;
+ }
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ ' -- "$@"
+ }
+
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
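+    # Example (sketch): '--env a,b --env c' yields (a|b|c); no --env yields (.*)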
+ _make_grep_REGEXP() {
+ perl -e '
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+ ' -- "$@"
+ }
+ _which_PAR() {
+ # type returns:
+ # ll is an alias for ls -l (in ash)
+ # bash is a tracked alias for /bin/bash
+ # true is a shell builtin (in bash)
+ # myfunc is a function (in bash)
+ # myfunc is a shell function (in zsh)
+ # which is /usr/bin/which (in sh, bash)
+ # which is hashed (/usr/bin/which)
+ # gi is aliased to `grep -i' (in bash)
+ # aliased to `alias | /usr/bin/which --tty-only --read-alias --show-dot --show-tilde'
+ # Return 0 if found, 1 otherwise
+ LANG=C type "$@" |
+ perl -pe '$exit += (s/ is an alias for .*// ||
+ s/ is aliased to .*// ||
+ s/ is a function// ||
+ s/ is a shell function// ||
+ s/ is a shell builtin// ||
+ s/.* is hashed .(\S+).$/$1/ ||
+ s/.* is (a tracked alias for )?//);
+ END { exit not $exit }'
+ }
+ _warning_PAR() {
+ echo "env_parallel: Warning: $*" >&2
+ }
+ _error_PAR() {
+ echo "env_parallel: Error: $*" >&2
+ }
+
+ if _which_PAR parallel >/dev/null; then
+ true parallel found in path
+ else
+ # shellcheck disable=SC2016
+ _error_PAR 'parallel must be in $PATH.'
+ return 255
+ fi
+
+ # Grep regexp for vars given by --env
+ # shellcheck disable=SC2006
+ _grep_REGEXP="`_make_grep_REGEXP \"$@\"`"
+ unset _make_grep_REGEXP
+
+ # Deal with --env _
+ # shellcheck disable=SC2006
+ _ignore_UNDERSCORE="`_get_ignored_VARS \"$@\"`"
+ unset _get_ignored_VARS
+
+ # --record-env
+ if perl -e 'exit grep { /^--record-env$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) |
+ cat > "$HOME"/.parallel/ignored_vars
+ return 0
+ fi
+
+ # --session
+ if perl -e 'exit grep { /^--session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Insert ::: between each level of session
+ # so you can pop off the last ::: at --end-session
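+	# Sketch: the list grows like '<outer names> ::: <inner names>', and
+	# --end-session later strips the last ::: and everything after it.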
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`echo \"$PARALLEL_IGNORED_NAMES\";
+ echo :::;
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) | perl -ne '
+ BEGIN{
+ map { $ignored_vars{$_}++ }
+ split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ }
+ chomp;
+ for(split/\s+/) {
+ if(not $ignored_vars{$_}) {
+ print $_,\"\\n\";
+ }
+ }
+ '`"
+ export PARALLEL_IGNORED_NAMES
+ return 0
+ fi
+ if perl -e 'exit grep { /^--end.?session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Pop off last ::: from PARALLEL_IGNORED_NAMES
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`perl -e '
+ $ENV{PARALLEL_IGNORED_NAMES} =~ s/(.*):::.*?$/$1/s;
+ print $ENV{PARALLEL_IGNORED_NAMES}
+ '`"
+ return 0
+ fi
+ # Grep alias names
+ # shellcheck disable=SC2006
+ _alias_NAMES="`_names_of_ALIASES | _remove_bad_NAMES | xargs echo`"
+ _list_alias_BODIES="_bodies_of_ALIASES $_alias_NAMES"
+ if [ "$_alias_NAMES" = "" ] ; then
+ # no aliases selected
+ _list_alias_BODIES="true"
+ fi
+ unset _alias_NAMES
+
+ # Grep function names
+ # shellcheck disable=SC2006
+ _function_NAMES="`_names_of_FUNCTIONS | _remove_bad_NAMES | xargs echo`"
+ _list_function_BODIES="_bodies_of_FUNCTIONS $_function_NAMES"
+ if [ "$_function_NAMES" = "" ] ; then
+ # no functions selected
+ _list_function_BODIES="true"
+ fi
+ unset _function_NAMES
+
+ # Grep variable names
+ # shellcheck disable=SC2006
+ _variable_NAMES="`_names_of_VARIABLES | _remove_bad_NAMES | xargs echo`"
+ _list_variable_VALUES="_bodies_of_VARIABLES $_variable_NAMES"
+ if [ "$_variable_NAMES" = "" ] ; then
+ # no variables selected
+ _list_variable_VALUES="true"
+ fi
+ unset _variable_NAMES
+
+ if $_eval_needed ; then
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ eval $_prefix_PARALLEL_ENV;
+ eval $_list_alias_BODIES;
+ eval $_list_function_BODIES;
+ eval $_list_variable_VALUES;
+ `"
+ else
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ $_prefix_PARALLEL_ENV;
+ $_list_alias_BODIES;
+ $_list_function_BODIES;
+ $_list_variable_VALUES;
+ `"
+ fi
+ export PARALLEL_ENV
+ # Free up some env space
+ unset _list_alias_BODIES _list_variable_VALUES _list_function_BODIES
+ unset _bodies_of_ALIASES _bodies_of_VARIABLES _bodies_of_FUNCTIONS
+ unset _names_of_ALIASES _names_of_VARIABLES _names_of_FUNCTIONS
+ unset _ignore_HARDCODED _ignore_READONLY _ignore_UNDERSCORE
+ unset _remove_bad_NAMES _grep_REGEXP _parse_READONLY
+ unset _prefix_PARALLEL_ENV
+ unset _ignore_READONLY_sh _ignore_READONLY_bash
+ unset _ignore_READONLY_ksh _ignore_READONLY_zsh
+ unset _ignore_HARDCODED_sh _ignore_HARDCODED_bash
+ unset _ignore_HARDCODED_ksh _ignore_HARDCODED_zsh
+ unset _bodies_of_ALIASES_ksh _bodies_of_ALIASES_sh
+ unset _bodies_of_ALIASES_zsh _bodies_of_FUNCTIONS_bash
+ unset _bodies_of_FUNCTIONS_ksh _bodies_of_FUNCTIONS_sh
+ unset _bodies_of_FUNCTIONS_zsh _bodies_of_VARIABLES_bash
+ unset _bodies_of_VARIABLES_ksh _bodies_of_VARIABLES_sh
+ unset _bodies_of_VARIABLES_zsh
+ unset _names_of_ALIASES _names_of_ALIASES_bash
+ unset _names_of_ALIASES_ksh _names_of_ALIASES_sh
+ unset _names_of_ALIASES_zsh _names_of_FUNCTIONS
+ unset _names_of_FUNCTIONS_bash _names_of_FUNCTIONS_ksh
+ unset _names_of_FUNCTIONS_sh _names_of_FUNCTIONS_zsh
+ unset _names_of_VARIABLES _names_of_VARIABLES_bash
+ unset _names_of_VARIABLES_ksh _names_of_VARIABLES_sh
+ unset _names_of_VARIABLES_zsh _names_of_maybe_FUNCTIONS
+
+ # Test if environment is too big by running 'true'
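+    # (PARALLEL_ENV was exported above; if the combined environment exceeds
+    # the OS exec limit, spawning an external command fails with E2BIG
+    # ("Argument list too long"), which this probe is meant to catch before
+    # running parallel itself.)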
+ # shellcheck disable=SC2006,SC2092
+ if `_which_PAR true` >/dev/null 2>/dev/null ; then
+ parallel "$@"
+ _parallel_exit_CODE=$?
+ # Clean up variables/functions
+ unset PARALLEL_ENV
+ unset _which_PAR _which_TRUE
+ unset _warning_PAR _error_PAR
+ # Unset _parallel_exit_CODE before return
+ eval "unset _parallel_exit_CODE; return $_parallel_exit_CODE"
+ else
+ unset PARALLEL_ENV;
+ _error_PAR "Your environment is too big."
+ _error_PAR "You can try 3 different approaches:"
+ _error_PAR "1. Run 'env_parallel --session' before you set"
+ _error_PAR " variables or define functions."
+ _error_PAR "2. Use --env and only mention the names to copy."
+ _error_PAR "3. Try running this in a clean environment once:"
+ _error_PAR " env_parallel --record-env"
+ _error_PAR " And then use '--env _'"
+ _error_PAR "For details see: man env_parallel"
+ return 255
+ fi
+}
+
+parset() {
+ _parset_PARALLEL_PRG=parallel
+ _parset_main "$@"
+}
+
+env_parset() {
+ _parset_PARALLEL_PRG=env_parallel
+ _parset_main "$@"
+}
+
+_parset_main() {
+ # If $1 contains ',' or space:
+ # Split on , to get the destination variable names
+ # If $1 is a single destination variable name:
+ # Treat it as the name of an array
+ #
+ # # Create array named myvar
+ # parset myvar echo ::: {1..10}
+ # echo ${myvar[5]}
+ #
+ # # Put output into $var_a $var_b $var_c
+ # varnames=(var_a var_b var_c)
+ # parset "${varnames[*]}" echo ::: {1..3}
+ # echo $var_c
+ #
+ # # Put output into $var_a4 $var_b4 $var_c4
+ # parset "var_a4 var_b4 var_c4" echo ::: {1..3}
+ # echo $var_c4
+
+ _parset_NAME="$1"
+ if [ "$_parset_NAME" = "" ] ; then
+ echo parset: Error: No destination variable given. >&2
+ echo parset: Error: Try: >&2
+ echo parset: Error: ' ' parset myarray echo ::: foo bar >&2
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--help" ] ; then
+ echo parset: Error: Usage: >&2
+ echo parset: Error: ' ' parset varname GNU Parallel options and command >&2
+ echo
+ parallel --help
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--version" ] ; then
+ # shellcheck disable=SC2006
+ echo "parset 20240222 (GNU parallel `parallel --minversion 1`)"
+ echo "Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software"
+ echo "Foundation, Inc."
+ echo "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>"
+ echo "This is free software: you are free to change and redistribute it."
+ echo "GNU parallel comes with no warranty."
+ echo
+ echo "Web site: https://www.gnu.org/software/parallel"
+ echo
+ echo "When using programs that use GNU Parallel to process data for publication"
+ echo "please cite as described in 'parallel --citation'."
+ echo
+ return 255
+ fi
+ shift
+
+ # Bash: declare -A myassoc=( )
+ # Zsh: typeset -A myassoc=( )
+ # Ksh: typeset -A myassoc=( )
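+    # ('typeset -p' prints 'declare -A ...' (bash) or 'typeset -A ...'
+    # (zsh/ksh) for associative arrays; the perl check below matches that
+    # to pick the assoc code path.)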
+ # shellcheck disable=SC2039,SC2169,SC3044
+ if (typeset -p "$_parset_NAME" 2>/dev/null; echo) |
+ perl -ne 'exit not (/^declare[^=]+-A|^typeset[^=]+-A/)' ; then
+ # This is an associative array
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset assoc,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ else
+ # This is a normal array or a list of variable names
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset var,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ fi
+}
diff --git a/src/env_parallel.bash b/src/env_parallel.bash
new file mode 100755
index 0000000..6869d79
--- /dev/null
+++ b/src/env_parallel.bash
@@ -0,0 +1,640 @@
+#!/usr/bin/env bash
+
+# This file must be sourced in sh/ash/dash/bash/ksh/mksh/zsh:
+#
+# . env_parallel.sh
+# source env_parallel.ash
+# source env_parallel.dash
+# source env_parallel.bash
+# source env_parallel.ksh
+# source env_parallel.mksh
+# source env_parallel.zsh
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck disable=SC2006
+
+env_parallel() {
+ # env_parallel.{sh,ash,dash,bash,ksh,mksh,zsh}
+
+ # Check shell dialect
+ if [ -n "$BASH_VERSION" ]; then
+ _shell_DIALECT=bash
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=_prefix_PARALLEL_ENV_bash
+ elif [ -n "$ZSH_VERSION" ]; then
+ _shell_DIALECT=zsh
+ _eval_needed=true
+ _prefix_PARALLEL_ENV=false
+ elif [ -n "$KSH_VERSION" ]; then
+ _shell_DIALECT=ksh
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ else
+ # Dash/ash - can these be detected better?
+ _shell_DIALECT="sh"
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ fi
+ _names_of_ALIASES() {
+ _names_of_ALIASES_$_shell_DIALECT
+ }
+ _names_of_ALIASES_sh() {
+ # alias fails on Unixware 5
+ for _i in `alias 2>/dev/null | perl -ne 's/^alias //;s/^(\S+)=.*/$1/ && print' 2>/dev/null`; do
+ # Check if this name really is an alias
+ # or just part of a multiline alias definition
+ if alias "$_i" >/dev/null 2>/dev/null; then
+ echo "$_i"
+ fi
+ done
+ }
+ _names_of_ALIASES_bash() {
+ # No aliases will return false. This error should be ignored.
+ # shellcheck disable=SC3044
+ compgen -a || true
+ }
+ _names_of_ALIASES_ksh() {
+ alias | perl -pe 's/=.*//'
+ }
+ _names_of_ALIASES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)aliases}
+ }
+ _bodies_of_ALIASES() {
+ _bodies_of_ALIASES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_ALIASES_sh() {
+ # alias may return:
+ # myalias='definition' (GNU/Linux ash)
+ # alias myalias='definition' (FreeBSD ash)
+ # so remove 'alias ' from first line
+ for _i in "$@"; do
+ echo 'alias '"`alias "$_i" | perl -pe '1..1 and s/^alias //'`"
+ done
+ }
+ _bodies_of_ALIASES_bash() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ # shellcheck disable=SC2046
+ if [ $(alias "$_i" | wc -l) = 1 ] ; then
+ true Alias is a single line. Good.
+ else
+ _warning_PAR "Alias '$_i' contains newline."
+ _warning_PAR "Make sure the command has at least one newline after '$_i'."
+ _warning_PAR "See BUGS in 'man env_parallel'."
+ fi
+ done
+ alias "$@"
+ }
+ _bodies_of_ALIASES_ksh() {
+ alias "$@" | perl -pe 's/^/alias /;
+ sub warning { print STDERR "env_parallel: Warning: @_\n"; }
+ if(/^alias (\S+)=\$.*\\n/) {
+ warning("Alias \"$1\" contains newline.");
+ warning("Make sure the command has at least one newline after \"$1\".");
+ warning("See BUGS in \"man env_parallel\".");
+ }'
+
+ }
+ _bodies_of_ALIASES_zsh() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ echo 'alias '"$(alias "$_i")"
+ done
+ }
+ _names_of_FUNCTIONS() {
+ _names_of_FUNCTIONS_$_shell_DIALECT
+ }
+ _names_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ compgen -A function
+ }
+ _names_of_maybe_FUNCTIONS() {
+ set | perl -ne '/^([A-Z_0-9]+)\s*\(\)\s*\{?$/i and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_sh() {
+ # myfunc is a function
+ # shellcheck disable=SC2046
+ LANG=C type `_names_of_maybe_FUNCTIONS` |
+ perl -ne '/^(\S+) is a function$/ and not $seen{$1}++ and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_ksh() {
+ # shellcheck disable=SC3044
+ typeset +f | perl -pe 's/\(\).*//; s/ .*//;'
+ }
+ _names_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)functions}
+ }
+ _bodies_of_FUNCTIONS() {
+ _bodies_of_FUNCTIONS_$_shell_DIALECT "$@"
+ }
+ _bodies_of_FUNCTIONS_sh() {
+ LANG=C type "$@" | perl -ne '/^(\S+) is a function$/ or print'
+ }
+ _bodies_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _bodies_of_FUNCTIONS_ksh() {
+ functions "$@"
+ }
+ _bodies_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _names_of_VARIABLES() {
+ _names_of_VARIABLES_$_shell_DIALECT
+ }
+ _names_of_VARIABLES_sh() {
+ # This may screw up if variables contain \n and =
+ set | perl -ne 's/^(\S+?)=.*/$1/ and print;'
+ }
+ _names_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ compgen -A variable
+ }
+ _names_of_VARIABLES_ksh() {
+ # mksh: typeset +p |
+ # perl -pe 's/^(type)?set( [-+][a-zA-Z0-9]*)* //; s/(\[\d+\])?=.*//' |
+ # uniq
+ # ksh: typeset +p | perl -pe 's/^typeset .. //'
+ # shellcheck disable=SC3044
+ typeset +p |
+ perl -pe 's/^(type)?set( [-+][a-zA-Z0-9]*)* //; s/(\[\d+\])?=.*//' |
+ uniq
+ }
+ _names_of_VARIABLES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)parameters}
+ }
+ _bodies_of_VARIABLES() {
+ _bodies_of_VARIABLES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_VARIABLES_sh() {
+ # Crappy typeset -p
+ for _i in "$@"
+ do
+ perl -e 'print @ARGV' "$_i="
+ eval echo "\"\$$_i\"" | perl -e '$/=undef; $a=<>; chop($a); print $a' |
+ perl -pe 's/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\202-\377]/\\$&/go;'"s/'/\\\'/g; s/[\n]/'\\n'/go;";
+ echo
+ done
+ }
+ _bodies_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_zsh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _ignore_HARDCODED() {
+ _ignore_HARDCODED_$_shell_DIALECT
+ }
+ _ignore_HARDCODED_sh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_bash() {
+ # Copying $RANDOM will cause it not to be random
+ # The rest cannot be detected as read-only
+ echo '(RANDOM|_|TIMEOUT|GROUPS|FUNCNAME|DIRSTACK|PIPESTATUS|USERNAME|BASHPID|BASH_[A-Z_]+)'
+ }
+ _ignore_HARDCODED_ksh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_zsh() {
+ # These names cannot be detected
+ echo '([-\?\#\!\$\*\@\_0]|zsh_eval_context|ZSH_EVAL_CONTEXT|LINENO|IFS|commands|functions|options|aliases|EUID|EGID|UID|GID|dis_patchars|patchars|terminfo|galiases|keymaps|parameters|jobdirs|dirstack|functrace|funcsourcetrace|zsh_scheduled_events|dis_aliases|dis_reswords|dis_saliases|modules|reswords|saliases|widgets|userdirs|historywords|nameddirs|termcap|dis_builtins|dis_functions|jobtexts|funcfiletrace|dis_galiases|builtins|history|jobstates|funcstack|run-help)'
+ }
+ _ignore_READONLY() {
+ _ignore_READONLY_$_shell_DIALECT
+ }
+ _parse_READONLY() {
+ # shellcheck disable=SC1078,SC1079,SC2026
+ perl -e '@r = map {
+ chomp;
+ # sh on UnixWare: readonly TIMEOUT
+ # ash: readonly var='val'
+ # ksh: var='val'
+ # mksh: PIPESTATUS[0]
+ s/^(readonly )?([^=\[ ]*?)(\[\d+\])?(=.*|)$/$2/ or
+ # bash: declare -ar BASH_VERSINFO=([0]="4" [1]="4")
+ # zsh: typeset -r var='val'
+ s/^\S+\s+\S+\s+(\S[^=]*)(=.*|$)/$1/;
+ $_ } <>;
+ $vars = join "|",map { quotemeta $_ } @r;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ '
+ }
+ _ignore_READONLY_sh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_bash() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_ksh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_zsh() {
+ # shellcheck disable=SC3044
+ typeset -pr | _parse_READONLY
+ }
+ _remove_bad_NAMES() {
+ # Do not transfer vars and funcs from env_parallel
+ # shellcheck disable=SC2006
+ _ignore_RO="`_ignore_READONLY`"
+ # shellcheck disable=SC2006
+ _ignore_HARD="`_ignore_HARDCODED`"
+ # To avoid depending on grep dialect, use Perl version of:
+ # grep -Ev '^(...)$' |
+ perl -ne '/^(
+ PARALLEL_ENV|
+ PARALLEL_TMP|
+ _alias_NAMES|
+ _bodies_of_ALIASES|
+ _bodies_of_FUNCTIONS|
+ _bodies_of_VARIABLES|
+ _error_PAR|
+ _function_NAMES|
+ _get_ignored_VARS|
+ _grep_REGEXP|
+ _ignore_HARD|
+ _ignore_HARDCODED|
+ _ignore_READONLY|
+ _ignore_RO|
+ _ignore_UNDERSCORE|
+ _list_alias_BODIES|
+ _list_function_BODIES|
+ _list_variable_VALUES|
+ _make_grep_REGEXP|
+ _names_of_ALIASES|
+ _names_of_FUNCTIONS|
+ _names_of_VARIABLES|
+ _names_of_maybe_FUNCTIONS|
+ _parallel_exit_CODE|
+ _prefix_PARALLEL_ENV|
+ _prefix_PARALLEL_ENV_bash|
+ _remove_bad_NAMES|
+ _remove_readonly|
+ _variable_NAMES|
+ _warning_PAR|
+ _which_PAR)$/x and next;
+ # Filter names matching --env
+ /^'"$_grep_REGEXP"'$/ or next;
+ /^'"$_ignore_UNDERSCORE"'$/ and next;
+ # Remove readonly variables
+ /^'"$_ignore_RO"'$/ and next;
+ /^'"$_ignore_HARD"'$/ and next;
+ print;'
+ }
+ _prefix_PARALLEL_ENV_bash() {
+ # shellcheck disable=SC3044
+ shopt 2>/dev/null |
+ perl -pe 's:\s+off:;: and s/^/shopt -u /;
+ s:\s+on:;: and s/^/shopt -s /;
+ s:;$:&>/dev/null;:';
+ echo 'shopt -s expand_aliases &>/dev/null';
+ }
+
+ _get_ignored_VARS() {
+ perl -e '
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ }
+ }
+ if($ENV{PARALLEL_IGNORED_NAMES}) {
+ push @ignored_vars, split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ chomp @ignored_vars;
+ }
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ ' -- "$@"
+ }
+
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
+ _make_grep_REGEXP() {
+ perl -e '
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+ ' -- "$@"
+ }
+ _which_PAR() {
+ # type returns:
+ # ll is an alias for ls -l (in ash)
+ # bash is a tracked alias for /bin/bash
+ # true is a shell builtin (in bash)
+ # myfunc is a function (in bash)
+ # myfunc is a shell function (in zsh)
+ # which is /usr/bin/which (in sh, bash)
+ # which is hashed (/usr/bin/which)
+ # gi is aliased to `grep -i' (in bash)
+ # aliased to `alias | /usr/bin/which --tty-only --read-alias --show-dot --show-tilde'
+ # Return 0 if found, 1 otherwise
+ LANG=C type "$@" |
+ perl -pe '$exit += (s/ is an alias for .*// ||
+ s/ is aliased to .*// ||
+ s/ is a function// ||
+ s/ is a shell function// ||
+ s/ is a shell builtin// ||
+ s/.* is hashed .(\S+).$/$1/ ||
+ s/.* is (a tracked alias for )?//);
+ END { exit not $exit }'
+ }
+ _warning_PAR() {
+ echo "env_parallel: Warning: $*" >&2
+ }
+ _error_PAR() {
+ echo "env_parallel: Error: $*" >&2
+ }
+
+ if _which_PAR parallel >/dev/null; then
+ true parallel found in path
+ else
+ # shellcheck disable=SC2016
+ _error_PAR 'parallel must be in $PATH.'
+ return 255
+ fi
+
+ # Grep regexp for vars given by --env
+ # shellcheck disable=SC2006
+ _grep_REGEXP="`_make_grep_REGEXP \"$@\"`"
+ unset _make_grep_REGEXP
+
+ # Deal with --env _
+ # shellcheck disable=SC2006
+ _ignore_UNDERSCORE="`_get_ignored_VARS \"$@\"`"
+ unset _get_ignored_VARS
+
+ # --record-env
+ if perl -e 'exit grep { /^--record-env$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) |
+ cat > "$HOME"/.parallel/ignored_vars
+ return 0
+ fi
+
+ # --session
+ if perl -e 'exit grep { /^--session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Insert ::: between each level of session
+ # so you can pop off the last ::: at --end-session
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`echo \"$PARALLEL_IGNORED_NAMES\";
+ echo :::;
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) | perl -ne '
+ BEGIN{
+ map { $ignored_vars{$_}++ }
+ split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ }
+ chomp;
+ for(split/\s+/) {
+ if(not $ignored_vars{$_}) {
+ print $_,\"\\n\";
+ }
+ }
+ '`"
+ export PARALLEL_IGNORED_NAMES
+ return 0
+ fi
+ if perl -e 'exit grep { /^--end.?session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Pop off last ::: from PARALLEL_IGNORED_NAMES
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`perl -e '
+ $ENV{PARALLEL_IGNORED_NAMES} =~ s/(.*):::.*?$/$1/s;
+ print $ENV{PARALLEL_IGNORED_NAMES}
+ '`"
+ return 0
+ fi
+ # Grep alias names
+ # shellcheck disable=SC2006
+ _alias_NAMES="`_names_of_ALIASES | _remove_bad_NAMES | xargs echo`"
+ _list_alias_BODIES="_bodies_of_ALIASES $_alias_NAMES"
+ if [ "$_alias_NAMES" = "" ] ; then
+ # no aliases selected
+ _list_alias_BODIES="true"
+ fi
+ unset _alias_NAMES
+
+ # Grep function names
+ # shellcheck disable=SC2006
+ _function_NAMES="`_names_of_FUNCTIONS | _remove_bad_NAMES | xargs echo`"
+ _list_function_BODIES="_bodies_of_FUNCTIONS $_function_NAMES"
+ if [ "$_function_NAMES" = "" ] ; then
+ # no functions selected
+ _list_function_BODIES="true"
+ fi
+ unset _function_NAMES
+
+ # Grep variable names
+ # shellcheck disable=SC2006
+ _variable_NAMES="`_names_of_VARIABLES | _remove_bad_NAMES | xargs echo`"
+ _list_variable_VALUES="_bodies_of_VARIABLES $_variable_NAMES"
+ if [ "$_variable_NAMES" = "" ] ; then
+ # no variables selected
+ _list_variable_VALUES="true"
+ fi
+ unset _variable_NAMES
+
+ if $_eval_needed ; then
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ eval $_prefix_PARALLEL_ENV;
+ eval $_list_alias_BODIES;
+ eval $_list_function_BODIES;
+ eval $_list_variable_VALUES;
+ `"
+ else
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ $_prefix_PARALLEL_ENV;
+ $_list_alias_BODIES;
+ $_list_function_BODIES;
+ $_list_variable_VALUES;
+ `"
+ fi
+ export PARALLEL_ENV
+ # Free up some env space
+ unset _list_alias_BODIES _list_variable_VALUES _list_function_BODIES
+ unset _bodies_of_ALIASES _bodies_of_VARIABLES _bodies_of_FUNCTIONS
+ unset _names_of_ALIASES _names_of_VARIABLES _names_of_FUNCTIONS
+ unset _ignore_HARDCODED _ignore_READONLY _ignore_UNDERSCORE
+ unset _remove_bad_NAMES _grep_REGEXP _parse_READONLY
+ unset _prefix_PARALLEL_ENV
+ unset _ignore_READONLY_sh _ignore_READONLY_bash
+ unset _ignore_READONLY_ksh _ignore_READONLY_zsh
+ unset _ignore_HARDCODED_sh _ignore_HARDCODED_bash
+ unset _ignore_HARDCODED_ksh _ignore_HARDCODED_zsh
+ unset _bodies_of_ALIASES_ksh _bodies_of_ALIASES_sh
+ unset _bodies_of_ALIASES_zsh _bodies_of_FUNCTIONS_bash
+ unset _bodies_of_FUNCTIONS_ksh _bodies_of_FUNCTIONS_sh
+ unset _bodies_of_FUNCTIONS_zsh _bodies_of_VARIABLES_bash
+ unset _bodies_of_VARIABLES_ksh _bodies_of_VARIABLES_sh
+ unset _bodies_of_VARIABLES_zsh
+ unset _names_of_ALIASES _names_of_ALIASES_bash
+ unset _names_of_ALIASES_ksh _names_of_ALIASES_sh
+ unset _names_of_ALIASES_zsh _names_of_FUNCTIONS
+ unset _names_of_FUNCTIONS_bash _names_of_FUNCTIONS_ksh
+ unset _names_of_FUNCTIONS_sh _names_of_FUNCTIONS_zsh
+ unset _names_of_VARIABLES _names_of_VARIABLES_bash
+ unset _names_of_VARIABLES_ksh _names_of_VARIABLES_sh
+ unset _names_of_VARIABLES_zsh _names_of_maybe_FUNCTIONS
+
+ # Test if environment is too big by running 'true'
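+    # (if exporting PARALLEL_ENV pushed the environment past the OS limit,
+    # exec of even 'true' fails with "Argument list too long")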
+ # shellcheck disable=SC2006,SC2092
+ if `_which_PAR true` >/dev/null 2>/dev/null ; then
+ parallel "$@"
+ _parallel_exit_CODE=$?
+ # Clean up variables/functions
+ unset PARALLEL_ENV
+ unset _which_PAR _which_TRUE
+ unset _warning_PAR _error_PAR
+ # Unset _parallel_exit_CODE before return
+ eval "unset _parallel_exit_CODE; return $_parallel_exit_CODE"
+ else
+ unset PARALLEL_ENV;
+ _error_PAR "Your environment is too big."
+ _error_PAR "You can try 3 different approaches:"
+ _error_PAR "1. Run 'env_parallel --session' before you set"
+ _error_PAR " variables or define functions."
+ _error_PAR "2. Use --env and only mention the names to copy."
+ _error_PAR "3. Try running this in a clean environment once:"
+ _error_PAR " env_parallel --record-env"
+ _error_PAR " And then use '--env _'"
+ _error_PAR "For details see: man env_parallel"
+ return 255
+ fi
+}
+
+parset() {
+ _parset_PARALLEL_PRG=parallel
+ _parset_main "$@"
+}
+
+env_parset() {
+ _parset_PARALLEL_PRG=env_parallel
+ _parset_main "$@"
+}
+
+_parset_main() {
+ # If $1 contains ',' or space:
+ # Split on , to get the destination variable names
+ # If $1 is a single destination variable name:
+ # Treat it as the name of an array
+ #
+ # # Create array named myvar
+ # parset myvar echo ::: {1..10}
+ # echo ${myvar[5]}
+ #
+ # # Put output into $var_a $var_b $var_c
+ # varnames=(var_a var_b var_c)
+ # parset "${varnames[*]}" echo ::: {1..3}
+ # echo $var_c
+ #
+ # # Put output into $var_a4 $var_b4 $var_c4
+ # parset "var_a4 var_b4 var_c4" echo ::: {1..3}
+ # echo $var_c4
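+    #
+    # Note: the variables are set with 'eval' in the current shell, so
+    # parset must not run in a subshell or pipeline: the assignments
+    # would be lost when the subshell exits.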
+
+ _parset_NAME="$1"
+ if [ "$_parset_NAME" = "" ] ; then
+ echo parset: Error: No destination variable given. >&2
+ echo parset: Error: Try: >&2
+ echo parset: Error: ' ' parset myarray echo ::: foo bar >&2
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--help" ] ; then
+ echo parset: Error: Usage: >&2
+ echo parset: Error: ' ' parset varname GNU Parallel options and command >&2
+ echo
+ parallel --help
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--version" ] ; then
+ # shellcheck disable=SC2006
+ echo "parset 20240222 (GNU parallel `parallel --minversion 1`)"
+ echo "Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software"
+ echo "Foundation, Inc."
+ echo "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>"
+ echo "This is free software: you are free to change and redistribute it."
+ echo "GNU parallel comes with no warranty."
+ echo
+ echo "Web site: https://www.gnu.org/software/parallel"
+ echo
+ echo "When using programs that use GNU Parallel to process data for publication"
+ echo "please cite as described in 'parallel --citation'."
+ echo
+ return 255
+ fi
+ shift
+
+ # Bash: declare -A myassoc=( )
+ # Zsh: typeset -A myassoc=( )
+ # Ksh: typeset -A myassoc=( )
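+    # i.e. an associative array is recognized by 'typeset -p' printing
+    # 'declare -A ...' (bash) or 'typeset -A ...' (zsh/ksh).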
+ # shellcheck disable=SC2039,SC2169,SC3044
+ if (typeset -p "$_parset_NAME" 2>/dev/null; echo) |
+ perl -ne 'exit not (/^declare[^=]+-A|^typeset[^=]+-A/)' ; then
+ # This is an associative array
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset assoc,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ else
+ # This is a normal array or a list of variable names
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset var,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ fi
+}
diff --git a/src/env_parallel.csh b/src/env_parallel.csh
new file mode 100755
index 0000000..3f00e3a
--- /dev/null
+++ b/src/env_parallel.csh
@@ -0,0 +1,142 @@
+#!/usr/bin/env csh
+
+# This file must be sourced in csh:
+#
+# source `which env_parallel.csh`
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set _parallel_exit_CODE=0
+if ("`alias env_parallel`" == '' || ! $?PARALLEL_CSH) then
+ # Activate alias
+ alias env_parallel '(setenv PARALLEL_CSH "\!*"; source `which env_parallel.csh`)'
+else
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
+
+    # Simple 'tempfile': return a nonexistent filename: /tmp/parXXXXX
+ alias _tempfile 'perl -e do\{\$t\=\"/tmp/par\".join\"\",map\{\(0..9,\"a\"..\"z\",\"A\"..\"Z\"\)\[rand\(62\)\]\}\(1..5\)\;\}while\(-e\$t\)\;print\"\$t\\n\"'
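+    # (gives something like /tmp/parw3Kx9: "/tmp/par" plus 5 random chars
+    # from [0-9a-zA-Z], retried until the name does not exist)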
+ set _tMpscRIpt=`_tempfile`
+
+ cat <<'EOF' > $_tMpscRIpt
+ #!/usr/bin/perl
+
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+'EOF'
+ set _grep_REGEXP="`perl $_tMpscRIpt -- $PARALLEL_CSH`"
+
+ # Deal with --env _
+ cat <<'EOF' > $_tMpscRIpt
+ #!/usr/bin/perl
+
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ }
+ }
+'EOF'
+ set _ignore_UNDERSCORE="`perl $_tMpscRIpt -- $PARALLEL_CSH`"
+ rm $_tMpscRIpt
+
+ # Get the scalar and array variable names
+ set _vARnAmES=(`set | perl -ne 's/\s.*//; /^(#|_|killring|prompt2|command|PARALLEL_ENV|PARALLEL_TMP)$/ and next; /^'"$_grep_REGEXP"'$/ or next; /^'"$_ignore_UNDERSCORE"'$/ and next; print'`)
+
+ # Make a tmpfile for the variable definitions
+ set _tMpvARfILe=`_tempfile`
+ touch $_tMpvARfILe
+ # Make a tmpfile for the variable definitions + alias
+ set _tMpaLLfILe=`_tempfile`
+ foreach _vARnAmE ($_vARnAmES);
+ # These 3 lines break in csh ver. 20110502-3
+ # if not defined: next
+ eval if'(! $?'$_vARnAmE') continue'
+ # if $#myvar <= 1 echo scalar_myvar=$var
+ eval if'(${#'$_vARnAmE'} <= 1) echo scalar_'$_vARnAmE'='\"\$$_vARnAmE\" >> $_tMpvARfILe;
+ # if $#myvar > 1 echo array_myvar=$var
+ eval if'(${#'$_vARnAmE'} > 1) echo array_'$_vARnAmE'="$'$_vARnAmE'"' >> $_tMpvARfILe;
+ end
+ unset _vARnAmE _vARnAmES
+ # shell quote variables (--plain needed due to ignore if $PARALLEL is set)
+ # Convert 'scalar_myvar=...' to 'set myvar=...'
+ # Convert 'array_myvar=...' to 'set array=(...)'
+ cat $_tMpvARfILe | parallel --plain --shellquote | perl -pe 's/^scalar_(\S+).=/set $1=/ or s/^array_(\S+).=(.*)/set $1=($2)/ && s/\\ / /g;' > $_tMpaLLfILe
+ # Cleanup
+ rm $_tMpvARfILe; unset _tMpvARfILe
+
+# ALIAS TO EXPORT ALIASES:
+
+# Quote ' by putting it inside "
+# s/'/'"'"'/g;
+# ' => \047 " => \042
+# s/\047/\047\042\047\042\047/g;
+# Quoted: s/\\047/\\047\\042\\047\\042\\047/g\;
+
+# Remove () from second column
+# s/^(\S+)(\s+)\((.*)\)/\1\2\3/;
+# Quoted: s/\^\(\\S+\)\(\\s+\)\\\(\(.\*\)\\\)/\\1\\2\\3/\;
+
+# Add ' around second column
+# s/^(\S+)(\s+)(.*)/\1\2'\3'/
+# \047 => '
+# s/^(\S+)(\s+)(.*)/\1\2\047\3\047/;
+# Quoted: s/\^\(\\S+\)\(\\s+\)\(.\*\)/\\1\\2\\047\\3\\047/\;
+
+# Quote ! as \!
+# s/\!/\\\!/g;
+# Quoted: s/\\\!/\\\\\\\!/g;
+
+# Prepend with "\nalias "
+# s/^/\001alias /;
+# Quoted: s/\^/\\001alias\ /\;
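+# Worked example (illustrative): the alias listing line
+#   gi (grep -i)
+# ends up as
+#   \001alias gi 'grep -i'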
+ alias | \
+ perl -ne '/^'"$_grep_REGEXP"'/ or next; /^'"$_ignore_UNDERSCORE"'[^_a-zA-Z]/ and next; print' | \
+ perl -pe s/\\047/\\047\\042\\047\\042\\047/g\;s/\^\(\\S+\)\(\\s+\)\\\(\(.\*\)\\\)/\\1\\2\\3/\;s/\^\(\\S+\)\(\\s+\)\(.\*\)/\\1\\2\\047\\3\\047/\;s/\^/\\001alias\ /\;s/\\\!/\\\\\\\!/g >> $_tMpaLLfILe
+
+ setenv PARALLEL_ENV "`cat $_tMpaLLfILe; rm $_tMpaLLfILe`";
+ unset _tMpaLLfILe;
+ # Use $PARALLEL_CSH set in calling alias
+ parallel
+ set _parallel_exit_CODE=$status
+ setenv PARALLEL_ENV
+ setenv PARALLEL_CSH
+endif
+(exit $_parallel_exit_CODE)
diff --git a/src/env_parallel.dash b/src/env_parallel.dash
new file mode 100755
index 0000000..0d09760
--- /dev/null
+++ b/src/env_parallel.dash
@@ -0,0 +1,636 @@
+#!/usr/bin/env dash
+
+# This file must be sourced in sh/ash/dash/bash/ksh/mksh/zsh:
+#
+# . env_parallel.sh
+# source env_parallel.ash
+# source env_parallel.dash
+# source env_parallel.bash
+# source env_parallel.ksh
+# source env_parallel.mksh
+# source env_parallel.zsh
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck disable=SC2006
+
+env_parallel() {
+ # env_parallel.{sh,ash,dash,bash,ksh,mksh,zsh}
+
+ # Check shell dialect
+ if [ -n "$BASH_VERSION" ]; then
+ _shell_DIALECT=bash
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=_prefix_PARALLEL_ENV_bash
+ elif [ -n "$ZSH_VERSION" ]; then
+ _shell_DIALECT=zsh
+ _eval_needed=true
+ _prefix_PARALLEL_ENV=false
+ elif [ -n "$KSH_VERSION" ]; then
+ _shell_DIALECT=ksh
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ else
+ # Dash/ash - can these be detected better?
+ _shell_DIALECT="sh"
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ fi
+ _names_of_ALIASES() {
+ _names_of_ALIASES_$_shell_DIALECT
+ }
+ _names_of_ALIASES_sh() {
+ # alias fails on Unixware 5
+ for _i in `alias 2>/dev/null | perl -ne 's/^alias //;s/^(\S+)=.*/$1/ && print' 2>/dev/null`; do
+ # Check if this name really is an alias
+ # or just part of a multiline alias definition
+ if alias "$_i" >/dev/null 2>/dev/null; then
+ echo "$_i"
+ fi
+ done
+ }
+ _names_of_ALIASES_bash() {
+ # No aliases will return false. This error should be ignored.
+ # shellcheck disable=SC3044
+ compgen -a || true
+ }
+ _names_of_ALIASES_ksh() {
+ alias | perl -pe 's/=.*//'
+ }
+ _names_of_ALIASES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)aliases}
+ }
+ _bodies_of_ALIASES() {
+ _bodies_of_ALIASES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_ALIASES_sh() {
+ # alias may return:
+ # myalias='definition' (GNU/Linux ash)
+ # alias myalias='definition' (FreeBSD ash)
+ # so remove 'alias ' from first line
+ for _i in "$@"; do
+ echo 'alias '"`alias "$_i" | perl -pe '1..1 and s/^alias //'`"
+ done
+ }
+ _bodies_of_ALIASES_bash() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ # shellcheck disable=SC2046
+ if [ $(alias "$_i" | wc -l) = 1 ] ; then
+ true Alias is a single line. Good.
+ else
+ _warning_PAR "Alias '$_i' contains newline."
+ _warning_PAR "Make sure the command has at least one newline after '$_i'."
+ _warning_PAR "See BUGS in 'man env_parallel'."
+ fi
+ done
+ alias "$@"
+ }
+ _bodies_of_ALIASES_ksh() {
+ alias "$@" | perl -pe 's/^/alias /;
+ sub warning { print STDERR "env_parallel: Warning: @_\n"; }
+ if(/^alias (\S+)=\$.*\\n/) {
+ warning("Alias \"$1\" contains newline.");
+ warning("Make sure the command has at least one newline after \"$1\".");
+ warning("See BUGS in \"man env_parallel\".");
+ }'
+
+ }
+ _bodies_of_ALIASES_zsh() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ echo 'alias '"$(alias "$_i")"
+ done
+ }
+ _names_of_FUNCTIONS() {
+ _names_of_FUNCTIONS_$_shell_DIALECT
+ }
+ _names_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ compgen -A function
+ }
+ _names_of_maybe_FUNCTIONS() {
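+        # e.g. the 'set' output line "myfunc () {" yields "myfunc"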
+ set | perl -ne '/^([A-Z_0-9]+)\s*\(\)\s*\{?$/i and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_sh() {
+ # myfunc is a function
+ # shellcheck disable=SC2046
+ LANG=C type `_names_of_maybe_FUNCTIONS` |
+ perl -ne '/^(\S+) is a function$/ and not $seen{$1}++ and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_ksh() {
+ # shellcheck disable=SC3044
+ typeset +f | perl -pe 's/\(\).*//; s/ .*//;'
+ }
+ _names_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)functions}
+ }
+ _bodies_of_FUNCTIONS() {
+ _bodies_of_FUNCTIONS_$_shell_DIALECT "$@"
+ }
+ _bodies_of_FUNCTIONS_sh() {
+ LANG=C type "$@" | perl -ne '/^(\S+) is a function$/ or print'
+ }
+ _bodies_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _bodies_of_FUNCTIONS_ksh() {
+ functions "$@"
+ }
+ _bodies_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _names_of_VARIABLES() {
+ _names_of_VARIABLES_$_shell_DIALECT
+ }
+ _names_of_VARIABLES_sh() {
+ # This may screw up if variables contain \n and =
+ set | perl -ne 's/^(\S+?)=.*/$1/ and print;'
+ }
+ _names_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ compgen -A variable
+ }
+ _names_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset +p |
+ perl -pe 's/^(type)?set( [-+][a-zA-Z0-9]*)* //; s/(\[\d+\])?=.*//' |
+ uniq
+ }
+ _names_of_VARIABLES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)parameters}
+ }
+ _bodies_of_VARIABLES() {
+ _bodies_of_VARIABLES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_VARIABLES_sh() {
+        # Poor man's 'typeset -p' for shells that lack it
+ for _i in "$@"
+ do
+ perl -e 'print @ARGV' "$_i="
+ eval echo "\"\$$_i\"" | perl -e '$/=undef; $a=<>; chop($a); print $a' |
+ perl -pe 's/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\202-\377]/\\$&/go;'"s/'/\\\'/g; s/[\n]/'\\n'/go;";
+ echo
+ done
+ }
+ _bodies_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_zsh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _ignore_HARDCODED() {
+ _ignore_HARDCODED_$_shell_DIALECT
+ }
+ _ignore_HARDCODED_sh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_bash() {
+ # Copying $RANDOM will cause it not to be random
+ # The rest cannot be detected as read-only
+ echo '(RANDOM|_|TIMEOUT|GROUPS|FUNCNAME|DIRSTACK|PIPESTATUS|USERNAME|BASHPID|BASH_[A-Z_]+)'
+ }
+ _ignore_HARDCODED_ksh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_zsh() {
+ # These names cannot be detected
+ echo '([-\?\#\!\$\*\@\_0]|zsh_eval_context|ZSH_EVAL_CONTEXT|LINENO|IFS|commands|functions|options|aliases|EUID|EGID|UID|GID|dis_patchars|patchars|terminfo|galiases|keymaps|parameters|jobdirs|dirstack|functrace|funcsourcetrace|zsh_scheduled_events|dis_aliases|dis_reswords|dis_saliases|modules|reswords|saliases|widgets|userdirs|historywords|nameddirs|termcap|dis_builtins|dis_functions|jobtexts|funcfiletrace|dis_galiases|builtins|history|jobstates|funcstack|run-help)'
+ }
+ _ignore_READONLY() {
+ _ignore_READONLY_$_shell_DIALECT
+ }
+ _parse_READONLY() {
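+        # Turn the shell's readonly listing into one ignore-regexp; e.g.
+        # 'declare -ar BASH_VERSINFO=([0]="4")' contributes 'BASH_VERSINFO'.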
+ # shellcheck disable=SC1078,SC1079,SC2026
+ perl -e '@r = map {
+ chomp;
+ # sh on UnixWare: readonly TIMEOUT
+ # ash: readonly var='val'
+ # ksh: var='val'
+ # mksh: PIPESTATUS[0]
+ s/^(readonly )?([^=\[ ]*?)(\[\d+\])?(=.*|)$/$2/ or
+ # bash: declare -ar BASH_VERSINFO=([0]="4" [1]="4")
+ # zsh: typeset -r var='val'
+ s/^\S+\s+\S+\s+(\S[^=]*)(=.*|$)/$1/;
+ $_ } <>;
+ $vars = join "|",map { quotemeta $_ } @r;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ '
+ }
+ _ignore_READONLY_sh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_bash() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_ksh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_zsh() {
+ # shellcheck disable=SC3044
+ typeset -pr | _parse_READONLY
+ }
+ _remove_bad_NAMES() {
+ # Do not transfer vars and funcs from env_parallel
+ # shellcheck disable=SC2006
+ _ignore_RO="`_ignore_READONLY`"
+ # shellcheck disable=SC2006
+ _ignore_HARD="`_ignore_HARDCODED`"
+ # To avoid depending on grep dialect, use Perl version of:
+ # grep -Ev '^(...)$' |
+ perl -ne '/^(
+ PARALLEL_ENV|
+ PARALLEL_TMP|
+ _alias_NAMES|
+ _bodies_of_ALIASES|
+ _bodies_of_FUNCTIONS|
+ _bodies_of_VARIABLES|
+ _error_PAR|
+ _function_NAMES|
+ _get_ignored_VARS|
+ _grep_REGEXP|
+ _ignore_HARD|
+ _ignore_HARDCODED|
+ _ignore_READONLY|
+ _ignore_RO|
+ _ignore_UNDERSCORE|
+ _list_alias_BODIES|
+ _list_function_BODIES|
+ _list_variable_VALUES|
+ _make_grep_REGEXP|
+ _names_of_ALIASES|
+ _names_of_FUNCTIONS|
+ _names_of_VARIABLES|
+ _names_of_maybe_FUNCTIONS|
+ _parallel_exit_CODE|
+ _prefix_PARALLEL_ENV|
+ _prefix_PARALLEL_ENV_bash|
+ _remove_bad_NAMES|
+ _remove_readonly|
+ _variable_NAMES|
+ _warning_PAR|
+ _which_PAR)$/x and next;
+ # Filter names matching --env
+ /^'"$_grep_REGEXP"'$/ or next;
+ /^'"$_ignore_UNDERSCORE"'$/ and next;
+ # Remove readonly variables
+ /^'"$_ignore_RO"'$/ and next;
+ /^'"$_ignore_HARD"'$/ and next;
+ print;'
+ }
+ _prefix_PARALLEL_ENV_bash() {
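+        # Replay the parent's shopt settings in the child; e.g. the line
+        # 'checkwinsize   on' becomes 'shopt -s checkwinsize&>/dev/null;'.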
+ # shellcheck disable=SC3044
+ shopt 2>/dev/null |
+ perl -pe 's:\s+off:;: and s/^/shopt -u /;
+ s:\s+on:;: and s/^/shopt -s /;
+ s:;$:&>/dev/null;:';
+ echo 'shopt -s expand_aliases &>/dev/null';
+ }
+
+ _get_ignored_VARS() {
+ perl -e '
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ }
+ }
+ if($ENV{PARALLEL_IGNORED_NAMES}) {
+ push @ignored_vars, split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ chomp @ignored_vars;
+ }
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ ' -- "$@"
+ }
+
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
+ _make_grep_REGEXP() {
+ perl -e '
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+ ' -- "$@"
+ }
+ _which_PAR() {
+ # type returns:
+ # ll is an alias for ls -l (in ash)
+ # bash is a tracked alias for /bin/bash
+ # true is a shell builtin (in bash)
+ # myfunc is a function (in bash)
+ # myfunc is a shell function (in zsh)
+ # which is /usr/bin/which (in sh, bash)
+ # which is hashed (/usr/bin/which)
+ # gi is aliased to `grep -i' (in bash)
+ # aliased to `alias | /usr/bin/which --tty-only --read-alias --show-dot --show-tilde'
+ # Return 0 if found, 1 otherwise
+ LANG=C type "$@" |
+ perl -pe '$exit += (s/ is an alias for .*// ||
+ s/ is aliased to .*// ||
+ s/ is a function// ||
+ s/ is a shell function// ||
+ s/ is a shell builtin// ||
+ s/.* is hashed .(\S+).$/$1/ ||
+ s/.* is (a tracked alias for )?//);
+ END { exit not $exit }'
+ }
+ _warning_PAR() {
+ echo "env_parallel: Warning: $*" >&2
+ }
+ _error_PAR() {
+ echo "env_parallel: Error: $*" >&2
+ }
+
+ if _which_PAR parallel >/dev/null; then
+ true parallel found in path
+ else
+ # shellcheck disable=SC2016
+ _error_PAR 'parallel must be in $PATH.'
+ return 255
+ fi
+
+ # Grep regexp for vars given by --env
+ # shellcheck disable=SC2006
+ _grep_REGEXP="`_make_grep_REGEXP \"$@\"`"
+ unset _make_grep_REGEXP
+
+ # Deal with --env _
+ # shellcheck disable=SC2006
+ _ignore_UNDERSCORE="`_get_ignored_VARS \"$@\"`"
+ unset _get_ignored_VARS
+
+ # --record-env
+ if perl -e 'exit grep { /^--record-env$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) |
+ cat > "$HOME"/.parallel/ignored_vars
+ return 0
+ fi
+
+ # --session
+ if perl -e 'exit grep { /^--session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Insert ::: between each level of session
+ # so you can pop off the last ::: at --end-session
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`echo \"$PARALLEL_IGNORED_NAMES\";
+ echo :::;
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) | perl -ne '
+ BEGIN{
+ map { $ignored_vars{$_}++ }
+ split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ }
+ chomp;
+ for(split/\s+/) {
+ if(not $ignored_vars{$_}) {
+ print $_,\"\\n\";
+ }
+ }
+ '`"
+ export PARALLEL_IGNORED_NAMES
+ return 0
+ fi
+ if perl -e 'exit grep { /^--end.?session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Pop off last ::: from PARALLEL_IGNORED_NAMES
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`perl -e '
+ $ENV{PARALLEL_IGNORED_NAMES} =~ s/(.*):::.*?$/$1/s;
+ print $ENV{PARALLEL_IGNORED_NAMES}
+ '`"
+ return 0
+ fi
+ # Grep alias names
+ # shellcheck disable=SC2006
+ _alias_NAMES="`_names_of_ALIASES | _remove_bad_NAMES | xargs echo`"
+ _list_alias_BODIES="_bodies_of_ALIASES $_alias_NAMES"
+ if [ "$_alias_NAMES" = "" ] ; then
+ # no aliases selected
+ _list_alias_BODIES="true"
+ fi
+ unset _alias_NAMES
+
+ # Grep function names
+ # shellcheck disable=SC2006
+ _function_NAMES="`_names_of_FUNCTIONS | _remove_bad_NAMES | xargs echo`"
+ _list_function_BODIES="_bodies_of_FUNCTIONS $_function_NAMES"
+ if [ "$_function_NAMES" = "" ] ; then
+ # no functions selected
+ _list_function_BODIES="true"
+ fi
+ unset _function_NAMES
+
+ # Grep variable names
+ # shellcheck disable=SC2006
+ _variable_NAMES="`_names_of_VARIABLES | _remove_bad_NAMES | xargs echo`"
+ _list_variable_VALUES="_bodies_of_VARIABLES $_variable_NAMES"
+ if [ "$_variable_NAMES" = "" ] ; then
+ # no variables selected
+ _list_variable_VALUES="true"
+ fi
+ unset _variable_NAMES
+
+ if $_eval_needed ; then
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ eval $_prefix_PARALLEL_ENV;
+ eval $_list_alias_BODIES;
+ eval $_list_function_BODIES;
+ eval $_list_variable_VALUES;
+ `"
+ else
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ $_prefix_PARALLEL_ENV;
+ $_list_alias_BODIES;
+ $_list_function_BODIES;
+ $_list_variable_VALUES;
+ `"
+ fi
+ export PARALLEL_ENV
+ # Free up some env space
+ unset _list_alias_BODIES _list_variable_VALUES _list_function_BODIES
+ unset _bodies_of_ALIASES _bodies_of_VARIABLES _bodies_of_FUNCTIONS
+ unset _names_of_ALIASES _names_of_VARIABLES _names_of_FUNCTIONS
+ unset _ignore_HARDCODED _ignore_READONLY _ignore_UNDERSCORE
+ unset _remove_bad_NAMES _grep_REGEXP _parse_READONLY
+ unset _prefix_PARALLEL_ENV
+ unset _ignore_READONLY_sh _ignore_READONLY_bash
+ unset _ignore_READONLY_ksh _ignore_READONLY_zsh
+ unset _ignore_HARDCODED_sh _ignore_HARDCODED_bash
+ unset _ignore_HARDCODED_ksh _ignore_HARDCODED_zsh
+ unset _bodies_of_ALIASES_ksh _bodies_of_ALIASES_sh
+ unset _bodies_of_ALIASES_zsh _bodies_of_FUNCTIONS_bash
+ unset _bodies_of_FUNCTIONS_ksh _bodies_of_FUNCTIONS_sh
+ unset _bodies_of_FUNCTIONS_zsh _bodies_of_VARIABLES_bash
+ unset _bodies_of_VARIABLES_ksh _bodies_of_VARIABLES_sh
+ unset _bodies_of_VARIABLES_zsh
+ unset _names_of_ALIASES _names_of_ALIASES_bash
+ unset _names_of_ALIASES_ksh _names_of_ALIASES_sh
+ unset _names_of_ALIASES_zsh _names_of_FUNCTIONS
+ unset _names_of_FUNCTIONS_bash _names_of_FUNCTIONS_ksh
+ unset _names_of_FUNCTIONS_sh _names_of_FUNCTIONS_zsh
+ unset _names_of_VARIABLES _names_of_VARIABLES_bash
+ unset _names_of_VARIABLES_ksh _names_of_VARIABLES_sh
+ unset _names_of_VARIABLES_zsh _names_of_maybe_FUNCTIONS
+
+ # Test if environment is too big by running 'true'
+ # shellcheck disable=SC2006,SC2092
+ if `_which_PAR true` >/dev/null 2>/dev/null ; then
+ parallel "$@"
+ _parallel_exit_CODE=$?
+ # Clean up variables/functions
+ unset PARALLEL_ENV
+ unset _which_PAR _which_TRUE
+ unset _warning_PAR _error_PAR
+ # Unset _parallel_exit_CODE before return
+ eval "unset _parallel_exit_CODE; return $_parallel_exit_CODE"
+ else
+ unset PARALLEL_ENV;
+ _error_PAR "Your environment is too big."
+ _error_PAR "You can try 3 different approaches:"
+ _error_PAR "1. Run 'env_parallel --session' before you set"
+ _error_PAR " variables or define functions."
+ _error_PAR "2. Use --env and only mention the names to copy."
+ _error_PAR "3. Try running this in a clean environment once:"
+ _error_PAR " env_parallel --record-env"
+ _error_PAR " And then use '--env _'"
+ _error_PAR "For details see: man env_parallel"
+ return 255
+ fi
+}
+
+parset() {
+ _parset_PARALLEL_PRG=parallel
+ _parset_main "$@"
+}
+
+env_parset() {
+ _parset_PARALLEL_PRG=env_parallel
+ _parset_main "$@"
+}
+
+_parset_main() {
+ # If $1 contains ',' or space:
+ # Split on , to get the destination variable names
+ # If $1 is a single destination variable name:
+ # Treat it as the name of an array
+ #
+ # # Create array named myvar
+ # parset myvar echo ::: {1..10}
+ # echo ${myvar[5]}
+ #
+ # # Put output into $var_a $var_b $var_c
+ # varnames=(var_a var_b var_c)
+ # parset "${varnames[*]}" echo ::: {1..3}
+ # echo $var_c
+ #
+ # # Put output into $var_a4 $var_b4 $var_c4
+ # parset "var_a4 var_b4 var_c4" echo ::: {1..3}
+ # echo $var_c4
+
+ _parset_NAME="$1"
+ if [ "$_parset_NAME" = "" ] ; then
+ echo parset: Error: No destination variable given. >&2
+ echo parset: Error: Try: >&2
+ echo parset: Error: ' ' parset myarray echo ::: foo bar >&2
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--help" ] ; then
+ echo parset: Error: Usage: >&2
+ echo parset: Error: ' ' parset varname GNU Parallel options and command >&2
+ echo
+ parallel --help
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--version" ] ; then
+ # shellcheck disable=SC2006
+ echo "parset 20240222 (GNU parallel `parallel --minversion 1`)"
+ echo "Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software"
+ echo "Foundation, Inc."
+ echo "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>"
+ echo "This is free software: you are free to change and redistribute it."
+ echo "GNU parallel comes with no warranty."
+ echo
+ echo "Web site: https://www.gnu.org/software/parallel"
+ echo
+ echo "When using programs that use GNU Parallel to process data for publication"
+ echo "please cite as described in 'parallel --citation'."
+ echo
+ return 255
+ fi
+ shift
+
+ # Bash: declare -A myassoc=( )
+ # Zsh: typeset -A myassoc=( )
+ # Ksh: typeset -A myassoc=( )
+ # shellcheck disable=SC2039,SC2169,SC3044
+ if (typeset -p "$_parset_NAME" 2>/dev/null; echo) |
+ perl -ne 'exit not (/^declare[^=]+-A|^typeset[^=]+-A/)' ; then
+ # This is an associative array
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset assoc,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ else
+ # This is a normal array or a list of variable names
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset var,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ fi
+}
diff --git a/src/env_parallel.fish b/src/env_parallel.fish
new file mode 100755
index 0000000..b0abc50
--- /dev/null
+++ b/src/env_parallel.fish
@@ -0,0 +1,194 @@
+#!/usr/bin/env fish
+
+# This file must be sourced in fish:
+#
+# . (which env_parallel.fish)
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# If you are a fisherman, feel free to improve the code.
+#
+# The code needs to deal with variables like:
+# set funky (perl -e 'print pack "c*", 2..254')
+#
+# Problem:
+# Tell the difference between:
+# set tmp "a' 'b' 'c"
+# set tmparr1 "a' 'b" 'c'
+# set tmparr2 'a' "b' 'c"
+# The output from `set` is exactly the same.
+# Solution:
+# for-loop for each variable. Each value is separated with a
+# separator.
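+# Each variable is therefore streamed as "name value\0" records, e.g.
+#   tmparr1 a' 'b\0tmparr1 c\0tmparr2 a\0tmparr2 b' 'c\0
+# so element boundaries stay unambiguous.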
+
+function env_parallel
+ # env_parallel.fish
+
+ # --session
+    perl -e 'exit grep { /^--session$/ } @ARGV' -- $argv; or begin;
+ setenv PARALLEL_IGNORED_NAMES (
+ begin;
+ functions -n
+ set -n;
+ end | perl -pe 's/\n/,/g';
+ )
+ return 0
+ end;
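+    # (the names are stored comma-separated in fish, which is why the
+    # '--env _' handling below splits PARALLEL_IGNORED_NAMES on ',')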
+ setenv PARALLEL_ENV (
+ begin;
+ set _grep_REGEXP (
+ begin;
+ perl -e '
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+ ' -- $argv;
+ end;
+ )
+ # Deal with --env _
+ set _ignore_UNDERSCORE (
+ begin;
+ perl -e '
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ }
+ }
+ if($ENV{PARALLEL_IGNORED_NAMES}) {
+ push @ignored_vars, split/,/, $ENV{PARALLEL_IGNORED_NAMES};
+ chomp @ignored_vars;
+ }
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ ' -- $argv;
+ end;
+ )
+
+ # --record-env
+ perl -e 'exit grep { /^--record-env$/ } @ARGV' -- $argv; or begin;
+ begin;
+ functions -n | perl -pe 's/,/\n/g';
+ set -n;
+ end | cat > $HOME/.parallel/ignored_vars;
+ end;
+
+ # Export function definitions
+ # Keep the ones from --env
+ # Ignore the ones from ~/.parallel/ignored_vars
+    # Dump each function definition
+ # Replace \001 with \002 because \001 is used by env_parallel
+ # Convert \n to \001
+ functions -n | perl -pe 's/,/\n/g' | \
+ grep -Ev '^(PARALLEL_ENV|PARALLEL_TMP)$' | \
+ grep -E "^$_grep_REGEXP"\$ | grep -vE "^$_ignore_UNDERSCORE"\$ | \
+ while read d; functions $d; end | \
+ perl -pe 's/\001/\002/g and not $printed++ and print STDERR
+ "env_parallel: Warning: ASCII value 1 in functions is not supported\n";
+ s/\n/\001/g';
+ # Convert scalar vars to fish \XX quoting
+ # Keep the ones from --env
+ # Ignore the ones from ~/.parallel/ignored_vars
+ # Ignore read only vars
+ # Execute 'set' of the content
+ eval (set -L | \
+ grep -Ev '^(PARALLEL_TMP)$' | \
+ grep -E "^$_grep_REGEXP " | grep -vE "^$_ignore_UNDERSCORE " | \
+ perl -ne 'chomp;
+ ($name,$val)=split(/ /,$_,2);
+ $name=~/^(HOME|USER|COLUMNS|FISH_VERSION|LINES|PWD|SHLVL|_|
+ history|status|version)$|\./x and next;
+ if($val=~/^'"'"'/) { next; }
+ print "set $name \"\$$name\";\n";
+ ')
+ # Generate commands to set scalar variables
+ # Keep the ones from --env
+ # Ignore the ones from ~/.parallel/ignored_vars
+ #
+ begin;
+ for v in (set -n | \
+ grep -Ev '^(PARALLEL_TMP)$' | \
+ grep -E "^$_grep_REGEXP\$" | grep -vE "^$_ignore_UNDERSCORE\$");
+ # Separate variables with the string: \000
+ # array_name1 val1\0
+ # array_name1 val2\0
+ # array_name2 val3\0
+ # array_name2 val4\0
+ eval "for i in \$$v;
+ echo -n $v \$i;
+ perl -e print\\\"\\\\0\\\";
+ end;"
+ end;
+ # A final line to flush the last variable in Perl
+ perl -e print\"\\0\";
+ end | perl -0 -ne '
+ # Remove separator string \0
+ chop;
+ # Split line into name and value
+ ($name,$val)=split(/ /,$_,2);
+ # Ignore read-only vars
+ $name=~/^(HOME|USER|COLUMNS|FISH_VERSION|LINES|PWD|SHLVL|_|
+ fish_pid|history|hostname|status|version)$/x and next;
+ # Single quote $val
+ if($val =~ /[^-_.+a-z0-9\/]/i) {
+ $val =~ s/\047/\047"\047"\047/g; # "-quote single quotes
+ $val = "\047$val\047"; # single-quote entire string
+ $val =~ s/^\047\047|\047\047$//g; # Remove unneeded '' at ends
+ } elsif ($val eq "") {
+ $val = "\047\047";
+ }
+
+ if($name ne $last and $last) {
+ # The $name is different, so this is a new variable.
+ # Print the last one.
+ # Separate list elements by 2 spaces
+ $"=" ";
+ print "set $last @qval;\n";
+ @qval=();
+ }
+ push @qval,$val;
+ $last=$name;
+ '| \
+ perl -pe 's/\001/\002/g and not $printed++ and print STDERR
+ "env_parallel: Warning: ASCII value 1 in variables is not supported\n";
+ s/\n/\001/g'
+ end;
+ )
+    # Run parallel unless --record-env was given (already handled above)
+ perl -e 'exit grep { /^--record-env$/ } @ARGV' -- $argv; and parallel $argv;
+ set _parallel_exit_CODE $status
+ set -e PARALLEL_ENV
+ return $_parallel_exit_CODE
+end
diff --git a/src/env_parallel.ksh b/src/env_parallel.ksh
new file mode 100755
index 0000000..bc20d41
--- /dev/null
+++ b/src/env_parallel.ksh
@@ -0,0 +1,636 @@
+#!/usr/bin/env ksh
+
+# This file must be sourced in sh/ash/dash/bash/ksh/mksh/zsh:
+#
+# . env_parallel.sh
+# source env_parallel.ash
+# source env_parallel.dash
+# source env_parallel.bash
+# source env_parallel.ksh
+# source env_parallel.mksh
+# source env_parallel.zsh
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck disable=SC2006
+
+env_parallel() {
+ # env_parallel.{sh,ash,dash,bash,ksh,mksh,zsh}
+
+ # Check shell dialect
+ if [ -n "$BASH_VERSION" ]; then
+ _shell_DIALECT=bash
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=_prefix_PARALLEL_ENV_bash
+ elif [ -n "$ZSH_VERSION" ]; then
+ _shell_DIALECT=zsh
+ _eval_needed=true
+ _prefix_PARALLEL_ENV=false
+ elif [ -n "$KSH_VERSION" ]; then
+ _shell_DIALECT=ksh
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ else
+ # Dash/ash - can these be detected better?
+ _shell_DIALECT="sh"
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ fi
+ _names_of_ALIASES() {
+ _names_of_ALIASES_$_shell_DIALECT
+ }
+ _names_of_ALIASES_sh() {
+ # alias fails on Unixware 5
+ for _i in `alias 2>/dev/null | perl -ne 's/^alias //;s/^(\S+)=.*/$1/ && print' 2>/dev/null`; do
+ # Check if this name really is an alias
+ # or just part of a multiline alias definition
+ if alias "$_i" >/dev/null 2>/dev/null; then
+ echo "$_i"
+ fi
+ done
+ }
+ _names_of_ALIASES_bash() {
+ # No aliases will return false. This error should be ignored.
+ # shellcheck disable=SC3044
+ compgen -a || true
+ }
+ _names_of_ALIASES_ksh() {
+ alias | perl -pe 's/=.*//'
+ }
+ _names_of_ALIASES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)aliases}
+ }
+ _bodies_of_ALIASES() {
+ _bodies_of_ALIASES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_ALIASES_sh() {
+ # alias may return:
+ # myalias='definition' (GNU/Linux ash)
+ # alias myalias='definition' (FreeBSD ash)
+ # so remove 'alias ' from first line
+ for _i in "$@"; do
+ echo 'alias '"`alias "$_i" | perl -pe '1..1 and s/^alias //'`"
+ done
+ }
+ _bodies_of_ALIASES_bash() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ # shellcheck disable=SC2046
+ if [ $(alias "$_i" | wc -l) = 1 ] ; then
+ true Alias is a single line. Good.
+ else
+ _warning_PAR "Alias '$_i' contains newline."
+ _warning_PAR "Make sure the command has at least one newline after '$_i'."
+ _warning_PAR "See BUGS in 'man env_parallel'."
+ fi
+ done
+ alias "$@"
+ }
+ _bodies_of_ALIASES_ksh() {
+ alias "$@" | perl -pe 's/^/alias /;
+ sub warning { print STDERR "env_parallel: Warning: @_\n"; }
+ if(/^alias (\S+)=\$.*\\n/) {
+ warning("Alias \"$1\" contains newline.");
+ warning("Make sure the command has at least one newline after \"$1\".");
+ warning("See BUGS in \"man env_parallel\".");
+ }'
+
+ }
+ _bodies_of_ALIASES_zsh() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ echo 'alias '"$(alias "$_i")"
+ done
+ }
+ _names_of_FUNCTIONS() {
+ _names_of_FUNCTIONS_$_shell_DIALECT
+ }
+ _names_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ compgen -A function
+ }
+ _names_of_maybe_FUNCTIONS() {
+ set | perl -ne '/^([A-Z_0-9]+)\s*\(\)\s*\{?$/i and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_sh() {
+ # myfunc is a function
+ # shellcheck disable=SC2046
+ LANG=C type `_names_of_maybe_FUNCTIONS` |
+ perl -ne '/^(\S+) is a function$/ and not $seen{$1}++ and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_ksh() {
+ # shellcheck disable=SC3044
+ typeset +f | perl -pe 's/\(\).*//; s/ .*//;'
+ }
+ _names_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)functions}
+ }
+ _bodies_of_FUNCTIONS() {
+ _bodies_of_FUNCTIONS_$_shell_DIALECT "$@"
+ }
+ _bodies_of_FUNCTIONS_sh() {
+ LANG=C type "$@" | perl -ne '/^(\S+) is a function$/ or print'
+ }
+ _bodies_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _bodies_of_FUNCTIONS_ksh() {
+ functions "$@"
+ }
+ _bodies_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _names_of_VARIABLES() {
+ _names_of_VARIABLES_$_shell_DIALECT
+ }
+ _names_of_VARIABLES_sh() {
+ # This may screw up if variables contain \n and =
+ set | perl -ne 's/^(\S+?)=.*/$1/ and print;'
+ }
+ _names_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ compgen -A variable
+ }
+ _names_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset +p |
+ perl -pe 's/^(type)?set( [-+][a-zA-Z0-9]*)* //; s/(\[\d+\])?=.*//' |
+ uniq
+ }
+ _names_of_VARIABLES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)parameters}
+ }
+ _bodies_of_VARIABLES() {
+ _bodies_of_VARIABLES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_VARIABLES_sh() {
+        # Poor man's 'typeset -p' for shells that lack it
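+        # e.g. for myvar='a b' it emits the single line:  myvar=a\ b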
+ for _i in "$@"
+ do
+ perl -e 'print @ARGV' "$_i="
+ eval echo "\"\$$_i\"" | perl -e '$/=undef; $a=<>; chop($a); print $a' |
+ perl -pe 's/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\202-\377]/\\$&/go;'"s/'/\\\'/g; s/[\n]/'\\n'/go;";
+ echo
+ done
+ }
+ _bodies_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_zsh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _ignore_HARDCODED() {
+ _ignore_HARDCODED_$_shell_DIALECT
+ }
+ _ignore_HARDCODED_sh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_bash() {
+ # Copying $RANDOM will cause it not to be random
+ # The rest cannot be detected as read-only
+ echo '(RANDOM|_|TIMEOUT|GROUPS|FUNCNAME|DIRSTACK|PIPESTATUS|USERNAME|BASHPID|BASH_[A-Z_]+)'
+ }
+ _ignore_HARDCODED_ksh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_zsh() {
+ # These names cannot be detected
+ echo '([-\?\#\!\$\*\@\_0]|zsh_eval_context|ZSH_EVAL_CONTEXT|LINENO|IFS|commands|functions|options|aliases|EUID|EGID|UID|GID|dis_patchars|patchars|terminfo|galiases|keymaps|parameters|jobdirs|dirstack|functrace|funcsourcetrace|zsh_scheduled_events|dis_aliases|dis_reswords|dis_saliases|modules|reswords|saliases|widgets|userdirs|historywords|nameddirs|termcap|dis_builtins|dis_functions|jobtexts|funcfiletrace|dis_galiases|builtins|history|jobstates|funcstack|run-help)'
+ }
+ _ignore_READONLY() {
+ _ignore_READONLY_$_shell_DIALECT
+ }
+ _parse_READONLY() {
+ # shellcheck disable=SC1078,SC1079,SC2026
+ perl -e '@r = map {
+ chomp;
+ # sh on UnixWare: readonly TIMEOUT
+ # ash: readonly var='val'
+ # ksh: var='val'
+ # mksh: PIPESTATUS[0]
+ s/^(readonly )?([^=\[ ]*?)(\[\d+\])?(=.*|)$/$2/ or
+ # bash: declare -ar BASH_VERSINFO=([0]="4" [1]="4")
+ # zsh: typeset -r var='val'
+ s/^\S+\s+\S+\s+(\S[^=]*)(=.*|$)/$1/;
+ $_ } <>;
+ $vars = join "|",map { quotemeta $_ } @r;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ '
+ }
+ _ignore_READONLY_sh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_bash() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_ksh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_zsh() {
+ # shellcheck disable=SC3044
+ typeset -pr | _parse_READONLY
+ }
+ _remove_bad_NAMES() {
+ # Do not transfer vars and funcs from env_parallel
+ # shellcheck disable=SC2006
+ _ignore_RO="`_ignore_READONLY`"
+ # shellcheck disable=SC2006
+ _ignore_HARD="`_ignore_HARDCODED`"
+ # To avoid depending on grep dialect, use Perl version of:
+ # grep -Ev '^(...)$' |
+ perl -ne '/^(
+ PARALLEL_ENV|
+ PARALLEL_TMP|
+ _alias_NAMES|
+ _bodies_of_ALIASES|
+ _bodies_of_FUNCTIONS|
+ _bodies_of_VARIABLES|
+ _error_PAR|
+ _function_NAMES|
+ _get_ignored_VARS|
+ _grep_REGEXP|
+ _ignore_HARD|
+ _ignore_HARDCODED|
+ _ignore_READONLY|
+ _ignore_RO|
+ _ignore_UNDERSCORE|
+ _list_alias_BODIES|
+ _list_function_BODIES|
+ _list_variable_VALUES|
+ _make_grep_REGEXP|
+ _names_of_ALIASES|
+ _names_of_FUNCTIONS|
+ _names_of_VARIABLES|
+ _names_of_maybe_FUNCTIONS|
+ _parallel_exit_CODE|
+ _prefix_PARALLEL_ENV|
+ _prefix_PARALLEL_ENV_bash|
+ _remove_bad_NAMES|
+ _remove_readonly|
+ _variable_NAMES|
+ _warning_PAR|
+ _which_PAR)$/x and next;
+ # Filter names matching --env
+ /^'"$_grep_REGEXP"'$/ or next;
+ /^'"$_ignore_UNDERSCORE"'$/ and next;
+ # Remove readonly variables
+ /^'"$_ignore_RO"'$/ and next;
+ /^'"$_ignore_HARD"'$/ and next;
+ print;'
+ }
+ _prefix_PARALLEL_ENV_bash() {
+ # shellcheck disable=SC3044
+ shopt 2>/dev/null |
+ perl -pe 's:\s+off:;: and s/^/shopt -u /;
+ s:\s+on:;: and s/^/shopt -s /;
+ s:;$:&>/dev/null;:';
+ echo 'shopt -s expand_aliases &>/dev/null';
+ }
+
+ _get_ignored_VARS() {
+ perl -e '
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ }
+ }
+ if($ENV{PARALLEL_IGNORED_NAMES}) {
+ push @ignored_vars, split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ chomp @ignored_vars;
+ }
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ ' -- "$@"
+ }
+
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
+ _make_grep_REGEXP() {
+ perl -e '
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+ ' -- "$@"
+ }
+ _which_PAR() {
+ # type returns:
+ # ll is an alias for ls -l (in ash)
+ # bash is a tracked alias for /bin/bash
+ # true is a shell builtin (in bash)
+ # myfunc is a function (in bash)
+ # myfunc is a shell function (in zsh)
+ # which is /usr/bin/which (in sh, bash)
+ # which is hashed (/usr/bin/which)
+ # gi is aliased to `grep -i' (in bash)
+ # aliased to `alias | /usr/bin/which --tty-only --read-alias --show-dot --show-tilde'
+ # Return 0 if found, 1 otherwise
+ LANG=C type "$@" |
+ perl -pe '$exit += (s/ is an alias for .*// ||
+ s/ is aliased to .*// ||
+ s/ is a function// ||
+ s/ is a shell function// ||
+ s/ is a shell builtin// ||
+ s/.* is hashed .(\S+).$/$1/ ||
+ s/.* is (a tracked alias for )?//);
+ END { exit not $exit }'
+ }
+ _warning_PAR() {
+ echo "env_parallel: Warning: $*" >&2
+ }
+ _error_PAR() {
+ echo "env_parallel: Error: $*" >&2
+ }
+
+ if _which_PAR parallel >/dev/null; then
+ true parallel found in path
+ else
+ # shellcheck disable=SC2016
+ _error_PAR 'parallel must be in $PATH.'
+ return 255
+ fi
+
+ # Grep regexp for vars given by --env
+ # shellcheck disable=SC2006
+ _grep_REGEXP="`_make_grep_REGEXP \"$@\"`"
+ unset _make_grep_REGEXP
+
+ # Deal with --env _
+ # shellcheck disable=SC2006
+ _ignore_UNDERSCORE="`_get_ignored_VARS \"$@\"`"
+ unset _get_ignored_VARS
+
+ # --record-env
+ if perl -e 'exit grep { /^--record-env$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) |
+ cat > "$HOME"/.parallel/ignored_vars
+ return 0
+ fi
+
+ # --session
+ if perl -e 'exit grep { /^--session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Insert ::: between each level of session
+ # so you can pop off the last ::: at --end-session
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`echo \"$PARALLEL_IGNORED_NAMES\";
+ echo :::;
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) | perl -ne '
+ BEGIN{
+ map { $ignored_vars{$_}++ }
+ split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ }
+ chomp;
+ for(split/\s+/) {
+ if(not $ignored_vars{$_}) {
+ print $_,\"\\n\";
+ }
+ }
+ '`"
+ export PARALLEL_IGNORED_NAMES
+ return 0
+ fi
+ if perl -e 'exit grep { /^--end.?session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Pop off last ::: from PARALLEL_IGNORED_NAMES
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`perl -e '
+ $ENV{PARALLEL_IGNORED_NAMES} =~ s/(.*):::.*?$/$1/s;
+ print $ENV{PARALLEL_IGNORED_NAMES}
+ '`"
+ return 0
+ fi
+ # Grep alias names
+ # shellcheck disable=SC2006
+ _alias_NAMES="`_names_of_ALIASES | _remove_bad_NAMES | xargs echo`"
+ _list_alias_BODIES="_bodies_of_ALIASES $_alias_NAMES"
+ if [ "$_alias_NAMES" = "" ] ; then
+ # no aliases selected
+ _list_alias_BODIES="true"
+ fi
+ unset _alias_NAMES
+
+ # Grep function names
+ # shellcheck disable=SC2006
+ _function_NAMES="`_names_of_FUNCTIONS | _remove_bad_NAMES | xargs echo`"
+ _list_function_BODIES="_bodies_of_FUNCTIONS $_function_NAMES"
+ if [ "$_function_NAMES" = "" ] ; then
+ # no functions selected
+ _list_function_BODIES="true"
+ fi
+ unset _function_NAMES
+
+ # Grep variable names
+ # shellcheck disable=SC2006
+ _variable_NAMES="`_names_of_VARIABLES | _remove_bad_NAMES | xargs echo`"
+ _list_variable_VALUES="_bodies_of_VARIABLES $_variable_NAMES"
+ if [ "$_variable_NAMES" = "" ] ; then
+ # no variables selected
+ _list_variable_VALUES="true"
+ fi
+ unset _variable_NAMES
+
+ if $_eval_needed ; then
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ eval $_prefix_PARALLEL_ENV;
+ eval $_list_alias_BODIES;
+ eval $_list_function_BODIES;
+ eval $_list_variable_VALUES;
+ `"
+ else
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ $_prefix_PARALLEL_ENV;
+ $_list_alias_BODIES;
+ $_list_function_BODIES;
+ $_list_variable_VALUES;
+ `"
+ fi
+ export PARALLEL_ENV
+ # Free up some env space
+ unset _list_alias_BODIES _list_variable_VALUES _list_function_BODIES
+ unset _bodies_of_ALIASES _bodies_of_VARIABLES _bodies_of_FUNCTIONS
+ unset _names_of_ALIASES _names_of_VARIABLES _names_of_FUNCTIONS
+ unset _ignore_HARDCODED _ignore_READONLY _ignore_UNDERSCORE
+ unset _remove_bad_NAMES _grep_REGEXP _parse_READONLY
+ unset _prefix_PARALLEL_ENV
+ unset _ignore_READONLY_sh _ignore_READONLY_bash
+ unset _ignore_READONLY_ksh _ignore_READONLY_zsh
+ unset _ignore_HARDCODED_sh _ignore_HARDCODED_bash
+ unset _ignore_HARDCODED_ksh _ignore_HARDCODED_zsh
+ unset _bodies_of_ALIASES_ksh _bodies_of_ALIASES_sh
+ unset _bodies_of_ALIASES_zsh _bodies_of_FUNCTIONS_bash
+ unset _bodies_of_FUNCTIONS_ksh _bodies_of_FUNCTIONS_sh
+ unset _bodies_of_FUNCTIONS_zsh _bodies_of_VARIABLES_bash
+ unset _bodies_of_VARIABLES_ksh _bodies_of_VARIABLES_sh
+ unset _bodies_of_VARIABLES_zsh
+ unset _names_of_ALIASES _names_of_ALIASES_bash
+ unset _names_of_ALIASES_ksh _names_of_ALIASES_sh
+ unset _names_of_ALIASES_zsh _names_of_FUNCTIONS
+ unset _names_of_FUNCTIONS_bash _names_of_FUNCTIONS_ksh
+ unset _names_of_FUNCTIONS_sh _names_of_FUNCTIONS_zsh
+ unset _names_of_VARIABLES _names_of_VARIABLES_bash
+ unset _names_of_VARIABLES_ksh _names_of_VARIABLES_sh
+ unset _names_of_VARIABLES_zsh _names_of_maybe_FUNCTIONS
+
+ # Test if environment is too big by running 'true'
+ # shellcheck disable=SC2006,SC2092
+ if `_which_PAR true` >/dev/null 2>/dev/null ; then
+ parallel "$@"
+ _parallel_exit_CODE=$?
+ # Clean up variables/functions
+ unset PARALLEL_ENV
+ unset _which_PAR _which_TRUE
+ unset _warning_PAR _error_PAR
+ # Unset _parallel_exit_CODE before return
+ eval "unset _parallel_exit_CODE; return $_parallel_exit_CODE"
+ else
+ unset PARALLEL_ENV;
+ _error_PAR "Your environment is too big."
+ _error_PAR "You can try 3 different approaches:"
+ _error_PAR "1. Run 'env_parallel --session' before you set"
+ _error_PAR " variables or define functions."
+ _error_PAR "2. Use --env and only mention the names to copy."
+ _error_PAR "3. Try running this in a clean environment once:"
+ _error_PAR " env_parallel --record-env"
+ _error_PAR " And then use '--env _'"
+ _error_PAR "For details see: man env_parallel"
+ return 255
+ fi
+}
+
+parset() {
+ _parset_PARALLEL_PRG=parallel
+ _parset_main "$@"
+}
+
+env_parset() {
+ _parset_PARALLEL_PRG=env_parallel
+ _parset_main "$@"
+}
+
+_parset_main() {
+ # If $1 contains ',' or space:
+ # Split on , to get the destination variable names
+ # If $1 is a single destination variable name:
+ # Treat it as the name of an array
+ #
+ # # Create array named myvar
+ # parset myvar echo ::: {1..10}
+ # echo ${myvar[5]}
+ #
+ # # Put output into $var_a $var_b $var_c
+ # varnames=(var_a var_b var_c)
+ # parset "${varnames[*]}" echo ::: {1..3}
+ # echo $var_c
+ #
+ # # Put output into $var_a4 $var_b4 $var_c4
+ # parset "var_a4 var_b4 var_c4" echo ::: {1..3}
+ # echo $var_c4
+
+ _parset_NAME="$1"
+ if [ "$_parset_NAME" = "" ] ; then
+ echo parset: Error: No destination variable given. >&2
+ echo parset: Error: Try: >&2
+ echo parset: Error: ' ' parset myarray echo ::: foo bar >&2
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--help" ] ; then
+ echo parset: Error: Usage: >&2
+ echo parset: Error: ' ' parset varname GNU Parallel options and command >&2
+ echo
+ parallel --help
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--version" ] ; then
+ # shellcheck disable=SC2006
+ echo "parset 20240222 (GNU parallel `parallel --minversion 1`)"
+ echo "Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software"
+ echo "Foundation, Inc."
+ echo "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>"
+ echo "This is free software: you are free to change and redistribute it."
+ echo "GNU parallel comes with no warranty."
+ echo
+ echo "Web site: https://www.gnu.org/software/parallel"
+ echo
+ echo "When using programs that use GNU Parallel to process data for publication"
+ echo "please cite as described in 'parallel --citation'."
+ echo
+ return 255
+ fi
+ shift
+
+ # Bash: declare -A myassoc=( )
+ # Zsh: typeset -A myassoc=( )
+ # Ksh: typeset -A myassoc=( )
+ # shellcheck disable=SC2039,SC2169,SC3044
+ if (typeset -p "$_parset_NAME" 2>/dev/null; echo) |
+ perl -ne 'exit not (/^declare[^=]+-A|^typeset[^=]+-A/)' ; then
+ # This is an associative array
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset assoc,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ else
+ # This is a normal array or a list of variable names
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset var,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ fi
+}
diff --git a/src/env_parallel.mksh b/src/env_parallel.mksh
new file mode 100644
index 0000000..d307128
--- /dev/null
+++ b/src/env_parallel.mksh
@@ -0,0 +1,605 @@
+#!/usr/bin/env ksh
+
+# This file must be sourced in sh/ash/dash/bash/ksh/mksh/zsh:
+#
+# . env_parallel.sh
+# source env_parallel.ash
+# source env_parallel.dash
+# source env_parallel.bash
+# source env_parallel.ksh
+# source env_parallel.mksh
+# source env_parallel.zsh
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck disable=SC2006
+
+env_parallel() {
+ # env_parallel.{sh,ash,dash,bash,ksh,mksh,zsh}
+
+ # Check shell dialect
+ if [ -n "$BASH_VERSION" ]; then
+ _shell_DIALECT=bash
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=_prefix_PARALLEL_ENV_bash
+ elif [ -n "$ZSH_VERSION" ]; then
+ _shell_DIALECT=zsh
+ _eval_needed=true
+ _prefix_PARALLEL_ENV=false
+ elif [ -n "$KSH_VERSION" ]; then
+ _shell_DIALECT=ksh
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ else
+ # Dash/ash - can these be detected better?
+ _shell_DIALECT=sh
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ fi
+ _names_of_ALIASES() {
+ _names_of_ALIASES_$_shell_DIALECT
+ }
+ _names_of_ALIASES_sh() {
+ # alias fails on Unixware 5
+ for _i in `alias 2>/dev/null | perl -ne 's/^alias //;s/^(\S+)=.*/$1/ && print' 2>/dev/null`; do
+ # Check if this name really is an alias
+ # or just part of a multiline alias definition
+ if alias "$_i" >/dev/null 2>/dev/null; then
+ echo "$_i"
+ fi
+ done
+ }
+ _names_of_ALIASES_bash() {
+ # No aliases will return false. This error should be ignored.
+ compgen -a || true
+ }
+ _names_of_ALIASES_ksh() {
+ alias | perl -pe 's/=.*//'
+ }
+ _names_of_ALIASES_zsh() {
+ print -l ${(k)aliases}
+ }
+ _bodies_of_ALIASES() {
+ _bodies_of_ALIASES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_ALIASES_sh() {
+ # alias may return:
+ # myalias='definition' (GNU/Linux ash)
+ # alias myalias='definition' (FreeBSD ash)
+ # so remove 'alias ' from first line
+ for _i in "$@"; do
+ echo 'alias '"`alias "$_i" | perl -pe '1..1 and s/^alias //'`"
+ done
+ }
+ _bodies_of_ALIASES_bash() {
+ local _i
+ for _i in "$@"; do
+ # shellcheck disable=SC2046
+	    if [ $(alias "$_i" | wc -l) = 1 ] ; then
+ true Alias is a single line. Good.
+ else
+ _warning_PAR "Alias '$_i' contains newline."
+ _warning_PAR "Make sure the command has at least one newline after '$_i'."
+ _warning_PAR "See BUGS in 'man env_parallel'."
+ fi
+ done
+ alias "$@"
+ }
+ _bodies_of_ALIASES_ksh() {
+ alias "$@" | perl -pe 's/^/alias /;
+ sub warning { print STDERR "env_parallel: Warning: @_\n"; }
+ if(/^alias (\S+)=\$.*\\n/) {
+ warning("Alias \"$1\" contains newline.");
+ warning("Make sure the command has at least one newline after \"$1\".");
+ warning("See BUGS in \"man env_parallel\".");
+ }'
+
+ }
+ _bodies_of_ALIASES_zsh() {
+ local _i
+ for _i in "$@"; do
+ echo 'alias '"$(alias $_i)"
+ done
+ }
+ _names_of_FUNCTIONS() {
+ _names_of_FUNCTIONS_$_shell_DIALECT
+ }
+ _names_of_FUNCTIONS_bash() {
+ compgen -A function
+ }
+ _names_of_maybe_FUNCTIONS() {
+ set | perl -ne '/^([A-Z_0-9]+)\s*\(\)\s*\{?$/i and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_sh() {
+ # myfunc is a function
+ # shellcheck disable=SC2046
+ LANG=C type `_names_of_maybe_FUNCTIONS` |
+ perl -ne '/^(\S+) is a function$/ and not $seen{$1}++ and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_ksh() {
+ # mksh = typeset +f
+ # ksh = typeset +p -f | perl -pe 's/\(\).*//'
+ typeset +f | perl -pe 's/\(\).*//; s/ .*//;'
+ }
+ _names_of_FUNCTIONS_zsh() {
+ print -l ${(k)functions}
+ }
+ _bodies_of_FUNCTIONS() {
+ _bodies_of_FUNCTIONS_$_shell_DIALECT "$@"
+ }
+ _bodies_of_FUNCTIONS_sh() {
+ LANG=C type "$@" | perl -ne '/^(\S+) is a function$/ or print'
+ }
+ _bodies_of_FUNCTIONS_bash() {
+ typeset -f "$@"
+ }
+ _bodies_of_FUNCTIONS_ksh() {
+ functions "$@"
+ }
+ _bodies_of_FUNCTIONS_zsh() {
+ typeset -f "$@"
+ }
+ _names_of_VARIABLES() {
+ _names_of_VARIABLES_$_shell_DIALECT
+ }
+ _names_of_VARIABLES_sh() {
+ # This may screw up if variables contain \n and =
+ set | perl -ne 's/^(\S+?)=.*/$1/ and print;'
+ }
+ _names_of_VARIABLES_bash() {
+ compgen -A variable
+ }
+ _names_of_VARIABLES_ksh() {
+ # mksh: typeset +p |
+ # perl -pe 's/^(type)?set( [-+][a-zA-Z0-9]*)* //; s/(\[\d+\])?=.*//' |
+ # uniq
+ # ksh: typeset +p | perl -pe 's/^typeset .. //'
+ typeset +p |
+ perl -pe 's/^(type)?set( [-+][a-zA-Z0-9]*)* //; s/(\[\d+\])?=.*//' |
+ uniq
+ }
+ _names_of_VARIABLES_zsh() {
+ print -l ${(k)parameters}
+ }
+ _bodies_of_VARIABLES() {
+ _bodies_of_VARIABLES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_VARIABLES_sh() {
+ # Crappy typeset -p
+ for _i in "$@"
+ do
+ perl -e 'print @ARGV' "$_i="
+ eval echo "\"\$$_i\"" | perl -e '$/=undef; $a=<>; chop($a); print $a' |
+ perl -pe 's/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\202-\377]/\\$&/go;'"s/'/\\\'/g; s/[\n]/'\\n'/go;";
+ echo
+ done
+ }
+ _bodies_of_VARIABLES_bash() {
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_ksh() {
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_zsh() {
+ typeset -p "$@"
+ }
+ _ignore_HARDCODED() {
+ _ignore_HARDCODED_$_shell_DIALECT
+ }
+ _ignore_HARDCODED_sh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_bash() {
+ # Copying $RANDOM will cause it not to be random
+ # The rest cannot be detected as read-only
+ echo '(RANDOM|_|TIMEOUT|GROUPS|FUNCNAME|DIRSTACK|PIPESTATUS|USERNAME|BASHPID|BASH_[A-Z_]+)'
+ }
+ _ignore_HARDCODED_ksh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_zsh() {
+ # These names cannot be detected
+ echo '([-\?\#\!\$\*\@\_0]|zsh_eval_context|ZSH_EVAL_CONTEXT|LINENO|IFS|commands|functions|options|aliases|EUID|EGID|UID|GID|dis_patchars|patchars|terminfo|galiases|keymaps|parameters|jobdirs|dirstack|functrace|funcsourcetrace|zsh_scheduled_events|dis_aliases|dis_reswords|dis_saliases|modules|reswords|saliases|widgets|userdirs|historywords|nameddirs|termcap|dis_builtins|dis_functions|jobtexts|funcfiletrace|dis_galiases|builtins|history|jobstates|funcstack|run-help)'
+ }
+ _ignore_READONLY() {
+ _ignore_READONLY_$_shell_DIALECT
+ }
+ _parse_READONLY() {
+ # shellcheck disable=SC1078,SC1079,SC2026
+ perl -e '@r = map {
+ chomp;
+ # sh on UnixWare: readonly TIMEOUT
+ # ash: readonly var='val'
+ # ksh: var='val'
+ # mksh: PIPESTATUS[0]
+ s/^(readonly )?([^=\[ ]*?)(\[\d+\])?(=.*|)$/$2/ or
+ # bash: declare -ar BASH_VERSINFO=([0]="4" [1]="4")
+ # zsh: typeset -r var='val'
+ s/^\S+\s+\S+\s+(\S[^=]*)(=.*|$)/$1/;
+ $_ } <>;
+ $vars = join "|",map { quotemeta $_ } @r;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ '
+ }
+ _ignore_READONLY_sh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_bash() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_ksh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_zsh() {
+ typeset -pr | _parse_READONLY
+ }
+ _remove_bad_NAMES() {
+ # Do not transfer vars and funcs from env_parallel
+ # shellcheck disable=SC2006
+ _ignore_RO="`_ignore_READONLY`"
+ # shellcheck disable=SC2006
+ _ignore_HARD="`_ignore_HARDCODED`"
+ # To avoid depending on grep dialect, use Perl version of:
+ # grep -Ev '^(...)$' |
+ perl -ne '/^(
+ PARALLEL_ENV|
+ PARALLEL_TMP|
+ _alias_NAMES|
+ _bodies_of_ALIASES|
+ _bodies_of_FUNCTIONS|
+ _bodies_of_VARIABLES|
+ _error_PAR|
+ _function_NAMES|
+ _get_ignored_VARS|
+ _grep_REGEXP|
+ _ignore_HARD|
+ _ignore_HARDCODED|
+ _ignore_READONLY|
+ _ignore_RO|
+ _ignore_UNDERSCORE|
+ _list_alias_BODIES|
+ _list_function_BODIES|
+ _list_variable_VALUES|
+ _make_grep_REGEXP|
+ _names_of_ALIASES|
+ _names_of_FUNCTIONS|
+ _names_of_VARIABLES|
+ _names_of_maybe_FUNCTIONS|
+ _parallel_exit_CODE|
+ _prefix_PARALLEL_ENV|
+	    _prefix_PARALLEL_ENV_bash|
+ _remove_bad_NAMES|
+ _remove_readonly|
+ _variable_NAMES|
+ _warning_PAR|
+ _which_PAR)$/x and next;
+ # Filter names matching --env
+ /^'"$_grep_REGEXP"'$/ or next;
+ /^'"$_ignore_UNDERSCORE"'$/ and next;
+ # Remove readonly variables
+ /^'"$_ignore_RO"'$/ and next;
+ /^'"$_ignore_HARD"'$/ and next;
+ print;'
+ }
+ _prefix_PARALLEL_ENV_bash() {
+ shopt 2>/dev/null |
+ perl -pe 's:\s+off:;: and s/^/shopt -u /;
+ s:\s+on:;: and s/^/shopt -s /;
+ s:;$:&>/dev/null;:';
+ echo 'shopt -s expand_aliases &>/dev/null';
+ }
+
+ _get_ignored_VARS() {
+ perl -e '
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ }
+ }
+ if($ENV{PARALLEL_IGNORED_NAMES}) {
+ push @ignored_vars, split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ chomp @ignored_vars;
+ }
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ ' -- "$@"
+ }
+
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
+ _make_grep_REGEXP() {
+ perl -e '
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+ ' -- "$@"
+ }
+ _which_PAR() {
+ # type returns:
+ # ll is an alias for ls -l (in ash)
+ # bash is a tracked alias for /bin/bash
+ # true is a shell builtin (in bash)
+ # myfunc is a function (in bash)
+ # myfunc is a shell function (in zsh)
+ # which is /usr/bin/which (in sh, bash)
+ # which is hashed (/usr/bin/which)
+ # gi is aliased to `grep -i' (in bash)
+ # aliased to `alias | /usr/bin/which --tty-only --read-alias --show-dot --show-tilde'
+ # Return 0 if found, 1 otherwise
+ LANG=C type "$@" |
+ perl -pe '$exit += (s/ is an alias for .*// ||
+ s/ is aliased to .*// ||
+ s/ is a function// ||
+ s/ is a shell function// ||
+ s/ is a shell builtin// ||
+ s/.* is hashed .(\S+).$/$1/ ||
+ s/.* is (a tracked alias for )?//);
+ END { exit not $exit }'
+ }
+ _warning_PAR() {
+ echo "env_parallel: Warning: $*" >&2
+ }
+ _error_PAR() {
+ echo "env_parallel: Error: $*" >&2
+ }
+
+ if _which_PAR parallel >/dev/null; then
+ true parallel found in path
+ else
+ # shellcheck disable=SC2016
+ _error_PAR 'parallel must be in $PATH.'
+ return 255
+ fi
+
+ # Grep regexp for vars given by --env
+ # shellcheck disable=SC2006
+ _grep_REGEXP="`_make_grep_REGEXP \"$@\"`"
+ unset _make_grep_REGEXP
+
+ # Deal with --env _
+ # shellcheck disable=SC2006
+ _ignore_UNDERSCORE="`_get_ignored_VARS \"$@\"`"
+ unset _get_ignored_VARS
+
+ # --record-env
+ if perl -e 'exit grep { /^--record-env$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) |
+ cat > "$HOME"/.parallel/ignored_vars
+ return 0
+ fi
+
+ # --session
+ if perl -e 'exit grep { /^--session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Insert ::: between each level of session
+ # so you can pop off the last ::: at --end-session
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`echo \"$PARALLEL_IGNORED_NAMES\";
+ echo :::;
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) | perl -ne '
+ BEGIN{
+ map { $ignored_vars{$_}++ }
+ split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ }
+ chomp;
+ for(split/\s+/) {
+ if(not $ignored_vars{$_}) {
+ print $_,\"\\n\";
+ }
+ }
+ '`"
+ export PARALLEL_IGNORED_NAMES
+ return 0
+ fi
+ if perl -e 'exit grep { /^--end.?session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Pop off last ::: from PARALLEL_IGNORED_NAMES
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`perl -e '
+ $ENV{PARALLEL_IGNORED_NAMES} =~ s/(.*):::.*?$/$1/s;
+ print $ENV{PARALLEL_IGNORED_NAMES}
+ '`"
+ return 0
+ fi
+ # Grep alias names
+ # shellcheck disable=SC2006
+ _alias_NAMES="`_names_of_ALIASES | _remove_bad_NAMES | xargs echo`"
+ _list_alias_BODIES="_bodies_of_ALIASES $_alias_NAMES"
+ if [ "$_alias_NAMES" = "" ] ; then
+ # no aliases selected
+ _list_alias_BODIES="true"
+ fi
+ unset _alias_NAMES
+
+ # Grep function names
+ # shellcheck disable=SC2006
+ _function_NAMES="`_names_of_FUNCTIONS | _remove_bad_NAMES | xargs echo`"
+ _list_function_BODIES="_bodies_of_FUNCTIONS $_function_NAMES"
+ if [ "$_function_NAMES" = "" ] ; then
+ # no functions selected
+ _list_function_BODIES="true"
+ fi
+ unset _function_NAMES
+
+ # Grep variable names
+ # shellcheck disable=SC2006
+ _variable_NAMES="`_names_of_VARIABLES | _remove_bad_NAMES | xargs echo`"
+ _list_variable_VALUES="_bodies_of_VARIABLES $_variable_NAMES"
+ if [ "$_variable_NAMES" = "" ] ; then
+ # no variables selected
+ _list_variable_VALUES="true"
+ fi
+ unset _variable_NAMES
+
+    if $_eval_needed ; then
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ eval $_list_alias_BODIES;
+ eval $_list_function_BODIES;
+ eval $_list_variable_VALUES;
+ `"
+ else
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ $_prefix_PARALLEL_ENV;
+ $_list_alias_BODIES;
+ $_list_function_BODIES;
+ $_list_variable_VALUES;
+ `"
+ fi
+ export PARALLEL_ENV
+ # Free up some env space
+ unset _list_alias_BODIES _list_variable_VALUES _list_function_BODIES
+ unset _bodies_of_ALIASES _bodies_of_VARIABLES _bodies_of_FUNCTIONS
+ unset _names_of_ALIASES _names_of_VARIABLES _names_of_FUNCTIONS
+ unset _ignore_HARDCODED _ignore_READONLY _ignore_UNDERSCORE
+ unset _remove_bad_NAMES _grep_REGEXP
+ unset _prefix_PARALLEL_ENV
+ # Test if environment is too big by running 'true'
+ # shellcheck disable=SC2006,SC2092
+ if `_which_PAR true` >/dev/null 2>/dev/null ; then
+ parallel "$@"
+ _parallel_exit_CODE=$?
+ # Clean up variables/functions
+ unset PARALLEL_ENV
+ unset _which_PAR _which_TRUE
+ unset _warning_PAR _error_PAR
+ # Unset _parallel_exit_CODE before return
+ eval "unset _parallel_exit_CODE; return $_parallel_exit_CODE"
+ else
+ unset PARALLEL_ENV;
+ _error_PAR "Your environment is too big."
+ _error_PAR "You can try 3 different approaches:"
+ _error_PAR "1. Run 'env_parallel --session' before you set"
+ _error_PAR " variables or define functions."
+ _error_PAR "2. Use --env and only mention the names to copy."
+ _error_PAR "3. Try running this in a clean environment once:"
+ _error_PAR " env_parallel --record-env"
+ _error_PAR " And then use '--env _'"
+ _error_PAR "For details see: man env_parallel"
+ return 255
+ fi
+}
+
+parset() {
+ _parset_PARALLEL_PRG=parallel
+ _parset_main "$@"
+}
+
+env_parset() {
+ _parset_PARALLEL_PRG=env_parallel
+ _parset_main "$@"
+}
+
+_parset_main() {
+ # If $1 contains ',' or space:
+ # Split on , to get the destination variable names
+ # If $1 is a single destination variable name:
+ # Treat it as the name of an array
+ #
+ # # Create array named myvar
+ # parset myvar echo ::: {1..10}
+ # echo ${myvar[5]}
+ #
+ # # Put output into $var_a $var_b $var_c
+ # varnames=(var_a var_b var_c)
+ # parset "${varnames[*]}" echo ::: {1..3}
+ # echo $var_c
+ #
+ # # Put output into $var_a4 $var_b4 $var_c4
+ # parset "var_a4 var_b4 var_c4" echo ::: {1..3}
+ # echo $var_c4
+
+ _parset_NAME="$1"
+ if [ "$_parset_NAME" = "" ] ; then
+ echo parset: Error: No destination variable given. >&2
+ echo parset: Error: Try: >&2
+ echo parset: Error: ' ' parset myarray echo ::: foo bar >&2
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--help" ] ; then
+ echo parset: Error: Usage: >&2
+ echo parset: Error: ' ' parset varname GNU Parallel options and command >&2
+ echo
+ parallel --help
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--version" ] ; then
+ # shellcheck disable=SC2006
+ echo "parset 20240222 (GNU parallel `parallel --minversion 1`)"
+ echo "Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software"
+ echo "Foundation, Inc."
+ echo "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>"
+ echo "This is free software: you are free to change and redistribute it."
+ echo "GNU parallel comes with no warranty."
+ echo
+ echo "Web site: https://www.gnu.org/software/parallel"
+ echo
+ echo "When using programs that use GNU Parallel to process data for publication"
+ echo "please cite as described in 'parallel --citation'."
+ echo
+ return 255
+ fi
+ shift
+
+ # Bash: declare -A myassoc=( )
+ # Zsh: typeset -A myassoc=( )
+ # Ksh: typeset -A myassoc=( )
+ # shellcheck disable=SC2039,SC2169
+ if (typeset -p "$_parset_NAME" 2>/dev/null; echo) |
+ perl -ne 'exit not (/^declare[^=]+-A|^typeset[^=]+-A/)' ; then
+ # This is an associative array
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset assoc,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ else
+ # This is a normal array or a list of variable names
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset var,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ fi
+}
diff --git a/src/env_parallel.pdksh b/src/env_parallel.pdksh
new file mode 100755
index 0000000..1a25dcd
--- /dev/null
+++ b/src/env_parallel.pdksh
@@ -0,0 +1,183 @@
+#!/usr/bin/env pdksh
+
+# This file must be sourced in pdksh:
+#
+# source env_parallel.pdksh
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+env_parallel() {
+ # env_parallel.pdksh
+
+ _names_of_ALIASES() {
+ compgen -a
+ }
+ _bodies_of_ALIASES() {
+ alias "$@" | perl -pe 's/^/alias /'
+ }
+ _names_of_FUNCTIONS() {
+ compgen -A function
+ }
+ _bodies_of_FUNCTIONS() {
+ typeset -f "$@"
+ }
+ _names_of_VARIABLES() {
+ compgen -A variable
+ }
+ _bodies_of_VARIABLES() {
+ typeset -p "$@"
+ }
+ _remove_bad_NAMES() {
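+	# Dump the output of 'readonly' to a temp file so the readonly
+	# names can be filtered out with grep -vFf below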
+ _tmp_READONLY="$(mktemp)"
+ readonly > "$_tmp_READONLY"
+ # Do not transfer vars and funcs from env_parallel
+ grep -Ev '^(_names_of_ALIASES|_bodies_of_ALIASES|_names_of_maybe_FUNCTIONS|_names_of_FUNCTIONS|_bodies_of_FUNCTIONS|_names_of_VARIABLES|_bodies_of_VARIABLES|_remove_bad_NAMES|_prefix_PARALLEL_ENV|_get_ignored_VARS|_make_grep_REGEXP|_ignore_UNDERSCORE|_alias_NAMES|_list_alias_BODIES|_function_NAMES|_list_function_BODIES|_variable_NAMES|_list_variable_VALUES|_prefix_PARALLEL_ENV|PARALLEL_ENV|PARALLEL_TMP)$' |
+ # Filter names matching --env
+ grep -E "^$_grep_REGEXP"\$ | grep -vE "^$_ignore_UNDERSCORE"\$ |
+	grep -vFf "$_tmp_READONLY" |
+ grep -Ev '^(PIPESTATUS)'
+	rm "$_tmp_READONLY"
+ unset _tmp_READONLY
+ }
+ _prefix_PARALLEL_ENV() {
+ shopt 2>/dev/null |
+ perl -pe 's:\s+off:;: and s/^/shopt -u /;
+ s:\s+on:;: and s/^/shopt -s /;
+ s:;$:&>/dev/null;:';
+ echo 'shopt -s expand_aliases &>/dev/null';
+ }
+
+ _get_ignored_VARS() {
+ perl -e '
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ }
+ }
+ ' -- "$@"
+ }
+
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
+ _make_grep_REGEXP() {
+ perl -e '
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+ ' -- "$@"
+ }
+
+ if which parallel | grep 'no parallel in' >/dev/null; then
+ echo 'env_parallel: Error: parallel must be in $PATH.' >&2
+ return 1
+ fi
+ if which parallel >/dev/null; then
+ true which on linux
+ else
+ echo 'env_parallel: Error: parallel must be in $PATH.' >&2
+ return 1
+ fi
+
+ # Grep regexp for vars given by --env
+ _grep_REGEXP="`_make_grep_REGEXP \"$@\"`"
+ unset _make_grep_REGEXP
+
+ # Deal with --env _
+ _ignore_UNDERSCORE="`_get_ignored_VARS \"$@\"`"
+ unset _get_ignored_VARS
+
+ # --record-env
+ if perl -e 'exit grep { /^--record-env$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) |
+	cat > "$HOME"/.parallel/ignored_vars
+ return 0
+ fi
+
+ # Grep alias names
+ _alias_NAMES="`_names_of_ALIASES | _remove_bad_NAMES | xargs echo`"
+ _list_alias_BODIES="_bodies_of_ALIASES $_alias_NAMES"
+ if [ "$_alias_NAMES" = "" ] ; then
+ # no aliases selected
+ _list_alias_BODIES="true"
+ fi
+ unset _alias_NAMES
+
+ # Grep function names
+ _function_NAMES="`_names_of_FUNCTIONS | _remove_bad_NAMES | xargs echo`"
+ _list_function_BODIES="_bodies_of_FUNCTIONS $_function_NAMES"
+ if [ "$_function_NAMES" = "" ] ; then
+ # no functions selected
+ _list_function_BODIES="true"
+ fi
+ unset _function_NAMES
+
+ # Grep variable names
+ _variable_NAMES="`_names_of_VARIABLES | _remove_bad_NAMES | xargs echo`"
+ _list_variable_VALUES="_bodies_of_VARIABLES $_variable_NAMES"
+ if [ "$_variable_NAMES" = "" ] ; then
+ # no variables selected
+ _list_variable_VALUES="true"
+ fi
+ unset _variable_NAMES
+
+ # eval is needed for aliases - cannot explain why
+ export PARALLEL_ENV="`
+ eval $_list_alias_BODIES;
+ $_list_function_BODIES
+ $_list_variable_VALUES;
+ `";
+ unset _list_alias_BODIES _list_variable_VALUES _list_function_BODIES
+ unset _bodies_of_ALIASES _bodies_of_VARIABLES _bodies_of_FUNCTIONS
+ unset _names_of_ALIASES _names_of_VARIABLES _names_of_FUNCTIONS
+ unset _ignore_HARDCODED _ignore_READONLY _ignore_UNDERSCORE
+ unset _remove_bad_NAMES _grep_REGEXP
+ unset _prefix_PARALLEL_ENV
+ `which parallel` "$@"
+ _parallel_exit_CODE=$?
+ unset PARALLEL_ENV;
+ unset _which_PAR _which_TRUE
+ unset _warning_PAR _error_PAR
+ # Unset _parallel_exit_CODE before return
+ eval "unset _parallel_exit_CODE; return $_parallel_exit_CODE"
+}
diff --git a/src/env_parallel.pod b/src/env_parallel.pod
new file mode 100644
index 0000000..c59ba5d
--- /dev/null
+++ b/src/env_parallel.pod
@@ -0,0 +1,942 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=encoding utf8
+
+=head1 NAME
+
+env_parallel - export environment to GNU parallel
+
+
+=head1 SYNOPSIS
+
+B<env_parallel> [--record-env|--session|--end-session]
+ [options for GNU Parallel]
+
+
+=head1 DESCRIPTION
+
+B<env_parallel> is a shell function that exports the current
+environment to GNU B<parallel>.
+
+If the shell function is not loaded, a dummy script will be run
+instead that explains how to install the function.
+
+B<env_parallel> is 100 ms slower at startup than pure GNU B<parallel>,
+and takes up to 30% longer to start a job (typically 15 ms).
+
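+A rough way to see the overhead on your own machine (a sketch; use a
+shell where B<time> is a keyword, e.g. Bash or Zsh; exact timings vary
+with the size of your environment):
+
+  time parallel true ::: 1 2 3
+  time env_parallel true ::: 1 2 3
+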
+Due to the problem with environment space (see below) you are
+recommended only to transfer the environment that you need.
+
+To help you do that, you can mark names that should not be
+transferred. This can be done with either B<--session> or
+B<--record-env>.
+
+ # Record the "clean" environment (this only needs to be run once)
+ env_parallel --record-env
+
+ # Optionally edit ~/.parallel/ignored_vars (only needed once)
+
+ # Define whatever you want to use
+ myfunc() { myalias and functions $myvar work. $1.; }
+ alias myalias='echo Aliases'
+ myvar='and variables'
+
+ # Use --env _ to only transfer the names not in the "empty" environment
+ env_parallel --env _ -S localhost myfunc ::: Hooray
+
+Or:
+
+ # Do --record-env into $PARALLEL_IGNORED_NAMES
+ env_parallel --session
+
+ # Define whatever you want to use
+ myfunc() { myalias and functions $myvar work. $1.; }
+ alias myalias='echo Aliases'
+ myvar='and variables'
+
+ # env_parallel will not export names in $PARALLEL_IGNORED_NAMES
+ env_parallel -S localhost myfunc ::: Hooray
+
+ # Optionally
+ env_parallel --end-session
+
+In B<csh> B<--session> is not supported:
+
+ # Record the "clean" environment - this only needs to be run once
+ env_parallel --record-env
+
+ # Optionally edit ~/.parallel/ignored_vars - only needed once
+
+ # Define whatever you want to use
+ alias myalias 'echo Aliases $myvar \!*.'
+ set myvar='and variables'
+
+ # Use --env _ to only transfer the names not in the "empty" environment
+ env_parallel --env _ -S localhost myalias ::: work
+
+=head2 Environment space
+
+By default B<env_parallel> will export all environment variables,
+arrays, aliases, functions and shell options (see details for the
+individual shells below).
+
+But this only works if the size of the current environment is smaller
+than the maximal length of a command, and smaller than half of the
+max if running remotely. E.g. the max size of a Bash command is 128
+KB, so B<env_parallel> will fail if 'B<set | wc -c>' is bigger than
+128 KB. Technically the limit is in execve(2), which IPC::open3 uses.
+
+Bash completion functions are well-known for taking up well over 128
+KB of environment space, and they are the primary cause of
+B<env_parallel> failing.
+
+Instead you can use B<--env> to specify which variables, arrays,
+aliases and functions to export, as this will only export those with
+the given names. Or follow the recommended usage shown in
+DESCRIPTION.
+
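+For example (a sketch for a Bash-like shell; B<myfunc> and B<myvar>
+are placeholder names):
+
+  # Rough size of the current environment; keep it well under 128 KB
+  set | wc -c
+
+  # Export only the names the job actually uses
+  env_parallel --env myfunc,myvar myfunc ::: work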
+
+=head1 OPTIONS
+
+Same as GNU B<parallel> in addition to these:
+
+=over 4
+
+=item B<--end-session>
+
+Undo last B<--session>
+
+
+=item B<--record-env>
+
+Record all names currently defined, so that they will be ignored
+every time B<env_parallel> is run in the future.
+
+
+=item B<--session>
+
+Ignore all names currently defined. Aliases, variables, arrays, and
+functions currently defined will not be transferred.
+
+But names defined I<after> running B<env_parallel --session> I<will>
+be transferred.
+
+This is only valid in the running shell, and can be undone with
+B<env_parallel --end-session>.
+
+You can run multiple B<--session> inside each other:
+
+ env_parallel --session
+ var=not
+ # var is transferred
+ env_parallel -Slocalhost 'echo var is $var' ::: ignored
+ env_parallel --session
+ # var is not transferred
+ env_parallel -Slocalhost 'echo var is $var' ::: ignored
+ env_parallel --end-session
+ # var is transferred again
+ env_parallel -Slocalhost 'echo var is $var' ::: ignored
+
+
+
+=back
+
+
+=head1 SUPPORTED SHELLS
+
+=head2 Ash
+
+=head3 Installation
+
+Put this in $HOME/.profile:
+
+ . env_parallel.ash
+
+E.g. by doing:
+
+ echo '. env_parallel.ash' >> $HOME/.profile
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, or alias with the
+given name. Multiple B<--env>s can be given.
+
+B<--session> is supported.
+
+=over 8
+
+=item aliases
+
+ alias myecho='echo aliases'
+ env_parallel myecho ::: work
+ env_parallel -S server myecho ::: work
+ env_parallel --env myecho myecho ::: work
+ env_parallel --env myecho -S server myecho ::: work
+
+ alias multiline='echo multiline
+ echo aliases'
+ env_parallel multiline ::: work
+ env_parallel -S server multiline ::: work
+ env_parallel --env multiline multiline ::: work
+ env_parallel --env multiline -S server multiline ::: work
+
+=item functions
+
+  ash cannot list defined functions, so functions are not supported.
+
+=item variables
+
+ myvar=variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays
+
+Arrays are not supported by Ash.
+
+=back
+
+=head2 Bash
+
+=head3 Installation
+
+Put this in $HOME/.bashrc:
+
+ . env_parallel.bash
+
+E.g. by doing:
+
+ echo '. env_parallel.bash' >> $HOME/.bashrc
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, alias, function, or
+array with the given name. Multiple B<--env>s can be given.
+
+B<--session> is supported.
+
+=over 8
+
+=item aliases
+
+ alias myecho='echo aliases'
+ env_parallel myecho ::: work
+ env_parallel -S server myecho ::: work
+ env_parallel --env myecho myecho ::: work
+ env_parallel --env myecho -S server myecho ::: work
+
+ alias multiline='echo multiline
+ echo aliases'
+ env_parallel 'multiline {};
+ echo but only when followed by a newline' ::: work
+ env_parallel -S server 'multiline {};
+ echo but only when followed by a newline' ::: work
+ env_parallel --env multiline 'multiline {};
+ echo but only when followed by a newline' ::: work
+ env_parallel --env multiline -S server 'multiline {};
+ echo but only when followed by a newline' ::: work
+
+=item functions
+
+ myfunc() { echo functions $*; }
+ env_parallel myfunc ::: work
+ env_parallel -S server myfunc ::: work
+ env_parallel --env myfunc myfunc ::: work
+ env_parallel --env myfunc -S server myfunc ::: work
+
+=item variables
+
+ myvar=variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays
+
+ myarray=(arrays work, too)
+ env_parallel -k echo '${myarray[{}]}' ::: 0 1 2
+ env_parallel -k -S server echo '${myarray[{}]}' ::: 0 1 2
+ env_parallel -k --env myarray echo '${myarray[{}]}' ::: 0 1 2
+ env_parallel -k --env myarray -S server \
+ echo '${myarray[{}]}' ::: 0 1 2
+
+=back
+
+=head3 BUGS
+
+Due to a bug in Bash, aliases containing newlines must be followed by
+a newline in the command. Some systems are not affected by this bug,
+but will print a warning anyway.
+
+=head2 csh
+
+B<env_parallel> for B<csh> breaks B<$PARALLEL>, so do not use
+B<$PARALLEL>.
+
+=head3 Installation
+
+Put this in $HOME/.cshrc:
+
+ source `which env_parallel.csh`
+
+E.g. by doing:
+
+ echo 'source `which env_parallel.csh`' >> $HOME/.cshrc
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, alias, or
+array with the given name. Multiple B<--env>s can be given.
+
+=over 8
+
+=item aliases
+
+ alias myecho 'echo aliases'
+ env_parallel myecho ::: work
+ env_parallel -S server myecho ::: work
+ env_parallel --env myecho myecho ::: work
+ env_parallel --env myecho -S server myecho ::: work
+
+=item functions
+
+Not supported by B<csh>.
+
+=item variables
+
+ set myvar=variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays with no special chars
+
+ set myarray=(arrays work, too)
+ env_parallel -k echo \$'{myarray[{}]}' ::: 1 2 3
+ env_parallel -k -S server echo \$'{myarray[{}]}' ::: 1 2 3
+ env_parallel -k --env myarray echo \$'{myarray[{}]}' ::: 1 2 3
+ env_parallel -k --env myarray -S server \
+ echo \$'{myarray[{}]}' ::: 1 2 3
+
+=back
+
+
+=head2 Dash
+
+=head3 Installation
+
+Put this in $HOME/.profile:
+
+ . env_parallel.dash
+
+E.g. by doing:
+
+ echo '. env_parallel.dash' >> $HOME/.profile
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, or alias with the
+given name. Multiple B<--env>s can be given.
+
+B<--session> is supported.
+
+=over 8
+
+=item aliases
+
+ alias myecho='echo aliases'
+ env_parallel myecho ::: work
+ env_parallel -S server myecho ::: work
+ env_parallel --env myecho myecho ::: work
+ env_parallel --env myecho -S server myecho ::: work
+
+ alias multiline='echo multiline
+ echo aliases'
+ env_parallel multiline ::: work
+ env_parallel -S server multiline ::: work
+ env_parallel --env multiline multiline ::: work
+ env_parallel --env multiline -S server multiline ::: work
+
+=item functions
+
+  dash cannot list defined functions, so functions are not supported.
+
+=item variables
+
+ myvar=variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays
+
+ dash does not support arrays.
+
+=back
+
+
+=head2 fish
+
+=head3 Installation
+
+Put this in $HOME/.config/fish/config.fish:
+
+ source (which env_parallel.fish)
+
+E.g. by doing:
+
+ echo 'source (which env_parallel.fish)' \
+ >> $HOME/.config/fish/config.fish
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, alias, function, or
+array with the given name. Multiple B<--env>s can be given.
+
+B<--session> is supported.
+
+=over 8
+
+=item aliases
+
+ alias myecho 'echo aliases'
+ env_parallel myecho ::: work
+ env_parallel -S server myecho ::: work
+ env_parallel --env myecho myecho ::: work
+ env_parallel --env myecho -S server myecho ::: work
+
+=item functions
+
+ function myfunc
+ echo functions $argv
+ end
+ env_parallel myfunc ::: work
+ env_parallel -S server myfunc ::: work
+ env_parallel --env myfunc myfunc ::: work
+ env_parallel --env myfunc -S server myfunc ::: work
+
+=item variables
+
+ set myvar variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays
+
+ set myarray arrays work, too
+ env_parallel -k echo '$myarray[{}]' ::: 1 2 3
+ env_parallel -k -S server echo '$myarray[{}]' ::: 1 2 3
+ env_parallel -k --env myarray echo '$myarray[{}]' ::: 1 2 3
+ env_parallel -k --env myarray -S server \
+ echo '$myarray[{}]' ::: 1 2 3
+
+=back
+
+
+=head2 ksh
+
+=head3 Installation
+
+Put this in $HOME/.kshrc:
+
+ source env_parallel.ksh
+
+E.g. by doing:
+
+ echo 'source env_parallel.ksh' >> $HOME/.kshrc
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, alias, function, or
+array with the given name. Multiple B<--env>s can be given.
+
+B<--session> is supported.
+
+=over 8
+
+=item aliases
+
+ alias myecho='echo aliases'
+ env_parallel myecho ::: work
+ env_parallel -S server myecho ::: work
+ env_parallel --env myecho myecho ::: work
+ env_parallel --env myecho -S server myecho ::: work
+
+ alias multiline='echo multiline
+ echo aliases'
+ env_parallel multiline ::: work
+ env_parallel -S server multiline ::: work
+ env_parallel --env multiline multiline ::: work
+ env_parallel --env multiline -S server multiline ::: work
+
+=item functions
+
+ myfunc() { echo functions $*; }
+ env_parallel myfunc ::: work
+ env_parallel -S server myfunc ::: work
+ env_parallel --env myfunc myfunc ::: work
+ env_parallel --env myfunc -S server myfunc ::: work
+
+=item variables
+
+ myvar=variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays
+
+ myarray=(arrays work, too)
+ env_parallel -k echo '${myarray[{}]}' ::: 0 1 2
+ env_parallel -k -S server echo '${myarray[{}]}' ::: 0 1 2
+ env_parallel -k --env myarray echo '${myarray[{}]}' ::: 0 1 2
+ env_parallel -k --env myarray -S server \
+ echo '${myarray[{}]}' ::: 0 1 2
+
+=back
+
+
+=head2 mksh
+
+=head3 Installation
+
+Put this in $HOME/.mkshrc:
+
+ source env_parallel.mksh
+
+E.g. by doing:
+
+ echo 'source env_parallel.mksh' >> $HOME/.mkshrc
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, alias, function, or
+array with the given name. Multiple B<--env>s can be given.
+
+B<--session> is supported.
+
+=over 8
+
+=item aliases
+
+ alias myecho='echo aliases'
+ env_parallel myecho ::: work
+ env_parallel -S server myecho ::: work
+ env_parallel --env myecho myecho ::: work
+ env_parallel --env myecho -S server myecho ::: work
+
+ alias multiline='echo multiline
+ echo aliases'
+ env_parallel multiline ::: work
+ env_parallel -S server multiline ::: work
+ env_parallel --env multiline multiline ::: work
+ env_parallel --env multiline -S server multiline ::: work
+
+=item functions
+
+ myfunc() { echo functions $*; }
+ env_parallel myfunc ::: work
+ env_parallel -S server myfunc ::: work
+ env_parallel --env myfunc myfunc ::: work
+ env_parallel --env myfunc -S server myfunc ::: work
+
+=item variables
+
+ myvar=variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays
+
+ myarray=(arrays work, too)
+ env_parallel -k echo '${myarray[{}]}' ::: 0 1 2
+ env_parallel -k -S server echo '${myarray[{}]}' ::: 0 1 2
+ env_parallel -k --env myarray echo '${myarray[{}]}' ::: 0 1 2
+ env_parallel -k --env myarray -S server \
+ echo '${myarray[{}]}' ::: 0 1 2
+
+=back
+
+
+=head2 pdksh
+
+=head3 Installation
+
+Put this in $HOME/.profile:
+
+ source env_parallel.pdksh
+
+E.g. by doing:
+
+ echo 'source env_parallel.pdksh' >> $HOME/.profile
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, alias, function, or
+array with the given name. Multiple B<--env>s can be given.
+
+B<--session> is supported.
+
+=over 8
+
+=item aliases
+
+ alias myecho="echo aliases";
+ env_parallel myecho ::: work;
+ env_parallel -S server myecho ::: work;
+ env_parallel --env myecho myecho ::: work;
+ env_parallel --env myecho -S server myecho ::: work
+
+=item functions
+
+ myfunc() { echo functions $*; };
+ env_parallel myfunc ::: work;
+ env_parallel -S server myfunc ::: work;
+ env_parallel --env myfunc myfunc ::: work;
+ env_parallel --env myfunc -S server myfunc ::: work
+
+=item variables
+
+ myvar=variables;
+ env_parallel echo "\$myvar" ::: work;
+ env_parallel -S server echo "\$myvar" ::: work;
+ env_parallel --env myvar echo "\$myvar" ::: work;
+ env_parallel --env myvar -S server echo "\$myvar" ::: work
+
+=item arrays
+
+ myarray=(arrays work, too);
+ env_parallel -k echo "\${myarray[{}]}" ::: 0 1 2;
+ env_parallel -k -S server echo "\${myarray[{}]}" ::: 0 1 2;
+ env_parallel -k --env myarray echo "\${myarray[{}]}" ::: 0 1 2;
+ env_parallel -k --env myarray -S server \
+ echo "\${myarray[{}]}" ::: 0 1 2
+
+=back
+
+
+=head2 sh
+
+=head3 Installation
+
+Put this in $HOME/.profile:
+
+ . env_parallel.sh
+
+E.g. by doing:
+
+ echo '. env_parallel.sh' >> $HOME/.profile
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, or alias with the
+given name. Multiple B<--env>s can be given.
+
+B<--session> is supported.
+
+=over 8
+
+=item aliases
+
+ sh does not support aliases.
+
+=item functions
+
+ myfunc() { echo functions $*; }
+ env_parallel myfunc ::: work
+ env_parallel -S server myfunc ::: work
+ env_parallel --env myfunc myfunc ::: work
+ env_parallel --env myfunc -S server myfunc ::: work
+
+=item variables
+
+ myvar=variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays
+
+ sh does not support arrays.
+
+=back
+
+
+=head2 tcsh
+
+B<env_parallel> for B<tcsh> breaks B<$PARALLEL>, so do not use
+B<$PARALLEL>.
+
+=head3 Installation
+
+Put this in $HOME/.tcshrc:
+
+ source `which env_parallel.tcsh`
+
+E.g. by doing:
+
+ echo 'source `which env_parallel.tcsh`' >> $HOME/.tcshrc
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, alias, or
+array with the given name. Multiple B<--env>s can be given.
+
+=over 8
+
+=item aliases
+
+ alias myecho 'echo aliases'
+ env_parallel myecho ::: work
+ env_parallel -S server myecho ::: work
+ env_parallel --env myecho myecho ::: work
+ env_parallel --env myecho -S server myecho ::: work
+
+=item functions
+
+Not supported by B<tcsh>.
+
+=item variables
+
+ set myvar=variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays with no special chars
+
+ set myarray=(arrays work, too)
+ env_parallel -k echo \$'{myarray[{}]}' ::: 1 2 3
+ env_parallel -k -S server echo \$'{myarray[{}]}' ::: 1 2 3
+ env_parallel -k --env myarray echo \$'{myarray[{}]}' ::: 1 2 3
+ env_parallel -k --env myarray -S server \
+ echo \$'{myarray[{}]}' ::: 1 2 3
+
+=back
+
+
+=head2 Zsh
+
+=head3 Installation
+
+Put this in $HOME/.zshrc:
+
+ . env_parallel.zsh
+
+E.g. by doing:
+
+  echo '. env_parallel.zsh' >> $HOME/.zshrc
+
+=head3 Supported use
+
+B<--env> is supported to export only the variable, alias, function, or
+array with the given name. Multiple B<--env>s can be given.
+
+B<--session> is supported.
+
+=over 8
+
+=item aliases
+
+ alias myecho='echo aliases'
+ env_parallel myecho ::: work
+ env_parallel -S server myecho ::: work
+ env_parallel --env myecho myecho ::: work
+ env_parallel --env myecho -S server myecho ::: work
+
+ alias multiline='echo multiline
+ echo aliases'
+ env_parallel multiline ::: work
+ env_parallel -S server multiline ::: work
+ env_parallel --env multiline multiline ::: work
+ env_parallel --env multiline -S server multiline ::: work
+
+=item functions
+
+ myfunc() { echo functions $*; }
+ env_parallel myfunc ::: work
+ env_parallel -S server myfunc ::: work
+ env_parallel --env myfunc myfunc ::: work
+ env_parallel --env myfunc -S server myfunc ::: work
+
+=item variables
+
+ myvar=variables
+ env_parallel echo '$myvar' ::: work
+ env_parallel -S server echo '$myvar' ::: work
+ env_parallel --env myvar echo '$myvar' ::: work
+ env_parallel --env myvar -S server echo '$myvar' ::: work
+
+=item arrays
+
+ myarray=(arrays work, too)
+ env_parallel -k echo '${myarray[{}]}' ::: 1 2 3
+ env_parallel -k -S server echo '${myarray[{}]}' ::: 1 2 3
+ env_parallel -k --env myarray echo '${myarray[{}]}' ::: 1 2 3
+ env_parallel -k --env myarray -S server \
+ echo '${myarray[{}]}' ::: 1 2 3
+
+=back
+
+
+=head1 EXIT STATUS
+
+Same as GNU B<parallel>.
+
+
+=head1 AUTHOR
+
+When using GNU B<env_parallel> for a publication please cite:
+
+O. Tange (2018): GNU Parallel 2018, March 2018, ISBN 9781387509881,
+DOI: 10.5281/zenodo.1146014.
+
+This helps fund further development, and it won't cost you a cent.
+If you pay 10000 EUR you should feel free to use GNU Parallel without citing.
+
+Copyright (C) 2007-10-18 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2008-2010 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+
+=head1 LICENSE
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+at your option any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this
+documentation under the terms of the GNU Free Documentation License,
+Version 1.3 or any later version published by the Free Software
+Foundation; with no Invariant Sections, with no Front-Cover Texts, and
+with no Back-Cover Texts. A copy of the license is included in the
+file LICENSES/GFDL-1.3-or-later.txt.
+
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 2
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=back
+
+=over 9
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file
+LICENSES/CC-BY-SA-4.0.txt.
+
+
+=head1 DEPENDENCIES
+
+B<env_parallel> uses GNU B<parallel>.
+
+
+=head1 SEE ALSO
+
+B<parallel>(1), B<ash>(1), B<bash>(1), B<csh>(1), B<dash>(1),
+B<fish>(1), B<ksh>(1), B<mksh>(1), B<pdksh>(1), B<tcsh>(1), B<zsh>(1).
+
+
+=cut
diff --git a/src/env_parallel.sh b/src/env_parallel.sh
new file mode 100755
index 0000000..62c4729
--- /dev/null
+++ b/src/env_parallel.sh
@@ -0,0 +1,636 @@
+#!/usr/bin/env sh
+
+# This file must be sourced in sh/ash/dash/bash/ksh/mksh/zsh:
+#
+# . env_parallel.sh
+# source env_parallel.ash
+# source env_parallel.dash
+# source env_parallel.bash
+# source env_parallel.ksh
+# source env_parallel.mksh
+# source env_parallel.zsh
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck disable=SC2006
+
+env_parallel() {
+ # env_parallel.{sh,ash,dash,bash,ksh,mksh,zsh}
+
+ # Check shell dialect
+ if [ -n "$BASH_VERSION" ]; then
+ _shell_DIALECT=bash
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=_prefix_PARALLEL_ENV_bash
+ elif [ -n "$ZSH_VERSION" ]; then
+ _shell_DIALECT=zsh
+ _eval_needed=true
+ _prefix_PARALLEL_ENV=false
+ elif [ -n "$KSH_VERSION" ]; then
+ _shell_DIALECT=ksh
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ else
+ # Dash/ash - can these be detected better?
+ _shell_DIALECT="sh"
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ fi
+ _names_of_ALIASES() {
+ _names_of_ALIASES_$_shell_DIALECT
+ }
+ _names_of_ALIASES_sh() {
+ # alias fails on Unixware 5
+ for _i in `alias 2>/dev/null | perl -ne 's/^alias //;s/^(\S+)=.*/$1/ && print' 2>/dev/null`; do
+ # Check if this name really is an alias
+ # or just part of a multiline alias definition
+ if alias "$_i" >/dev/null 2>/dev/null; then
+ echo "$_i"
+ fi
+ done
+ }
+ _names_of_ALIASES_bash() {
+ # No aliases will return false. This error should be ignored.
+ # shellcheck disable=SC3044
+ compgen -a || true
+ }
+ _names_of_ALIASES_ksh() {
+ alias | perl -pe 's/=.*//'
+ }
+ _names_of_ALIASES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)aliases}
+ }
+ _bodies_of_ALIASES() {
+ _bodies_of_ALIASES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_ALIASES_sh() {
+ # alias may return:
+ # myalias='definition' (GNU/Linux ash)
+ # alias myalias='definition' (FreeBSD ash)
+ # so remove 'alias ' from first line
+ for _i in "$@"; do
+ echo 'alias '"`alias "$_i" | perl -pe '1..1 and s/^alias //'`"
+ done
+ }
+ _bodies_of_ALIASES_bash() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ # shellcheck disable=SC2046
+ if [ $(alias "$_i" | wc -l) = 1 ] ; then
+ true Alias is a single line. Good.
+ else
+ _warning_PAR "Alias '$_i' contains newline."
+ _warning_PAR "Make sure the command has at least one newline after '$_i'."
+ _warning_PAR "See BUGS in 'man env_parallel'."
+ fi
+ done
+ alias "$@"
+ }
+ _bodies_of_ALIASES_ksh() {
+ alias "$@" | perl -pe 's/^/alias /;
+ sub warning { print STDERR "env_parallel: Warning: @_\n"; }
+ if(/^alias (\S+)=\$.*\\n/) {
+ warning("Alias \"$1\" contains newline.");
+ warning("Make sure the command has at least one newline after \"$1\".");
+ warning("See BUGS in \"man env_parallel\".");
+ }'
+
+ }
+ _bodies_of_ALIASES_zsh() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ echo 'alias '"$(alias "$_i")"
+ done
+ }
+ _names_of_FUNCTIONS() {
+ _names_of_FUNCTIONS_$_shell_DIALECT
+ }
+ _names_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ compgen -A function
+ }
+ _names_of_maybe_FUNCTIONS() {
+ set | perl -ne '/^([A-Z_0-9]+)\s*\(\)\s*\{?$/i and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_sh() {
+ # myfunc is a function
+ # shellcheck disable=SC2046
+ LANG=C type `_names_of_maybe_FUNCTIONS` |
+ perl -ne '/^(\S+) is a function$/ and not $seen{$1}++ and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_ksh() {
+ # shellcheck disable=SC3044
+ typeset +f | perl -pe 's/\(\).*//; s/ .*//;'
+ }
+ _names_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)functions}
+ }
+ _bodies_of_FUNCTIONS() {
+ _bodies_of_FUNCTIONS_$_shell_DIALECT "$@"
+ }
+ _bodies_of_FUNCTIONS_sh() {
+ LANG=C type "$@" | perl -ne '/^(\S+) is a function$/ or print'
+ }
+ _bodies_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _bodies_of_FUNCTIONS_ksh() {
+ functions "$@"
+ }
+ _bodies_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _names_of_VARIABLES() {
+ _names_of_VARIABLES_$_shell_DIALECT
+ }
+ _names_of_VARIABLES_sh() {
+ # This may screw up if variables contain \n and =
+ set | perl -ne 's/^(\S+?)=.*/$1/ and print;'
+ }
+ _names_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ compgen -A variable
+ }
+ _names_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset +p |
+ perl -pe 's/^(type)?set( [-+][a-zA-Z0-9]*)* //; s/(\[\d+\])?=.*//' |
+ uniq
+ }
+ _names_of_VARIABLES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)parameters}
+ }
+ _bodies_of_VARIABLES() {
+ _bodies_of_VARIABLES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_VARIABLES_sh() {
+ # Crappy typeset -p
+ for _i in "$@"
+ do
+ perl -e 'print @ARGV' "$_i="
+ eval echo "\"\$$_i\"" | perl -e '$/=undef; $a=<>; chop($a); print $a' |
+ perl -pe 's/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\202-\377]/\\$&/go;'"s/'/\\\'/g; s/[\n]/'\\n'/go;";
+ echo
+ done
+ }
+ _bodies_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_zsh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _ignore_HARDCODED() {
+ _ignore_HARDCODED_$_shell_DIALECT
+ }
+ _ignore_HARDCODED_sh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_bash() {
+ # Copying $RANDOM will cause it not to be random
+ # The rest cannot be detected as read-only
+ echo '(RANDOM|_|TIMEOUT|GROUPS|FUNCNAME|DIRSTACK|PIPESTATUS|USERNAME|BASHPID|BASH_[A-Z_]+)'
+ }
+ _ignore_HARDCODED_ksh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_zsh() {
+ # These names cannot be detected
+ echo '([-\?\#\!\$\*\@\_0]|zsh_eval_context|ZSH_EVAL_CONTEXT|LINENO|IFS|commands|functions|options|aliases|EUID|EGID|UID|GID|dis_patchars|patchars|terminfo|galiases|keymaps|parameters|jobdirs|dirstack|functrace|funcsourcetrace|zsh_scheduled_events|dis_aliases|dis_reswords|dis_saliases|modules|reswords|saliases|widgets|userdirs|historywords|nameddirs|termcap|dis_builtins|dis_functions|jobtexts|funcfiletrace|dis_galiases|builtins|history|jobstates|funcstack|run-help)'
+ }
+ _ignore_READONLY() {
+ _ignore_READONLY_$_shell_DIALECT
+ }
+ _parse_READONLY() {
+ # shellcheck disable=SC1078,SC1079,SC2026
+ perl -e '@r = map {
+ chomp;
+ # sh on UnixWare: readonly TIMEOUT
+ # ash: readonly var='val'
+ # ksh: var='val'
+ # mksh: PIPESTATUS[0]
+ s/^(readonly )?([^=\[ ]*?)(\[\d+\])?(=.*|)$/$2/ or
+ # bash: declare -ar BASH_VERSINFO=([0]="4" [1]="4")
+ # zsh: typeset -r var='val'
+ s/^\S+\s+\S+\s+(\S[^=]*)(=.*|$)/$1/;
+ $_ } <>;
+ $vars = join "|",map { quotemeta $_ } @r;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ '
+ }
+ _ignore_READONLY_sh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_bash() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_ksh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_zsh() {
+ # shellcheck disable=SC3044
+ typeset -pr | _parse_READONLY
+ }
+ _remove_bad_NAMES() {
+ # Do not transfer vars and funcs from env_parallel
+ # shellcheck disable=SC2006
+ _ignore_RO="`_ignore_READONLY`"
+ # shellcheck disable=SC2006
+ _ignore_HARD="`_ignore_HARDCODED`"
+ # To avoid depending on grep dialect, use Perl version of:
+ # grep -Ev '^(...)$' |
+ perl -ne '/^(
+ PARALLEL_ENV|
+ PARALLEL_TMP|
+ _alias_NAMES|
+ _bodies_of_ALIASES|
+ _bodies_of_FUNCTIONS|
+ _bodies_of_VARIABLES|
+ _error_PAR|
+ _function_NAMES|
+ _get_ignored_VARS|
+ _grep_REGEXP|
+ _ignore_HARD|
+ _ignore_HARDCODED|
+ _ignore_READONLY|
+ _ignore_RO|
+ _ignore_UNDERSCORE|
+ _list_alias_BODIES|
+ _list_function_BODIES|
+ _list_variable_VALUES|
+ _make_grep_REGEXP|
+ _names_of_ALIASES|
+ _names_of_FUNCTIONS|
+ _names_of_VARIABLES|
+ _names_of_maybe_FUNCTIONS|
+ _parallel_exit_CODE|
+ _prefix_PARALLEL_ENV|
+ _prefix_PARALLEL_ENV_bash|
+ _remove_bad_NAMES|
+ _remove_readonly|
+ _variable_NAMES|
+ _warning_PAR|
+ _which_PAR)$/x and next;
+ # Filter names matching --env
+ /^'"$_grep_REGEXP"'$/ or next;
+ /^'"$_ignore_UNDERSCORE"'$/ and next;
+ # Remove readonly variables
+ /^'"$_ignore_RO"'$/ and next;
+ /^'"$_ignore_HARD"'$/ and next;
+ print;'
+ }
+ _prefix_PARALLEL_ENV_bash() {
+ # shellcheck disable=SC3044
+ shopt 2>/dev/null |
+ perl -pe 's:\s+off:;: and s/^/shopt -u /;
+ s:\s+on:;: and s/^/shopt -s /;
+ s:;$:&>/dev/null;:';
+ echo 'shopt -s expand_aliases &>/dev/null';
+ }
+
+ _get_ignored_VARS() {
+ perl -e '
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ }
+ }
+ if($ENV{PARALLEL_IGNORED_NAMES}) {
+ push @ignored_vars, split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ chomp @ignored_vars;
+ }
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ ' -- "$@"
+ }
+
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
+ _make_grep_REGEXP() {
+ perl -e '
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+ ' -- "$@"
+ }
+ _which_PAR() {
+ # type returns:
+ # ll is an alias for ls -l (in ash)
+ # bash is a tracked alias for /bin/bash
+ # true is a shell builtin (in bash)
+ # myfunc is a function (in bash)
+ # myfunc is a shell function (in zsh)
+ # which is /usr/bin/which (in sh, bash)
+ # which is hashed (/usr/bin/which)
+ # gi is aliased to `grep -i' (in bash)
+ # aliased to `alias | /usr/bin/which --tty-only --read-alias --show-dot --show-tilde'
+ # Return 0 if found, 1 otherwise
+ LANG=C type "$@" |
+ perl -pe '$exit += (s/ is an alias for .*// ||
+ s/ is aliased to .*// ||
+ s/ is a function// ||
+ s/ is a shell function// ||
+ s/ is a shell builtin// ||
+ s/.* is hashed .(\S+).$/$1/ ||
+ s/.* is (a tracked alias for )?//);
+ END { exit not $exit }'
+ }
+ _warning_PAR() {
+ echo "env_parallel: Warning: $*" >&2
+ }
+ _error_PAR() {
+ echo "env_parallel: Error: $*" >&2
+ }
+
+ if _which_PAR parallel >/dev/null; then
+ true parallel found in path
+ else
+ # shellcheck disable=SC2016
+ _error_PAR 'parallel must be in $PATH.'
+ return 255
+ fi
+
+ # Grep regexp for vars given by --env
+ # shellcheck disable=SC2006
+ _grep_REGEXP="`_make_grep_REGEXP \"$@\"`"
+ unset _make_grep_REGEXP
+
+ # Deal with --env _
+ # shellcheck disable=SC2006
+ _ignore_UNDERSCORE="`_get_ignored_VARS \"$@\"`"
+ unset _get_ignored_VARS
+
+ # --record-env
+ if perl -e 'exit grep { /^--record-env$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) |
+ cat > "$HOME"/.parallel/ignored_vars
+ return 0
+ fi
+
+ # --session
+ if perl -e 'exit grep { /^--session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Insert ::: between each level of session
+ # so you can pop off the last ::: at --end-session
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`echo \"$PARALLEL_IGNORED_NAMES\";
+ echo :::;
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) | perl -ne '
+ BEGIN{
+ map { $ignored_vars{$_}++ }
+ split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ }
+ chomp;
+ for(split/\s+/) {
+ if(not $ignored_vars{$_}) {
+ print $_,\"\\n\";
+ }
+ }
+ '`"
+ export PARALLEL_IGNORED_NAMES
+ return 0
+ fi
+ if perl -e 'exit grep { /^--end.?session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Pop off last ::: from PARALLEL_IGNORED_NAMES
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`perl -e '
+ $ENV{PARALLEL_IGNORED_NAMES} =~ s/(.*):::.*?$/$1/s;
+ print $ENV{PARALLEL_IGNORED_NAMES}
+ '`"
+ return 0
+ fi
+ # Grep alias names
+ # shellcheck disable=SC2006
+ _alias_NAMES="`_names_of_ALIASES | _remove_bad_NAMES | xargs echo`"
+ _list_alias_BODIES="_bodies_of_ALIASES $_alias_NAMES"
+ if [ "$_alias_NAMES" = "" ] ; then
+ # no aliases selected
+ _list_alias_BODIES="true"
+ fi
+ unset _alias_NAMES
+
+ # Grep function names
+ # shellcheck disable=SC2006
+ _function_NAMES="`_names_of_FUNCTIONS | _remove_bad_NAMES | xargs echo`"
+ _list_function_BODIES="_bodies_of_FUNCTIONS $_function_NAMES"
+ if [ "$_function_NAMES" = "" ] ; then
+ # no functions selected
+ _list_function_BODIES="true"
+ fi
+ unset _function_NAMES
+
+ # Grep variable names
+ # shellcheck disable=SC2006
+ _variable_NAMES="`_names_of_VARIABLES | _remove_bad_NAMES | xargs echo`"
+ _list_variable_VALUES="_bodies_of_VARIABLES $_variable_NAMES"
+ if [ "$_variable_NAMES" = "" ] ; then
+ # no variables selected
+ _list_variable_VALUES="true"
+ fi
+ unset _variable_NAMES
+
+ if $_eval_needed ; then
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ eval $_prefix_PARALLEL_ENV;
+ eval $_list_alias_BODIES;
+ eval $_list_function_BODIES;
+ eval $_list_variable_VALUES;
+ `"
+ else
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ $_prefix_PARALLEL_ENV;
+ $_list_alias_BODIES;
+ $_list_function_BODIES;
+ $_list_variable_VALUES;
+ `"
+ fi
+ export PARALLEL_ENV
+ # Free up some env space
+ unset _list_alias_BODIES _list_variable_VALUES _list_function_BODIES
+ unset _bodies_of_ALIASES _bodies_of_VARIABLES _bodies_of_FUNCTIONS
+ unset _names_of_ALIASES _names_of_VARIABLES _names_of_FUNCTIONS
+ unset _ignore_HARDCODED _ignore_READONLY _ignore_UNDERSCORE
+ unset _remove_bad_NAMES _grep_REGEXP _parse_READONLY
+ unset _prefix_PARALLEL_ENV
+ unset _ignore_READONLY_sh _ignore_READONLY_bash
+ unset _ignore_READONLY_ksh _ignore_READONLY_zsh
+ unset _ignore_HARDCODED_sh _ignore_HARDCODED_bash
+ unset _ignore_HARDCODED_ksh _ignore_HARDCODED_zsh
+ unset _bodies_of_ALIASES_ksh _bodies_of_ALIASES_sh
+ unset _bodies_of_ALIASES_zsh _bodies_of_FUNCTIONS_bash
+ unset _bodies_of_FUNCTIONS_ksh _bodies_of_FUNCTIONS_sh
+ unset _bodies_of_FUNCTIONS_zsh _bodies_of_VARIABLES_bash
+ unset _bodies_of_VARIABLES_ksh _bodies_of_VARIABLES_sh
+ unset _bodies_of_VARIABLES_zsh
+ unset _names_of_ALIASES _names_of_ALIASES_bash
+ unset _names_of_ALIASES_ksh _names_of_ALIASES_sh
+ unset _names_of_ALIASES_zsh _names_of_FUNCTIONS
+ unset _names_of_FUNCTIONS_bash _names_of_FUNCTIONS_ksh
+ unset _names_of_FUNCTIONS_sh _names_of_FUNCTIONS_zsh
+ unset _names_of_VARIABLES _names_of_VARIABLES_bash
+ unset _names_of_VARIABLES_ksh _names_of_VARIABLES_sh
+ unset _names_of_VARIABLES_zsh _names_of_maybe_FUNCTIONS
+
+ # Test if environment is too big by running 'true'
+ # shellcheck disable=SC2006,SC2092
+ if `_which_PAR true` >/dev/null 2>/dev/null ; then
+ parallel "$@"
+ _parallel_exit_CODE=$?
+ # Clean up variables/functions
+ unset PARALLEL_ENV
+ unset _which_PAR _which_TRUE
+ unset _warning_PAR _error_PAR
+ # Unset _parallel_exit_CODE before return
+ eval "unset _parallel_exit_CODE; return $_parallel_exit_CODE"
+ else
+ unset PARALLEL_ENV;
+ _error_PAR "Your environment is too big."
+ _error_PAR "You can try 3 different approaches:"
+ _error_PAR "1. Run 'env_parallel --session' before you set"
+ _error_PAR " variables or define functions."
+ _error_PAR "2. Use --env and only mention the names to copy."
+ _error_PAR "3. Try running this in a clean environment once:"
+ _error_PAR " env_parallel --record-env"
+ _error_PAR " And then use '--env _'"
+ _error_PAR "For details see: man env_parallel"
+ return 255
+ fi
+}
+
+parset() {
+ _parset_PARALLEL_PRG=parallel
+ _parset_main "$@"
+}
+
+env_parset() {
+ _parset_PARALLEL_PRG=env_parallel
+ _parset_main "$@"
+}
+
+_parset_main() {
+ # If $1 contains ',' or space:
+ # Split on , to get the destination variable names
+ # If $1 is a single destination variable name:
+ # Treat it as the name of an array
+ #
+ # # Create array named myvar
+ # parset myvar echo ::: {1..10}
+ # echo ${myvar[5]}
+ #
+ # # Put output into $var_a $var_b $var_c
+ # varnames=(var_a var_b var_c)
+ # parset "${varnames[*]}" echo ::: {1..3}
+ # echo $var_c
+ #
+ # # Put output into $var_a4 $var_b4 $var_c4
+ # parset "var_a4 var_b4 var_c4" echo ::: {1..3}
+ # echo $var_c4
+
+ _parset_NAME="$1"
+ if [ "$_parset_NAME" = "" ] ; then
+ echo parset: Error: No destination variable given. >&2
+ echo parset: Error: Try: >&2
+ echo parset: Error: ' ' parset myarray echo ::: foo bar >&2
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--help" ] ; then
+ echo parset: Error: Usage: >&2
+ echo parset: Error: ' ' parset varname GNU Parallel options and command >&2
+ echo
+ parallel --help
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--version" ] ; then
+ # shellcheck disable=SC2006
+ echo "parset 20240222 (GNU parallel `parallel --minversion 1`)"
+ echo "Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software"
+ echo "Foundation, Inc."
+ echo "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>"
+ echo "This is free software: you are free to change and redistribute it."
+ echo "GNU parallel comes with no warranty."
+ echo
+ echo "Web site: https://www.gnu.org/software/parallel"
+ echo
+ echo "When using programs that use GNU Parallel to process data for publication"
+ echo "please cite as described in 'parallel --citation'."
+ echo
+ return 255
+ fi
+ shift
+
+ # Bash: declare -A myassoc=( )
+ # Zsh: typeset -A myassoc=( )
+ # Ksh: typeset -A myassoc=( )
+ # shellcheck disable=SC2039,SC2169,SC3044
+ if (typeset -p "$_parset_NAME" 2>/dev/null; echo) |
+ perl -ne 'exit not (/^declare[^=]+-A|^typeset[^=]+-A/)' ; then
+ # This is an associative array
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset assoc,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ else
+ # This is a normal array or a list of variable names
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset var,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ fi
+}
diff --git a/src/env_parallel.tcsh b/src/env_parallel.tcsh
new file mode 100755
index 0000000..dcb7a0b
--- /dev/null
+++ b/src/env_parallel.tcsh
@@ -0,0 +1,142 @@
+#!/usr/bin/env tcsh
+
+# This file must be sourced in tcsh:
+#
+# source `which env_parallel.tcsh`
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set _parallel_exit_CODE=0
+if ("`alias env_parallel`" == '' || ! $?PARALLEL_CSH) then
+ # Activate alias
+ alias env_parallel '(setenv PARALLEL_CSH "\!*"; source `which env_parallel.csh`)'
+else
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
+
+ # simple 'tempfile': Return nonexisting filename: /tmp/parXXXXX
+ alias _tempfile 'perl -e do\{\$t\=\"/tmp/par\".join\"\",map\{\(0..9,\"a\"..\"z\",\"A\"..\"Z\"\)\[rand\(62\)\]\}\(1..5\)\;\}while\(-e\$t\)\;print\"\$t\\n\"'
+ set _tMpscRIpt=`_tempfile`
+
+ cat <<'EOF' > $_tMpscRIpt
+ #!/usr/bin/perl
+
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+'EOF'
+ set _grep_REGEXP="`perl $_tMpscRIpt -- $PARALLEL_CSH`"
+
+ # Deal with --env _
+ cat <<'EOF' > $_tMpscRIpt
+ #!/usr/bin/perl
+
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ }
+ }
+'EOF'
+ set _ignore_UNDERSCORE="`perl $_tMpscRIpt -- $PARALLEL_CSH`"
+ rm $_tMpscRIpt
+
+ # Get the scalar and array variable names
+ set _vARnAmES=(`set | perl -ne 's/\s.*//; /^(#|_|killring|prompt2|command|PARALLEL_ENV|PARALLEL_TMP)$/ and next; /^'"$_grep_REGEXP"'$/ or next; /^'"$_ignore_UNDERSCORE"'$/ and next; print'`)
+
+ # Make a tmpfile for the variable definitions
+ set _tMpvARfILe=`_tempfile`
+ touch $_tMpvARfILe
+ # Make a tmpfile for the variable definitions + alias
+ set _tMpaLLfILe=`_tempfile`
+ foreach _vARnAmE ($_vARnAmES);
+ # These 3 lines break in csh ver. 20110502-3
+ # if not defined: next
+ eval if'(! $?'$_vARnAmE') continue'
+ # if $#myvar <= 1 echo scalar_myvar=$var
+ eval if'(${#'$_vARnAmE'} <= 1) echo scalar_'$_vARnAmE'='\"\$$_vARnAmE\" >> $_tMpvARfILe;
+ # if $#myvar > 1 echo array_myvar=$var
+ eval if'(${#'$_vARnAmE'} > 1) echo array_'$_vARnAmE'="$'$_vARnAmE'"' >> $_tMpvARfILe;
+ end
+ unset _vARnAmE _vARnAmES
+ # shell quote variables (--plain needed due to ignore if $PARALLEL is set)
+ # Convert 'scalar_myvar=...' to 'set myvar=...'
+ # Convert 'array_myvar=...' to 'set array=(...)'
+ cat $_tMpvARfILe | parallel --plain --shellquote | perl -pe 's/^scalar_(\S+).=/set $1=/ or s/^array_(\S+).=(.*)/set $1=($2)/ && s/\\ / /g;' > $_tMpaLLfILe
+ # Cleanup
+ rm $_tMpvARfILe; unset _tMpvARfILe
+
+# ALIAS TO EXPORT ALIASES:
+
+# Quote ' by putting it inside "
+# s/'/'"'"'/g;
+# ' => \047 " => \042
+# s/\047/\047\042\047\042\047/g;
+# Quoted: s/\\047/\\047\\042\\047\\042\\047/g\;
+
+# Remove () from second column
+# s/^(\S+)(\s+)\((.*)\)/\1\2\3/;
+# Quoted: s/\^\(\\S+\)\(\\s+\)\\\(\(.\*\)\\\)/\\1\\2\\3/\;
+
+# Add ' around second column
+# s/^(\S+)(\s+)(.*)/\1\2'\3'/
+# \047 => '
+# s/^(\S+)(\s+)(.*)/\1\2\047\3\047/;
+# Quoted: s/\^\(\\S+\)\(\\s+\)\(.\*\)/\\1\\2\\047\\3\\047/\;
+
+# Quote ! as \!
+# s/\!/\\\!/g;
+# Quoted: s/\\\!/\\\\\\\!/g;
+
+# Prepend with "\nalias "
+# s/^/\001alias /;
+# Quoted: s/\^/\\001alias\ /\;
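+# Worked sketch (not from upstream): an `alias` listing line such as
+#   gi  (grep -i !*)
+# comes out of the pipeline below as
+#   \001alias gi  'grep -i \!*'
+# where \001 is a newline stand-in that GNU parallel expands later.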
+ alias | \
+ perl -ne '/^'"$_grep_REGEXP"'/ or next; /^'"$_ignore_UNDERSCORE"'[^_a-zA-Z]/ and next; print' | \
+ perl -pe s/\\047/\\047\\042\\047\\042\\047/g\;s/\^\(\\S+\)\(\\s+\)\\\(\(.\*\)\\\)/\\1\\2\\3/\;s/\^\(\\S+\)\(\\s+\)\(.\*\)/\\1\\2\\047\\3\\047/\;s/\^/\\001alias\ /\;s/\\\!/\\\\\\\!/g >> $_tMpaLLfILe
+
+ setenv PARALLEL_ENV "`cat $_tMpaLLfILe; rm $_tMpaLLfILe`";
+ unset _tMpaLLfILe;
+ # Use $PARALLEL_CSH set in calling alias
+ parallel
+ set _parallel_exit_CODE=$status
+ setenv PARALLEL_ENV
+ setenv PARALLEL_CSH
+endif
+(exit $_parallel_exit_CODE)
diff --git a/src/env_parallel.zsh b/src/env_parallel.zsh
new file mode 100755
index 0000000..674d8e6
--- /dev/null
+++ b/src/env_parallel.zsh
@@ -0,0 +1,636 @@
+#!/usr/bin/env zsh
+
+# This file must be sourced in sh/ash/dash/bash/ksh/mksh/zsh:
+#
+# . env_parallel.sh
+# source env_parallel.ash
+# source env_parallel.dash
+# source env_parallel.bash
+# source env_parallel.ksh
+# source env_parallel.mksh
+# source env_parallel.zsh
+#
+# after which 'env_parallel' works
+#
+#
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck disable=SC2006
+
+env_parallel() {
+ # env_parallel.{sh,ash,dash,bash,ksh,mksh,zsh}
+
+ # Check shell dialect
+ if [ -n "$BASH_VERSION" ]; then
+ _shell_DIALECT=bash
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=_prefix_PARALLEL_ENV_bash
+ elif [ -n "$ZSH_VERSION" ]; then
+ _shell_DIALECT=zsh
+ _eval_needed=true
+ _prefix_PARALLEL_ENV=false
+ elif [ -n "$KSH_VERSION" ]; then
+ _shell_DIALECT=ksh
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ else
+ # Dash/ash - can these be detected better?
+ _shell_DIALECT="sh"
+ _eval_needed=false
+ _prefix_PARALLEL_ENV=false
+ fi
+ _names_of_ALIASES() {
+ _names_of_ALIASES_$_shell_DIALECT
+ }
+ _names_of_ALIASES_sh() {
+ # alias fails on Unixware 5
+ for _i in `alias 2>/dev/null | perl -ne 's/^alias //;s/^(\S+)=.*/$1/ && print' 2>/dev/null`; do
+ # Check if this name really is an alias
+ # or just part of a multiline alias definition
+ if alias "$_i" >/dev/null 2>/dev/null; then
+ echo "$_i"
+ fi
+ done
+ }
+ _names_of_ALIASES_bash() {
+ # No aliases will return false. This error should be ignored.
+ # shellcheck disable=SC3044
+ compgen -a || true
+ }
+ _names_of_ALIASES_ksh() {
+ alias | perl -pe 's/=.*//'
+ }
+ _names_of_ALIASES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)aliases}
+ }
+ _bodies_of_ALIASES() {
+ _bodies_of_ALIASES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_ALIASES_sh() {
+ # alias may return:
+ # myalias='definition' (GNU/Linux ash)
+ # alias myalias='definition' (FreeBSD ash)
+ # so remove 'alias ' from first line
+ for _i in "$@"; do
+ echo 'alias '"`alias "$_i" | perl -pe '1..1 and s/^alias //'`"
+ done
+ }
+ _bodies_of_ALIASES_bash() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ # shellcheck disable=SC2046
+ if [ $(alias "$_i" | wc -l) = 1 ] ; then
+ true Alias is a single line. Good.
+ else
+ _warning_PAR "Alias '$_i' contains newline."
+ _warning_PAR "Make sure the command has at least one newline after '$_i'."
+ _warning_PAR "See BUGS in 'man env_parallel'."
+ fi
+ done
+ alias "$@"
+ }
+ _bodies_of_ALIASES_ksh() {
+ alias "$@" | perl -pe 's/^/alias /;
+ sub warning { print STDERR "env_parallel: Warning: @_\n"; }
+ if(/^alias (\S+)=\$.*\\n/) {
+ warning("Alias \"$1\" contains newline.");
+ warning("Make sure the command has at least one newline after \"$1\".");
+ warning("See BUGS in \"man env_parallel\".");
+ }'
+
+ }
+ _bodies_of_ALIASES_zsh() {
+ # shellcheck disable=SC3043
+ local _i
+ for _i in "$@"; do
+ echo 'alias '"$(alias "$_i")"
+ done
+ }
+ _names_of_FUNCTIONS() {
+ _names_of_FUNCTIONS_$_shell_DIALECT
+ }
+ _names_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ compgen -A function
+ }
+ _names_of_maybe_FUNCTIONS() {
+ set | perl -ne '/^([A-Z_0-9]+)\s*\(\)\s*\{?$/i and print "$1\n"'
+ }
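+    # Illustrative sketch (not part of upstream): a `set` output line
+    # "myfunc () {" yields "myfunc"; ordinary variable lines do not match.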
+ _names_of_FUNCTIONS_sh() {
+ # myfunc is a function
+ # shellcheck disable=SC2046
+ LANG=C type `_names_of_maybe_FUNCTIONS` |
+ perl -ne '/^(\S+) is a function$/ and not $seen{$1}++ and print "$1\n"'
+ }
+ _names_of_FUNCTIONS_ksh() {
+ # shellcheck disable=SC3044
+ typeset +f | perl -pe 's/\(\).*//; s/ .*//;'
+ }
+ _names_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)functions}
+ }
+ _bodies_of_FUNCTIONS() {
+ _bodies_of_FUNCTIONS_$_shell_DIALECT "$@"
+ }
+ _bodies_of_FUNCTIONS_sh() {
+ LANG=C type "$@" | perl -ne '/^(\S+) is a function$/ or print'
+ }
+ _bodies_of_FUNCTIONS_bash() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _bodies_of_FUNCTIONS_ksh() {
+ functions "$@"
+ }
+ _bodies_of_FUNCTIONS_zsh() {
+ # shellcheck disable=SC3044
+ typeset -f "$@"
+ }
+ _names_of_VARIABLES() {
+ _names_of_VARIABLES_$_shell_DIALECT
+ }
+ _names_of_VARIABLES_sh() {
+ # This may screw up if variables contain \n and =
+ set | perl -ne 's/^(\S+?)=.*/$1/ and print;'
+ }
+ _names_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ compgen -A variable
+ }
+ _names_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset +p |
+ perl -pe 's/^(type)?set( [-+][a-zA-Z0-9]*)* //; s/(\[\d+\])?=.*//' |
+ uniq
+ }
+ _names_of_VARIABLES_zsh() {
+ # shellcheck disable=SC2086,SC2296
+ print -l ${(k)parameters}
+ }
+ _bodies_of_VARIABLES() {
+ _bodies_of_VARIABLES_$_shell_DIALECT "$@"
+ }
+ _bodies_of_VARIABLES_sh() {
+ # Crappy typeset -p
+ for _i in "$@"
+ do
+ perl -e 'print @ARGV' "$_i="
+ eval echo "\"\$$_i\"" | perl -e '$/=undef; $a=<>; chop($a); print $a' |
+ perl -pe 's/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\202-\377]/\\$&/go;'"s/'/\\\'/g; s/[\n]/'\\n'/go;";
+ echo
+ done
+ }
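+    # Illustrative sketch (not part of upstream): with myvar='my value'
+    # the loop above emits:
+    #   myvar=my\ value
+    # i.e. one NAME=<shell-quoted value> definition per line.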
+ _bodies_of_VARIABLES_bash() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_ksh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _bodies_of_VARIABLES_zsh() {
+ # shellcheck disable=SC3044
+ typeset -p "$@"
+ }
+ _ignore_HARDCODED() {
+ _ignore_HARDCODED_$_shell_DIALECT
+ }
+ _ignore_HARDCODED_sh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_bash() {
+ # Copying $RANDOM will cause it not to be random
+ # The rest cannot be detected as read-only
+ echo '(RANDOM|_|TIMEOUT|GROUPS|FUNCNAME|DIRSTACK|PIPESTATUS|USERNAME|BASHPID|BASH_[A-Z_]+)'
+ }
+ _ignore_HARDCODED_ksh() {
+ # These names cannot be detected
+ echo '(_|TIMEOUT|IFS)'
+ }
+ _ignore_HARDCODED_zsh() {
+ # These names cannot be detected
+ echo '([-\?\#\!\$\*\@\_0]|zsh_eval_context|ZSH_EVAL_CONTEXT|LINENO|IFS|commands|functions|options|aliases|EUID|EGID|UID|GID|dis_patchars|patchars|terminfo|galiases|keymaps|parameters|jobdirs|dirstack|functrace|funcsourcetrace|zsh_scheduled_events|dis_aliases|dis_reswords|dis_saliases|modules|reswords|saliases|widgets|userdirs|historywords|nameddirs|termcap|dis_builtins|dis_functions|jobtexts|funcfiletrace|dis_galiases|builtins|history|jobstates|funcstack|run-help)'
+ }
+ _ignore_READONLY() {
+ _ignore_READONLY_$_shell_DIALECT
+ }
+ _parse_READONLY() {
+ # shellcheck disable=SC1078,SC1079,SC2026
+ perl -e '@r = map {
+ chomp;
+ # sh on UnixWare: readonly TIMEOUT
+ # ash: readonly var='val'
+ # ksh: var='val'
+ # mksh: PIPESTATUS[0]
+ s/^(readonly )?([^=\[ ]*?)(\[\d+\])?(=.*|)$/$2/ or
+ # bash: declare -ar BASH_VERSINFO=([0]="4" [1]="4")
+ # zsh: typeset -r var='val'
+ s/^\S+\s+\S+\s+(\S[^=]*)(=.*|$)/$1/;
+ $_ } <>;
+ $vars = join "|",map { quotemeta $_ } @r;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ '
+ }
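+    # Illustrative sketch (not part of upstream): feeding the lines
+    #   readonly TIMEOUT
+    #   declare -ar BASH_VERSINFO=([0]="5")
+    # into _parse_READONLY prints (TIMEOUT|BASH_VERSINFO).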
+ _ignore_READONLY_sh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_bash() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_ksh() {
+ readonly | _parse_READONLY
+ }
+ _ignore_READONLY_zsh() {
+ # shellcheck disable=SC3044
+ typeset -pr | _parse_READONLY
+ }
+ _remove_bad_NAMES() {
+ # Do not transfer vars and funcs from env_parallel
+ # shellcheck disable=SC2006
+ _ignore_RO="`_ignore_READONLY`"
+ # shellcheck disable=SC2006
+ _ignore_HARD="`_ignore_HARDCODED`"
+ # To avoid depending on grep dialect, use Perl version of:
+ # grep -Ev '^(...)$' |
+ perl -ne '/^(
+ PARALLEL_ENV|
+ PARALLEL_TMP|
+ _alias_NAMES|
+ _bodies_of_ALIASES|
+ _bodies_of_FUNCTIONS|
+ _bodies_of_VARIABLES|
+ _error_PAR|
+ _function_NAMES|
+ _get_ignored_VARS|
+ _grep_REGEXP|
+ _ignore_HARD|
+ _ignore_HARDCODED|
+ _ignore_READONLY|
+ _ignore_RO|
+ _ignore_UNDERSCORE|
+ _list_alias_BODIES|
+ _list_function_BODIES|
+ _list_variable_VALUES|
+ _make_grep_REGEXP|
+ _names_of_ALIASES|
+ _names_of_FUNCTIONS|
+ _names_of_VARIABLES|
+ _names_of_maybe_FUNCTIONS|
+ _parallel_exit_CODE|
+ _prefix_PARALLEL_ENV|
+ _prefix_PARALLEL_ENV_bash|
+ _remove_bad_NAMES|
+ _remove_readonly|
+ _variable_NAMES|
+ _warning_PAR|
+ _which_PAR)$/x and next;
+ # Filter names matching --env
+ /^'"$_grep_REGEXP"'$/ or next;
+ /^'"$_ignore_UNDERSCORE"'$/ and next;
+ # Remove readonly variables
+ /^'"$_ignore_RO"'$/ and next;
+ /^'"$_ignore_HARD"'$/ and next;
+ print;'
+ }
+ _prefix_PARALLEL_ENV_bash() {
+ # shellcheck disable=SC3044
+ shopt 2>/dev/null |
+ perl -pe 's:\s+off:;: and s/^/shopt -u /;
+ s:\s+on:;: and s/^/shopt -s /;
+ s:;$:&>/dev/null;:';
+ echo 'shopt -s expand_aliases &>/dev/null';
+ }
+
+ _get_ignored_VARS() {
+ perl -e '
+ for(@ARGV){
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env=/^--env$/;
+ }
+ if(grep { /^_$/ } @envvar) {
+ if(not open(IN, "<", "$ENV{HOME}/.parallel/ignored_vars")) {
+ print STDERR "parallel: Error: ",
+ "Run \"parallel --record-env\" in a clean environment first.\n";
+ } else {
+ chomp(@ignored_vars = <IN>);
+ }
+ }
+ if($ENV{PARALLEL_IGNORED_NAMES}) {
+ push @ignored_vars, split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ chomp @ignored_vars;
+ }
+ $vars = join "|",map { quotemeta $_ } @ignored_vars;
+ print $vars ? "($vars)" : "(,,nO,,VaRs,,)";
+ ' -- "$@"
+ }
+
+ # Get the --env variables if set
+ # --env _ should be ignored
+ # and convert a b c to (a|b|c)
+ # If --env not set: Match everything (.*)
+ _make_grep_REGEXP() {
+ perl -e '
+ for(@ARGV){
+ /^_$/ and $next_is_env = 0;
+ $next_is_env and push @envvar, split/,/, $_;
+ $next_is_env = /^--env$/;
+ }
+ $vars = join "|",map { quotemeta $_ } @envvar;
+ print $vars ? "($vars)" : "(.*)";
+ ' -- "$@"
+ }
+ _which_PAR() {
+ # type returns:
+ # ll is an alias for ls -l (in ash)
+ # bash is a tracked alias for /bin/bash
+ # true is a shell builtin (in bash)
+ # myfunc is a function (in bash)
+ # myfunc is a shell function (in zsh)
+ # which is /usr/bin/which (in sh, bash)
+ # which is hashed (/usr/bin/which)
+ # gi is aliased to `grep -i' (in bash)
+ # aliased to `alias | /usr/bin/which --tty-only --read-alias --show-dot --show-tilde'
+ # Return 0 if found, 1 otherwise
+ LANG=C type "$@" |
+ perl -pe '$exit += (s/ is an alias for .*// ||
+ s/ is aliased to .*// ||
+ s/ is a function// ||
+ s/ is a shell function// ||
+ s/ is a shell builtin// ||
+ s/.* is hashed .(\S+).$/$1/ ||
+ s/.* is (a tracked alias for )?//);
+ END { exit not $exit }'
+ }
+ _warning_PAR() {
+ echo "env_parallel: Warning: $*" >&2
+ }
+ _error_PAR() {
+ echo "env_parallel: Error: $*" >&2
+ }
+
+ if _which_PAR parallel >/dev/null; then
+ true parallel found in path
+ else
+ # shellcheck disable=SC2016
+ _error_PAR 'parallel must be in $PATH.'
+ return 255
+ fi
+
+ # Grep regexp for vars given by --env
+ # shellcheck disable=SC2006
+ _grep_REGEXP="`_make_grep_REGEXP \"$@\"`"
+ unset _make_grep_REGEXP
+
+ # Deal with --env _
+ # shellcheck disable=SC2006
+ _ignore_UNDERSCORE="`_get_ignored_VARS \"$@\"`"
+ unset _get_ignored_VARS
+
+ # --record-env
+ if perl -e 'exit grep { /^--record-env$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) |
+ cat > "$HOME"/.parallel/ignored_vars
+ return 0
+ fi
+
+ # --session
+ if perl -e 'exit grep { /^--session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Insert ::: between each level of session
+ # so you can pop off the last ::: at --end-session
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`echo \"$PARALLEL_IGNORED_NAMES\";
+ echo :::;
+ (_names_of_ALIASES;
+ _names_of_FUNCTIONS;
+ _names_of_VARIABLES) | perl -ne '
+ BEGIN{
+ map { $ignored_vars{$_}++ }
+ split/\s+/, $ENV{PARALLEL_IGNORED_NAMES};
+ }
+ chomp;
+ for(split/\s+/) {
+ if(not $ignored_vars{$_}) {
+ print $_,\"\\n\";
+ }
+ }
+ '`"
+ export PARALLEL_IGNORED_NAMES
+ return 0
+ fi
+ if perl -e 'exit grep { /^--end.?session$/ } @ARGV' -- "$@"; then
+ true skip
+ else
+ # Pop off last ::: from PARALLEL_IGNORED_NAMES
+ # shellcheck disable=SC2006
+ PARALLEL_IGNORED_NAMES="`perl -e '
+ $ENV{PARALLEL_IGNORED_NAMES} =~ s/(.*):::.*?$/$1/s;
+ print $ENV{PARALLEL_IGNORED_NAMES}
+ '`"
+ return 0
+ fi
+ # Grep alias names
+ # shellcheck disable=SC2006
+ _alias_NAMES="`_names_of_ALIASES | _remove_bad_NAMES | xargs echo`"
+ _list_alias_BODIES="_bodies_of_ALIASES $_alias_NAMES"
+ if [ "$_alias_NAMES" = "" ] ; then
+ # no aliases selected
+ _list_alias_BODIES="true"
+ fi
+ unset _alias_NAMES
+
+ # Grep function names
+ # shellcheck disable=SC2006
+ _function_NAMES="`_names_of_FUNCTIONS | _remove_bad_NAMES | xargs echo`"
+ _list_function_BODIES="_bodies_of_FUNCTIONS $_function_NAMES"
+ if [ "$_function_NAMES" = "" ] ; then
+ # no functions selected
+ _list_function_BODIES="true"
+ fi
+ unset _function_NAMES
+
+ # Grep variable names
+ # shellcheck disable=SC2006
+ _variable_NAMES="`_names_of_VARIABLES | _remove_bad_NAMES | xargs echo`"
+ _list_variable_VALUES="_bodies_of_VARIABLES $_variable_NAMES"
+ if [ "$_variable_NAMES" = "" ] ; then
+ # no variables selected
+ _list_variable_VALUES="true"
+ fi
+ unset _variable_NAMES
+
+ if $_eval_needed ; then
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ eval $_prefix_PARALLEL_ENV;
+ eval $_list_alias_BODIES;
+ eval $_list_function_BODIES;
+ eval $_list_variable_VALUES;
+ `"
+ else
+ # shellcheck disable=SC2006
+ PARALLEL_ENV="`
+ $_prefix_PARALLEL_ENV;
+ $_list_alias_BODIES;
+ $_list_function_BODIES;
+ $_list_variable_VALUES;
+ `"
+ fi
+ export PARALLEL_ENV
+ # Free up some env space
+ unset _list_alias_BODIES _list_variable_VALUES _list_function_BODIES
+ unset _bodies_of_ALIASES _bodies_of_VARIABLES _bodies_of_FUNCTIONS
+ unset _names_of_ALIASES _names_of_VARIABLES _names_of_FUNCTIONS
+ unset _ignore_HARDCODED _ignore_READONLY _ignore_UNDERSCORE
+ unset _remove_bad_NAMES _grep_REGEXP _parse_READONLY
+ unset _prefix_PARALLEL_ENV
+ unset _ignore_READONLY_sh _ignore_READONLY_bash
+ unset _ignore_READONLY_ksh _ignore_READONLY_zsh
+ unset _ignore_HARDCODED_sh _ignore_HARDCODED_bash
+ unset _ignore_HARDCODED_ksh _ignore_HARDCODED_zsh
+ unset _bodies_of_ALIASES_ksh _bodies_of_ALIASES_sh
+ unset _bodies_of_ALIASES_zsh _bodies_of_FUNCTIONS_bash
+ unset _bodies_of_FUNCTIONS_ksh _bodies_of_FUNCTIONS_sh
+ unset _bodies_of_FUNCTIONS_zsh _bodies_of_VARIABLES_bash
+ unset _bodies_of_VARIABLES_ksh _bodies_of_VARIABLES_sh
+ unset _bodies_of_VARIABLES_zsh
+ unset _names_of_ALIASES _names_of_ALIASES_bash
+ unset _names_of_ALIASES_ksh _names_of_ALIASES_sh
+ unset _names_of_ALIASES_zsh _names_of_FUNCTIONS
+ unset _names_of_FUNCTIONS_bash _names_of_FUNCTIONS_ksh
+ unset _names_of_FUNCTIONS_sh _names_of_FUNCTIONS_zsh
+ unset _names_of_VARIABLES _names_of_VARIABLES_bash
+ unset _names_of_VARIABLES_ksh _names_of_VARIABLES_sh
+ unset _names_of_VARIABLES_zsh _names_of_maybe_FUNCTIONS
+
+ # Test if environment is too big by running 'true'
+ # shellcheck disable=SC2006,SC2092
+ if `_which_PAR true` >/dev/null 2>/dev/null ; then
+ parallel "$@"
+ _parallel_exit_CODE=$?
+ # Clean up variables/functions
+ unset PARALLEL_ENV
+ unset _which_PAR _which_TRUE
+ unset _warning_PAR _error_PAR
+ # Unset _parallel_exit_CODE before return
+ eval "unset _parallel_exit_CODE; return $_parallel_exit_CODE"
+ else
+ unset PARALLEL_ENV;
+ _error_PAR "Your environment is too big."
+ _error_PAR "You can try 3 different approaches:"
+ _error_PAR "1. Run 'env_parallel --session' before you set"
+ _error_PAR " variables or define functions."
+ _error_PAR "2. Use --env and only mention the names to copy."
+ _error_PAR "3. Try running this in a clean environment once:"
+ _error_PAR " env_parallel --record-env"
+ _error_PAR " And then use '--env _'"
+ _error_PAR "For details see: man env_parallel"
+ return 255
+ fi
+}
+
+parset() {
+ _parset_PARALLEL_PRG=parallel
+ _parset_main "$@"
+}
+
+env_parset() {
+ _parset_PARALLEL_PRG=env_parallel
+ _parset_main "$@"
+}
+
+_parset_main() {
+ # If $1 contains ',' or space:
+ # Split on , to get the destination variable names
+ # If $1 is a single destination variable name:
+ # Treat it as the name of an array
+ #
+ # # Create array named myvar
+ # parset myvar echo ::: {1..10}
+ # echo ${myvar[5]}
+ #
+ # # Put output into $var_a $var_b $var_c
+ # varnames=(var_a var_b var_c)
+ # parset "${varnames[*]}" echo ::: {1..3}
+ # echo $var_c
+ #
+ # # Put output into $var_a4 $var_b4 $var_c4
+ # parset "var_a4 var_b4 var_c4" echo ::: {1..3}
+ # echo $var_c4
+
+ _parset_NAME="$1"
+ if [ "$_parset_NAME" = "" ] ; then
+ echo parset: Error: No destination variable given. >&2
+ echo parset: Error: Try: >&2
+ echo parset: Error: ' ' parset myarray echo ::: foo bar >&2
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--help" ] ; then
+ echo parset: Error: Usage: >&2
+ echo parset: Error: ' ' parset varname GNU Parallel options and command >&2
+ echo
+ parallel --help
+ return 255
+ fi
+ if [ "$_parset_NAME" = "--version" ] ; then
+ # shellcheck disable=SC2006
+ echo "parset 20240222 (GNU parallel `parallel --minversion 1`)"
+ echo "Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software"
+ echo "Foundation, Inc."
+ echo "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>"
+ echo "This is free software: you are free to change and redistribute it."
+ echo "GNU parallel comes with no warranty."
+ echo
+ echo "Web site: https://www.gnu.org/software/parallel"
+ echo
+ echo "When using programs that use GNU Parallel to process data for publication"
+ echo "please cite as described in 'parallel --citation'."
+ echo
+ return 255
+ fi
+ shift
+
+ # Bash: declare -A myassoc=( )
+ # Zsh: typeset -A myassoc=( )
+ # Ksh: typeset -A myassoc=( )
+ # shellcheck disable=SC2039,SC2169,SC3044
+ if (typeset -p "$_parset_NAME" 2>/dev/null; echo) |
+ perl -ne 'exit not (/^declare[^=]+-A|^typeset[^=]+-A/)' ; then
+ # This is an associative array
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset assoc,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ else
+ # This is a normal array or a list of variable names
+ # shellcheck disable=SC2006
+ eval "`$_parset_PARALLEL_PRG -k --_parset var,"$_parset_NAME" "$@"`"
+ # The eval returns the function!
+ fi
+}
diff --git a/src/niceload b/src/niceload
new file mode 100755
index 0000000..1c6a86d
--- /dev/null
+++ b/src/niceload
@@ -0,0 +1,1173 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2004-2010 Ole Tange, http://ole.tange.dk
+#
+# Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and
+# Free Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+use strict;
+use Getopt::Long;
+$Global::progname="niceload";
+$Global::version = 20240222;
+Getopt::Long::Configure("bundling","require_order");
+get_options_from_array(\@ARGV) || die_usage();
+if($opt::version) {
+ version();
+ exit 0;
+}
+if($opt::help) {
+ help();
+ exit 0;
+}
+if($opt::factor and $opt::suspend) {
+ # You cannot have --suspend and --factor
+ help();
+ exit;
+}
+
+if(not (defined $opt::start_io or defined $opt::run_io
+ or defined $opt::start_load or defined $opt::run_load
+ or defined $opt::start_mem or defined $opt::run_mem
+ or defined $opt::start_noswap or defined $opt::run_noswap
+ or defined $opt::io or defined $opt::load
+ or defined $opt::mem or defined $opt::noswap)) {
+ # Default is --runload=1
+ $opt::run_load = 1;
+}
+
+if(not defined $opt::start_io) { $opt::start_io = $opt::io; }
+if(not defined $opt::run_io) { $opt::run_io = $opt::io; }
+if(not defined $opt::start_load) { $opt::start_load = $opt::load; }
+if(not defined $opt::run_load) { $opt::run_load = $opt::load; }
+if(not defined $opt::start_mem) { $opt::start_mem = $opt::mem; }
+if(not defined $opt::run_mem) { $opt::run_mem = $opt::mem; }
+if(not defined $opt::start_noswap) { $opt::start_noswap = $opt::noswap; }
+if(not defined $opt::run_noswap) { $opt::run_noswap = $opt::noswap; }
+
+if(defined $opt::load) { $opt::load = multiply_binary_prefix($opt::load); }
+if(defined $opt::baseline) { collect_net_baseline(); }
+
+my $limit = Limit->new();
+my $process = Process->new($opt::nice,@ARGV);
+$::exitstatus = 0;
+if(@opt::prg) {
+ # Find all pids of prg
+ my($children_of, $parent_of, $name_of) = pid_table();
+ my @exact_name_pids;
+ my @substr_name_pids;
+ for my $name (@opt::prg) {
+ push(@exact_name_pids,
+ grep { index($name_of->{$_},$name) == 0 and $_ } keys %$name_of);
+ push(@substr_name_pids,
+ grep { index($name_of->{$_},$name) != -1 and $_ } keys %$name_of);
+ }
+ # Remove current pid
+ @exact_name_pids = grep { $_ != $$ } @exact_name_pids;
+ @substr_name_pids = grep { $_ != $$ } @substr_name_pids;
+ my @pids;
+ if(@exact_name_pids) {
+ @pids = @exact_name_pids;
+ } elsif(@substr_name_pids) {
+ warning("@opt::prg no exact matches. Using substrings.");
+ my %name_pids;
+ for(sort @substr_name_pids) {
+ # If the process has run for long, then time column will
+ # enter the name, so remove leading digits
+ $name_of->{$_} =~ s/^\d+ //;
+ # Remove arguments
+ $name_of->{$_} =~ s/ .*//;
+ push @{$name_pids{$name_of->{$_}}},$_;
+ }
+ warning("Niceloading",
+ map { "$_ (".(join" ",sort @{$name_pids{$_}}).")" } keys %name_pids
+ );
+ @pids = @substr_name_pids;
+ } else {
+ error("@opt::prg no matches.");
+ exit(1);
+ }
+ $process->set_pid(@pids);
+ $::resume_process = $process;
+ $SIG{TERM} = $SIG{INT} = \&resume;
+} elsif(@opt::pid) {
+ # Support --pid 3567,25678
+ @opt::pid = map { split /,/, $_ } @opt::pid;
+ $process->set_pid(@opt::pid);
+ $::resume_process = $process;
+ $SIG{TERM} = $SIG{INT} = \&resume;
+} elsif (@ARGV) {
+ # Wait until limit is below start_limit and run_limit
+ while($limit->over_start_limit()
+ or
+ ($limit->hard() and $limit->over_run_limit())) {
+ $limit->sleep_for_recheck();
+ }
+ $process->start();
+}
+
+while($process->is_alive()) {
+ if($limit->over_run_limit()) {
+ $process->suspend();
+ $limit->sleep_for_recheck();
+ if(not $limit->hard()) {
+ $process->resume();
+ $limit->sleep_while_running();
+ }
+ } else {
+ $process->resume();
+ $limit->sleep_while_running();
+ }
+}
+
+exit($::exitstatus);
+
+{
+ my %pid_parentpid_cmd;
+
+ sub pid_table {
+ # Returns:
+ # %children_of = { pid -> children of pid }
+ # %parent_of = { pid -> pid of parent }
+ # %name_of = { pid -> commandname }
+
+ if(not %pid_parentpid_cmd) {
+ # Filter for SysV-style `ps`
+ my $sysv = q( ps -ef | perl -ane '1..1 and /^(.*)CO?MM?A?N?D/ and $s=length $1;).
+ q(s/^.{$s}//; print "@F[1,2] $_"' );
+	# Crazy msys: ' is not accepted on the cmd line, but " is treated as '
+ my $msys = q( ps -ef | perl -ane "1..1 and /^(.*)CO?MM?A?N?D/ and $s=length $1;).
+ q(s/^.{$s}//; print qq{@F[1,2] $_}" );
+ # BSD-style `ps`
+ my $bsd = q(ps -o pid,ppid,command -ax);
+ %pid_parentpid_cmd =
+ (
+ 'aix' => $sysv,
+ 'cygwin' => $sysv,
+ 'darwin' => $bsd,
+ 'dec_osf' => $sysv,
+ 'dragonfly' => $bsd,
+ 'freebsd' => $bsd,
+ 'gnu' => $sysv,
+ 'hpux' => $sysv,
+ 'linux' => $sysv,
+ 'mirbsd' => $bsd,
+ 'msys' => $msys,
+ 'MSWin32' => $sysv,
+ 'netbsd' => $bsd,
+ 'nto' => $sysv,
+ 'openbsd' => $bsd,
+ 'solaris' => $sysv,
+ 'svr5' => $sysv,
+ 'syllable' => "echo ps not supported",
+ );
+ }
+ $pid_parentpid_cmd{$^O} or ::die_bug("pid_parentpid_cmd for $^O missing");
+
+ my (@pidtable,%parent_of,%children_of,%name_of);
+ # Table with pid -> children of pid
+ @pidtable = `$pid_parentpid_cmd{$^O}`;
+ my $p=$$;
+ for (@pidtable) {
+ # must match: 24436 21224 busybox ash
+ # must match: 24436 21224 <<empty on MacOSX running cubase>>
+ # or: perl -e 'while($0=" "){}'
+ if(/^\s*(\S+)\s+(\S+)\s+(\S+.*)/
+ or
+ $^O eq "darwin" and /^\s*(\S+)\s+(\S+)\s+()$/) {
+ $parent_of{$1} = $2;
+ push @{$children_of{$2}}, $1;
+ $name_of{$1} = $3;
+ } else {
+ ::die_bug("pidtable format: $_");
+ }
+ }
+ return(\%children_of, \%parent_of, \%name_of);
+ }
+}
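+# Usage sketch (not part of upstream):
+#   my ($children_of, $parent_of, $name_of) = pid_table();
+#   # $name_of->{$$} is this process's command line;
+#   # @{$children_of->{$$}} holds the pids of its direct children.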
+
+sub resume {
+ $::resume_process->resume();
+ exit(0);
+}
+
+sub status {
+ my @w = @_;
+ my $fh = *STDERR;
+ print $fh @w;
+ flush $fh;
+}
+
+sub warning {
+ my @w = @_;
+ my $prog = $Global::progname || "niceload";
+ status(map { ($prog, ": Warning: ", $_, "\n"); } @w);
+}
+
+sub error {
+ my @w = @_;
+ my $prog = $Global::progname || "niceload";
+ status(map { ($prog, ": Error: ", $_, "\n"); } @w);
+}
+
+sub uniq {
+ # Remove duplicates and return unique values
+ return keys %{{ map { $_ => 1 } @_ }};
+}
+
+sub multiply_binary_prefix {
+ # Evaluate numbers with binary prefix
+ # k=10^3, M=10^6, G=10^9, T=10^12, P=10^15, E=10^18, Z=10^21, Y=10^24
+ # K=Ki=2^10 and the lowercase i-forms are 1024-based:
+ # ki/mi/gi/ti/pi/ei/zi/yi = 2^10/2^20/2^30/2^40/2^50/2^60/2^70/2^80
+ # 13G = 13*1000*1000*1000 = 13000000000; 13K = 13*1024 = 13312
+ my $s = shift;
+ $s =~ s/k/*1000/g;
+ $s =~ s/M/*1000*1000/g;
+ $s =~ s/G/*1000*1000*1000/g;
+ $s =~ s/T/*1000*1000*1000*1000/g;
+ $s =~ s/P/*1000*1000*1000*1000*1000/g;
+ $s =~ s/E/*1000*1000*1000*1000*1000*1000/g;
+ $s =~ s/Z/*1000*1000*1000*1000*1000*1000*1000/g;
+ $s =~ s/Y/*1000*1000*1000*1000*1000*1000*1000*1000/g;
+ $s =~ s/X/*1000*1000*1000*1000*1000*1000*1000*1000*1000/g;
+
+ $s =~ s/Ki?/*1024/gi;
+ $s =~ s/Mi?/*1024*1024/gi;
+ $s =~ s/Gi?/*1024*1024*1024/gi;
+ $s =~ s/Ti?/*1024*1024*1024*1024/gi;
+ $s =~ s/Pi?/*1024*1024*1024*1024*1024/gi;
+ $s =~ s/Ei?/*1024*1024*1024*1024*1024*1024/gi;
+ $s =~ s/Zi?/*1024*1024*1024*1024*1024*1024*1024/gi;
+ $s =~ s/Yi?/*1024*1024*1024*1024*1024*1024*1024*1024/gi;
+ $s =~ s/Xi?/*1024*1024*1024*1024*1024*1024*1024*1024*1024/gi;
+ $s = eval $s;
+ return $s;
+}
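+# Examples (sketch, not part of upstream; note the substitution order above):
+#   multiply_binary_prefix("2K")    => 2*1024 = 2048
+#   multiply_binary_prefix("1k")    => 1*1000 = 1000
+#   multiply_binary_prefix("512mi") => 512*1024*1024 = 536870912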
+
+sub get_options_from_array {
+ # Run GetOptions on @array
+ # Returns:
+ # true if parsing worked
+ # false if parsing failed
+ # @array is changed
+ my $array_ref = shift;
+ # A bit of shuffling of @ARGV needed as GetOptionsFromArray is not
+ # supported everywhere
+ my @save_argv;
+ my $this_is_ARGV = (\@::ARGV == $array_ref);
+ if(not $this_is_ARGV) {
+ @save_argv = @::ARGV;
+ @::ARGV = @{$array_ref};
+ }
+ my @retval = GetOptions
+ ("debug|D" => \$opt::debug,
+ "factor|f=s" => \$opt::factor,
+ "hard|H" => \$opt::hard,
+ "soft|S" => \$opt::soft,
+ "sensor=s" => \$opt::sensor,
+
+ "si|sio|startio|start-io=s" => \$opt::start_io,
+ "ri|rio|runio|run-io=s" => \$opt::run_io,
+ "io|I=s" => \$opt::io,
+
+ "sl|startload|start-load=s" => \$opt::start_load,
+ "rl|runload|run-load=s" => \$opt::run_load,
+ "load|L|l=s" => \$opt::load,
+
+ "sm|startmem|start-mem=s" => \$opt::start_mem,
+ "rm|runmem|run-mem=s" => \$opt::run_mem,
+ "mem|M=s" => \$opt::mem,
+
+ "sn|startnoswap|start-noswap|start-no-swap" => \$opt::start_noswap,
+ "rn|runnoswap|run-noswap|run-no-swap" => \$opt::run_noswap,
+ "noswap|N" => \$opt::noswap,
+
+ "battery|B" => \$opt::battery,
+ "net" => \$opt::net,
+ "nethops=i" => \$opt::nethops,
+ "baseline" => \$opt::baseline,
+
+ "nice|n=i" => \$opt::nice,
+ "program|prg=s" => \@opt::prg,
+ "process|pid|p=s" => \@opt::pid,
+ "suspend|s=s" => \$opt::suspend,
+ "recheck|t=s" => \$opt::recheck,
+ "quote|q" => \$opt::quote,
+ "help|h" => \$opt::help,
+ "verbose|v" => \$opt::verbose,
+ "version|V" => \$opt::version,
+ );
+ if($opt::battery) {
+ # niceload -l -1 --sensor \
+ # 'cat /sys/class/power_supply/BAT0/status \
+ # /proc/acpi/battery/BAT0/state 2>/dev/null |
+ # grep -i -q discharging; echo $?'
+ $opt::sensor = ('cat /sys/class/power_supply/BAT0/status '.
+ '/proc/acpi/battery/BAT0/state 2>/dev/null | '.
+ 'grep -i -q discharging; echo $?');
+ $opt::load = -1;
+ }
+ if($opt::net) {
+ $opt::nethops ||= 3;
+ }
+ if($opt::nethops) {
+ # niceload -l 0.01 --sensor 'netsensor_script'
+ $opt::sensor = netsensor_script($opt::nethops);
+ $opt::load ||= 0.01;
+ }
+ if(not $this_is_ARGV) {
+ @{$array_ref} = @::ARGV;
+ @::ARGV = @save_argv;
+ }
+ return @retval;
+}
+
+sub shell_quote_scalar {
+ # Quote for other shells
+ my $a = $_[0];
+ if(defined $a) {
+ # zsh wants '=' quoted
+ # Solaris sh wants ^ quoted.
+ # $a =~ s/([\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\>\<\~\|\; \"\!\$\&\'\202-\377])/\\$1/g;
+ # This is 1% faster than the above
+ if(($a =~ s/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\'\202-\377]/\\$&/go)
+ +
+ # quote newline as '\n'
+ ($a =~ s/[\n]/'\n'/go)) {
+ # A string was replaced
+ # No need to test for "" or \0
+ } elsif($a eq "") {
+ $a = "''";
+ } elsif($a eq "\0") {
+ $a = "";
+ }
+ }
+ return $a;
+}
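+# Example (sketch, not part of upstream): shell_quote_scalar("a b'c")
+# returns a\ b\'c; the space and quote are backslashed so the result is
+# safe to embed in an sh/zsh command line.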
+
+sub die_usage {
+ help();
+ exit 1;
+}
+
+
+sub help {
+ print q{
+Usage:
+ niceload [-v] [-n niceness] [-L loadavg] [-I io] [-N] [-M mem]
+ [-s suspend_sec|-f factor] [-H] [-S]
+ command or -p pid
+};
+}
+
+
+sub die_bug {
+ my $bugid = shift;
+ print STDERR
+ ("$Global::progname: This should not happen. You have found a bug.\n",
+ "Please contact <parallel\@gnu.org> and include:\n",
+ "* The version number: $Global::version\n",
+ "* The bugid: $bugid\n",
+ "* The command line being run\n",
+ "* The files being read (put the files on a webserver if they are big)\n",
+ "\n",
+ "If you get the error on smaller/fewer files, please include those instead.\n");
+ exit(255);
+}
+
+sub now {
+ # Returns time since epoch in seconds with 3 decimals
+ # Uses:
+ # @Global::use
+ # Returns:
+ # $time = time now with millisecond accuracy
+ if(not $Global::use{"Time::HiRes"}) {
+ if(eval "use Time::HiRes qw ( time );") {
+ eval "sub TimeHiRestime { return Time::HiRes::time };";
+ } else {
+ eval "sub TimeHiRestime { return time() };";
+ }
+ $Global::use{"Time::HiRes"} = 1;
+ }
+
+ return (int(TimeHiRestime()*1000))/1000;
+}
+
+sub usleep {
+ # Sleep this many milliseconds.
+ my $ms = shift;
+ ::debug("Sleeping ",$ms," millisecs\n");
+ my $start = now();
+ my $now;
+ do {
+ # Something makes 'select' wake up too early
+ # when using --sensor
+ select(undef, undef, undef, $ms/1000);
+ $now = now();
+ } while($now < $start + $ms/1000);
+}
+
+sub debug {
+ if($opt::debug) {
+ print STDERR @_;
+ }
+}
+
+
+sub my_dump {
+ # Returns:
+ # ascii expression of object if Data::Dump(er) is installed
+ # error code otherwise
+ my @dump_this = (@_);
+ eval "use Data::Dump qw(dump);";
+ if ($@) {
+ # Data::Dump not installed
+ eval "use Data::Dumper;";
+ if ($@) {
+ my $err = "Neither Data::Dump nor Data::Dumper is installed\n".
+ "Not dumping output\n";
+ print STDERR $err;
+ return $err;
+ } else {
+ return Dumper(@dump_this);
+ }
+ } else {
+ eval "use Data::Dump qw(dump);";
+ return (Data::Dump::dump(@dump_this));
+ }
+}
+
+
+sub version {
+ # Returns: N/A
+ print join("\n",
+ "GNU $Global::progname $Global::version",
+ "Copyright (C) 2004,2005,2006,2007,2008,2009 Ole Tange",
+ "Copyright (C) 2010,2011 Ole Tange and Free Software Foundation, Inc.",
+ "License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>",
+ "This is free software: you are free to change and redistribute it.",
+ "GNU $Global::progname comes with no warranty.",
+ "",
+ "Web site: http://www.gnu.org/software/parallel\n"
+ );
+}
+
+
+sub max {
+ # Returns:
+ # Maximum value of array
+ my $max;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+ defined $max or do { $max = $_; next; }; # Set $max to the first non-undef
+ $max = ($max > $_) ? $max : $_;
+ }
+ return $max;
+}
+
+sub min {
+ # Returns:
+ # Minimum value of array
+ my $min;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+ defined $min or do { $min = $_; next; }; # Set $min to the first non-undef
+ $min = ($min < $_) ? $min : $_;
+ }
+ return $min;
+}
+
+sub collect_net_baseline {
+ # Collect what a normal (unloaded) net connection looks line
+}
+
+
+sub netsensor_script {
+ # Script for --sensor when using --net
+ my $hops = shift;
+ my $perlscript = q{
+ use Net::Traceroute;
+ use Net::Ping;
+
+ my $medtrc = MedianTraceroute->new(shift);
+ $medtrc->set_remedian($medtrc->ping());
+ $medtrc->set_remedian($medtrc->ping());
+ while(1) {
+ my $ms = $medtrc->ping();
+ my $m = $medtrc->remedian();
+ if($m*1.5 < $ms) {
+ # Bad 1 = median*1.5 < current latency
+ } else {
+ # OK 0 = median*1.5 > current latency
+ $medtrc->set_remedian($ms);
+ }
+ printf("%d\n",$m*1.5 < $ms);
+ sleep(1);
+ }
+
+ package MedianTraceroute;
+
+ sub new {
+ my $class = shift;
+ my $hop = shift;
+ # Find router
+ my $tr = Net::Traceroute->new(host => "8.8.8.8",
+ max_ttl => $hop);
+ if($tr->found) {
+ $host = $tr->hop_query_host($hop, 0);
+ } else {
+ # ns1.censurfridns.dk
+ $tr = Net::Traceroute->new(host => "89.233.43.71",
+ max_ttl => $hop);
+ if($tr->found) {
+ $host = $tr->hop_query_host($hop, 0);
+ } else {
+ die("Cannot traceroute to 8.8.8.8 and 89.233.43.71");
+ }
+ }
+ my $p = Net::Ping->new();
+ $p->hires();
+
+ return bless {
+ 'hop' => $hop,
+ 'host' => $host,
+ 'pinger' => $p,
+ 'remedian_idx' => 0,
+ 'remedian_arr' => [],
+ 'remedian' => undef,
+ }, ref($class) || $class;
+ }
+
+ sub ping {
+ my $self = shift;
+ for(1..3) {
+ # Ping should never take longer than 5.5 sec
+ my ($ret, $duration, $ip) =
+ $self->{'pinger'}->ping($self->{'host'}, 5.5);
+ if($ret) {
+ return $duration;
+ }
+ }
+ warn("Ping failed 3 times.");
+ }
+
+ sub remedian {
+ my $self = shift;
+ return $self->{'remedian'};
+ }
+
+ sub set_remedian {
+ # Set median of the last 999^3 (=997002999) values using Remedian
+ #
+ # Rousseeuw, Peter J., and Gilbert W. Bassett Jr. "The remedian: A
+ # robust averaging method for large data sets." Journal of the
+ # American Statistical Association 85.409 (1990): 97-104.
+ my $self = shift;
+ my $val = shift;
+ my $i = $self->{'remedian_idx'}++;
+ my $rref = $self->{'remedian_arr'};
+ $rref->[0][$i%999] = $val;
+ $rref->[1][$i/999%999] = (sort @{$rref->[0]})[$#{$rref->[0]}/2];
+ $rref->[2][$i/999/999%999] = (sort @{$rref->[1]})[$#{$rref->[1]}/2];
+ $self->{'remedian'} = (sort @{$rref->[2]})[$#{$rref->[2]}/2];
+ }
+ };
+ return "perl -e ".shell_quote_scalar($perlscript)." $hops";
+}
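+# How set_remedian works (sketch): three 999-slot buffers cascade.
+# Each sample fills one slot of buffer 0; the median of buffer 0 is
+# written into a buffer-1 slot that advances every 999 samples, and
+# buffer 1 feeds buffer 2 the same way, so 'remedian' approximates the
+# median of up to 999^3 samples while storing only 3*999 values.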
+
+
+package Process;
+
+sub new {
+ my $class = shift;
+ my $nice = shift;
+ my @ARGV = @_;
+ if($nice) {
+ unshift(@ARGV, "nice", "-n", $nice);
+ }
+ return bless {
+ 'running' => 0, # Is the process running now?
+ 'command' => [@ARGV],
+ }, ref($class) || $class;
+}
+
+sub pgrp {
+ my $self = shift;
+ my @pgrp;
+ if(not $self->{'pgrp'}) {
+ for(@{$self->{'pids'}}) {
+ push @pgrp,-getpgrp($_);
+ }
+ @pgrp = ::uniq(@pgrp);
+ @{$self->{'pgrp'}} = @pgrp;
+ }
+ return @{$self->{'pgrp'}};
+}
+
+sub set_pid {
+ my $self = shift;
+ push(@{$self->{'pids'}},@_);
+ $self->{'running'} = 1;
+ $::exitstatus = 0;
+}
+
+sub start {
+ # Start the program
+ my $self = shift;
+ ::debug("Starting @{$self->{'command'}}\n");
+ $self->{'running'} = 1;
+ if($self->{'pid'} = fork) {
+ # set signal handler to kill children if parent is killed
+ push @{$self->{'pids'}}, $self->{'pid'};
+ $Global::process = $self;
+ $SIG{CHLD} = \&REAPER;
+ $SIG{INT}=\&kill_child_INT;
+ $SIG{TSTP}=\&kill_child_TSTP;
+ $SIG{CONT}=\&kill_child_CONT;
+ sleep 1; # Give child time to setpgrp(0,0);
+ } else {
+ setpgrp(0,0);
+ ::debug("Child pid: $$, pgrp: ",getpgrp $$,"\n");
+ ::debug("@{$self->{'command'}}\n");
+ if($opt::quote) {
+ system(@{$self->{'command'}});
+ } else {
+ system("@{$self->{'command'}}");
+ }
+ $::exitstatus = $? >> 8;
+ $::exitsignal = $? & 127;
+ ::debug("Child exit $::exitstatus\n");
+ exit($::exitstatus);
+ }
+}
+
+use POSIX ":sys_wait_h";
+use POSIX qw(:sys_wait_h);
+
+sub REAPER {
+ my $stiff;
+ while (($stiff = waitpid(-1, &WNOHANG)) > 0) {
+ # do something with $stiff if you want
+ $::exitstatus = $? >> 8;
+ $::exitsignal = $? & 127;
+ }
+ $SIG{CHLD} = \&REAPER; # install *after* calling waitpid
+}
+
+
+sub kill_child_CONT {
+ my $self = $Global::process;
+ ::debug("SIGCONT received. Killing @{$self->{'pgrp'}}\n");
+ kill CONT => $self->pgrp();
+}
+
+
+sub kill_child_TSTP {
+ my $self = $Global::process;
+ ::debug("SIGTSTP received. Killing $self->{'pid'} and self ($$)\n");
+ kill TSTP => $self->pgrp();
+ kill STOP => -$$;
+ kill STOP => $$;
+}
+
+
+sub kill_child_INT {
+ my $self = $Global::process;
+ ::debug("SIGINT received.\n");
+ if(not @opt::pid) {
+ ::debug("Killing $self->{'pid'} Exit\n");
+ kill INT => $self->pgrp();
+ } else {
+ ::debug("Continue pids $self->{'pid'} Exit\n");
+ kill CONT => $self->pgrp();
+ }
+ exit;
+}
+
+
+sub resume {
+ my $self = shift;
+ ::debug("Resume @{$self->{'pids'}}\n");
+ if(not $self->{'running'}) {
+ # - = PID group
+ map { kill "CONT", -$_ } @{$self->{'pids'}};
+ # If using -p it is not in a group
+ map { kill "CONT", $_ } @{$self->{'pids'}};
+ $self->{'running'} = 1;
+ }
+}
+
+
+sub suspend {
+ my $self = shift;
+ ::debug("Suspend @{$self->{'pids'}}\n");
+ if($self->{'running'}) {
+ # - = PID group
+ map { kill "STOP", -$_ } @{$self->{'pids'}};
+ # If using -p it is not in a group
+ map { kill "STOP", $_ } @{$self->{'pids'}};
+ $self->{'running'} = 0;
+ }
+}
+
+
+sub is_alive {
+ # The process is dead if none of the pids exist
+ my $self = shift;
+ my ($exists) = 0;
+ for my $pid (@{$self->{'pids'}}) {
+ if(kill 0 => $pid) { $exists++ }
+ }
+ ::debug("is_alive: $exists\n");
+ return $exists;
+}
+
+
+package Limit;
+
+sub new {
+ my $class = shift;
+ my %limits = @_;
+ my $hard = $opt::soft ? 0 : $opt::hard;
+ my $runio = $opt::run_io ? ::multiply_binary_prefix($opt::run_io) : 0;
+ my $startio = $opt::start_io ? ::multiply_binary_prefix($opt::start_io) : 0;
+ my $runload = $opt::run_load ? ::multiply_binary_prefix($opt::run_load) : 0;
+ my $startload = $opt::start_load ? ::multiply_binary_prefix($opt::start_load) : 0;
+ my $runmem = $opt::run_mem ? ::multiply_binary_prefix($opt::run_mem) : 0;
+ my $startmem = $opt::start_mem ? ::multiply_binary_prefix($opt::start_mem) : 0;
+ my $runnoswap = $opt::run_noswap ? ::multiply_binary_prefix($opt::run_noswap) : 0;
+ my $startnoswap = $opt::start_noswap ? ::multiply_binary_prefix($opt::start_noswap) : 0;
+ my $recheck = $opt::recheck ? ::multiply_binary_prefix($opt::recheck) : 1; # Default
+ my $runtime = $opt::suspend ? ::multiply_binary_prefix($opt::suspend) : 1; # Default
+
+ return bless {
+ 'hard' => $hard,
+ 'recheck' => $recheck,
+ 'runio' => $runio,
+ 'startio' => $startio,
+ 'runload' => $runload,
+ 'startload' => $startload,
+ 'runmem' => $runmem,
+ 'startmem' => $startmem,
+ 'runnoswap' => $runnoswap,
+ 'startnoswap' => $startnoswap,
+ 'factor' => $opt::factor || 1,
+ 'runtime' => $runtime,
+ 'over_run_limit' => 1,
+ 'over_start_limit' => 1,
+ 'verbose' => $opt::verbose,
+ }, ref($class) || $class;
+}
+
+
+sub over_run_limit {
+ my $self = shift;
+ my $status = 0;
+ if($self->{'runmem'}) {
+ # mem should be between 0-10ish
+ # 100% available => 0 (1-1)
+ # 50% available => 1 (2-1)
+ # 10% available => 9 (10-1)
+ my $mem = $self->mem_status();
+ ::debug("Run memory: $self->{'runmem'}/$mem\n");
+ $status += (::max(1,$self->{'runmem'}/$mem)-1);
+ }
+ if($self->{'runload'}) {
+ # If used with other limits load should be between 0-10ish
+ no warnings 'numeric';
+ my $load = $self->load_status();
+ if($self->{'runload'} > 0) {
+ # Stop if the load is above the limit
+ $status += ::max(0,$load - $self->{'runload'});
+ } else {
+ # Stop if the load is below the limit (for sensor)
+ $status += ::max(0,-$load - $self->{'runload'});
+ }
+ }
+ if($self->{'runnoswap'}) {
+ # swap should be between 0-10ish
+ # swap in or swap out or no swap = 0
+ # else log(swapin*swapout)
+ my $swap = $self->swap_status();
+ $status += log(::max(1, $swap - $self->{'runnoswap'}));
+ }
+ if($self->{'runio'}) {
+ my $io = $self->io_status();
+ $status += ::max(0,$io - $self->{'runio'});
+ }
+ $self->{'over_run_limit'} = $status;
+ if(not $opt::recheck) {
+ $self->{'recheck'} = $self->{'factor'} * $self->{'over_run_limit'};
+ }
+ ::debug("over_run_limit: $status\n");
+ return $self->{'over_run_limit'};
+}
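+# Worked sketch (not part of upstream): with --runload 2 and a load
+# average of 3.5, $status += max(0, 3.5 - 2) = 1.5, so over_run_limit()
+# is truthy and the main loop suspends the process; at load 1.5 the
+# status stays 0 and the process keeps running.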
+
+sub over_start_limit {
+ my $self = shift;
+ my $status = 0;
+ if($self->{'startmem'}) {
+ # mem should be between 0-10ish
+ # 100% available => 0 (1-1)
+ # 50% available => 1 (2-1)
+ # 10% available => 9 (10-1)
+ my $mem = $self->mem_status();
+ ::debug("Start memory: $self->{'startmem'}/$mem\n");
+ $status += (::max(1,$self->{'startmem'}/$mem)-1);
+ }
+ if($self->{'startload'}) {
+ # load should be between 0-10ish
+ # 0 load => 0
+ no warnings 'numeric';
+ my $load = $self->load_status();
+ if($self->{'startload'} > 0) {
+ # Stop if the load is above the limit
+ $status += ::max(0,$load - $self->{'startload'});
+ } else {
+ # Stop if the load is below the limit (for sensor)
+ $status += ::max(0,-$load - $self->{'startload'});
+ }
+ }
+ if($self->{'startnoswap'}) {
+ # swap should be between 0-10ish
+ # swap in or swap out or no swap = 0
+ # else log(swapin*swapout)
+ my $swap = $self->swap_status();
+ $status += log(::max(1, $swap - $self->{'startnoswap'}));
+ }
+ if($self->{'startio'}) {
+ my $io = $self->io_status();
+ $status += ::max(0,$io - $self->{'startio'});
+ }
+ $self->{'over_start_limit'} = $status;
+ if(not $opt::recheck) {
+ $self->{'recheck'} = $self->{'factor'} * $self->{'over_start_limit'};
+ }
+ ::debug("over_start_limit: $status\n");
+ return $self->{'over_start_limit'};
+}
+
+
+sub hard {
+ my $self = shift;
+ return $self->{'hard'};
+}
+
+
+sub verbose {
+ my $self = shift;
+ return $self->{'verbose'};
+}
+
+
+sub sleep_for_recheck {
+ my $self = shift;
+ if($self->{'recheck'} < 0.01) {
+ # Never sleep less than 0.01 sec
+ $self->{'recheck'} = 0.01;
+ }
+ if($self->verbose()) {
+ $self->{'recheck'} = int($self->{'recheck'}*100)/100;
+ print STDERR "Sleeping $self->{'recheck'}s\n";
+ }
+ ::debug("recheck in $self->{'recheck'}s\n");
+ ::usleep(1000*$self->{'recheck'});
+}
+
+
+sub sleep_while_running {
+ my $self = shift;
+ ::debug("check in $self->{'runtime'}s\n");
+ if($self->verbose()) {
+ $self->{'runtime'} = int($self->{'runtime'}*100)/100;
+ print STDERR "Running $self->{'runtime'}s\n";
+ }
+ ::usleep(1000*$self->{'runtime'});
+}
+
+
+sub nonblockGetLines {
+ # A non-blocking filehandle read that returns an array of lines read
+ # Returns: ($eof,@lines)
+ # Example: --sensor 'vmstat 1 | perl -ane '\''$|=1; 4..0 and print $F[8],"\n"'\'
+ my ($fh,$timeout) = @_;
+
+ $timeout = 0 unless defined $timeout;
+ my $rfd = '';
+ $::nonblockGetLines_last{$fh} = ''
+ unless defined $::nonblockGetLines_last{$fh};
+
+ vec($rfd,fileno($fh),1) = 1;
+ return unless select($rfd, undef, undef, $timeout)>=0;
+ # I'm not sure the following is necessary?
+ return unless vec($rfd,fileno($fh),1);
+ my $buf = '';
+ my $n = sysread($fh,$buf,1024*1024);
+
+ my $eof = eof($fh);
+ # If we're done, make sure to send the last unfinished line
+ return ($eof,$::nonblockGetLines_last{$fh}) unless $n;
+ # Prepend the last unfinished line
+ $buf = $::nonblockGetLines_last{$fh}.$buf;
+ # And save any newly unfinished lines
+ $::nonblockGetLines_last{$fh} =
+ (substr($buf,-1) !~ /[\r\n]/ && $buf =~ s/([^\r\n]*)$//)
+ ? $1 : '';
+ $buf ? ($eof,split(/\n/,$buf)) : ($eof);
+}
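+# Usage sketch (not part of upstream):
+#   my ($eof, @lines) = nonblockGetLines($fh, 0.25);
+# Returns whatever complete lines are buffered after at most 0.25s of
+# waiting; a trailing partial line is stashed in %::nonblockGetLines_last
+# until its newline arrives.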
+
+sub read_sensor {
+ my $self = shift;
+ ::debug("read_sensor: ");
+ my $fh = $self->{'sensor_fh'};
+ if(not $fh) {
+ # Start the sensor
+ $self->{'sensor_pid'} =
+ open($fh, "-|", $opt::sensor) ||
+ ::die_bug("Cannot open: $opt::sensor");
+ $self->{'sensor_fh'} = $fh;
+ }
+ # Read as much as we can (non_block)
+ my ($eof,@lines) = nonblockGetLines($fh);
+
+ # new load = last full line
+ foreach my $line (@lines) {
+ if(defined $line) {
+ ::debug("Pipe saw: [$line] eof=$eof\n");
+ $Global::last_sensor_reading = $line;
+ }
+ }
+ if($eof) {
+ # End of file => Restart the sensor
+ close $fh;
+# waitpid($self->{'sensor_pid'},0);
+ $self->{'sensor_pid'} =
+ open($fh, "-|", $opt::sensor) ||
+ ::die_bug("Cannot open: $opt::sensor");
+ $self->{'sensor_fh'} = $fh;
+ }
+
+ return $Global::last_sensor_reading;
+}
+
+sub load_status {
+ # Returns:
+ # loadavg or sensor measurement
+ my $self = shift;
+
+ if($opt::sensor) {
+ if(not defined $self->{'load_status'} or
+ $self->{'load_status_cache_time'} + $self->{'recheck'} < time) {
+ $self->{'load_status'} = $self->read_sensor();
+ while (not defined $self->{'load_status'}) {
+ sleep 1;
+ $self->{'load_status'} = $self->read_sensor();
+ }
+ $self->{'load_status_cache_time'} = time - 0.001;
+ }
+ } else {
+ # Normal load avg
+ # Cache for some seconds
+ if(not defined $self->{'load_status'} or
+ $self->{'load_status_cache_time'} + $self->{'recheck'} < time) {
+ $self->{'load_status'} = load_status_linux() if $^O ne 'darwin';
+ $self->{'load_status'} = load_status_darwin() if $^O eq 'darwin';
+ $self->{'load_status_cache_time'} = time;
+ }
+ }
+ ::debug("load_status: ".$self->{'load_status'}."\n");
+ return $self->{'load_status'};
+}
+
+sub undef_as_zero {
+ my $a = shift;
+ return $a ? $a : 0;
+}
+
+
+sub load_status_linux {
+ my ($loadavg);
+ if(open(IN,"/proc/loadavg")) {
+ # Linux specific (but fast)
+ my $upString = <IN>;
+ if($upString =~ m/^(\d+\.\d+)/) {
+ $loadavg = $1;
+ } else {
+ ::die_bug("proc_loadavg");
+ }
+ close IN;
+ } elsif (open(IN,"LANG=C uptime|")) {
+ my $upString = <IN>;
+ if($upString =~ m/averages?.\s*(\d+\.\d+)/) {
+ $loadavg = $1;
+ } else {
+ ::die_bug("uptime");
+ }
+ close IN;
+ }
+ return $loadavg;
+}
+
+sub load_status_darwin {
+ my $loadavg = `sysctl vm.loadavg`;
+ if($loadavg =~ /vm\.loadavg: \{ ([0-9.]+) ([0-9.]+) ([0-9.]+) \}/) {
+ $loadavg = $1;
+ } elsif (open(IN,"LANG=C uptime|")) {
+ my $upString = <IN>;
+ if($upString =~ m/averages?.\s*(\d+\.\d+)/) {
+ $loadavg = $1;
+ } else {
+ ::die_bug("uptime");
+ }
+ close IN;
+ }
+ return $loadavg;
+}
+
+
+sub swap_status {
+ # Returns:
+ # (swap in)*(swap out) kb
+ my $self = shift;
+ my $status;
+ # Cache for some seconds
+ if(not defined $self->{'swap_status'} or
+ $self->{'swap_status_cache_time'}+$self->{'recheck'} < time) {
+ $status = swap_status_linux() if $^O ne 'darwin';
+ $status = swap_status_darwin() if $^O eq 'darwin';
+ $self->{'swap_status'} = ::max($status,0);
+ $self->{'swap_status_cache_time'} = time;
+ }
+ ::debug("swap_status: $self->{'swap_status'}\n");
+ return $self->{'swap_status'};
+}
+
+
+sub swap_status_linux {
+ my $swap_activity;
+ $swap_activity = "vmstat 1 2 | tail -n1 | awk '{print \$7*\$8}'";
+ # Run swap_activity measuring.
+ return qx{ $swap_activity };
+}
+
+sub swap_status_darwin {
+ # Mach Virtual Memory Statistics: (page size of 4096 bytes, cache hits 0%)
+ # free active spec inactive wire faults copy 0fill reactive pageins pageout
+ # 298987 251463 162637 69437 265724 29730558 299022 2308237 1 110058 0
+ # 298991 251479 162637 69437 265726 43 4 16 0 0 0
+ my ($pagesize, $pageins, $pageouts);
+ my @vm_stat = `vm_stat 1 | head -n4`;
+ $pagesize = $1 if $vm_stat[0] =~ m/page size of (\d+) bytes/;
+ $pageins = (split(/\s+/,$vm_stat[3]))[9];
+ $pageouts = (split(/\s+/,$vm_stat[3]))[10];
+ return ($pageins*$pageouts*$pagesize)/1024;
+}
+
+
+sub mem_status {
+ # Returns:
+ # number of bytes (free+cache)
+ my $self = shift;
+ # Cache for one second
+ if(not defined $self->{'mem_status'} or
+ $self->{'mem_status_cache_time'}+$self->{'recheck'} < time) {
+ $self->{'mem_status'} = mem_status_linux() if $^O ne 'darwin';
+ $self->{'mem_status'} = mem_status_darwin() if $^O eq 'darwin';
+ $self->{'mem_status_cache_time'} = time;
+ }
+ ::debug("mem_status: $self->{'mem_status'}\n");
+ return $self->{'mem_status'};
+}
+
+
+sub mem_status_linux {
+ # total used free shared buffers cached
+ # Mem: 3366496 2901664 464832 0 179228 1850692
+ # -/+ buffers/cache: 871744 2494752
+ # Swap: 6445476 1396860 5048616
+ my @free = `free`;
+ my $free = (split(/\s+/,$free[2]))[3];
+ return $free*1024;
+}
+
+sub mem_status_darwin {
+ # Mach Virtual Memory Statistics: (page size of 4096 bytes, cache hits 0%)
+ # free active spec inactive wire faults copy 0fill reactive pageins pageout
+ # 298987 251463 162637 69437 265724 29730558 299022 2308237 1 110058 0
+ # 298991 251479 162637 69437 265726 43 4 16 0 0 0
+ my ($pagesize, $pages_free, $pages_speculative);
+ my @vm_stat = `vm_stat 1 | head -n4`;
+ $pagesize = $1 if $vm_stat[0] =~ m/page size of (\d+) bytes/;
+ $pages_free = (split(/\s+/,$vm_stat[3]))[0];
+ $pages_speculative = (split(/\s+/,$vm_stat[3]))[2];
+ return ($pages_free+$pages_speculative)*$pagesize;
+}
+
+
+sub io_status {
+ # Returns:
+ # max percent for all devices
+ my $self = shift;
+ # Cache for one second
+ if(not defined $self->{'io_status'} or
+ $self->{'io_status_cache_time'}+$self->{'recheck'} < time) {
+ $self->{'io_status'} = io_status_linux() if $^O ne 'darwin';
+ $self->{'io_status'} = io_status_darwin() if $^O eq 'darwin';
+ $self->{'io_status_cache_time'} = time;
+ }
+ ::debug("io_status: $self->{'io_status'}\n");
+ return $self->{'io_status'};
+}
+
+
+sub io_status_linux {
+ # Device rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
+ # sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ my @iostat_out = `LANG=C iostat -x 1 2`;
+  # throw away all except the last Device-section
+ my @iostat;
+ for(reverse @iostat_out) {
+ /Device/ and last;
+ my @col = (split(/\s+/,$_));
+ # Util% is last column
+ push @iostat, pop @col;
+ }
+ my $io = ::max(@iostat);
+ return undef_as_zero($io)/10;
+}
+
+sub io_status_darwin {
+ # disk0 disk1 disk2
+ # KB/t tps MB/s KB/t tps MB/s KB/t tps MB/s
+ # 14.95 15 0.22 11.18 35 0.38 2.00 0 0.00
+ # 0.00 0 0.00 0.00 0 0.00 0.00 0 0.00
+ my @iostat_out = `LANG=C iostat -d -w 1 -c 2`;
+ # return the MB/s of the last second (not the %util)
+ my @iostat = split(/\s+/, $iostat_out[3]);
+ my $io = $iostat[3] + $iostat[6] + $iostat[9];
+ return ::min($io, 10);
+}
+
+$::exitsignal = $::exitstatus = 0; # Dummy
diff --git a/src/niceload.pod b/src/niceload.pod
new file mode 100644
index 0000000..f7ba7b8
--- /dev/null
+++ b/src/niceload.pod
@@ -0,0 +1,433 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=head1 NAME
+
+niceload - slow down a program when the load average is above a certain limit
+
+=head1 SYNOPSIS
+
+B<niceload> [-v] [-h] [-n nice] [-I io] [-L load] [-M mem] [-N]
+[--sensor program] [-t time] [-s time|-f factor]
+( command | -p PID [-p PID ...] | --prg program )
+
+
+=head1 DESCRIPTION
+
+GNU B<niceload> will slow down a program when the load average (or
+other system activity) is above a certain limit. When the limit is
+reached, the program is suspended for some time, then resumed for
+some time; then the load average is checked again and the cycle
+starts over.
+
+Instead of the load average, B<niceload> can also look at disk I/O,
+the amount of free memory, or swapping activity.
+
+If the load is 3.00 then the default settings will run a program
+like this:
+
+run 1 second, suspend (3.00-1.00) seconds, run 1 second, suspend
+(3.00-1.00) seconds, run 1 second, ...
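+
+For example, running the hypothetical command B<bigjob> with all
+defaults will suspend it as described above whenever the load average
+rises above 1.00:
+
+  niceload bigjob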
+
+=head1 OPTIONS
+
+=over 9
+
+=item B<-B>
+
+=item B<--battery>
+
+Suspend if the system is running on battery. Shorthand for:
+-l -1 --sensor 'cat /sys/class/power_supply/BAT0/status
+/proc/acpi/battery/BAT0/state 2>/dev/null | grep -i -q discharging;
+echo $?'
+
+
+=item B<-f> I<FACTOR>
+
+=item B<--factor> I<FACTOR>
+
+Suspend time factor. Dynamically set B<-s> as amount over limit *
+factor. Default is 1.
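+
+As an illustration (numbers chosen arbitrarily): with B<-L 2 -f 10>
+and a measured load of 5, the suspend time becomes (5-2)*10 = 30
+seconds.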
+
+
+=item B<-H>
+
+=item B<--hard>
+
+Hard limit. B<--hard> will suspend the process until the system is
+under the limits. The default is B<--soft>.
+
+
+=item B<--io> I<iolimit>
+
+=item B<-I> I<iolimit>
+
+Limit for I/O. The amount of disk I/O will be computed as a value
+0-10, where 0 means no I/O and 10 means at least one disk is 100%
+saturated.
+
+B<--io> will set both B<--start-io> and B<--run-io>.
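+
+For example, to slow down a (hypothetical) copy job whenever the
+busiest disk is more than 70% saturated:
+
+  niceload -I 7 cp bigfile /backup/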
+
+
+=item B<--load> I<loadlimit>
+
+=item B<-L> I<loadlimit>
+
+Limit for load average.
+
+B<--load> will set both B<--start-load> and B<--run-load>.
+
+
+=item B<--mem> I<memlimit>
+
+=item B<-M> I<memlimit>
+
+Limit for free memory. This is the amount of bytes available as free
++ cache. This limit is treated opposite to the other limits: if the
+system is above the limit the program will run; if it is below the
+limit the program will stop.
+
+I<memlimit> can be postfixed with K, M, G, T, or P, which multiply
+the size by 1024, 1048576, 1073741824, 1099511627776, or
+1125899906842624 respectively.
+
+B<--mem> will set both B<--start-mem> and B<--run-mem>.
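+
+For example, to only let the hypothetical program B<bigjob> run
+while at least 1 gigabyte is available as free + cache:
+
+  niceload -M 1G bigjob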
+
+
+=item B<--noswap>
+
+=item B<-N>
+
+No swapping. If the system is swapping both in and out it is a good
+indication that the system is memory stressed.
+
+B<--noswap> is over limit if the system is swapping both in and out.
+
+B<--noswap> will set both B<--start-noswap> and B<--run-noswap>.
+
+
+=item B<--net>
+
+Shorthand for B<--nethops 3>.
+
+
+=item B<--nethops> I<h>
+
+Network nice. Pause if the internet connection is overloaded.
+
+B<niceload> finds a router I<h> hops closer to the internet. It
+B<ping>s this every second. If the latency is more than 50% bigger
+than the median, it is regarded as being over the limit.
+
+B<--nethops> can be combined with B<--hard>. Without B<--hard> the
+program may be able to queue up so much traffic that it will take
+longer than the B<--suspend> time to clear it. B<--hard> is useful for
+traffic that does not break by being suspended for a longer time.
+
+B<--nethops> can be combined with a high B<--suspend>. This way a
+program can be allowed to do a bit of traffic now and then. This is
+useful to keep the connection alive.
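+
+For example, to pause a (hypothetical) B<rsync> transfer whenever
+the router 3 hops closer to the internet shows raised latency:
+
+  niceload --net --hard rsync -Ha /home/ server:/backup/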
+
+
+=item B<-n> I<niceness>
+
+=item B<--nice> I<niceness>
+
+Sets niceness. See B<nice>(1).
+
+
+=item B<-p> I<PID>[,I<PID>]
+
+=item B<--pid> I<PID>[,I<PID>]
+
+Process IDs of processes to suspend. You can specify multiple process
+IDs with multiple B<-p> I<PID> or by separating the PIDs with commas.
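+
+For example, to slow down the already running processes with the
+(hypothetical) PIDs 1234 and 5678 when the load is above 2:
+
+  niceload -L 2 -p 1234,5678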
+
+
+=item B<--prg> I<program>
+
+=item B<--program> I<program>
+
+Name of running program to suspend. You can specify multiple programs
+with multiple B<--prg> I<program>. If no processes with the name
+I<program> are found, B<niceload> will search for process names
+containing I<program> as a substring.
+
+
+=item B<--quote>
+
+=item B<-q>
+
+Quote the command line. Useful if the command contains chars like *,
+$, >, and " that should not be interpreted by the shell.
+
+
+=item B<--run-io> I<iolimit>
+
+=item B<--ri> I<iolimit>
+
+=item B<--run-load> I<loadlimit>
+
+=item B<--rl> I<loadlimit>
+
+=item B<--run-mem> I<memlimit>
+
+=item B<--rm> I<memlimit>
+
+Run limit. The running program will be slowed down if the system is
+above the limit. See: B<--io>, B<--load>, B<--mem>, B<--noswap>.
+
+
+=item B<--sensor> I<sensor program>
+
+Read sensor. Use I<sensor program> to read a sensor.
+
+This will keep the CPU temperature below 80 deg C on GNU/Linux:
+
+ niceload -l 80000 -f 0.001 --sensor 'sort -n /sys/devices/platform/coretemp*/temp*_input' gzip *
+
+This will stop the command if the free disk space drops below 100000:
+
+ niceload -H -l -100000 --sensor "df . | awk '{ print \$4 }'" echo
+
+
+=item B<--start-io> I<iolimit>
+
+=item B<--si> I<iolimit>
+
+=item B<--start-load> I<loadlimit>
+
+=item B<--sl> I<loadlimit>
+
+=item B<--start-mem> I<memlimit>
+
+=item B<--sm> I<memlimit>
+
+Start limit. The program will not start until the system is below the
+limit. See: B<--io>, B<--load>, B<--mem>, B<--noswap>.
+
+
+=item B<--soft>
+
+=item B<-S>
+
+Soft limit. B<niceload> will suspend a process for a while and then
+let it run for a second, thus only slowing down a process while the
+system is over one of the given limits. This is the default.
+
+
+=item B<--suspend> I<SEC>
+
+=item B<-s> I<SEC>
+
+Suspend time. Suspend the command this many seconds when the max load
+average is reached.
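+
+For example, to suspend the hypothetical B<bigjob> for 10 seconds
+each time the load rises above 2:
+
+  niceload -s 10 -L 2 bigjob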
+
+
+=item B<--recheck> I<SEC>
+
+=item B<-t> I<SEC>
+
+Recheck load time. Sleep SEC seconds before checking load
+again. Default is 1 second.
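+
+For example, to check the load every 5 seconds instead of every
+second while running the hypothetical B<bigjob>:
+
+  niceload -t 5 -L 2 bigjob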
+
+
+=item B<--verbose>
+
+=item B<-v>
+
+Verbose. Print some extra output on what is happening. Use B<-v>
+until you know what you are doing.
+
+=back
+
+=head1 EXAMPLE: See niceload in action
+
+In terminal 1 run: top
+
+In terminal 2 run:
+
+B<niceload -q perl -e '$|=1;do{$l==$r or print "."; $l=$r}until(($r=time-$^T)>>B<50)'>
+
+This will print a '.' every second for 50 seconds and eat a lot of
+CPU. When the load rises to 1.0 the process is suspended.
+
+
+=head1 EXAMPLE: Run updatedb
+
+Running B<updatedb> can often starve the system for disk I/O and
+thus result in a high load.
+
+Run B<updatedb> but suspend B<updatedb> if the load is above 2.00:
+
+B<niceload -L 2 updatedb>
+
+
+=head1 EXAMPLE: Run rsync
+
+B<rsync> can, just like B<updatedb>, starve the system for disk I/O
+and thus result in a high load.
+
+Run B<rsync> but keep load below 3.4. If load reaches 7 sleep for
+(7-3.4)*12 seconds:
+
+B<niceload -L 3.4 -f 12 rsync -Ha /home/ /backup/home/>
+
+
+=head1 EXAMPLE: Ensure enough disk cache
+
+Assume the program B<foo> uses 2 GB files intensively. B<foo> will
+run fast if the files are in disk cache and slow to a crawl if they
+are not in the cache.
+
+To ensure 2 GB are reserved for disk cache run:
+
+B<niceload --hard --run-mem 2g foo>
+
+This will not guarantee that the 2 GB memory will be used for the
+files for B<foo>, but it will stop B<foo> if the memory for disk cache
+is too low.
+
+
+=head1 ENVIRONMENT VARIABLES
+
+None. In future versions $NICELOAD will be able to contain default settings.
+
+=head1 EXIT STATUS
+
+Exit status should be the same as the command being run (untested).
+
+=head1 REPORTING BUGS
+
+Report bugs to <bug-parallel@gnu.org>.
+
+=head1 AUTHOR
+
+Copyright (C) 2004-11-19 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2005-2010 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+=head1 LICENSE
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+at your option any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this
+documentation under the terms of the GNU Free Documentation License,
+Version 1.3 or any later version published by the Free Software
+Foundation; with no Invariant Sections, with no Front-Cover Texts, and
+with no Back-Cover Texts. A copy of the license is included in the
+file LICENSES/GFDL-1.3-or-later.txt.
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 2
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=back
+
+=over 9
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file
+LICENSES/CC-BY-SA-4.0.txt.
+
+
+=head1 DEPENDENCIES
+
+GNU B<niceload> uses Perl and the Perl modules POSIX and
+Getopt::Long.
+
+=head1 SEE ALSO
+
+B<parallel>(1), B<nice>(1), B<uptime>(1)
+
+=cut
diff --git a/src/parallel b/src/parallel
new file mode 100755
index 0000000..e45fcf0
--- /dev/null
+++ b/src/parallel
@@ -0,0 +1,15425 @@
+#!/usr/bin/env perl
+
+# Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2007-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# open3 used in Job::start
+use IPC::Open3;
+use POSIX;
+# gensym used in Job::start
+use Symbol qw(gensym);
+# tempfile used in Job::start
+use File::Temp qw(tempfile tempdir);
+# mkpath used in openresultsfile
+use File::Path;
+# GetOptions used in get_options_from_array
+use Getopt::Long;
+# Used to ensure code quality
+use strict;
+use File::Basename;
+
+sub set_input_source_header($$) {
+ my ($command_ref,$input_source_fh_ref) = @_;
+ if(defined $opt::header and not $opt::pipe) {
+ # split with colsep or \t
+ # $header force $colsep = \t if undef?
+ my $delimiter = defined $opt::colsep ? $opt::colsep : "\t";
+ # regexp for {=
+ my $left = "\Q$Global::parensleft\E";
+ my $l = $Global::parensleft;
+ # regexp for =}
+ my $right = "\Q$Global::parensright\E";
+ my $r = $Global::parensright;
+ if($opt::header ne "0") {
+ my $id = 1;
+ for my $fh (@$input_source_fh_ref) {
+ my $line = <$fh>;
+ chomp($line);
+ $line =~ s/\r$//;
+ ::debug("init", "Delimiter: '$delimiter'");
+ for my $s (split /$delimiter/o, $line) {
+ ::debug("init", "Colname: '$s'");
+ # Replace {colname} with {2}
+ for(@$command_ref, @Global::ret_files,
+ @Global::transfer_files, $opt::tagstring,
+ $opt::workdir, $opt::results, $opt::retries,
+ @Global::template_contents, @Global::template_names,
+ @opt::filter) {
+ # Skip if undefined
+ $_ or next;
+ s:\{$s(|/|//|\.|/\.)\}:\{$id$1\}:g;
+ # {=header1 ... =} => {=1 ... =}
+ s:$left $s (.*?) $right:$l$id$1$r:gx;
+ }
+ $Global::input_source_header{$id} = $s;
+ $id++;
+ }
+ }
+ }
+ # Make it possible to do:
+ # parallel --header 0 echo {file2} {file1} :::: file1 file2
+ my $id = 1;
+ for my $s (@opt::a) {
+ # ::: are put into files and given a filehandle
+ # ignore these and only keep the filenames.
+ fileno $s and next;
+ for(@$command_ref, @Global::ret_files,
+ @Global::transfer_files, $opt::tagstring,
+ $opt::workdir, $opt::results, $opt::retries,
+ @Global::template_contents, @Global::template_names,
+ @opt::filter) {
+ # Skip if undefined
+ $_ or next;
+ s:\{\Q$s\E(|/|//|\.|/\.)\}:\{$id$1\}:g;
+ # {=header1 ... =} => {=1 ... =}
+ s:$left \Q$s\E (.*?) $right:$l$id$1$r:gx;
+ }
+ $Global::input_source_header{$id} = $s;
+ $id++;
+ }
+ } else {
+ my $id = 1;
+ for my $fh (@$input_source_fh_ref) {
+ $Global::input_source_header{$id} = $id;
+ $id++;
+ }
+ }
+}
+
+sub max_jobs_running() {
+ # Compute $Global::max_jobs_running as the max number of jobs
+ # running on each sshlogin.
+ # Returns:
+ # $Global::max_jobs_running
+ if(not $Global::max_jobs_running) {
+ for my $sshlogin (values %Global::host) {
+ $sshlogin->max_jobs_running();
+ }
+ }
+ if(not $Global::max_jobs_running) {
+ ::error(255,"Cannot run any jobs.");
+ }
+ return $Global::max_jobs_running;
+}
+
+sub halt() {
+ # Compute exit value,
+ # wait for children to complete
+ # and exit
+ if($opt::halt and $Global::halt_when ne "never") {
+ if(not defined $Global::halt_exitstatus) {
+ if($Global::halt_pct) {
+ $Global::halt_exitstatus =
+ ::ceil($Global::total_failed /
+ ($Global::total_started || 1) * 100);
+ } elsif($Global::halt_count) {
+ $Global::halt_exitstatus =
+ ::min(undef_as_zero($Global::total_failed),101);
+ }
+ }
+ wait_and_exit($Global::halt_exitstatus);
+ } else {
+ if($Global::semaphore) {
+ # --semaphore runs a single job:
+ # Use exit value of that
+ wait_and_exit($Global::halt_exitstatus);
+ } else {
+ # 0 = all jobs succeeded
+ # 1-100 = n jobs failed
+ # 101 = >100 jobs failed
+ wait_and_exit(min(undef_as_zero($Global::exitstatus),101));
+ }
+ }
+}
+
+
+sub __PIPE_MODE__() {}
+
+
+sub pipepart_setup() {
+ # Compute the blocksize
+ # Generate the commands to extract the blocks
+ # Push the commands on queue
+ # Changes:
+ # @Global::cat_prepends
+ # $Global::JobQueue
+ if($opt::tee) {
+ # Prepend each command with
+ # < file
+ my $cat_string = "< ".Q($opt::a[0]);
+ for(1..$Global::JobQueue->total_jobs()) {
+ push @Global::cat_appends, $cat_string;
+ push @Global::cat_prepends, "";
+ }
+ } else {
+ if(not $opt::blocksize) {
+ # --blocksize with 10 jobs per jobslot
+ $opt::blocksize = -10;
+ }
+ if($opt::roundrobin) {
+ # --blocksize with 1 job per jobslot
+ $opt::blocksize = -1;
+ }
+ if($opt::blocksize < 0) {
+ my $size = 0;
+ # Compute size of -a
+ for(@opt::a) {
+ if(-f $_) {
+ $size += -s $_;
+ } elsif(-b $_) {
+ $size += size_of_block_dev($_);
+ } elsif(-e $_) {
+ ::error(255,"$_ is neither a file nor a block device");
+ } else {
+ ::error(255,"File not found: $_");
+ }
+ }
+ # Run in total $job_slots*(- $blocksize) jobs
+ # Set --blocksize = size / no of proc / (- $blocksize)
+ $Global::dummy_jobs = 1;
+ $Global::blocksize = 1 +
+ int($size / max_jobs_running() /
+ -multiply_binary_prefix($opt::blocksize));
+ }
+ @Global::cat_prepends = (map { pipe_part_files($_) }
+ # ::: are put into files and given a filehandle
+ # ignore these and only keep the filenames.
+ grep { ! fileno $_ } @opt::a);
+ # Unget the empty arg as many times as there are parts
+ $Global::JobQueue->{'commandlinequeue'}{'arg_queue'}->unget(
+ map { [Arg->new("\0noarg")] } @Global::cat_prepends
+ );
+ }
+}
+
+sub pipe_tee_setup() {
+ # Create temporary fifos
+ # Run 'tee fifo1 fifo2 fifo3 ... fifoN' in the background
+ # This will spread the input to fifos
+ # Generate commands that reads from fifo1..N:
+ # cat fifo | user_command
+ # Changes:
+ # @Global::cat_prepends
+ my @fifos;
+ for(1..$Global::JobQueue->total_jobs()) {
+ push @fifos, tmpfifo();
+ }
+ # cat foo | tee fifo1 fifo2 fifo3 fifo4 fifo5 > /dev/null
+ if(not fork()){
+ # Test if tee supports --output-error=warn-nopipe
+ `echo | tee --output-error=warn-nopipe /dev/null >/dev/null 2>/dev/null`;
+ my $opt = $? ? "" : "--output-error=warn-nopipe";
+ ::debug("init","tee $opt");
+ if($opt::dryrun) {
+ # This is not exactly what is run, but it gives the basic idea
+ print "mkfifo @fifos\n";
+ print "tee $opt @fifos >/dev/null &\n";
+ } else {
+ # Let tee inherit our stdin
+ # and redirect stdout to null
+ open STDOUT, ">","/dev/null";
+ if($opt) {
+ exec "tee", $opt, @fifos;
+ } else {
+ exec "tee", @fifos;
+ }
+ }
+ exit(0);
+ }
+ # For each fifo
+ # (rm fifo1; grep 1) < fifo1
+ # (rm fifo2; grep 2) < fifo2
+ # (rm fifo3; grep 3) < fifo3
+ # Remove the tmpfifo as soon as it is open
+ @Global::cat_prepends = map { "(rm $_;" } shell_quote(@fifos);
+ @Global::cat_appends = map { ") < $_" } shell_quote(@fifos);
+}
+
+
+sub parcat_script() {
+ # TODO if script fails: Use parallel -j0 --plain --lb cat ::: fifos
+ my $script = q'{
+ use POSIX qw(:errno_h);
+ use IO::Select;
+ use strict;
+ use threads;
+ use Thread::Queue;
+ use Fcntl qw(:DEFAULT :flock);
+
+ my $opened :shared;
+ my $q = Thread::Queue->new();
+ my $okq = Thread::Queue->new();
+ my @producers;
+
+ if(not @ARGV) {
+ if(-t *STDIN) {
+ print "Usage:\n";
+ print " parcat file(s)\n";
+ print " cat argfile | parcat\n";
+ } else {
+ # Read arguments from stdin
+ chomp(@ARGV = <STDIN>);
+ }
+ }
+ my $files_to_open = 0;
+ # Default: fd = stdout
+ my $fd = 1;
+ for (@ARGV) {
+ # --rm = remove file when opened
+ /^--rm$/ and do { $opt::rm = 1; next; };
+ # -1 = output to fd 1, -2 = output to fd 2
+ /^-(\d+)$/ and do { $fd = $1; next; };
+ push @producers, threads->create("producer", $_, $fd);
+ $files_to_open++;
+ }
+
+ sub producer {
+  # Open a file/fifo, set non-blocking, enqueue fileno of the file handle
+ my $file = shift;
+ my $output_fd = shift;
+ open(my $fh, "<", $file) || do {
+ print STDERR "parcat: Cannot open $file: $!\n";
+ exit(1);
+ };
+ # Remove file when it has been opened
+ if($opt::rm) {
+ unlink $file;
+ }
+ set_fh_non_blocking($fh);
+ $opened++;
+ # Pass the fileno to parent
+ $q->enqueue(fileno($fh),$output_fd);
+ # Get an OK that the $fh is opened and we can release the $fh
+ while(1) {
+ my $ok = $okq->dequeue();
+ if($ok == fileno($fh)) { last; }
+ # Not ours - very unlikely to happen
+ $okq->enqueue($ok);
+ }
+ return;
+ }
+
+ my $s = IO::Select->new();
+ my %buffer;
+
+ sub add_file {
+ my $infd = shift;
+ my $outfd = shift;
+ open(my $infh, "<&=", $infd) || die;
+ open(my $outfh, ">&=", $outfd) || die;
+ $s->add($infh);
+  # Tell the producer the file is now opened here and can be released
+ $okq->enqueue($infd);
+ # Initialize the buffer
+ @{$buffer{$infh}{$outfd}} = ();
+ $Global::fh{$outfd} = $outfh;
+ }
+
+ sub add_files {
+ # Non-blocking dequeue
+ my ($infd,$outfd);
+ do {
+ ($infd,$outfd) = $q->dequeue_nb(2);
+ if(defined($outfd)) {
+ add_file($infd,$outfd);
+ }
+ } while(defined($outfd));
+ }
+
+ sub add_files_block {
+ # Blocking dequeue
+ my ($infd,$outfd) = $q->dequeue(2);
+ add_file($infd,$outfd);
+ }
+
+
+ my $fd;
+ my (@ready,$infh,$rv,$buf);
+ do {
+ # Wait until at least one file is opened
+ add_files_block();
+ while($q->pending or keys %buffer) {
+ add_files();
+ while(keys %buffer) {
+ @ready = $s->can_read(0.01);
+ if(not @ready) {
+ add_files();
+ }
+ for $infh (@ready) {
+ # There is only one key, namely the output file descriptor
+ for my $outfd (keys %{$buffer{$infh}}) {
+ # TODO test if 60800 is optimal (2^17 is used elsewhere)
+ $rv = sysread($infh, $buf, 60800);
+ if (!$rv) {
+ if($! == EAGAIN) {
+ # Would block: Nothing read
+ next;
+ } else {
+ # Nothing read, but would not block:
+ # This file is done
+ $s->remove($infh);
+ for(@{$buffer{$infh}{$outfd}}) {
+ syswrite($Global::fh{$outfd},$_);
+ }
+ delete $buffer{$infh};
+ # Closing the $infh causes it to block
+ # close $infh;
+ add_files();
+ next;
+ }
+ }
+ # Something read.
+ # Find \n or \r for full line
+ my $i = (rindex($buf,"\n")+1);
+ if($i) {
+ # Print full line
+ for(@{$buffer{$infh}{$outfd}}, substr($buf,0,$i)) {
+ syswrite($Global::fh{$outfd},$_);
+ }
+ # @buffer = remaining half line
+ $buffer{$infh}{$outfd} = [substr($buf,$i,$rv-$i)];
+ } else {
+ # Something read, but not a full line
+ push @{$buffer{$infh}{$outfd}}, $buf;
+ }
+ redo;
+ }
+ }
+ }
+ }
+ } while($opened < $files_to_open);
+
+ for (@producers) {
+ $_->join();
+ }
+
+ sub set_fh_non_blocking {
+ # Set filehandle as non-blocking
+ # Inputs:
+  # $fh = filehandle to be made non-blocking
+ # Returns:
+ # N/A
+ my $fh = shift;
+ my $flags;
+ fcntl($fh, &F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
+ $flags |= &O_NONBLOCK; # Add non-blocking to the flags
+ fcntl($fh, &F_SETFL, $flags) || die $!; # Set the flags on the filehandle
+ }
+ }';
+ return ::spacefree(3, $script);
+}
+
+sub sharder_script() {
+ my $script = q{
+ use B;
+ # Column separator
+ my $sep = shift;
+ # Which columns to shard on (count from 1)
+  # Which column to shard on (count from 1)
+  my $col = shift;
+  # Which column to shard on (count from 0)
+ # Perl expression
+ my $perlexpr = shift;
+ my $bins = @ARGV;
+ # Open fifos for writing, fh{0..$bins}
+ my $t = 0;
+ my %fh;
+ for(@ARGV) {
+ open $fh{$t++}, ">", $_;
+ # open blocks until it is opened by reader
+ # so unlink only happens when it is ready
+ unlink $_;
+ }
+ if($perlexpr) {
+ my $subref = eval("sub { no strict; no warnings; $perlexpr }");
+ while(<STDIN>) {
+ # Split into $col columns (no need to split into more)
+ @F = split $sep, $_, $col+1;
+ {
+ local $_ = $F[$col0];
+ &$subref();
+ $fh = $fh{ hex(B::hash($_))%$bins };
+ }
+ print $fh $_;
+ }
+ } else {
+ while(<STDIN>) {
+ # Split into $col columns (no need to split into more)
+ @F = split $sep, $_, $col+1;
+ $fh = $fh{ hex(B::hash($F[$col0]))%$bins };
+ print $fh $_;
+ }
+ }
+ # Close all open fifos
+ close values %fh;
+ };
+ return ::spacefree(1, $script);
+}
+
+sub binner_script() {
+ my $script = q{
+ use B;
+ # Column separator
+ my $sep = shift;
+  # Which column to shard on (count from 1)
+  my $col = shift;
+  # Which column to shard on (count from 0)
+ my $col0 = $col - 1;
+ # Perl expression
+ my $perlexpr = shift;
+ my $bins = @ARGV;
+ # Open fifos for writing, fh{0..$bins}
+ my $t = 0;
+ my %fh;
+ # Let the last output fifo be the 0'th
+ open $fh{$t++}, ">", pop @ARGV;
+ for(@ARGV) {
+ open $fh{$t++}, ">", $_;
+ # open blocks until it is opened by reader
+ # so unlink only happens when it is ready
+ unlink $_;
+ }
+ if($perlexpr) {
+ my $subref = eval("sub { no strict; no warnings; $perlexpr }");
+ while(<STDIN>) {
+ # Split into $col columns (no need to split into more)
+ @F = split $sep, $_, $col+1;
+ {
+ local $_ = $F[$col0];
+ &$subref();
+ $fh = $fh{ $_%$bins };
+ }
+ print $fh $_;
+ }
+ } else {
+ while(<STDIN>) {
+ # Split into $col columns (no need to split into more)
+ @F = split $sep, $_, $col+1;
+ $fh = $fh{ $F[$col0]%$bins };
+ print $fh $_;
+ }
+ }
+ # Close all open fifos
+ close values %fh;
+ };
+ return ::spacefree(1, $script);
+}
+
+sub pipe_shard_setup() {
+ # Create temporary fifos
+ # Run 'shard.pl sep col fifo1 fifo2 fifo3 ... fifoN' in the background
+ # This will spread the input to fifos
+ # Generate commands that reads from fifo1..N:
+ # cat fifo | user_command
+ # Changes:
+ # @Global::cat_prepends
+ my @shardfifos;
+ my @parcatfifos;
+ # TODO $opt::jobs should be evaluated (100%)
+ # TODO $opt::jobs should be number of total_jobs if there are arguments
+ max_jobs_running();
+ my $njobs = $Global::max_jobs_running;
+ for my $m (0..$njobs-1) {
+ for my $n (0..$njobs-1) {
+ # sharding to A B C D
+ # parcatting all As together
+ $parcatfifos[$n][$m] = $shardfifos[$m][$n] = tmpfifo();
+ }
+ }
+ my $shardbin = ($opt::shard || $opt::bin);
+ my $script;
+ if($opt::bin) {
+ $script = binner_script();
+ } else {
+ $script = sharder_script();
+ }
+
+ # cat foo | sharder sep col fifo1 fifo2 fifo3 ... fifoN
+
+ if($shardbin =~ /^[a-z_][a-z_0-9]*(\s|$)/i) {
+ # Group by column name
+ # (Yes, this will also wrongly match a perlexpr like: chop)
+ my($read,$char,@line);
+ # A full line, but nothing more (the rest must be read by the child)
+ # $Global::header used to prepend block to each job
+ do {
+ $read = sysread(STDIN,$char,1);
+ push @line, $char;
+ } while($read and $char ne "\n");
+ $Global::header = join "", @line;
+ }
+ my ($col, $perlexpr, $subref) =
+ column_perlexpr($shardbin, $Global::header, $opt::colsep);
+ if(not fork()) {
+ # Let the sharder inherit our stdin
+ # and redirect stdout to null
+ open STDOUT, ">","/dev/null";
+ # The PERL_HASH_SEED must be the same for all sharders
+ # so B::hash will return the same value for any given input
+ $ENV{'PERL_HASH_SEED'} = $$;
+ exec qw(parallel -0 --block 100k -q --pipe -j), $njobs,
+ qw(--roundrobin -u perl -e), $script, ($opt::colsep || ","),
+ $col, $perlexpr, '{}', (map { (':::+', @{$_}) } @parcatfifos);
+ }
+ # For each fifo
+ # (rm fifo1; grep 1) < fifo1
+ # (rm fifo2; grep 2) < fifo2
+ # (rm fifo3; grep 3) < fifo3
+ my $parcat = Q(parcat_script());
+ if(not $parcat) {
+ ::error(255,"'parcat' must be in path.");
+ }
+ @Global::cat_prepends =
+ map { "perl -e $parcat ".
+ join(" ",shell_quote(@$_))." | "} @parcatfifos;
+}
+
+sub pipe_part_files(@) {
+ # Given the bigfile:
+ # - find header and split positions
+ # - make commands that 'cat's the partial file
+ # Input:
+ # $file = the file to read
+ # Returns:
+ # @commands that will cat_partial each part
+ my ($file) = @_;
+ my $buf = "";
+ if(not -f $file and not -b $file) {
+ ::error(255,"--pipepart only works on seekable files, not streams/pipes.",
+ "$file is not a seekable file.");
+ }
+
+ my $fh = open_or_exit("<",$file);
+ my $firstlinelen = 0;
+ if($opt::skip_first_line) {
+ my $newline;
+ # Read a full line one byte at a time
+ while($firstlinelen += sysread($fh,$newline,1,0)) {
+ $newline eq "\n" and last;
+ }
+ }
+ my $header = find_header(\$buf,$fh);
+ # find positions
+ my @pos = find_split_positions($file,int($Global::blocksize),
+ $header,$firstlinelen);
+ # Make @cat_prepends
+ my @cat_prepends = ();
+ for(my $i=0; $i<$#pos; $i++) {
+ push(@cat_prepends,
+ cat_partial($file, $firstlinelen, $firstlinelen+length($header),
+ $pos[$i], $pos[$i+1]));
+ }
+ return @cat_prepends;
+}
+
+sub find_header($$) {
+ # Compute the header based on $opt::header
+ # Input:
+ # $buf_ref = reference to read-in buffer
+ # $fh = filehandle to read from
+ # Uses:
+ # $opt::header
+ # $Global::blocksize
+ # $Global::header
+ # Returns:
+ # $header string
+ my ($buf_ref, $fh) = @_;
+ my $header = "";
+ # $Global::header may be set in group_by_loop()
+ if($Global::header) { return $Global::header }
+ if($opt::header) {
+ if($opt::header eq ":") { $opt::header = "(.*\n)"; }
+ # Number = number of lines
+ $opt::header =~ s/^(\d+)$/"(.*\n)"x$1/e;
+ while(sysread($fh,$$buf_ref,int($Global::blocksize),length $$buf_ref)) {
+ if($$buf_ref =~ s/^($opt::header)//) {
+ $header = $1;
+ last;
+ }
+ }
+ }
+ return $header;
+}
+
+sub find_split_positions($$$) {
+ # Find positions in bigfile where recend is followed by recstart
+ # Input:
+ # $file = the file to read
+ # $block = (minimal) --block-size of each chunk
+ # $header = header to be skipped
+ # Uses:
+ # $opt::recstart
+ # $opt::recend
+ # Returns:
+ # @positions of block start/end
+ my($file, $block, $header, $firstlinelen) = @_;
+ my $skiplen = $firstlinelen + length $header;
+ my $size = -s $file;
+ if(-b $file) {
+ # $file is a blockdevice
+ $size = size_of_block_dev($file);
+ }
+ $block = int $block;
+ if($opt::groupby) {
+ return split_positions_for_group_by($file,$size,$block,
+ $header,$firstlinelen);
+ }
+ # The optimal dd blocksize for mint, redhat, solaris, openbsd = 2^17..2^20
+ # The optimal dd blocksize for freebsd = 2^15..2^17
+ # The optimal dd blocksize for ubuntu (AMD6376) = 2^16
+ my $dd_block_size = 131072; # 2^17
+ my @pos;
+ my ($recstart,$recend) = recstartrecend();
+ my $recendrecstart = $recend.$recstart;
+ my $fh = ::open_or_exit("<",$file);
+ push(@pos,$skiplen);
+ for(my $pos = $block+$skiplen; $pos < $size; $pos += $block) {
+ my $buf;
+ if($recendrecstart eq "") {
+  # records end anywhere
+ push(@pos,$pos);
+ } else {
+  # Seek to the block start
+ if(not sysseek($fh, $pos, 0)) {
+  ::error(255,"Cannot seek to $pos in $file");
+ }
+ while(sysread($fh,$buf,$dd_block_size,length $buf)) {
+ if($opt::regexp) {
+ # If match /$recend$recstart/ => Record position
+ if($buf =~ m:^(.*$recend)$recstart:os) {
+ # Start looking for next record _after_ this match
+ $pos += length($1);
+ push(@pos,$pos);
+ last;
+ }
+ } else {
+ # If match $recend$recstart => Record position
+ # TODO optimize to only look at the appended
+ # $dd_block_size + len $recendrecstart
+ # TODO increase $dd_block_size to optimize for longer records
+ my $i = index64(\$buf,$recendrecstart);
+ if($i != -1) {
+ # Start looking for next record _after_ this match
+ $pos += $i + length($recend);
+ push(@pos,$pos);
+ last;
+ }
+ }
+ }
+ }
+ }
+ if($pos[$#pos] != $size) {
+ # Last splitpoint was not at end of the file: add $size as the last
+ push @pos, $size;
+ }
+ close $fh;
+ return @pos;
+}
+
+sub split_positions_for_group_by($$$$) {
+ my($fh);
+ my %value;
+ sub value_at($) {
+ my $pos = shift;
+ if(not defined $value{$pos}) {
+ if($pos != 0) {
+ seek($fh, $pos-1, 0) || die;
+ # Read half line
+ <$fh>;
+ }
+ # Read full line
+ my $linepos = tell($fh);
+ if(not defined $value{$linepos}) {
+ $_ = <$fh>;
+ if(defined $_) {
+ # Not end of file
+ my @F;
+ if(defined $group_by::col) {
+ $opt::colsep ||= "\t";
+ @F = split /$opt::colsep/, $_;
+ $_ = $F[$group_by::col];
+ }
+ eval $group_by::perlexpr;
+ }
+ $value{$linepos} = [$_,$linepos];
+ }
+ $value{$pos} = $value{$linepos};
+ }
+ return (@{$value{$pos}});
+ }
+
+ sub binary_search_end($$$) {
+ my ($s,$spos,$epos) = @_;
+ # value_at($spos) == $s
+ # value_at($epos) != $s
+ my $posdif = $epos - $spos;
+ my ($v,$vpos);
+ while($posdif) {
+ ($v,$vpos) = value_at($spos+$posdif);
+ if($v eq $s) {
+ $spos = $vpos;
+ $posdif = $epos - $spos;
+ } else {
+ $epos = $vpos;
+ }
+ $posdif = int($posdif/2);
+ }
+ return($v,$vpos);
+ }
+
+ sub binary_search_start($$$) {
+ my ($s,$spos,$epos) = @_;
+ # value_at($spos) != $s
+ # value_at($epos) == $s
+ my $posdif = $epos - $spos;
+ my ($v,$vpos);
+ while($posdif) {
+ ($v,$vpos) = value_at($spos+$posdif);
+ if($v eq $s) {
+ $epos = $vpos;
+ } else {
+ $spos = $vpos;
+ $posdif = $epos - $spos;
+ }
+ $posdif = int($posdif/2);
+ }
+ return($v,$vpos);
+ }
+
+ my ($file,$size,$block,$header,$firstlinelen) = @_;
+ my @pos;
+ $fh = open_or_exit("<",$file);
+ # Set $Global::group_by_column $Global::group_by_perlexpr
+ group_by_loop($fh,$opt::recsep);
+ if($opt::max_args) {
+ # Split after n values
+ my ($a,$apos);
+ # $xpos = linestart, $x = value at $xpos
+ $apos = $firstlinelen + length $header;
+ for(($a,$apos) = value_at($apos); $apos < $size;) {
+ push @pos, $apos;
+ ($a,$apos) = binary_search_end($a,$apos,$size);
+ if(eof($fh)) {
+ push @pos, $size; last;
+ }
+ }
+ # @pos = start of every value
+ # Merge n values
+ # -nX = keep every X'th position
+ my $i = 0;
+ @pos = grep { not ($i++ % $opt::max_args) } @pos;
+ } else {
+ # Split after any value group
+  # Preferably < $blocksize
+ my ($a,$b,$c,$apos,$bpos,$cpos);
+ # $xpos = linestart, $x = value at $xpos, $apos < $bpos < $cpos
+ $apos = $firstlinelen + length $header;
+ for(($a,$apos) = value_at($apos); $apos < $size;) {
+ push @pos, $apos;
+ $bpos = $apos + $block;
+ ($b,$bpos) = value_at($bpos);
+ if(eof($fh)) {
+ # EOF is less than 1 block away
+ push @pos, $size; last;
+ }
+ $cpos = $bpos + $block;
+ ($c,$cpos) = value_at($cpos);
+ if($a eq $b) {
+ while($b eq $c) {
+ # Move bpos, cpos a block forward until $a == $b != $c
+ $bpos = $cpos;
+ $cpos += $block;
+ ($c,$cpos) = value_at($cpos);
+ if($cpos >= $size) {
+ $cpos = $size;
+ last;
+ }
+ }
+ # $a == $b != $c
+ # Binary search for $b ending between ($bpos,$cpos)
+ ($b,$bpos) = binary_search_end($b,$bpos,$cpos);
+ } else {
+ if($b eq $c) {
+ # $a != $b == $c
+ # Binary search for $b starting between ($apos,$bpos)
+ ($b,$bpos) = binary_search_start($b,$apos,$bpos);
+ } else {
+ # $a != $b != $c
+ # Binary search for $b ending between ($bpos,$cpos)
+ ($b,$bpos) = binary_search_end($b,$bpos,$cpos);
+ }
+ }
+ ($a,$apos) = ($b,$bpos);
+ }
+ }
+ if($pos[$#pos] != $size) {
+ # Last splitpoint was not at end of the file: add it
+ push @pos, $size;
+ }
+ return @pos;
+}
+
+sub cat_partial($@) {
+ # Efficient command to copy from byte X to byte Y
+ # Input:
+ # $file = the file to read
+ # ($start, $end, [$start2, $end2, ...]) = start byte, end byte
+ # Returns:
+ # Efficient command to copy $start..$end, $start2..$end2, ... to stdout
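+  # Example (illustrative values):
+  #   cat_partial("big", 0, 10, 100, 200)
+  # returns a shell command that writes byte 0..10 (e.g. a header)
+  # followed by byte 100..200 of the file "big" to stdout.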
+ my($file, @start_end) = @_;
+ my($start, $i);
+ # Convert (start,end) to (start,len)
+ my @start_len = map {
+ if(++$i % 2) { $start = $_; } else { $_-$start }
+ } @start_end;
+ # The optimal block size differs
+ # It has been measured on:
+ # AMD 6376: n*4k-1; small n
+ # AMD Neo N36L: 44k-200k
+ # Intel i7-3632QM: 55k-
+ # ARM Cortex A53: 4k-28k
+ # Intel i5-2410M: 36k-46k
+ #
+ # I choose 2^15-1 = 32767
+ # q{
+ # expseq() {
+ # perl -E '
+ # $last = pop @ARGV;
+ # $first = shift || 1;
+ # $inc = shift || 1.03;
+ # for($i=$first; $i<=$last;$i*=$inc) { say int $i }
+ # ' "$@"
+ # }
+ #
+ # seq 111111111 > big;
+ # f() { ppar --test $1 -a big --pipepart --block -1 'md5sum > /dev/null'; }
+ # export -f f;
+ # expseq 1000 1.001 300000 | shuf | parallel -j1 --jl jl-md5sum f;
+ # };
+ my $script = spacefree
+ (0,
+ q{
+ while(@ARGV) {
+ sysseek(STDIN,shift,0) || die;
+ $left = shift;
+ while($read =
+ sysread(STDIN,$buf, $left > 32767 ? 32767 : $left)){
+ $left -= $read;
+ syswrite(STDOUT,$buf);
+ }
+ }
+ });
+ return "<". Q($file) .
+ " perl -e '$script' @start_len |";
+}
+
+sub column_perlexpr($$$) {
+ # Compute the column number (if any), perlexpression from combined
+ # string (such as --shard key, --groupby key, {=n perlexpr=}
+ # Input:
+ # $column_perlexpr = string with column and perl expression
+ # $header = header from input file (if column is column name)
+ # $colsep = column separator regexp
+ # Returns:
+ # $col = column number
+ # $perlexpr = perl expression
+ # $subref = compiled perl expression as sub reference
+ my ($column_perlexpr, $header, $colsep) = @_;
+ my ($col, $perlexpr, $subref);
+ if($column_perlexpr =~ /^[-a-z0-9_]+(\s|$)/i) {
+ # Column name/number (possibly prefix)
+ if($column_perlexpr =~ s/^(-?\d+)(\s|$)//) {
+ # Column number (possibly prefix)
+ $col = $1;
+ } elsif($column_perlexpr =~ s/^([a-z0-9_]+)(\s+|$)//i) {
+ # Column name (possibly prefix)
+ my $colname = $1;
+  # Split on --colsep pattern
+ my @headers = split /$colsep/, $header;
+ my %headers;
+ @headers{@headers} = (1..($#headers+1));
+ $col = $headers{$colname};
+ if(not defined $col) {
+  ::error(255,"Column '$colname' not found in header (colsep '$colsep')",keys %headers);
+ }
+ }
+ }
+ # What is left of $column_perlexpr is $perlexpr (possibly empty)
+ $perlexpr = $column_perlexpr;
+ $subref = eval("sub { no strict; no warnings; $perlexpr }");
+ return($col, $perlexpr, $subref);
+}
+
+sub group_by_loop($$) {
+ # Generate perl code for group-by loop
+ # Insert a $recsep when the column value changes
+ # The column value can be computed with $perlexpr
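+  # Example (illustrative): with --groupby 2 the generated loop
+  # prints $recsep whenever the value of column 2 changes from one
+  # line to the next.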
+ my($fh,$recsep) = @_;
+ my $groupby = $opt::groupby;
+ if($groupby =~ /^[a-z_][a-z_0-9]*(\s|$)/i) {
+ # Group by column name
+ # (Yes, this will also wrongly match a perlexpr like: chop)
+ my($read,$char,@line);
+ # Read a full line, but nothing more
+ # (the rest must be read by the child)
+ # $Global::header used to prepend block to each job
+ do {
+ $read = sysread($fh,$char,1);
+ push @line, $char;
+ } while($read and $char ne "\n");
+ $Global::header = join "", @line;
+ }
+ $opt::colsep ||= "\t";
+ ($group_by::col, $group_by::perlexpr, $group_by::subref) =
+ column_perlexpr($groupby, $Global::header, $opt::colsep);
+ # Numbered 0..n-1 due to being used by $F[n]
+ if($group_by::col) { $group_by::col--; }
+
+ my $loop = ::spacefree(0,q{
+ BEGIN{ $last = "RECSEP"; }
+ {
+ local $_=COLVALUE;
+ PERLEXPR;
+ if(($last) ne $_) {
+ print "RECSEP";
+ $last = $_;
+ }
+ }
+ });
+ if(defined $group_by::col) {
+ $loop =~ s/COLVALUE/\$F[$group_by::col]/g;
+ } else {
+ $loop =~ s/COLVALUE/\$_/g;
+ }
+ $loop =~ s/PERLEXPR/$group_by::perlexpr/g;
+ $loop =~ s/RECSEP/$recsep/g;
+ return $loop;
+}
+
+sub pipe_group_by_setup() {
+ # Record separator with 119 bit random value
+ $opt::recend = '';
+ $opt::recstart =
+ join "", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..20);
+ $opt::remove_rec_sep = 1;
+ my @filter;
+ push @filter, "perl";
+ if($opt::groupby =~ /^[a-z0-9_]+(\s|$)/i) {
+ # This is column number/name
+ # Use -a (auto-split)
+ push @filter, "-a";
+ $opt::colsep ||= "\t";
+ my $sep = $opt::colsep;
+ $sep =~ s/\t/\\t/g;
+ $sep =~ s/\"/\\"/g;
+ # man perlrun: -Fpattern [...] You can't use literal whitespace
+ $sep =~ s/ /\\040/g;
+ push @filter, "-F$sep";
+ }
+ push @filter, "-pe";
+ push @filter, group_by_loop(*STDIN,$opt::recstart);
+ ::debug("init", "@filter\n");
+ open(STDIN, '-|', @filter) || die ("Cannot start @filter");
+ if(which("mbuffer")) {
+ # You get a speed up of 30% by going through mbuffer
+ open(STDIN, '-|', "mbuffer", "-q","-m6M","-b5") ||
+ die ("Cannot start mbuffer");
+ }
+}
+
+sub spreadstdin() {
+ # read a record
+ # Spawn a job and print the record to it.
+ # Uses:
+ # $Global::blocksize
+ # STDIN
+ # $opt::r
+ # $Global::max_lines
+ # $Global::max_number_of_args
+ # $opt::regexp
+ # $Global::start_no_new_jobs
+ # $opt::roundrobin
+ # %Global::running
+ # Returns: N/A
+
+ my $buf = "";
+ my ($recstart,$recend) = recstartrecend();
+ my $recendrecstart = $recend.$recstart;
+ my $chunk_number = 1;
+ my $one_time_through;
+ my $two_gb = 2**31-1;
+ my $blocksize = int($Global::blocksize);
+ my $in = *STDIN;
+ my $timeout = $Global::blocktimeout;
+
+ if($opt::skip_first_line) {
+ my $newline;
+ # Read a full line one byte at a time
+ while(sysread($in,$newline,1,0)) {
+ $newline eq "\n" and last;
+ }
+ }
+ my $header = find_header(\$buf,$in);
+ my $anything_written;
+ my $eof;
+ my $garbage_read;
+
+ sub read_block() {
+ # Read a --blocksize from STDIN
+ # possibly interrupted by --blocktimeout
+ # Add up to the next full block
+ my $readsize = $blocksize - (length $buf) % $blocksize;
+ my ($nread,$alarm);
+ eval {
+ local $SIG{ALRM} = sub { die "alarm\n" }; # NB: \n required
+ # --blocktimeout (or 0 if not set)
+ alarm $timeout;
+ if($] >= 5.026) {
+ do {
+ $nread = sysread $in, $buf, $readsize, length $buf;
+ $readsize -= $nread;
+ } while($readsize and $nread);
+ } else {
+ # Less efficient reading, but 32-bit sysread compatible
+ do {
+ $nread = sysread($in,substr($buf,length $buf,0),$readsize,0);
+ $readsize -= $nread;
+ } while($readsize and $nread);
+ }
+ alarm 0;
+ };
+ if ($@) {
+ die unless $@ eq "alarm\n"; # propagate unexpected errors
+ $alarm = 1;
+ } else {
+ $alarm = 0;
+ }
+ $eof = not ($nread or $alarm);
+ }
+
+ sub pass_n_line_records() {
+ # Pass records of N lines
+ my $n_lines = $buf =~ tr/\n/\n/;
+ my $last_newline_pos = rindex64(\$buf,"\n");
+ # Go backwards until there are full n-line records
+ while($n_lines % $Global::max_lines) {
+ $n_lines--;
+ $last_newline_pos = rindex64(\$buf,"\n",$last_newline_pos-1);
+ }
+ # Chop at $last_newline_pos as that is where n-line record ends
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$last_newline_pos+1);
+ shorten(\$buf,$last_newline_pos+1);
+ }
+
+ sub pass_n_regexps() {
+ # Pass records of N regexps
+ # -N => (start..*?end){n}
+ # -L -N => (start..*?end){n*l}
+ if(not $garbage_read) {
+ $garbage_read = 1;
+ if($buf !~ /^$recstart/o) {
+ # Buf does not start with $recstart => There is garbage.
+ # Make a single record of the garbage
+ if($buf =~
+ /(?s:^)(
+ (?:(?:(?!$recend$recstart)(?s:.))*?$recend)
+ )
+ # Followed by recstart
+ (?=$recstart)/mox and length $1 > 0) {
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,length $1);
+ shorten(\$buf,length $1);
+ }
+ }
+ }
+
+ my $n_records =
+ $Global::max_number_of_args * ($Global::max_lines || 1);
+ # (?!negative lookahead) is needed to avoid backtracking
+ # See: https://unix.stackexchange.com/questions/439356/
+ # (?s:.) = (.|[\n]) but faster
+ while($buf =~
+ /(?s:^)(
+ # n more times recstart.*recend
+ (?:$recstart(?:(?!$recend$recstart)(?s:.))*?$recend){$n_records}
+ )
+ # Followed by recstart
+ (?=$recstart)/mox and length $1 > 0) {
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,length $1);
+ shorten(\$buf,length $1);
+ }
+ }
+
+ sub pass_regexp() {
+ # Find the last recend-recstart in $buf
+ $eof and return;
+ # (?s:.) = (.|[\n]) but faster
+ if($buf =~ /^((?s:.)*$recend)$recstart(?s:.)*?$/mox) {
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,length $1);
+ shorten(\$buf,length $1);
+ }
+ }
+
+ sub pass_csv_record() {
+  # Pass CSV record
+ # We define a CSV record as an even number of " + end of line
+ # This works if you use " as quoting character
+ my $last_newline_pos = length $buf;
+ # Go backwards from the last \n and search for a position
+ # where there is an even number of "
+ do {
+ # find last EOL
+ $last_newline_pos = rindex64(\$buf,"\n",$last_newline_pos-1);
+ # While uneven "
+ } while((substr($buf,0,$last_newline_pos) =~ y/"/"/)%2
+ and $last_newline_pos >= 0);
+ # Chop at $last_newline_pos as that is where CSV record ends
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$last_newline_pos+1);
+ shorten(\$buf,$last_newline_pos+1);
+ }
+
+ sub pass_n() {
+ # Pass n records of --recend/--recstart
+ # -N => (start..*?end){n}
+ my $i = 0;
+ my $read_n_lines =
+ $Global::max_number_of_args * ($Global::max_lines || 1);
+ while(($i = nindex(\$buf,$recendrecstart,$read_n_lines)) != -1
+ and
+ length $buf) {
+ $i += length $recend; # find the actual splitting location
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$i);
+ shorten(\$buf,$i);
+ }
+ }
+
+ sub pass() {
+ # Pass records of --recend/--recstart
+ # Split record at fixed string
+ # Find the last recend+recstart in $buf
+ $eof and return;
+ my $i = rindex64(\$buf,$recendrecstart);
+ if($i != -1) {
+ $i += length $recend; # find the actual splitting location
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$i);
+ shorten(\$buf,$i);
+ }
+ }
+
+ sub increase_blocksize_maybe() {
+ if(not $anything_written
+ and not $opt::blocktimeout
+ and not $Global::no_autoexpand_block) {
+ # Nothing was written - maybe the block size < record size?
+ # Increase blocksize exponentially up to 2GB-1 (2GB causes problems)
+ if($blocksize < $two_gb) {
+ my $old_blocksize = $blocksize;
+ $blocksize = ::min(ceil($blocksize * 1.3 + 1), $two_gb);
+ ::warning("A record was longer than $old_blocksize. " .
+ "Increasing to --blocksize $blocksize.");
+ }
+ }
+ }
+
+ while(1) {
+ $anything_written = 0;
+ read_block();
+ if($opt::r) {
+ # Remove empty lines
+ $buf =~ s/^\s*\n//gm;
+ if(length $buf == 0) {
+ if($eof) {
+ last;
+ } else {
+ next;
+ }
+ }
+ }
+ if($Global::max_lines and not $Global::max_number_of_args) {
+ # Pass n-line records
+ pass_n_line_records();
+ } elsif($opt::csv) {
+ # Pass a full CSV record
+ pass_csv_record();
+ } elsif($opt::regexp) {
+ # Split record at regexp
+ if($Global::max_number_of_args) {
+ pass_n_regexps();
+ } else {
+ pass_regexp();
+ }
+ } else {
+ # Pass normal --recend/--recstart record
+ if($Global::max_number_of_args) {
+ pass_n();
+ } else {
+ pass();
+ }
+ }
+ $eof and last;
+ increase_blocksize_maybe();
+ ::debug("init", "Round\n");
+ }
+ ::debug("init", "Done reading input\n");
+
+ # If there is anything left in the buffer write it
+ write_record_to_pipe($chunk_number++, \$header, \$buf, $recstart,
+ $recend, length $buf);
+
+ if($opt::retries) {
+ $Global::no_more_input = 1;
+ # We need to start no more jobs: At most we need to retry some
+ # of the already running.
+ my @running = values %Global::running;
+ # Stop any virgins.
+ for my $job (@running) {
+ if(defined $job and $job->virgin()) {
+ close $job->fh(0,"w");
+ }
+ }
+ # Wait for running jobs to be done
+ my $sleep = 1;
+ while($Global::total_running > 0) {
+ $sleep = ::reap_usleep($sleep);
+ start_more_jobs();
+ }
+ }
+ $Global::start_no_new_jobs ||= 1;
+ if($opt::roundrobin) {
+ # Flush blocks to roundrobin procs
+ my $sleep = 1;
+ while(%Global::running) {
+ my $something_written = 0;
+ for my $job (values %Global::running) {
+ if($job->block_length()) {
+ $something_written += $job->non_blocking_write();
+ } else {
+ close $job->fh(0,"w");
+ }
+ }
+ if($something_written) {
+ $sleep = $sleep/2+0.001;
+ }
+ $sleep = ::reap_usleep($sleep);
+ }
+ }
+}
+
+sub recstartrecend() {
+ # Uses:
+ # $opt::recstart
+ # $opt::recend
+ # Returns:
+ # $recstart,$recend with default values and regexp conversion
+ my($recstart,$recend);
+ if(defined($opt::recstart) and defined($opt::recend)) {
+  # If both --recstart and --recend are given then both must match
+ $recstart = $opt::recstart;
+ $recend = $opt::recend;
+ } elsif(defined($opt::recstart)) {
+ # If --recstart is given it must match start of record
+ $recstart = $opt::recstart;
+ $recend = "";
+ } elsif(defined($opt::recend)) {
+ # If --recend is given then it must match end of record
+ $recstart = "";
+ $recend = $opt::recend;
+ if($opt::regexp and $recend eq '') {
+ # --regexp --recend ''
+ $recend = '(?s:.)';
+ }
+ }
+
+ if($opt::regexp) {
+ # Do not allow /x comments - to avoid having to quote space
+ $recstart = "(?-x:".$recstart.")";
+ $recend = "(?-x:".$recend.")";
+ # If $recstart/$recend contains '|'
+ # the | should only apply to the regexp
+ $recstart = "(?:".$recstart.")";
+ $recend = "(?:".$recend.")";
+ } else {
+ # $recstart/$recend = printf strings (\n)
+ $recstart =~ s/\\([0rnt\'\"\\])/"qq|\\$1|"/gee;
+ $recend =~ s/\\([0rnt\'\"\\])/"qq|\\$1|"/gee;
+ }
+ return ($recstart,$recend);
+}
+
+sub nindex($$) {
+ # See if string is in buffer N times
+ # Returns:
+ # the position where the Nth copy is found
+ my ($buf_ref, $str, $n) = @_;
+ my $i = 0;
+ for(1..$n) {
+ $i = index64($buf_ref,$str,$i+1);
+ if($i == -1) { last }
+ }
+ return $i;
+}
+
+{
+ my @robin_queue;
+ my $sleep = 1;
+
+ sub round_robin_write($$$$$) {
+ # Input:
+ # $header_ref = ref to $header string
+ # $block_ref = ref to $block to be written
+ # $recstart = record start string
+ # $recend = record end string
+ # $endpos = end position of $block
+ # Uses:
+ # %Global::running
+ # Returns:
+ # $something_written = amount of bytes written
+ my ($header_ref,$buffer_ref,$recstart,$recend,$endpos) = @_;
+ my $written = 0;
+ my $block_passed = 0;
+ while(not $block_passed) {
+ # Continue flushing existing buffers
+ # until one is empty and a new block is passed
+ if(@robin_queue) {
+ # Rotate queue once so new blocks get a fair chance
+ # to be given to another slot
+ push @robin_queue, shift @robin_queue;
+ } else {
+ # Make a queue to spread the blocks evenly
+ push @robin_queue, (sort { $a->seq() <=> $b->seq() }
+ values %Global::running);
+ }
+ do {
+ $written = 0;
+ for my $job (@robin_queue) {
+ if($job->block_length() > 0) {
+ $written += $job->non_blocking_write();
+ } else {
+ $job->set_block($header_ref, $buffer_ref,
+ $endpos, $recstart, $recend);
+ $block_passed = 1;
+ $written += $job->non_blocking_write();
+ last;
+ }
+ }
+ if($written) {
+ $sleep = $sleep/1.5+0.001;
+ }
+ # Don't sleep if something is written
+ } while($written and not $block_passed);
+ $sleep = ::reap_usleep($sleep);
+ }
+ return $written;
+ }
+}
+
+sub index64($$$) {
+ # Do index on strings > 2GB.
+ # index in Perl < v5.22 does not work for > 2GB
+ # Input:
+ # as index except STR which must be passed as a reference
+ # Output:
+ # as index
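+  # Example: index64(\$bigstr,"needle",0) behaves like
+  # index($bigstr,"needle",0) but also works when $bigstr is
+  # larger than 2 GB on Perl < v5.22.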
+ my $ref = shift;
+ my $match = shift;
+ my $pos = shift || 0;
+ my $max2gb = 2**31-1;
+ my $strlen = length($$ref);
+ # No point in doing extra work if we don't need to.
+ if($strlen < $max2gb or $] > 5.022) {
+ return index($$ref, $match, $pos);
+ }
+
+ my $matchlen = length($match);
+ my $ret;
+ my $offset = $pos;
+ while($offset < $strlen) {
+ $ret = index(
+ substr($$ref, $offset, $max2gb),
+ $match, $pos-$offset);
+ if($ret != -1) {
+ return $ret + $offset;
+ }
+ $offset += ($max2gb - $matchlen - 1);
+ }
+ return -1;
+}
+
+sub rindex64($@) {
+ # Do rindex on strings > 2GB.
+ # rindex in Perl < v5.22 does not work for > 2GB
+ # Input:
+ # as rindex except STR which must be passed as a reference
+ # Output:
+ # as rindex
+ my $ref = shift;
+ my $match = shift;
+ my $pos = shift;
+ my $block_size = 2**31-1;
+ my $strlen = length($$ref);
+ # Default: search from end
+ $pos = defined $pos ? $pos : $strlen;
+ # No point in doing extra work if we don't need to.
+ if($strlen < $block_size or $] > 5.022) {
+ return rindex($$ref, $match, $pos);
+ }
+
+ my $matchlen = length($match);
+ my $ret;
+ my $offset = $pos - $block_size + $matchlen;
+ if($offset < 0) {
+ # The offset is less than one $block_size from the start:
+ # set $offset to 0 and shrink $block_size accordingly
+ $block_size = $block_size + $offset;
+ $offset = 0;
+ }
+ while($offset >= 0) {
+ $ret = rindex(
+ substr($$ref, $offset, $block_size),
+ $match);
+ if($ret != -1) {
+ return $ret + $offset;
+ }
+ $offset -= ($block_size - $matchlen - 1);
+ }
+ return -1;
+}
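+
+# Example: rindex64(\$big, "needle") scans the same 2GB-1 windows
+# backwards from the end (or from $pos, if given) and returns the
+# position of the last match, mirroring Perl's built-in rindex.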
+
+sub shorten($$) {
+ # Do: substr($buf,0,$i) = "";
+ # Some Perl versions do not support $i > 2GB, so do this in 2GB chunks
+ # Input:
+ # $buf_ref = \$buf
+ # $i = position to shorten to
+ # Returns: N/A
+ my ($buf_ref, $i) = @_;
+ my $two_gb = 2**31-1;
+ while($i > $two_gb) {
+ substr($$buf_ref,0,$two_gb) = "";
+ $i -= $two_gb;
+ }
+ substr($$buf_ref,0,$i) = "";
+}
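+
+# Example: shorten(\$buf, $i) is a chunked version of:
+#   substr($buf, 0, $i) = "";
+# i.e. it removes the first $i bytes of $buf in place, 2GB-1 at a time.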
+
+sub write_record_to_pipe($$$$$$) {
+ # Fork then
+ # Write record from pos 0 .. $endpos to pipe
+ # Input:
+ # $chunk_number = sequence number - to see if already run
+ # $header_ref = reference to header string to prepend
+ # $buffer_ref = reference to record to write
+ # $recstart = start string of record
+ # $recend = end string of record
+ # $endpos = position in $buffer_ref where record ends
+ # Uses:
+ # $Global::job_already_run
+ # $opt::roundrobin
+ # @Global::virgin_jobs
+ # Returns:
+ # Number of chunks written (0 or 1)
+ my ($chunk_number, $header_ref, $buffer_ref,
+ $recstart, $recend, $endpos) = @_;
+ if($endpos == 0) { return 0; }
+ if(vec($Global::job_already_run,$chunk_number,1)) { return 1; }
+ if($opt::roundrobin) {
+ # Write the block to one of the already running jobs
+ return round_robin_write($header_ref, $buffer_ref,
+ $recstart, $recend, $endpos);
+ }
+ # If no virgin job is found, back off
+ my $sleep = 0.0001; # short initial backoff - better performance on high-end systems
+ while(not @Global::virgin_jobs) {
+ ::debug("pipe", "No virgin jobs");
+ $sleep = ::reap_usleep($sleep);
+ # Jobs may not be started because of loadavg
+ # or too little time between each ssh login
+ # or retrying failed jobs.
+ start_more_jobs();
+ }
+ my $job = shift @Global::virgin_jobs;
+ $job->set_block($header_ref, $buffer_ref, $endpos, $recstart, $recend);
+ $job->write_block();
+ return 1;
+}
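+
+# Example: $Global::job_already_run is a bit vector indexed by chunk
+# number, so checking and marking a chunk are single-bit operations:
+#   vec($Global::job_already_run, $chunk_number, 1)       # 1 if run
+#   vec($Global::job_already_run, $chunk_number, 1) = 1;  # mark as run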
+
+
+sub __SEM_MODE__() {}
+
+
+sub acquire_semaphore() {
+ # Acquires semaphore. If needed: spawns to the background
+ # Uses:
+ # %Global::host
+ # Returns:
+ # The semaphore to be released when the job is complete
+ $Global::host{':'} = SSHLogin->new(":");
+ my $sem = Semaphore->new($Semaphore::name,
+ $Global::host{':'}->max_jobs_running());
+ $sem->acquire();
+ if($Semaphore::fg) {
+ # skip
+ } else {
+ if(fork()) {
+ exit(0);
+ } else {
+ # If run in the background, the PID will change
+ $sem->pid_change();
+ }
+ }
+ return $sem;
+}
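+
+# Example (shell): this code path is used when GNU Parallel is invoked
+# as 'sem', e.g.:
+#   sem --id mylock -j1 long_running_command
+#   sem --id mylock --wait
+# The first call acquires the semaphore and backgrounds itself unless
+# --fg is given; --wait blocks until all jobs under that name are done.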
+
+
+sub __PARSE_OPTIONS__() {}
+
+sub shell_completion() {
+ if($opt::shellcompletion eq "zsh") {
+ # if shell == zsh
+ zsh_completion();
+ } elsif($opt::shellcompletion eq "bash") {
+ # if shell == bash
+ bash_completion();
+ } elsif($opt::shellcompletion eq "auto") {
+ if($Global::shell =~ m:/zsh$|^zsh$:) {
+ # if shell == zsh
+ zsh_completion();
+ } elsif($Global::shell =~ m:/bash$|^bash$:) {
+ # if shell == bash
+ bash_completion();
+ } else {
+ ::error(255,"--shellcompletion is not implemented for ".
+ "'$Global::shell'.");
+ }
+ } else {
+ ::error(255,"--shellcompletion is not implemented for ".
+ "'$opt::shellcompletion'.");
+ }
+}
+
+sub bash_completion() {
+ # Print:
+ # complete -F _comp_parallel parallel;
+ # _comp_parallel() {
+ # COMPREPLY=($(compgen -W "--options" --
+ # "${COMP_WORDS[$COMP_CWORD]}"));
+ # };
+ my @bash_completion =
+ ("complete -F _comp_parallel parallel;",
+ '_comp_parallel() { COMPREPLY=($(compgen -W "');
+ my @och = options_completion_hash();
+ while(@och) {
+ $_ = shift @och;
+ # Split input like:
+ # "joblog|jl=s[Logfile for executed jobs]:logfile:_files"
+ if(/^(.*?)(\[.*?])?(:[^:]*)?(:.*)?$/) {
+ my $opt = $1;
+ my $desc = $2;
+ my $argdesc = $3;
+ my $func = $4;
+ # opt=s => opt
+ $opt =~ s/[:=].$//;
+ if($opt =~ /^_/) {
+ # internal options start with --_
+ # skip
+ } else {
+ push @bash_completion,
+ (map { (length $_ == 1) ? "-$_ " : "--$_ " }
+ split /\|/, $opt);
+ }
+ }
+ shift @och;
+ }
+ push @bash_completion,'" -- "${COMP_WORDS[$COMP_CWORD]}")); };'."\n";
+ print @bash_completion;
+}
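+
+# Example (shell): the generated completion code is valid bash and can
+# be activated in the current shell with:
+#   eval "$(parallel --shell-completion bash)"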
+
+sub zsh_completion() {
+ # Print code used for completion in zsh
+ my @zsh_completion =
+ ("compdef _comp_parallel parallel; ",
+ "setopt localoptions extended_glob; ",
+ "_comp_parallel() { ",
+ "_arguments ");
+ my @och = options_completion_hash();
+ while(@och) {
+ $_ = shift @och;
+ # Split input like:
+ # "joblog|jl=s[Logfile for executed jobs]:logfile:_files"
+ if(/^(.*?)(\[.*?])?(:[^:]*)?(:.*)?$/) {
+ my $opt = $1;
+ my $desc = $2;
+ my $argdesc = $3;
+ my $func = $4;
+ # opt=s => opt
+ $opt =~ s/[:=].$//;
+ if($opt =~ /^_/) {
+ # internal options start with --_
+ # skip
+ } else {
+ # {-o,--option}
+ my $zsh_opt = join(",",
+ (map { (length $_ == 1) ? "-$_" : "--$_" }
+ split /\|/, $opt));
+ if($zsh_opt =~ /,/) { $zsh_opt = "{$zsh_opt}"; }
+ $desc =~ s/'/'"'"'/g;
+ $argdesc =~ s/'/'"'"'/g;
+ $func =~ s/'/'"'"'/g;
+ push @zsh_completion, $zsh_opt."'".$desc.$argdesc.$func."' ";
+ }
+ }
+ shift @och;
+ }
+ push @zsh_completion,
+ q{'(-)1:command:{_command_names -e}' },
+ q{'*::arguments:_normal'},
+ "};\n";
+ print @zsh_completion;
+}
+
+sub options_hash() {
+ # Returns:
+ # %hash = for GetOptions
+ my %och = options_completion_hash();
+ my %oh;
+ my ($k,$v);
+ while(($k,$v) = each %och) {
+ # Remove description
+ $k =~ s/\[.*//;
+ $oh{$k} = $v;
+ }
+ return %oh;
+}
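+
+# Example: the completion entry
+#   "joblog|jl=s[Logfile for executed jobs]:logfile:_files"
+# becomes the plain Getopt::Long spec
+#   "joblog|jl=s"
+# because everything from the first '[' is stripped off the key.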
+
+sub options_completion_hash() {
+ # Returns:
+ # %hash = for GetOptions and shell completion
+ return
+ ("debug|D=s" => \$opt::D,
+ "xargs[Insert as many arguments as the command line length permits]"
+ => \$opt::xargs,
+ "m[Multiple arguments]" => \$opt::m,
+ ("X[Insert as many arguments with context as the command line ".
+ "length permits]"
+ => \$opt::X),
+ "v[Verbose]" => \@opt::v,
+ "sql=s[Use --sql-master instead (obsolete)]:DBURL" => \$opt::retired,
+ ("sql-master|sqlmaster=s".
+ "[Submit jobs via SQL server. DBURL must point to a table, which ".
+ "will contain --joblog, the values, and output]:DBURL"
+ => \$opt::sqlmaster),
+ ("sql-worker|sqlworker=s".
+ "[Execute jobs via SQL server. Read the input sources variables ".
+ "from the table pointed to by DBURL.]:DBURL"
+ => \$opt::sqlworker),
+ ("sql-and-worker|sqlandworker=s".
+ "[--sql-master DBURL --sql-worker DBURL]:DBURL"
+ => \$opt::sqlandworker),
+ ("joblog|jl=s[Logfile for executed jobs]:logfile:_files"
+ => \$opt::joblog),
+ ("results|result|res=s[Save the output into files]:name:_files"
+ => \$opt::results),
+ "resume[Resumes from the last unfinished job]" => \$opt::resume,
+ ("resume-failed|resumefailed".
+ "[Retry all failed and resume from the last unfinished job]"
+ => \$opt::resume_failed),
+ ("retry-failed|retryfailed[Retry all failed jobs in joblog]"
+ => \$opt::retry_failed),
+ "silent[Silent]" => \$opt::silent,
+ ("keep-order|keeporder|k".
+ "[Keep sequence of output same as the order of input]"
+ => \$opt::keeporder),
+ ("no-keep-order|nokeeporder|nok|no-k".
+ "[Overrides an earlier --keep-order (e.g. if set in ".
+ "~/.parallel/config)]"
+ => \$opt::nokeeporder),
+ "group[Group output]" => \$opt::group,
+ "g" => \$opt::retired,
+ ("ungroup|u".
+ "[Output is printed as soon as possible and bypasses GNU parallel ".
+ "internal processing]"
+ => \$opt::ungroup),
+ ("latest-line|latestline|ll".
+ "[Print latest line of each job]"
+ => \$opt::latestline),
+ ("line-buffer|line-buffered|linebuffer|linebuffered|lb".
+ "[Buffer output on line basis]"
+ => \$opt::linebuffer),
+ ("tmux".
+ "[Use tmux for output. Start a tmux session and run each job in a ".
+ "window in that session. No other output will be produced]"
+ => \$opt::tmux),
+ ("tmux-pane|tmuxpane".
+ "[Use tmux for output but put output into panes in the first ".
+ "window. Useful if you want to monitor the progress of less than ".
+ "100 concurrent jobs]"
+ => \$opt::tmuxpane),
+ "null|0[Use NUL as delimiter]" => \$opt::null,
+ "quote|q[Quote command]" => \$opt::quote,
+ # Replacement strings
+ ("parens=s[Use parensstring instead of {==}]:parensstring"
+ => \$opt::parens),
+ ('rpl=s[Define replacement string]:"tag perl expression"'
+ => \@opt::rpl),
+ "plus[Add more replacement strings]" => \$opt::plus,
+ ("I=s".
+ "[Use the replacement string replace-str instead of {}]:replace-str"
+ => \$opt::I),
+ ("extensionreplace|er=s".
+ "[Use the replacement string replace-str instead of {.} for input ".
+ "line without extension]:replace-str"
+ => \$opt::U),
+ "U=s" => \$opt::retired,
+ ("basenamereplace|bnr=s".
+ "[Use the replacement string replace-str instead of {/} for ".
+ "basename of input line]:replace-str"
+ => \$opt::basenamereplace),
+ ("dirnamereplace|dnr=s".
+ "[Use the replacement string replace-str instead of {//} for ".
+ "dirname of input line]:replace-str"
+ => \$opt::dirnamereplace),
+ ("basenameextensionreplace|bner=s".
+ "[Use the replacement string replace-str instead of {/.} for ".
+ "basename of input line without extension]:replace-str"
+ => \$opt::basenameextensionreplace),
+ ("seqreplace=s".
+ "[Use the replacement string replace-str instead of {#} for job ".
+ "sequence number]:replace-str"
+ => \$opt::seqreplace),
+ ("slotreplace=s".
+ "[Use the replacement string replace-str instead of {%} for job ".
+ "slot number]:replace-str"
+ => \$opt::slotreplace),
+ ("delay=s".
+ "[Delay starting next job by duration]:duration" => \$opt::delay),
+ ("ssh-delay|sshdelay=f".
+ "[Delay starting next ssh by duration]:duration"
+ => \$opt::sshdelay),
+ ("load=s".
+ "[Only start jobs if load is less than max-load]:max-load"
+ => \$opt::load),
+ "noswap[Do not start job is computer is swapping]" => \$opt::noswap,
+ ("max-line-length-allowed|maxlinelengthallowed".
+ "[Print maximal command line length]"
+ => \$opt::max_line_length_allowed),
+ ("number-of-cpus|numberofcpus".
+ "[Print the number of physical CPU cores and exit (obsolete)]"
+ => \$opt::number_of_cpus),
+ ("number-of-sockets|numberofsockets".
+ "[Print the number of CPU sockets and exit]"
+ => \$opt::number_of_sockets),
+ ("number-of-cores|numberofcores".
+ "[Print the number of physical CPU cores and exit]"
+ => \$opt::number_of_cores),
+ ("number-of-threads|numberofthreads".
+ "[Print the number of hyperthreaded CPU cores and exit]"
+ => \$opt::number_of_threads),
+ ("use-sockets-instead-of-threads|usesocketsinsteadofthreads".
+ "[Determine how GNU Parallel counts the number of CPUs]"
+ => \$opt::use_sockets_instead_of_threads),
+ ("use-cores-instead-of-threads|usecoresinsteadofthreads".
+ "[Determine how GNU Parallel counts the number of CPUs]"
+ => \$opt::use_cores_instead_of_threads),
+ ("use-cpus-instead-of-cores|usecpusinsteadofcores".
+ "[Determine how GNU Parallel counts the number of CPUs]"
+ => \$opt::use_cpus_instead_of_cores),
+ ("shell-quote|shellquote|shell_quote".
+ "[Does not run the command but quotes it. Useful for making ".
+ "quoted composed commands for GNU parallel]"
+ => \@opt::shellquote),
+ ('nice=i[Run the command at this niceness]:niceness:($(seq -20 19))'
+ => \$opt::nice),
+ "tag[Tag lines with arguments]" => \$opt::tag,
+ ("tag-string|tagstring=s".
+ "[Tag lines with a string]:str" => \$opt::tagstring),
+ "ctag[Color tag]:str" => \$opt::ctag,
+ "ctag-string|ctagstring=s[Colour tagstring]:str" => \$opt::ctagstring,
+ "color|colour[Colourize output]" => \$opt::color,
+ ("color-failed|colour-failed|colorfailed|colourfailed|".
+ "color-fail|colour-fail|colorfail|colourfail|cf".
+ "[Colour failed jobs red]"
+ => \$opt::colorfailed),
+ ("onall[Run all the jobs on all computers given with --sshlogin]"
+ => \$opt::onall),
+ "nonall[--onall with no arguments]" => \$opt::nonall,
+ ("filter-hosts|filterhosts|filter-host[Remove down hosts]"
+ => \$opt::filter_hosts),
+ ('sshlogin|S=s'.
+ '[Distribute jobs to remote computers]'.
+ ':[@hostgroups/][ncpus/]sshlogin'.
+ '[,[@hostgroups/][ncpus/]sshlogin[,...]] or @hostgroup'.
+ ':_users') => \@opt::sshlogin,
+ ("sshloginfile|slf=s".
+ "[File with sshlogins on separate lines. Lines starting with '#' ".
+ "are ignored.]:filename:_files"
+ => \@opt::sshloginfile),
+ ("controlmaster|M".
+ "[Use ssh's ControlMaster to make ssh connections faster]"
+ => \$opt::controlmaster),
+ ("ssh=s".
+ "[Use this command instead of ssh for remote access]:sshcommand"
+ => \$opt::ssh),
+ ("transfer-file|transferfile|transfer-files|transferfiles|tf=s".
+ "[Transfer filename to remote computers]:filename:_files"
+ => \@opt::transfer_files),
+ ("return=s[Transfer files from remote computers]:filename:_files"
+ => \@opt::return),
+ ("trc=s[--transfer --return filename --cleanup]:filename:_files"
+ => \@opt::trc),
+ "transfer[Transfer files to remote computers]" => \$opt::transfer,
+ "cleanup[Remove transferred files]" => \$opt::cleanup,
+ ("basefile|bf=s".
+ "[Transfer file to each sshlogin before first job is started]".
+ ":file:_files"
+ => \@opt::basefile),
+ ("template|tmpl=s".
+ "[Replace replacement strings in file and save it in repl]".
+ ":file=repl:_files"
+ => \%opt::template),
+ "B=s" => \$opt::retired,
+ "ctrl-c|ctrlc" => \$opt::retired,
+ "no-ctrl-c|no-ctrlc|noctrlc" => \$opt::retired,
+ ("work-dir|workdir|wd=s".
+ "[Jobs will be run in the dir mydir. (default: the current dir ".
+ "for the local machine, the login dir for remote computers)]".
+ ":mydir:_cd"
+ => \$opt::workdir),
+ "W=s" => \$opt::retired,
+ ("rsync-opts|rsyncopts=s[Options to pass on to rsync]:options"
+ => \$opt::rsync_opts),
+ ("tmpdir|tempdir=s[Directory for temporary files]:dirname:_cd"
+ => \$opt::tmpdir),
+ ("use-compress-program|compress-program|".
+ "usecompressprogram|compressprogram=s".
+ "[Use prg for compressing temporary files]:prg:_commands"
+ => \$opt::compress_program),
+ ("use-decompress-program|decompress-program|".
+ "usedecompressprogram|decompressprogram=s".
+ "[Use prg for decompressing temporary files]:prg:_commands"
+ => \$opt::decompress_program),
+ "compress[Compress temporary files]" => \$opt::compress,
+ "open-tty|o[Open terminal tty]" => \$opt::open_tty,
+ "tty[Open terminal tty]" => \$opt::tty,
+ "T" => \$opt::retired,
+ "H=i" => \$opt::retired,
+ ("dry-run|dryrun|dr".
+ "[Print the job to run on stdout (standard output), but do not ".
+ "run the job]"
+ => \$opt::dryrun),
+ "progress[Show progress of computations]" => \$opt::progress,
+ ("eta[Show the estimated number of seconds before finishing]"
+ => \$opt::eta),
+ "bar[Show progress as a progress bar]" => \$opt::bar,
+ ("total-jobs|totaljobs|total=s".
+ "[Set total number of jobs]" => \$opt::totaljobs),
+ "shuf[Shuffle jobs]" => \$opt::shuf,
+ ("arg-sep|argsep=s".
+ "[Use sep-str instead of ::: as separator string]:sep-str"
+ => \$opt::arg_sep),
+ ("arg-file-sep|argfilesep=s".
+ "[Use sep-str instead of :::: as separator string ".
+ "between command and argument files]:sep-str"
+ => \$opt::arg_file_sep),
+ ('trim=s[Trim white space in input]:trim_method:'.
+ '((n\:"No trim" l\:"Left\ trim" r\:"Right trim" '.
+ 'lr\:"Both trim" rl\:"Both trim"))'
+ => \$opt::trim),
+ "env=s[Copy environment variable var]:var:_vars" => \@opt::env,
+ "recordenv|record-env[Record environment]" => \$opt::record_env,
+ ('session'.
+ '[Record names in current environment in $PARALLEL_IGNORED_NAMES '.
+ 'and exit. Only used with env_parallel. '.
+ 'Aliases, functions, and variables with names in '.
+ '$PARALLEL_IGNORED_NAMES will be ignored]'
+ => \$opt::session),
+ ('plain[Ignore --profile, $PARALLEL, and ~/.parallel/config]'
+ => \$opt::plain),
+ ("profile|J=s".
+ "[Use profile profilename for options]:profilename:_files"
+ => \@opt::profile),
+ "tollef" => \$opt::tollef,
+ "gnu[Behave like GNU parallel]" => \$opt::gnu,
+ "link|xapply[Link input sources]" => \$opt::link,
+ "linkinputsource|xapplyinputsource=i" => \@opt::linkinputsource,
+ # Before changing these lines, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be put in a public hall-of-shame by removing
+ # these lines
+ ("bibtex|citation".
+ "[Print the citation notice and BibTeX entry for GNU parallel, ".
+ "silence citation notice for all future runs, and exit. ".
+ "It will not run any commands]"
+ => \$opt::citation),
+ "will-cite|willcite|nn|nonotice|no-notice" => \$opt::willcite,
+ # Termination and retries
+ ('halt-on-error|haltonerror|halt=s'.
+ '[When should GNU parallel terminate]'.
+ ':when:((now\:"kill all running jobs and halt immediately" '.
+ 'soon\:"wait for all running jobs to complete, start no new jobs"))'
+ => \$opt::halt),
+ 'limit=s[Dynamic job limit]:"command args"' => \$opt::limit,
+ ("memfree=s".
+ "[Minimum memory free when starting another job]:size"
+ => \$opt::memfree),
+ ("memsuspend=s".
+ "[Suspend jobs when there is less memory available]:size"
+ => \$opt::memsuspend),
+ "retries=s[Try failing jobs n times]:n" => \$opt::retries,
+ ("timeout=s".
+ "[Time out for command. If the command runs for longer than ".
+ "duration seconds it will get killed as per --term-seq]:duration"
+ => \$opt::timeout),
+ ("term-seq|termseq=s".
+ "[Termination sequence]:sequence" => \$opt::termseq),
+ # xargs-compatibility - implemented, man, testsuite
+ ("max-procs|maxprocs|P|jobs|j=s".
+ "[Add N to/Subtract N from/Multiply N% with/ the number of CPU ".
+ "threads or read parameter from file]:+N/-N/N%/N/procfile:_files"
+ => \$opt::jobs),
+ ("delimiter|d=s[Input items are terminated by delim]:delim"
+ => \$opt::d),
+ ("max-chars|maxchars|s=s[Limit length of command]:max-chars"
+ => \$opt::max_chars),
+ ("arg-file|argfile|a=s".
+ "[Use input-file as input source]:input-file:_files" => \@opt::a),
+ "no-run-if-empty|norunifempty|r[Do not run empty input]" => \$opt::r,
+ ("replace|i:s".
+ "[This option is deprecated; use -I instead]:replace-str"
+ => \$opt::i),
+ "E=s" => \$opt::eof,
+ ("eof|e:s[Set the end of file string to eof-str]:eof-str"
+ => \$opt::eof),
+ ("process-slot-var|processslotvar=s".
+ "[Set this variable to job slot number]:varname"
+ => \$opt::process_slot_var),
+ ("max-args|maxargs|n=s".
+ "[Use at most max-args arguments per command line]:max-args"
+ => \$opt::max_args),
+ ("max-replace-args|maxreplaceargs|N=s".
+ "[Use at most max-args arguments per command line]:max-args"
+ => \$opt::max_replace_args),
+ "col-sep|colsep|C=s[Column separator]:regexp" => \$opt::colsep,
+ "csv[Treat input as CSV-format]"=> \$opt::csv,
+ ("help|h[Print a summary of the options to GNU parallel and exit]"
+ => \$opt::help),
+ ("L=s[When used with --pipe: Read records of recsize]:recsize"
+ => \$opt::L),
+ ("max-lines|maxlines|l:f".
+ "[When used with --pipe: Read records of recsize lines]:recsize"
+ => \$opt::max_lines),
+ "interactive|p[Ask user before running a job]" => \$opt::interactive,
+ ("verbose|t[Print the job to be run on stderr (standard error)]"
+ => \$opt::verbose),
+ ("version|V[Print the version GNU parallel and exit]"
+ => \$opt::version),
+ ('min-version|minversion=i'.
+ '[Print the version of GNU parallel; exit 0 if >= version, else 255]'.
+ ':version:($(parallel --minversion 0))'
+ => \$opt::minversion),
+ ("show-limits|showlimits".
+ "[Display limits given by the operating system]"
+ => \$opt::show_limits),
+ ("exit|x[Exit if the size (see the -s option) is exceeded]"
+ => \$opt::x),
+ # Semaphore
+ "semaphore[Work as a counting semaphore]" => \$opt::semaphore,
+ ("semaphore-timeout|semaphoretimeout|st=s".
+ "[If secs > 0: If the semaphore is not released within secs ".
+ "seconds, take it anyway]:secs"
+ => \$opt::semaphoretimeout),
+ ("semaphore-name|semaphorename|id=s".
+ "[Use name as the name of the semaphore]:name"
+ => \$opt::semaphorename),
+ "fg[Run command in foreground]" => \$opt::fg,
+ "bg[Run command in background]" => \$opt::bg,
+ "wait[Wait for all commands to complete]" => \$opt::wait,
+ # Shebang #!/usr/bin/parallel --shebang
+ ("shebang|hashbang".
+ "[GNU parallel can be called as a shebang (#!) command as the ".
+ "first line of a script. The content of the file will be treated ".
+ "as inputsource]"
+ => \$opt::shebang),
+ ("_pipe-means-argfiles[internal]"
+ => \$opt::_pipe_means_argfiles),
+ "Y" => \$opt::retired,
+ ("skip-first-line|skipfirstline".
+ "[Do not use the first line of input]"
+ => \$opt::skip_first_line),
+ "_bug" => \$opt::_bug,
+ "_unsafe" => \$opt::_unsafe,
+ # --pipe
+ ("pipe|spreadstdin".
+ "[Spread input to jobs on stdin (standard input)]" => \$opt::pipe),
+ ("round-robin|roundrobin|round".
+ "[Distribute chunks of standard input in a round robin fashion]"
+ => \$opt::roundrobin),
+ "recstart=s" => \$opt::recstart,
+ ("recend=s".
+ "[Split record between endstring and startstring]:endstring"
+ => \$opt::recend),
+ ("regexp|regex".
+ "[Interpret --recstart and --recend as regular expressions]"
+ => \$opt::regexp),
+ ("remove-rec-sep|removerecsep|rrs".
+ "[Remove record separator]" => \$opt::remove_rec_sep),
+ ("output-as-files|outputasfiles|files[Save output to files]"
+ => \$opt::files),
+ ("output-as-files0|outputasfiles0|files0".
+ "[Save output to files separated by NUL]"
+ => \$opt::files0),
+ ("block-size|blocksize|block=s".
+ "[Size of block in bytes to read at a time]:size"
+ => \$opt::blocksize),
+ ("block-timeout|blocktimeout|bt=s".
+ "[Timeout for reading block when using --pipe]:duration"
+ => \$opt::blocktimeout),
+ "header=s[Use regexp as header]:regexp" => \$opt::header,
+ "cat[Create a temporary file with content]" => \$opt::cat,
+ "fifo[Create a temporary fifo with content]" => \$opt::fifo,
+ ("pipe-part|pipepart[Pipe parts of a physical file]"
+ => \$opt::pipepart),
+ "tee[Pipe all data to all jobs]" => \$opt::tee,
+ ("shard=s".
+ "[Use shardexpr as shard key and shard input to the jobs]:shardexpr"
+ => \$opt::shard),
+ ("bin=s".
+ "[Use binexpr as binning key and bin input to the jobs]:binexpr"
+ => \$opt::bin),
+ "group-by|groupby=s[Group input by value]:val" => \$opt::groupby,
+ #
+ ("hgrp|hostgrp|hostgroup|hostgroups[Enable hostgroups on arguments]"
+ => \$opt::hostgroups),
+ "embed[Embed GNU parallel in a shell script]" => \$opt::embed,
+ ("filter=s[Only run jobs where filter is true]:filter"
+ => \@opt::filter),
+ "combineexec|combine-exec|combineexecutable|combine-executable=s".
+ "[Embed GNU parallel in a shell script]" => \$opt::combineexec,
+ ("filter=s[Only run jobs where filter is true]:filter"
+ => \@opt::filter),
+ "_parset=s[Generate shell code for parset]" => \$opt::_parset,
+ ("shell-completion|shellcompletion=s".
+ "[Generate shell code for shell completion]:shell:(bash zsh)"
+ => \$opt::shellcompletion),
+ # Parameter for testing optimal values
+ "_test=s" => \$opt::_test,
+ );
+}
+
+sub get_options_from_array($@) {
+ # Run GetOptions on @array
+ # Input:
+ # $array_ref = ref to @ARGV to parse
+ # @keep_only = Keep only these options (e.g. --profile)
+ # Uses:
+ # @ARGV
+ # Returns:
+ # true if parsing worked
+ # false if parsing failed
+ # @$array_ref is changed
+ my ($array_ref, @keep_only) = @_;
+ if(not @$array_ref) {
+ # Empty array: No need to look more at that
+ return 1;
+ }
+ # A bit of shuffling of @ARGV is needed, as GetOptionsFromArray is
+ # not supported everywhere
+ my @save_argv;
+ my $this_is_ARGV = (\@::ARGV == $array_ref);
+ if(not $this_is_ARGV) {
+ @save_argv = @::ARGV;
+ @::ARGV = @{$array_ref};
+ }
+ # If @keep_only set: Ignore all values except @keep_only
+ my %options = options_hash();
+ if(@keep_only) {
+ my (%keep,@dummy);
+ @keep{@keep_only} = @keep_only;
+ for my $k (grep { not $keep{$_} } keys %options) {
+ # Store the value of the option in @dummy
+ $options{$k} = \@dummy;
+ }
+ }
+ my $retval = GetOptions(%options);
+ if(not $this_is_ARGV) {
+ @{$array_ref} = @::ARGV;
+ @::ARGV = @save_argv;
+ }
+ return $retval;
+}
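+
+# Example (sketch): parse only --profile/-J out of an array; the values
+# of all other options are thrown away into a dummy array:
+#   my @argv = ("--profile", "mine", "--dry-run", "echo");
+#   get_options_from_array(\@argv, "profile|J=s") or die;
+# The entries in @keep_only must match the options_hash() keys exactly.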
+
+sub parse_parset() {
+ $Global::progname = "parset";
+ @Global::parset_vars = split /[ ,]/, $opt::_parset;
+ my $var_or_assoc = shift @Global::parset_vars;
+ # Legal names: var _v2ar arrayentry[2]
+ my @illegal = (grep { not /^[a-zA-Z_][a-zA-Z_0-9]*(\[\d+\])?$/ }
+ @Global::parset_vars);
+ if(@illegal) {
+ ::error
+ (255,"@illegal is an invalid variable name.",
+ "Variable names must be letter followed by letters or digits.",
+ "Usage:",
+ " parset varname GNU Parallel options and command");
+ }
+ if($var_or_assoc eq "assoc") {
+ my $var = shift @Global::parset_vars;
+ print "$var=(";
+ $Global::parset = "assoc";
+ $Global::parset_endstring=")\n";
+ } elsif($var_or_assoc eq "var") {
+ if($#Global::parset_vars > 0) {
+ $Global::parset = "var";
+ } else {
+ my $var = shift @Global::parset_vars;
+ print "$var=(";
+ $Global::parset = "array";
+ $Global::parset_endstring=")\n";
+ }
+ } else {
+ ::die_bug("parset: unknown '$opt::_parset'");
+ }
+}
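+
+# Example: 'parset myarr ...' invokes parallel with '--_parset var,myarr';
+# since only one name follows 'var', this prints 'myarr=(' up front and
+# ')' at the end, so the output can be eval'ed by the parset wrapper as
+# a shell array assignment.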
+
+sub parse_options(@) {
+ # Returns: N/A
+ init_globals();
+ my @argv_before = @ARGV;
+ @ARGV = read_options();
+ # Before changing these lines, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be added to a public hall-of-shame by removing the lines
+ if(defined $opt::citation) {
+ citation(\@argv_before,\@ARGV);
+ wait_and_exit(0);
+ }
+ # no-* overrides *
+ if($opt::nokeeporder) { $opt::keeporder = undef; }
+
+ if(@opt::v) { $Global::verbose = $#opt::v+1; } # Convert -v -v to v=2
+ if($opt::_bug) { ::die_bug("test-bug"); }
+ $Global::debug = $opt::D;
+ #
+ ## Shell
+ #
+ $Global::shell = $ENV{'PARALLEL_SHELL'} || parent_shell($$)
+ || $ENV{'SHELL'} || "/bin/sh";
+ if(not -x $Global::shell and not which($Global::shell)) {
+ ::error(255,"Shell '$Global::shell' not found.");
+ }
+ ::debug("init","Global::shell $Global::shell\n");
+ $Global::cshell = $Global::shell =~ m:(/[-a-z]*)?csh:;
+ $Global::fish = $Global::shell =~ m:(/[-a-z]*)?fish:;
+ if(defined $opt::_parset) { parse_parset(); }
+ if(defined $opt::X) { $Global::ContextReplace = 1; }
+ if(defined $opt::silent) { $Global::verbose = 0; }
+ if(defined $opt::null) { $/ = "\0"; }
+ if(defined $opt::files) { $Global::files = 1; $Global::files_sep = "\n"; }
+ if(defined $opt::files0) { $Global::files = 1; $Global::files_sep = "\0"; }
+ if(defined $opt::d) { $/ = unquote_printf($opt::d) }
+ parse_replacement_string_options();
+ $opt::tag ||= $opt::ctag;
+ $opt::tagstring ||= $opt::ctagstring;
+ if(defined $opt::ctag or defined $opt::ctagstring
+ or defined $opt::color) {
+ $Global::color = 1;
+ }
+ if($opt::linebuffer or $opt::latestline) {
+ $Global::linebuffer = 1;
+ Job::latestline_init();
+ }
+ if(defined $opt::tag and not defined $opt::tagstring) {
+ # Default = {}
+ $opt::tagstring = $Global::parensleft.$Global::parensright;
+ }
+ if(defined $opt::tagstring) {
+ $opt::tagstring = unquote_printf($opt::tagstring);
+ if($opt::tagstring =~
+ /\Q$Global::parensleft\E.*\S+.*\Q$Global::parensright\E/
+ and
+ $Global::linebuffer) {
+ # --tagstring contains {= ... =} and --linebuffer =>
+ # recompute replacement string for each use (do not cache)
+ $Global::cache_replacement_eval = 0;
+ }
+ }
+ if(defined $opt::interactive) { $Global::interactive = $opt::interactive; }
+ if(defined $opt::quote) { $Global::quoting = 1; }
+ if(defined $opt::r) { $Global::ignore_empty = 1; }
+ if(defined $opt::verbose) { $Global::stderr_verbose = 1; }
+ if(defined $opt::eof) { $Global::end_of_file_string = $opt::eof; }
+ if(defined $opt::max_args) {
+ $opt::max_args = multiply_binary_prefix($opt::max_args);
+ $Global::max_number_of_args = $opt::max_args;
+ if($opt::pipepart and $opt::groupby) { $Global::max_number_of_args = 1; }
+ }
+ if(defined $opt::blocktimeout) {
+ $Global::blocktimeout = int(multiply_time_units($opt::blocktimeout));
+ if($Global::blocktimeout < 1) {
+ ::error(255,"--block-timeout must be at least 1");
+ }
+ }
+ if(defined $opt::timeout) {
+ $Global::timeoutq = TimeoutQueue->new($opt::timeout);
+ }
+ if(defined $opt::tmpdir) { $ENV{'TMPDIR'} = $opt::tmpdir; }
+ $ENV{'PARALLEL_RSYNC_OPTS'} = $opt::rsync_opts ||
+ $ENV{'PARALLEL_RSYNC_OPTS'} || '-rlDzR';
+ # Default: Same nice level as GNU Parallel is started at
+ $opt::nice ||= eval { getpriority(0,0) } || 0;
+ if(defined $opt::help) { usage(); exit(0); }
+ if(defined $opt::shellcompletion) { shell_completion(); exit(0); }
+ if(defined $opt::embed) { embed(); exit(0); }
+ if(defined $opt::sqlandworker) {
+ $opt::sqlmaster = $opt::sqlworker = $opt::sqlandworker;
+ }
+ if(defined $opt::tmuxpane) { $opt::tmux = $opt::tmuxpane; }
+ if(defined $opt::colsep) { $Global::trim = 'lr'; }
+ if(defined $opt::csv) {
+ if(not $Global::use{"Text::CSV"} ||= eval "use Text::CSV; 1;") {
+ ::error(255,"The perl module Text::CSV is not installed.",
+ "Try installing libtext-csv-perl or perl-Text-CSV.");
+ }
+ $opt::colsep = defined $opt::colsep ? $opt::colsep : ",";
+ my $csv_setting = { binary => 1, sep_char => $opt::colsep };
+ my $sep = $csv_setting->{sep_char};
+ $Global::csv = Text::CSV->new($csv_setting)
+ or die "Cannot use CSV: ".Text::CSV->error_diag ();
+ }
+ if(defined $opt::header) {
+ $opt::colsep = defined $opt::colsep ? $opt::colsep : "\t";
+ }
+ if(defined $opt::trim) { $Global::trim = $opt::trim; }
+ if(defined $opt::arg_sep) { $Global::arg_sep = $opt::arg_sep; }
+ if(defined $opt::arg_file_sep) {
+ $Global::arg_file_sep = $opt::arg_file_sep;
+ }
+ if(not defined $opt::process_slot_var) {
+ $opt::process_slot_var = 'PARALLEL_JOBSLOT0';
+ }
+ if(defined $opt::number_of_sockets) {
+ print SSHLogin::no_of_sockets(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::number_of_cpus) {
+ print SSHLogin::no_of_cores(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::number_of_cores) {
+ print SSHLogin::no_of_cores(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::number_of_threads) {
+ print SSHLogin::no_of_threads(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::max_line_length_allowed) {
+ print Limits::Command::real_max_length(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::max_chars) {
+ $opt::max_chars = multiply_binary_prefix($opt::max_chars);
+ }
+ if(defined $opt::version) { version(); wait_and_exit(0); }
+ if(defined $opt::record_env) { record_env(); wait_and_exit(0); }
+ if(@opt::sshlogin) { @Global::sshlogin = @opt::sshlogin; }
+ if(@opt::sshloginfile) { read_sshloginfiles(@opt::sshloginfile); }
+ if(@opt::return) { push @Global::ret_files, @opt::return; }
+ if($opt::transfer) {
+ push @Global::transfer_files, $opt::i || $opt::I || "{}";
+ }
+ push @Global::transfer_files, @opt::transfer_files;
+ if(%opt::template) {
+ while (my ($source, $template_name) = each %opt::template) {
+ push @Global::template_names, $template_name;
+ push @Global::template_contents, slurp_or_exit($source);
+ }
+ }
+ if(not defined $opt::recstart and
+ not defined $opt::recend) { $opt::recend = "\n"; }
+ $Global::blocksize = multiply_binary_prefix($opt::blocksize || "1M");
+ if($Global::blocksize > 2**31-1 and not $opt::pipepart) {
+ warning("--blocksize >= 2G causes problems. Using 2G-1.");
+ $Global::blocksize = 2**31-1;
+ }
+ if($^O eq "cygwin" and
+ ($opt::pipe or $opt::pipepart or $opt::roundrobin)
+ and $Global::blocksize > 65535) {
+ warning("--blocksize >= 64K causes problems on Cygwin.");
+ }
+ $opt::memfree = multiply_binary_prefix($opt::memfree);
+ $opt::memsuspend = multiply_binary_prefix($opt::memsuspend);
+ $Global::memlimit = $opt::memsuspend + $opt::memfree;
+ check_invalid_option_combinations();
+ if((defined $opt::fifo or defined $opt::cat) and not $opt::pipepart) {
+ $opt::pipe = 1;
+ }
+ if(defined $opt::minversion) {
+ print $Global::version,"\n";
+ if($Global::version < $opt::minversion) {
+ wait_and_exit(255);
+ } else {
+ wait_and_exit(0);
+ }
+ }
+ if(not defined $opt::delay) {
+ # Set --delay to --sshdelay if not set
+ $opt::delay = $opt::sshdelay;
+ }
+ $Global::sshdelayauto = $opt::sshdelay =~ s/auto$//;
+ $opt::sshdelay = multiply_time_units($opt::sshdelay);
+ $Global::delayauto = $opt::delay =~ s/auto$//;
+ $opt::delay = multiply_time_units($opt::delay);
+ if($opt::compress_program) {
+ $opt::compress = 1;
+ $opt::decompress_program ||= $opt::compress_program." -dc";
+ }
+
+ if(defined $opt::results) {
+ # Is the output a dir or CSV-file?
+ if($opt::results =~ /\.csv$/i) {
+ # CSV with , as separator
+ $Global::csvsep = ",";
+ $Global::membuffer ||= 1;
+ } elsif($opt::results =~ /\.tsv$/i) {
+ # CSV with TAB as separator
+ $Global::csvsep = "\t";
+ $Global::membuffer ||= 1;
+ } elsif($opt::results =~ /\.json$/i) {
+ # JSON output
+ $Global::jsonout ||= 1;
+ $Global::membuffer ||= 1;
+ }
+ }
+ if($opt::compress) {
+ my ($compress, $decompress) = find_compression_program();
+ $opt::compress_program ||= $compress;
+ $opt::decompress_program ||= $decompress;
+ if(($opt::results and not $Global::csvsep) or $Global::files) {
+ # No need for decompressing
+ $opt::decompress_program = "cat >/dev/null";
+ }
+ }
+ if(defined $opt::dryrun) {
+ # Force grouping due to bug #51039: --dry-run --timeout 3600 -u breaks
+ $opt::ungroup = 0;
+ $opt::group = 1;
+ }
+ if(defined $opt::nonall) {
+ # Append a dummy empty argument if there are no arguments
+ # on the command line to avoid reading from STDIN.
+ # arg_sep = random 50 char
+ # \0noarg => nothing (not the empty string)
+ $Global::arg_sep = join "",
+ map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..50);
+ push @ARGV, $Global::arg_sep, "\0noarg";
+ }
+ if(defined $opt::tee) {
+ if(not defined $opt::jobs) {
+ $opt::jobs = 0;
+ }
+ }
+ if(defined $opt::tty) {
+ # Defaults for --tty: -j1 -u
+ # Can be overridden with -jXXX -g
+ if(not defined $opt::jobs) {
+ $opt::jobs = 1;
+ }
+ if(not defined $opt::group) {
+ $opt::ungroup = 1;
+ }
+ }
+ if(@opt::trc) {
+ push @Global::ret_files, @opt::trc;
+ if(not @Global::transfer_files) {
+ # Defaults to --transferfile {}
+ push @Global::transfer_files, $opt::i || $opt::I || "{}";
+ }
+ $opt::cleanup = 1;
+ }
+ if(defined $opt::max_lines) {
+ if($opt::max_lines eq "-0") {
+ # -l -0 (swallowed -0)
+ $opt::max_lines = 1;
+ $opt::null = 1;
+ $/ = "\0";
+ } else {
+ $opt::max_lines = multiply_binary_prefix($opt::max_lines);
+ if ($opt::max_lines == 0) {
+ # If not given (or if 0 is given) => 1
+ $opt::max_lines = 1;
+ }
+ }
+
+ $Global::max_lines = $opt::max_lines;
+ if(not $opt::pipe) {
+ # --pipe -L means length of record - not max_number_of_args
+ $Global::max_number_of_args ||= $Global::max_lines;
+ }
+ }
+
+ # Read more than one arg at a time (-L, -N)
+ if(defined $opt::L) {
+ $opt::L = multiply_binary_prefix($opt::L);
+ $Global::max_lines = $opt::L;
+ if(not $opt::pipe) {
+ # --pipe -L means length of record - not max_number_of_args
+ $Global::max_number_of_args ||= $Global::max_lines;
+ }
+ }
+ if(defined $opt::max_replace_args) {
+ $opt::max_replace_args =
+ multiply_binary_prefix($opt::max_replace_args);
+ $Global::max_number_of_args = $opt::max_replace_args;
+ $Global::ContextReplace = 1;
+ }
+ if((defined $opt::L or defined $opt::max_replace_args)
+ and
+ not ($opt::xargs or $opt::m)) {
+ $Global::ContextReplace = 1;
+ }
+ # Deal with ::: :::+ :::: ::::+ and -a +file
+ my @ARGV_with_argsep = @ARGV;
+ @ARGV = read_args_from_command_line();
+ if(defined $opt::combineexec) {
+ pack_combined_executable(\@argv_before,\@ARGV_with_argsep,\@ARGV);
+ exit(0);
+ }
+ parse_semaphore();
+
+ if(defined $opt::eta) { $opt::progress = $opt::eta; }
+ if(defined $opt::bar) { $opt::progress = $opt::bar; }
+ if(defined $opt::bar or defined $opt::latestline) {
+ my $fh = $Global::status_fd || *STDERR;
+ # Activate decode_utf8
+ eval q{
+ # Enable utf8 if possible
+ use utf8;
+ binmode $fh, "encoding(utf8)";
+ *decode_utf8 = \&Encode::decode_utf8;
+ };
+ if(eval { decode_utf8("x") }) {
+ # Great: decode works
+ } else {
+ # UTF8-decode not supported: Dummy decode
+ eval q{sub decode_utf8($;$) { $_[0]; }};
+ }
+ # Activate encode_utf8, mbswidth, NFC, and NFD
+ eval q{
+ # Enable utf8 if possible
+ use utf8;
+ use Encode qw( encode_utf8 );
+ use Text::CharWidth qw( mbswidth );
+ use Unicode::Normalize qw( NFC NFD );
+ };
+ if(eval { mbswidth("ヌー平行") }) {
+ # Great: mbswidth works
+ } else {
+ # mbswidth not supported: Dummy mbswidth
+ eval q{ sub mbswidth { return length($_[0]); } };
+ }
+ }
+
+ # If you want GNU Parallel to be maintained in the future you
+ # should keep this.
+ # *YOU* will be harming free software by removing the notice.
+ #
+ # Funding a free software project is hard. GNU Parallel is no
+ # exception. On top of that it seems the less visible a project
+ # is, the harder it is to get funding. And the nature of GNU
+ # Parallel is that it will never be seen by "the guy with the
+ # checkbook", but only by the people doing the actual work.
+ #
+ # This problem has been covered by others - though no solution has
+ # been found:
+ # https://www.slideshare.net/NadiaEghbal/consider-the-maintainer
+ # https://www.numfocus.org/blog/why-is-numpy-only-now-getting-funded/
+ #
+ # The FAQ tells you why the citation notice exists:
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ #
+ # If you want GNU Parallel to be maintained in the future, and not
+ # just wither away like so many other free software tools, you
+ # need to help finance the development.
+ #
+ # The citation notice is a simple way of doing so, as citations
+ # make it possible for me to get a job where I can maintain GNU
+ # Parallel as part of the job.
+ #
+ # This means you can help finance development
+ #
+ # WITHOUT PAYING A SINGLE CENT!
+ #
+ # Before implementing the citation notice it was discussed with
+ # the users:
+ # https://lists.gnu.org/archive/html/parallel/2013-11/msg00006.html
+ #
+ # Having to spend 10 seconds on running 'parallel --citation' once
+ # is no doubt not an ideal solution, but no one has so far come up
+ # with an ideal solution - neither for funding GNU Parallel nor
+ # other free software.
+ #
+ # If you believe you have the perfect solution, you should try it
+ # out, and if it works, you should post it on the email
+ # list. Ideas that will cost work and which have not been tested
+ # are, however, unlikely to be prioritized.
+ #
+ # Please note that GPL version 3 gives you the right to fork GNU
+ # Parallel under a new name, but it does not give you the right to
+ # distribute modified copies with the citation notice disabled in
+ # a way where the software can be confused with GNU Parallel. To
+ # do that you need to be the owner of the GNU Parallel
+ # trademark. The xt:Commerce case shows this.
+ #
+ # Description of the xt:Commerce case in OLG Duesseldorf
+ # https://web.archive.org/web/20180715073746/http://www.inta.org/INTABulletin/Pages/GERMANYGeneralPublicLicenseDoesNotPermitUseofThird-PartyTrademarksforAdvertisingModifiedVersionsofOpen-SourceSoftware.aspx
+ #
+ # The verdict in German
+ # https://www.admody.com/urteilsdatenbank/cafe6fdaeed3/OLG-Duesseldorf_Urteil_vom_28-September-2010_Az_I-20-U-41-09
+ # https://web.archive.org/web/20180715073717/https://www.admody.com/urteilsdatenbank/cafe6fdaeed3/OLG-Duesseldorf_Urteil_vom_28-September-2010_Az_I-20-U-41-09
+ #
+ # Other free software limiting derivates by the same name:
+ # https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux_derivatives#Legal_aspects
+ # https://tm.joomla.org/trademark-faq.html
+ # https://www.mozilla.org/en-US/foundation/trademarks/faq/
+ #
+ # Running 'parallel --citation' one single time takes less than 10
+ # seconds, and will silence the citation notice for future
+ # runs. If that is too much trouble for you, why not use one of
+ # the alternatives instead?
+ # See a list in: 'man parallel_alternatives'
+ #
+ # If you want GNU Parallel to be maintained in the future, you
+ # should keep this line:
+ citation_notice();
+ # This is because _YOU_ actively make it harder to justify
+ # spending time developing GNU Parallel by removing it.
+
+ # If you disagree, please read (especially 77-):
+ # https://www.fordfoundation.org/media/2976/roads-and-bridges-the-unseen-labor-behind-our-digital-infrastructure.pdf
+
+ # *YOU* will be harming free software by removing the notice. You
+ # accept to be added to a public hall of shame by removing the
+ # line. That includes you, George and Andreas.
+
+ parse_halt();
+
+ if($ENV{'PARALLEL_ENV'}) {
+ # Read environment and set $Global::parallel_env
+ # Must be done before is_acceptable_command_line_length()
+ my $penv = $ENV{'PARALLEL_ENV'};
+ # unset $PARALLEL_ENV: It should not be given to children
+ # because it takes up a lot of env space
+ delete $ENV{'PARALLEL_ENV'};
+ if(-e $penv) {
+ # This is a file/fifo: Replace envvar with content of file
+ $penv = slurp_or_exit($penv);
+ }
+ # Map \001 to \n to make it easer to quote \n in $PARALLEL_ENV
+ $penv =~ s/\001/\n/g;
+ if($penv =~ /\0/) {
+ ::warning('\0 (NUL) in environment is not supported');
+ }
+ $Global::parallel_env = $penv;
+ }
+
+ parse_sshlogin();
+ if(defined $opt::show_limits) { show_limits(); }
+
+ if(remote_hosts() and
+ (defined $opt::X or defined $opt::m or defined $opt::xargs)) {
+ # As we do not know the max line length on the remote machine
+ # long commands generated by xargs may fail
+ # If $opt::max_replace_args is set, it is probably safe
+ ::warning("Using -X or -m with --sshlogin may fail.");
+ }
+
+ if(not defined $opt::jobs) { $opt::jobs = "100%"; }
+ open_joblog();
+ open_json_csv();
+ if(defined $opt::sqlmaster or defined $opt::sqlworker) {
+ $Global::sql = SQL->new($opt::sqlmaster || $opt::sqlworker);
+ }
+ if(defined $opt::sqlworker) { $Global::membuffer ||= 1; }
+ # The sqlmaster groups the arguments, so the sqlworker should just read one
+ if(defined $opt::sqlworker and not defined $opt::sqlmaster) {
+ $Global::max_number_of_args = 1;
+ }
+ if(defined $Global::color or defined $opt::colorfailed) {
+ Job::init_color();
+ }
+}
+
+sub check_invalid_option_combinations() {
+ if(defined $opt::timeout and
+ $opt::timeout !~ /^\d+(\.\d+)?%?$|^(\d+(\.\d+)?[dhms])+$/i) {
+ ::error(255,"--timeout must be seconds or percentage.");
+ }
+ if(defined $opt::fifo and defined $opt::cat) {
+ ::error(255,"--fifo cannot be combined with --cat.");
+ }
+ if(defined $opt::retries and defined $opt::roundrobin) {
+ ::error(255,"--retries cannot be combined with --roundrobin.");
+ }
+ if(defined $opt::pipepart and
+ (defined $opt::L or defined $opt::max_lines
+ or defined $opt::max_replace_args)) {
+ ::error(255,"--pipepart is incompatible with --max-replace-args, ".
+ "--max-lines, and -L.");
+ }
+ if(defined $opt::group and defined $opt::ungroup) {
+ ::error(255,"--group cannot be combined with --ungroup.");
+ }
+ if(defined $opt::group and defined $opt::linebuffer) {
+ ::error(255,"--group cannot be combined with --line-buffer.");
+ }
+ if(defined $opt::ungroup and defined $opt::linebuffer) {
+ ::error(255,"--ungroup cannot be combined with --line-buffer.");
+ }
+ if(defined $opt::tollef and not defined $opt::gnu) {
+ ::error(255,"--tollef has been retired.",
+ "Remove --tollef or use --gnu to override --tollef.");
+ }
+ if(defined $opt::retired) {
+ ::error(255,
+ "-g has been retired. Use --group.",
+ "-B has been retired. Use --bf.",
+ "-T has been retired. Use --tty.",
+ "-U has been retired. Use --er.",
+ "-W has been retired. Use --wd.",
+ "-Y has been retired. Use --shebang.",
+ "-H has been retired. Use --halt.",
+ "--sql has been retired. Use --sqlmaster.",
+ "--ctrlc has been retired.",
+ "--noctrlc has been retired.");
+ }
+ if(defined $opt::groupby) {
+ if(not defined $opt::pipe and not defined $opt::pipepart) {
+ $opt::pipe = 1;
+ }
+ if(defined $opt::remove_rec_sep) {
+ ::error(255,"--remove-rec-sep is not compatible with --groupby");
+ }
+ if(defined $opt::recstart) {
+ ::error(255,"--recstart is not compatible with --groupby");
+ }
+ if($opt::recend ne "\n") {
+ ::error(255,"--recend is not compatible with --groupby");
+ }
+ }
+ sub unsafe_warn {
+ # use --_unsafe to only generate a warning
+ if($opt::_unsafe) { ::warning(@_); } else { ::error(255,@_); }
+ }
+ if(defined $opt::results) {
+ if($opt::nonall or $opt::onall) {
+ unsafe_warn("--(n)onall + --results not supported (yet).");
+ }
+ }
+ sub test_safe_chars {
+ my $var = shift;
+ if($ENV{$var} =~ m{^[-a-z0-9_+,.%:/= ]*$}i) {
+ # OK
+ } else {
+ unsafe_warn("\$$var can only contain [-a-z0-9_+,.%:/= ].");
+ }
+ }
+ if($ENV{'TMPDIR'} =~ /\n/) {
+ if(defined $opt::files) {
+ ::warning("Use --files0 when \$TMPDIR contains newline.");
+ } elsif($Global::cshell
+ and
+ (defined $opt::cat or defined $opt::fifo)) {
+ ::warning("--cat/--fifo fails under csh ".
+ "if \$TMPDIR contains newline.");
+ }
+ } elsif($ENV{'TMPDIR'} =~ /\257/) {
+ unsafe_warn("\$TMPDIR with \\257 (\257) is not supported.");
+ } else{
+ test_safe_chars('TMPDIR');
+ }
+ map { test_safe_chars($_); } qw(PARALLEL_HOME XDG_CONFIG_DIRS
+ PARALLEL_REMOTE_TMPDIR XDG_CACHE_HOME);
+}
+
+sub init_globals() {
+ # Defaults:
+ $Global::version = 20240222;
+ $Global::progname = 'parallel';
+ $::name = "GNU Parallel";
+ $Global::infinity = 2**31;
+ $Global::debug = 0;
+ $Global::verbose = 0;
+ # Don't quote every part of the command line
+ $Global::quoting = 0;
+ # Quote replacement strings
+ $Global::quote_replace = 1;
+ $Global::total_completed = 0;
+ $Global::cache_replacement_eval = 1;
+ # Read only table with default --rpl values
+ %Global::replace =
+ (
+ '{}' => '',
+ '{#}' => '1 $_=$job->seq()',
+ '{%}' => '1 $_=$job->slot()',
+ '{/}' => 's:.*/::',
+ '{//}' =>
+ ('$Global::use{"File::Basename"} ||= eval "use File::Basename; 1;"; '.
+ '$_ = dirname($_);'),
+ '{/.}' => 's:.*/::; s:\.[^/.]*$::;',
+ '{.}' => 's:\.[^/.]*$::',
+ );
+ %Global::plus =
+ (
+ # {} = {+/}/{/}
+ # = {.}.{+.} = {+/}/{/.}.{+.}
+ # = {..}.{+..} = {+/}/{/..}.{+..}
+ # = {...}.{+...} = {+/}/{/...}.{+...}
+ '{+/}' => 's:/[^/]*$:: || s:.*$::',
+ # a.b => b; a => ''
+ '{+.}' => 's:.*\.:: || s:.*$::',
+ # a.b.c => b.c; a.b => ''; a => ''
+ '{+..}' => 's:.*\.([^/.]*\.[^/.]*)$:$1: || s:.*$::',
+ '{+...}' => 's:.*\.([^/.]*\.[^/.]*\.[^/.]*)$:$1: || s:.*$::',
+ '{..}' => 's:\.[^/.]*\.[^/.]*$::',
+ '{...}' => 's:\.[^/.]*\.[^/.]*\.[^/.]*$::',
+ '{/..}' => 's:.*/::; s:\.[^/.]*\.[^/.]*$::',
+ '{/...}' => 's:.*/::; s:\.[^/.]*\.[^/.]*\.[^/.]*$::',
+ # n choose k = Binomial coefficient
+ '{choose_k}' => ('for $t (2..$#arg)'.
+ '{ if($arg[$t-1] ge $arg[$t]) { skip() } }'),
+ # unique values: Skip job if any args are the same
+ '{uniq}' => 'if(::uniq(@arg) != @arg) { skip(); }',
+ # {##} = number of jobs
+ '{##}' => '1 $_=total_jobs()',
+ # {0#} = 0-padded seq
+ '{0#}' => ('1 $f=1+int((log(total_jobs())/log(10)));'.
+ '$_=sprintf("%0${f}d",seq())'),
+ # {0%} = 0-padded jobslot
+ '{0%}' => ('1 $f=1+int((log($Global::max_jobs_running||1)/log(10)));'.
+ '$_=sprintf("%0${f}d",slot())'),
+ # {seq-1} = seq-1 = counting from 0
+ '{seq(.*?)}' => '$_=eval q{$job->seq()}.qq{$$1}',
+ # {slot-1} = jobslot-1 = counting from 0
+ '{slot(.*?)}' => '$_=eval q{$job->slot()}.qq{$$1}',
+
+ ## Bash inspired replacement strings
+ # Bash ${a:-myval}
+ '{:-([^}]+?)}' => '$_ ||= $$1',
+ # Bash ${a:2}
+ '{:(\d+?)}' => 'substr($_,0,$$1) = ""',
+ # Bash ${a:2:3}
+ '{:(\d+?):(\d+?)}' => '$_ = substr($_,$$1,$$2);',
+ # echo {#z.*z.} ::: z.z.z.foo => z.foo
+ # echo {##z.*z.} ::: z.z.z.foo => foo
+ # Bash ${a#bc}
+ '{#([^#}][^}]*?)}' =>
+ '$nongreedy=::make_regexp_ungreedy($$1);s/^$nongreedy(.*)/$1/;',
+ # Bash ${a##bc}
+ '{##([^#}][^}]*?)}' => 's/^$$1//;',
+ # echo {%.z.*z} ::: foo.z.z.z => foo.z
+ # echo {%%.z.*z} ::: foo.z.z.z => foo
+ # Bash ${a%def}
+ '{%([^}]+?)}' =>
+ '$nongreedy=::make_regexp_ungreedy($$1);s/(.*)$nongreedy$/$1/;',
+ # Bash ${a%%def}
+ '{%%([^}]+?)}' => 's/$$1$//;',
+ # Bash ${a/def/ghi} ${a/def/}
+ '{/([^#%}/]+?)/([^}]*?)}' => 's/$$1/$$2/;',
+ # Bash ${a/#def/ghi} ${a/#def/}
+ '{/#([^}]+?)/([^}]*?)}' => 's/^$$1/$$2/g;',
+ # Bash ${a/%def/ghi} ${a/%def/}
+ '{/%([^}]+?)/([^}]*?)}' => 's/$$1$/$$2/g;',
+ # Bash ${a//def/ghi} ${a//def/}
+ '{//([^}]+?)/([^}]*?)}' => 's/$$1/$$2/g;',
+ # Bash ${a^a}
+ '{^([^}]+?)}' => 's/^($$1)/uc($1)/e;',
+ # Bash ${a^^a}
+ '{^^([^}]+?)}' => 's/($$1)/uc($1)/eg;',
+ # Bash ${a,A}
+ '{,([^}]+?)}' => 's/^($$1)/lc($1)/e;',
+ # Bash ${a,,A}
+ '{,,([^}]+?)}' => 's/($$1)/lc($1)/eg;',
+
+ # {slot} = $PARALLEL_JOBSLOT
+ '{slot}' => '1 $_="\${PARALLEL_JOBSLOT}";uq()',
+ # {host} = ssh host
+ '{host}' => '1 $_="\${PARALLEL_SSHHOST}";uq()',
+ # {sshlogin} = sshlogin
+ '{sshlogin}' => '1 $_="\${PARALLEL_SSHLOGIN}";uq()',
+ # {hgrp} = hostgroups of the host
+ '{hgrp}' => '1 $_="\${PARALLEL_HOSTGROUPS}";uq()',
+ # {agrp} = hostgroups of the argument
+ '{agrp}' => '1 $_="\${PARALLEL_ARGHOSTGROUPS}";uq()',
+ );
+ # Modifiable copy of %Global::replace
+ %Global::rpl = %Global::replace;
+ $/ = "\n";
+ $Global::ignore_empty = 0;
+ $Global::interactive = 0;
+ $Global::stderr_verbose = 0;
+ $Global::default_simultaneous_sshlogins = 9;
+ $Global::exitstatus = 0;
+ $Global::arg_sep = ":::";
+ $Global::arg_file_sep = "::::";
+ $Global::trim = 'n';
+ $Global::max_jobs_running = 0;
+ $Global::job_already_run = '';
+ $ENV{'TMPDIR'} ||= "/tmp";
+ $ENV{'PARALLEL_REMOTE_TMPDIR'} ||= "/tmp";
+ # bug #55398: set $OLDPWD when using --wd
+ $ENV{'OLDPWD'} = $ENV{'PWD'};
+ if(not $ENV{HOME}) {
+ # $ENV{HOME} is sometimes not set if called from PHP
+ ::warning("\$HOME not set. Using /tmp.");
+ $ENV{HOME} = "/tmp";
+ }
+ # no warnings to allow for undefined $XDG_*
+ no warnings 'uninitialized';
+ # If $PARALLEL_HOME is set, but does not exist, try making it.
+ if(defined $ENV{'PARALLEL_HOME'}) {
+ eval { File::Path::mkpath($ENV{'PARALLEL_HOME'}); };
+ }
+ # $xdg_config_home is needed to make env_parallel.fish stop complaining
+ my $xdg_config_home = $ENV{'XDG_CONFIG_HOME'};
+ # Use the first config dir that exists from:
+ # $PARALLEL_HOME
+ # $XDG_CONFIG_HOME/parallel
+ # $(each XDG_CONFIG_DIRS)/parallel
+ # $HOME/.parallel
+ #
+ # Keep only dirs that exist
+ @Global::config_dirs =
+ (grep { -d $_ }
+ $ENV{'PARALLEL_HOME'},
+ (map { "$_/parallel" }
+ $xdg_config_home,
+ split /:/, $ENV{'XDG_CONFIG_DIRS'}),
+ $ENV{'HOME'} . "/.parallel");
+ # Use first dir as config dir
+ $Global::config_dir = $Global::config_dirs[0] ||
+ $ENV{'HOME'} . "/.parallel";
+ if($ENV{'PARALLEL_HOME'} =~ /./ and not -d $ENV{'PARALLEL_HOME'}) {
+ ::warning("\$PARALLEL_HOME ($ENV{'PARALLEL_HOME'}) does not exist.");
+ ::warning("Using $Global::config_dir");
+ }
+ # Use the first cache dir that exists from:
+ # $PARALLEL_HOME
+ # $XDG_CACHE_HOME/parallel
+ # Keep only dirs that exist
+ @Global::cache_dirs = (grep { -d $_ }
+ $ENV{'PARALLEL_HOME'},
+ $ENV{'XDG_CACHE_HOME'}."/parallel");
+ $Global::cache_dir = $Global::cache_dirs[0] ||
+ $ENV{'HOME'} . "/.parallel";
+ Job::init_color();
+}
+
+sub parse_halt() {
+ # $opt::halt flavours
+ # Uses:
+ # $opt::halt
+ # $Global::halt_when
+ # $Global::halt_fail
+ # $Global::halt_success
+ # $Global::halt_pct
+ # $Global::halt_count
+ if(defined $opt::halt) {
+ my %halt_expansion = (
+ "0" => "never",
+ "1" => "soon,fail=1",
+ "2" => "now,fail=1",
+ "-1" => "soon,success=1",
+ "-2" => "now,success=1",
+ );
+ # Expand -2,-1,0,1,2 into long form
+ $opt::halt = $halt_expansion{$opt::halt} || $opt::halt;
+ # --halt 5% == --halt soon,fail=5%
+ $opt::halt =~ s/^(\d+)%$/soon,fail=$1%/;
+ # Split: soon,fail=5%
+ my ($when,$fail_success,$pct_count) = split /[,=]/, $opt::halt;
+ if(not grep { $when eq $_ } qw(never soon now)) {
+ ::error(255,"--halt must have 'never', 'soon', or 'now'.");
+ }
+ $Global::halt_when = $when;
+ if($when ne "never") {
+ if($fail_success eq "fail") {
+ $Global::halt_fail = 1;
+ } elsif($fail_success eq "success") {
+ $Global::halt_success = 1;
+ } elsif($fail_success eq "done") {
+ $Global::halt_done = 1;
+ } else {
+ ::error(255,"--halt $when must be followed by ".
+ ",success or ,fail.");
+ }
+ if($pct_count =~ /^(\d+)%$/) {
+ $Global::halt_pct = $1/100;
+ } elsif($pct_count =~ /^(\d+)$/) {
+ $Global::halt_count = $1;
+ } else {
+ ::error(255,"--halt $when,$fail_success ".
+ "must be followed by ,number or ,percent%.");
+ }
+ }
+ }
+}
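+
+# Example: the legacy numeric values expand as in %halt_expansion, so
+#   --halt 2    == --halt now,fail=1
+#   --halt 5%   == --halt soon,fail=5%
+# and 'soon,fail=5%' sets $Global::halt_when = "soon",
+# $Global::halt_fail = 1, and $Global::halt_pct = 0.05.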
+
+sub parse_replacement_string_options() {
+ # Deal with --rpl
+ # Uses:
+ # %Global::rpl
+ # $Global::parensleft
+ # $Global::parensright
+ # $opt::parens
+ # $Global::parensleft
+ # $Global::parensright
+ # $opt::plus
+ # %Global::plus
+ # $opt::I
+ # $opt::U
+ # $opt::i
+ # $opt::basenamereplace
+ # $opt::dirnamereplace
+ # $opt::seqreplace
+ # $opt::slotreplace
+ # $opt::basenameextensionreplace
+
+ sub rpl($$) {
+ # Modify %Global::rpl
+ # Replace $old with $new
+ my ($old,$new) = @_;
+ if($old ne $new) {
+ $Global::rpl{$new} = $Global::rpl{$old};
+ delete $Global::rpl{$old};
+ }
+ }
+ my $parens = "{==}";
+ if(defined $opt::parens) { $parens = $opt::parens; }
+ my $parenslen = 0.5*length $parens;
+ $Global::parensleft = substr($parens,0,$parenslen);
+ $Global::parensright = substr($parens,$parenslen);
+ if(defined $opt::plus) { %Global::rpl = (%Global::plus,%Global::rpl); }
+ if(defined $opt::I) { rpl('{}',$opt::I); }
+ if(defined $opt::i and $opt::i) { rpl('{}',$opt::i); }
+ if(defined $opt::U) { rpl('{.}',$opt::U); }
+ if(defined $opt::basenamereplace) { rpl('{/}',$opt::basenamereplace); }
+ if(defined $opt::dirnamereplace) { rpl('{//}',$opt::dirnamereplace); }
+ if(defined $opt::seqreplace) { rpl('{#}',$opt::seqreplace); }
+ if(defined $opt::slotreplace) { rpl('{%}',$opt::slotreplace); }
+ if(defined $opt::basenameextensionreplace) {
+ rpl('{/.}',$opt::basenameextensionreplace);
+ }
+ for(@opt::rpl) {
+ # Create $Global::rpl entries for --rpl options
+ # E.g: "{..} s:\.[^.]+$:;s:\.[^.]+$:;"
+ my ($shorthand,$long) = split/\s/,$_,2;
+ $Global::rpl{$shorthand} = $long;
+ }
+}
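+
+# Example (shell): define a replacement string that strips two
+# extensions:
+#   parallel --rpl '{..} s:\.[^.]+$::;s:\.[^.]+$::;' echo {..} ::: f.tar.gz
+# prints 'f'.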
+
+sub parse_semaphore() {
+ # Semaphore defaults
+ # Must be done before computing number of processes and max_line_length
+ # because when running as a semaphore GNU Parallel does not read args
+ # Uses:
+ # $opt::semaphore
+ # $Global::semaphore
+ # $opt::semaphoretimeout
+ # $Semaphore::timeout
+ # $opt::semaphorename
+ # $Semaphore::name
+ # $opt::fg
+ # $Semaphore::fg
+ # $opt::wait
+ # $Semaphore::wait
+ # $opt::bg
+ # @opt::a
+ # @Global::unget_argv
+ # $Global::default_simultaneous_sshlogins
+ # $opt::jobs
+ # $Global::interactive
+ $Global::semaphore ||= ($0 =~ m:(^|/)sem$:); # called as 'sem'
+ if(defined $opt::semaphore) { $Global::semaphore = 1; }
+ if(defined $opt::semaphoretimeout) { $Global::semaphore = 1; }
+ if(defined $opt::semaphorename) { $Global::semaphore = 1; }
+ if(defined $opt::fg and not $opt::tmux and not $opt::tmuxpane) {
+ $Global::semaphore = 1;
+ }
+ if(defined $opt::bg) { $Global::semaphore = 1; }
+ if(defined $opt::wait and not $opt::sqlmaster) {
+ $Global::semaphore = 1; @ARGV = "true";
+ }
+ if($Global::semaphore) {
+ if(@opt::a) {
+ # Assign the first -a to STDIN
+ open(STDIN,"<",shift @opt::a);
+ if(@opt::a) {
+ # We currently have no way of dealing with more -a
+ ::error(255,"A semaphore cannot take input from ".
+ "more files\n");
+ }
+ }
+ @opt::a = ("/dev/null");
+ # Append a dummy empty argument
+ # \0 => nothing (not the empty string)
+ push(@Global::unget_argv, [Arg->new("\0noarg")]);
+ $Semaphore::timeout = int(multiply_time_units($opt::semaphoretimeout))
+ || 0;
+ if(defined $opt::semaphorename) {
+ $Semaphore::name = $opt::semaphorename;
+ } else {
+ local $/ = "\n";
+ $Semaphore::name = `tty`;
+ chomp $Semaphore::name;
+ }
+ $Semaphore::fg = $opt::fg;
+ $Semaphore::wait = $opt::wait;
+ $Global::default_simultaneous_sshlogins = 1;
+ if(not defined $opt::jobs) {
+ $opt::jobs = 1;
+ }
+ if($Global::interactive and $opt::bg) {
+ ::error(255,"Jobs running in the ".
+ "background cannot be interactive.");
+ }
+ }
+}
+
+sub record_env() {
+ # Record current %ENV-keys in $Global::config_dir/ignored_vars
+ # Returns: N/A
+ my $ignore_filename = $Global::config_dir . "/ignored_vars";
+ write_or_exit($ignore_filename,map { $_,"\n" } keys %ENV);
+}
+
+sub open_joblog() {
+ # Open joblog as specified by --joblog
+ # Uses:
+ # $opt::resume
+ # $opt::resume_failed
+ # $opt::joblog
+ # $opt::results
+ # $Global::job_already_run
+ # %Global::fh
+ my $append = 0;
+ if(($opt::resume or $opt::resume_failed)
+ and
+ not ($opt::joblog or $opt::results)) {
+ ::error(255,"--resume and --resume-failed require --joblog ".
+ "or --results.");
+ }
+ if(defined $opt::joblog and $opt::joblog =~ s/^\+//) {
+ # --joblog +filename = append to filename
+ $append = 1;
+ }
+ if($opt::joblog
+ and
+ ($opt::sqlmaster
+ or
+ not $opt::sqlworker)) {
+ # Do not log if --sqlworker
+ if($opt::resume || $opt::resume_failed || $opt::retry_failed) {
+ if(open(my $joblog_fh, "<", $opt::joblog)) {
+ # Enable utf8 if possible
+ eval q{ binmode $joblog_fh, "encoding(utf8)"; };
+ # Read the joblog
+ # Override $/ with \n because -d might be set
+ local $/ = "\n";
+ # If there is a header: Open as append later
+ $append = <$joblog_fh>;
+ my $joblog_regexp;
+ if($opt::retry_failed) {
+ # Make a regexp that matches commands with exit+signal=0
+ # 4 host 1360490623.067 3.445 1023 1222 0 0 command
+ $joblog_regexp='^(\d+)(?:\t[^\t]+){5}\t0\t0\t';
+ my @group;
+ while(<$joblog_fh>) {
+ if(/$joblog_regexp/o) {
+ # This is 30% faster than set_job_already_run($1);
+ vec($Global::job_already_run,($1||0),1) = 1;
+ $Global::total_completed++;
+ $group[$1-1] = "true";
+ } elsif(/(\d+)\s+\S+(\s+[-0-9.]+){6}\s+(.*)$/) {
+ # Grab out the command
+ $group[$1-1] = $3;
+ } else {
+ chomp;
+ ::error(255,"Format of '$opt::joblog' is ".
+ "wrong: $_");
+ }
+ }
+ if(@group) {
+ my ($outfh,$name) = ::tmpfile(SUFFIX => ".arg");
+ unlink($name);
+ # Put args into argfile
+ if(grep /\0/, @group) {
+ # force --null to deal with \n in commandlines
+ ::warning("Command lines contain newline. ".
+ "Forcing --null.");
+ $opt::null = 1;
+ $/ = "\0";
+ }
+ # Replace \0 with '\n' as used in print_joblog()
+ print $outfh (map { s/\0/\n/g; $_,$/ }
+ map { $_ } @group);
+ seek $outfh, 0, 0;
+ exit_if_disk_full();
+ # Set filehandle to -a
+ @opt::a = ($outfh);
+ }
+ # Remove $command (so -a is run)
+ @ARGV = ();
+ }
+ if($opt::resume || $opt::resume_failed) {
+ if($opt::resume_failed) {
+ # Make a regexp that matches commands with exit+signal=0
+ # 4 host 1360490623.067 3.445 1023 1222 0 0 command
+ $joblog_regexp='^(\d+)(?:\t[^\t]+){5}\t0\t0\t';
+ } else {
+ # Just match the job number
+ $joblog_regexp='^(\d+)';
+ }
+ while(<$joblog_fh>) {
+ if(/$joblog_regexp/o) {
+ # This is 30% faster than set_job_already_run($1);
+ vec($Global::job_already_run,($1||0),1) = 1;
+ $Global::total_completed++;
+ } elsif(not /\d+\s+[^\s]+\s+([-0-9.]+\s+){6}/) {
+ ::error(255,"Format of '$opt::joblog' ".
+ "is wrong: $_");
+ }
+ }
+ }
+ close $joblog_fh;
+ }
+ # $opt::null may be set if the commands contain \n
+ if($opt::null) { $/ = "\0"; }
+ }
+ if($opt::dryrun) {
+ # Do not write to joblog in a dry-run
+
+ } elsif($append) {
+ # Append to joblog
+ $Global::joblog = open_or_exit(">>", $opt::joblog);
+ } else {
+ if($opt::joblog eq "-") {
+ # Use STDOUT as joblog
+ $Global::joblog = $Global::fh{1};
+ } else {
+ # Overwrite the joblog
+ $Global::joblog = open_or_exit(">", $opt::joblog);
+ }
+ print $Global::joblog
+ join("\t", "Seq", "Host", "Starttime", "JobRuntime",
+ "Send", "Receive", "Exitval", "Signal", "Command"
+ ). "\n";
+ }
+ }
+}
+
+sub open_json_csv() {
+ if($opt::results) {
+ # Output as JSON/CSV/TSV
+ if($opt::results eq "-.csv"
+ or
+ $opt::results eq "-.tsv"
+ or
+ $opt::results eq "-.json") {
+ # Output as JSON/CSV/TSV on stdout
+ open $Global::csv_fh, ">&", "STDOUT" or
+ ::die_bug("Can't dup STDOUT in csv: $!");
+ # Do not print any other output to STDOUT
+ # by forcing all other output to /dev/null
+ open my $fd, ">", "/dev/null" or
+ ::die_bug("Can't >/dev/null in csv: $!");
+ $Global::fh{1} = $fd;
+ $Global::fh{2} = $fd;
+ } elsif($Global::csvsep or $Global::jsonout) {
+ $Global::csv_fh = open_or_exit(">",$opt::results);
+ }
+ }
+}
+
+sub find_compression_program() {
+ # Find a fast compression program
+ # Returns:
+ # $compress_program = compress program with options
+ # $decompress_program = decompress program with options
+
+    # Search for these. Sorted by speed on a 128-core machine
+
+ # seq 120000000|shuf > 1gb &
+ # apt-get update
+ # apt install make g++ htop
+ # wget -O - pi.dk/3 | bash
+ # apt install zstd clzip liblz4-tool lzop pigz pxz gzip plzip pbzip2 lzma xz-utils lzip bzip2 lbzip2 lrzip pixz
+ # git clone https://github.com/facebook/zstd.git
+ # (cd zstd/contrib/pzstd; make -j; cp pzstd /usr/local/bin)
+ # echo 'lrzip -L $((-$1))' >/usr/local/bin/lrz
+ # chmod +x /usr/local/bin/lrz
+ # wait
+ # onethread="zstd clzip lz4 lzop gzip lzma xz bzip2"
+ # multithread="pzstd pigz pxz plzip pbzip2 lzip lbzip2 lrz pixz"
+ # parallel --shuf -j1 --joblog jl-m --arg-sep , parallel --compress-program \'{3}" "-{2}\' cat ::: 1gb '>'/dev/null , 1 2 3 , {1..3} , $multithread
+ # parallel --shuf -j50% --delay 1 --joblog jl-s --arg-sep , parallel --compress-program \'{3}" "-{2}\' cat ::: 1gb '>'/dev/null , 1 2 3 , {1..3} , $onethread
+ # sort -nk4 jl-?
+
+ # 1-core:
+ # 2-cores: pzstd zstd lz4 lzop pigz gzip lbzip2 pbzip2 lrz bzip2 lzma pxz plzip xz lzip clzip
+ # 4-cores:
+ # 8-cores: pzstd lz4 zstd pigz lzop lbzip2 pbzip2 gzip lzip lrz plzip pxz bzip2 lzma xz clzip
+ # 16-cores: pzstd lz4 pigz lzop lbzip2 pbzip2 plzip lzip lrz pxz gzip lzma xz bzip2
+ # 32-cores: pzstd lbzip2 pbzip2 zstd pigz lz4 lzop plzip lzip lrz gzip pxz lzma bzip2 xz clzip
+ # 64-cores: pzstd lbzip2 pbzip2 pigz zstd pixz lz4 plzip lzop lzip lrz gzip pxz lzma bzip2 xz clzip
+ # 128-core: pzstd lbzip2 pbzip2 zstd pixz lz4 pigz lzop plzip lzip gzip lrz pxz bzip2 lzma xz clzip
+
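+    # E.g. on a machine where pzstd is the first program found below,
+    # this returns ("pzstd -c -1", "pzstd -dc") (illustrative).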
+ my @prg = qw(pzstd lbzip2 pbzip2 zstd pixz lz4 pigz lzop plzip lzip gzip
+ lrz pxz bzip2 lzma xz clzip);
+ for my $p (@prg) {
+ if(which($p)) {
+ return ("$p -c -1","$p -dc");
+ }
+ }
+ # Fall back to cat
+ return ("cat","cat");
+}
+
+sub read_options() {
+ # Read options from command line, profile and $PARALLEL
+ # Uses:
+ # $opt::shebang_wrap
+ # $opt::shebang
+ # @ARGV
+ # $opt::plain
+ # @opt::profile
+ # $ENV{'HOME'}
+ # $ENV{'PARALLEL'}
+ # Returns:
+ # @ARGV_no_opt = @ARGV without --options
+
+ # This must be done first as this may exec myself
+ if(defined $ARGV[0] and ($ARGV[0] =~ /^--shebang/ or
+ $ARGV[0] =~ /^--shebang-?wrap/ or
+ $ARGV[0] =~ /^--hashbang/)) {
+ # Program is called from #! line in script
+ # remove --shebang-wrap if it is set
+ $opt::shebang_wrap = ($ARGV[0] =~ s/^--shebang-?wrap *//);
+ # remove --shebang if it is set
+ $opt::shebang = ($ARGV[0] =~ s/^--shebang *//);
+ # remove --hashbang if it is set
+ $opt::shebang .= ($ARGV[0] =~ s/^--hashbang *//);
+ if($opt::shebang) {
+ my $argfile = Q(pop @ARGV);
+ # exec myself to split $ARGV[0] into separate fields
+ exec "$0 --skip-first-line -a $argfile @ARGV";
+ }
+ if($opt::shebang_wrap) {
+ my @options;
+ my @parser;
+ if ($^O eq 'freebsd') {
+		# FreeBSD's #! puts different values in @ARGV than Linux's does
+ my @nooptions = @ARGV;
+ get_options_from_array(\@nooptions);
+ while($#ARGV > $#nooptions) {
+ push @options, shift @ARGV;
+ }
+ while(@ARGV and $ARGV[0] ne ":::") {
+ push @parser, shift @ARGV;
+ }
+ if(@ARGV and $ARGV[0] eq ":::") {
+ shift @ARGV;
+ }
+ } else {
+ @options = shift @ARGV;
+ }
+ my $script = Q(Q(shift @ARGV)); # TODO - test if script = " "
+ my @args = map{ Q($_) } @ARGV;
+ # exec myself to split $ARGV[0] into separate fields
+ exec "$0 --_pipe-means-argfiles @options @parser $script ".
+ "::: @args";
+ }
+ }
+    if(defined $ARGV[0] and $ARGV[0] =~ / --shebang(-?wrap)? /) {
+ ::warning("--shebang and --shebang-wrap must be the first ".
+ "argument.\n");
+ }
+
+ Getopt::Long::Configure("bundling","require_order");
+ my @ARGV_copy = @ARGV;
+ my @ARGV_orig = @ARGV;
+ # Check if there is a --profile to set @opt::profile
+ get_options_from_array(\@ARGV_copy,"profile|J=s","plain") || die_usage();
+ my @ARGV_profile = ();
+ my @ARGV_env = ();
+ if(not $opt::plain) {
+ # Add options from $PARALLEL_HOME/config and other profiles
+ my @config_profiles = (
+ "/etc/parallel/config",
+ (map { "$_/config" } @Global::config_dirs),
+ $ENV{'HOME'}."/.parallelrc");
+ my @profiles = @config_profiles;
+ if(@opt::profile) {
+ # --profile overrides default profiles
+ @profiles = ();
+ for my $profile (@opt::profile) {
+ if($profile =~ m:^\./|^/:) {
+ # Look for ./profile in .
+ # Look for /profile in /
+ push @profiles, grep { -r $_ } $profile;
+ } else {
+ # Look for the $profile in @Global::config_dirs
+ push @profiles, grep { -r $_ }
+ map { "$_/$profile" } @Global::config_dirs;
+ }
+ }
+ }
+ for my $profile (@profiles) {
+ if(-r $profile) {
+ ::debug("init","Read $profile\n");
+ local $/ = "\n";
+ open (my $in_fh, "<", $profile) ||
+ ::die_bug("read-profile: $profile");
+ while(<$in_fh>) {
+ /^\s*\#/ and next;
+ chomp;
+ push @ARGV_profile, shell_words($_);
+ }
+ close $in_fh;
+ } else {
+ if(grep /^\Q$profile\E$/, @config_profiles) {
+ # config file is not required to exist
+ } else {
+ ::error(255,"$profile not readable.");
+ }
+ }
+ }
+ # Add options from shell variable $PARALLEL
+ if($ENV{'PARALLEL'}) {
+ push @ARGV_env, shell_words($ENV{'PARALLEL'});
+ }
+ # Add options from env_parallel.csh via $PARALLEL_CSH
+ if($ENV{'PARALLEL_CSH'}) {
+ push @ARGV_env, shell_words($ENV{'PARALLEL_CSH'});
+ }
+ }
+ Getopt::Long::Configure("bundling","require_order");
+ get_options_from_array(\@ARGV_profile) || die_usage();
+ get_options_from_array(\@ARGV_env) || die_usage();
+ get_options_from_array(\@ARGV) || die_usage();
+ # What were the options given on the command line?
+ # Used to start --sqlworker
+ my $ai = arrayindex(\@ARGV_orig, \@ARGV);
+ @Global::options_in_argv = @ARGV_orig[0..$ai-1];
+ # Prepend non-options to @ARGV (such as commands like 'nice')
+ unshift @ARGV, @ARGV_profile, @ARGV_env;
+ return @ARGV;
+}
+
+sub arrayindex($$) {
+ # Similar to Perl's index function, but for arrays
+ # Input:
+ # $arr_ref1 = ref to @array1 to search in
+ # $arr_ref2 = ref to @array2 to search for
+ # Returns:
+    #   $pos = position of @array2 in @array1, -1 if not found
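+    # Example (illustrative):
+    #   arrayindex([qw(nice parallel echo)], [qw(parallel echo)]) == 1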
+ my ($arr_ref1,$arr_ref2) = @_;
+ my $array1_as_string = join "", map { "\0".$_ } @$arr_ref1;
+ my $array2_as_string = join "", map { "\0".$_ } @$arr_ref2;
+ my $i = index($array1_as_string,$array2_as_string,0);
+ if($i == -1) { return -1 }
+ my @before = split /\0/, substr($array1_as_string,0,$i);
+ return $#before;
+}
+
+sub read_args_from_command_line() {
+ # Arguments given on the command line after:
+ # ::: ($Global::arg_sep)
+ # :::: ($Global::arg_file_sep)
+ # :::+ ($Global::arg_sep with --link)
+ # ::::+ ($Global::arg_file_sep with --link)
+ # Removes the arguments from @ARGV and:
+ # - puts filenames into -a
+    #   - puts arguments into files and adds the files to -a
+ # - adds --linkinputsource with 0/1 for each -a depending on :::+/::::+
+ # Input:
+ # @::ARGV = command option ::: arg arg arg :::: argfiles
+ # Uses:
+ # $Global::arg_sep
+ # $Global::arg_file_sep
+ # $opt::_pipe_means_argfiles
+ # $opt::pipe
+ # @opt::a
+ # Returns:
+ # @argv_no_argsep = @::ARGV without ::: and :::: and following args
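+    # Example (illustrative):
+    #   parallel echo ::: A B :::: argfile
+    # leaves the command template as ("echo"), writes 'A' and 'B' to a
+    # temporary argfile, and ends with @opt::a = (<tmp fh>, "argfile").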
+ my %group_sep = ($Global::arg_sep => ":::",
+ $Global::arg_sep."+" => ":::+",
+ $Global::arg_file_sep => "::::",
+ $Global::arg_file_sep."+" => "::::+");
+ sub is_linked($) {
+ # file is linked if file starts with +
+ local $_ = shift;
+ if(/^\+(.*)/) {
+ my $noplus = $1;
+ if(-e $_ and -e $noplus) {
+ ::error(255,"It is unclear whether you mean ".
+ "+./$noplus or ./+$noplus");
+ } elsif(-e $_ and not -e $noplus) {
+ # This is ./+file = this is not linked
+ return 0;
+ } elsif(not -e $_ and -e $noplus) {
+ # This is +./file = this is linked
+ return 1;
+ } elsif(not -e $_ and not -e $noplus) {
+ # File does not exist, maybe it is stdin?
+ if($_ eq "-") {
+ # This is - = this is not linked
+ return 0;
+ } elsif($_ eq "+-") {
+ # This is +- = this is linked
+ return 1;
+ } else {
+ ::error(255,"File not found: $_");
+ }
+ } else {
+ ::die_bug("noplus: $noplus $_");
+ }
+ }
+ # not linked
+ return 0;
+ }
+ sub cmd_template() {
+ # remove command template from @ARGV
+ # keep ::: / :::: in @ARGV if any
+ my @cmd_template;
+ while(@ARGV) {
+ my $arg = shift @ARGV;
+ if($group_sep{$arg}) {
+ # Found separator: push it back and exit loop
+ unshift @ARGV, $arg;
+ last;
+ }
+ push @cmd_template, $arg;
+ }
+ return @cmd_template;
+ }
+ sub divide_into_groups() {
+ # Split arguments from @ARGV into groups:
+ # ::: 1 2 3 :::: a b c ::::+ d e f
+ # =>
+ # [ ::: 1 2 3 ], [ :::: a b c ], [ ::::+ d e f ]
+ my @g;
+ my @grp;
+ while(@ARGV) {
+ my $arg = shift @ARGV;
+ if($group_sep{$arg}) {
+ # start a new group
+ push @grp, [@g];
+ @g = ($group_sep{$arg});
+ } else {
+ push @g, $arg;
+ }
+ }
+ push @grp, [@g];
+ shift @grp; # The first will always be empty
+ return @grp;
+ }
+ sub save_to_file(@) {
+ # Put args into a file, return open file handle of file
+ # Create argfile
+ my ($fh,$name) = ::tmpfile(SUFFIX => ".arg");
+ unlink($name);
+ # Put args into argfile
+ print $fh map { $_,$/ } @_;
+ seek $fh, 0, 0;
+ exit_if_disk_full();
+ return $fh;
+ }
+ my @cmd = cmd_template();
+ # The rest of @ARGV is ::: / :::: args
+ # If there are any -a: Rewrite them to use ::::
+ if(@opt::a) { unshift @ARGV, $Global::arg_file_sep, @opt::a; }
+ @opt::a = ();
+ # Convert ::: and :::: into (linked) files and put those into @opt::a
+ for my $g_ref (divide_into_groups()) {
+ my $group_sep = shift @$g_ref;
+ if($group_sep eq ":::" or $group_sep eq ":::+") {
+ # Group starts with ::: / :::+
+ if($opt::_pipe_means_argfiles and $#$g_ref < 0) {
+ # TODO
+ # Deal with --shebang-wrap and ::: on the shebang line
+ } else {
+ push @opt::a, save_to_file(@$g_ref);
+ # if $group_sep == ":::+": it is linked
+ push @opt::linkinputsource, ($group_sep eq ":::+");
+ }
+ } elsif($group_sep eq "::::" or $group_sep eq "::::+") {
+ # Group starts with :::: / ::::+
+ for my $f (@$g_ref) {
+ if($group_sep eq "::::+") {
+ # Linking forced
+ push @opt::a, $f;
+ push @opt::linkinputsource, 1;
+ } elsif($group_sep eq "::::") {
+ # Auto detect linking
+ if(is_linked($f)) {
+ # +file
+ push @opt::linkinputsource, 1;
+ $f =~ s/^\+//;
+ } else {
+ # file (no plus)
+ push @opt::linkinputsource, 0;
+ }
+ push @opt::a, $f;
+ } else {
+ ::die_bug("arg link error");
+ }
+ }
+ } else {
+ ::die_bug("arg link error");
+ }
+ }
+ # Output: command to run with options
+ return @cmd;
+}
+
+sub cleanup() {
+ # Returns: N/A
+ unlink keys %Global::unlink;
+ map { rmdir $_ } keys %Global::unlink;
+ if(@opt::basefile and $opt::cleanup) { cleanup_basefile(); }
+ for(keys %Global::sshmaster) {
+ # If 'ssh -M's are running: kill them
+ kill "TERM", $_;
+ }
+}
+
+
+sub __QUOTING_ARGUMENTS_FOR_SHELL__() {}
+
+sub shell_quote(@) {
+ # Input:
+ # @strings = strings to be quoted
+ # Returns:
+ # @shell_quoted_strings = string quoted as needed by the shell
+ return wantarray ? (map { Q($_) } @_) : (join" ",map { Q($_) } @_);
+}
+
+sub shell_quote_scalar_rc($) {
+ # Quote for the rc-shell
+ my $a = $_[0];
+ if(defined $a) {
+ if(($a =~ s/'/''/g)
+ +
+ ($a =~ s/[\n\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\'\202-\377]+/'$&'/go)) {
+ # A string was replaced
+ # No need to test for "" or \0
+ } elsif($a eq "") {
+ $a = "''";
+ } elsif($a eq "\0") {
+ $a = "";
+ }
+ }
+ return $a;
+}
+
+sub shell_quote_scalar_csh($) {
+ # Quote for (t)csh
+ my $a = $_[0];
+ if(defined $a) {
+ # $a =~ s/([\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\>\<\~\|\; \"\!\$\&\'\202-\377])/\\$1/g;
+ # This is 1% faster than the above
+ if(($a =~ s/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\'\202-\377]/\\$&/go)
+ +
+ # quote newline in csh as \\\n
+ ($a =~ s/[\n]/"\\\n"/go)) {
+ # A string was replaced
+ # No need to test for "" or \0
+ } elsif($a eq "") {
+ $a = "''";
+ } elsif($a eq "\0") {
+ $a = "";
+ }
+ }
+ return $a;
+}
+
+sub shell_quote_scalar_default($) {
+ # Quote for other shells (Bourne compatibles)
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $shell_quoted = string quoted as needed by the shell
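+    # Example: shell_quote_scalar_default(q(it's)) eq q('it'"'"'s')
+    # (the embedded ' becomes "-quoted while the rest is '-quoted)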
+ local $_ = $_[0];
+ if(/[^-_.+a-z0-9\/]/i) {
+ s/'+/'"$&"'/g; # "-quote '-quotes: ''' => "'''"
+ $_ = "'$_'"; # '-quote entire string
+ s/^''//; # Remove unneeded '' at ends
+ s/''$//; # (faster than s/^''|''$//g)
+ return $_;
+ } elsif ($_ eq "") {
+ return "''";
+ } else {
+ # No quoting needed
+ return $_;
+ }
+}
+
+sub shell_quote_scalar($) {
+ # Quote the string so the shell will not expand any special chars
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $shell_quoted = string quoted as needed by the shell
+
+ # Speed optimization: Choose the correct shell_quote_scalar_*
+ # and call that directly from now on
+ no warnings 'redefine';
+ if($Global::cshell) {
+ # (t)csh
+ *shell_quote_scalar = \&shell_quote_scalar_csh;
+ } elsif($Global::shell =~ m:(^|/)rc$:) {
+ # rc-shell
+ *shell_quote_scalar = \&shell_quote_scalar_rc;
+ } else {
+ # other shells
+ *shell_quote_scalar = \&shell_quote_scalar_default;
+ }
+ # The sub is now redefined. Call it
+ return shell_quote_scalar($_[0]);
+}
+
+sub Q($) {
+ # Q alias for ::shell_quote_scalar
+ my $ret = shell_quote_scalar($_[0]);
+ no warnings 'redefine';
+ *Q = \&::shell_quote_scalar;
+ return $ret;
+}
+
+sub shell_quote_file($) {
+ # Quote the string so shell will not expand any special chars
+ # and prepend ./ if needed
+ # Input:
+ # $filename = filename to be shell quoted
+ # Returns:
+ # $quoted_filename = filename quoted with \ and ./ if needed
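+    # Example: shell_quote_file("my file") eq "'./my file'"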
+ my $a = shift;
+ if(defined $a) {
+ if($a =~ m:^/: or $a =~ m:^\./:) {
+ # /abs/path or ./rel/path => skip
+ } else {
+ # rel/path => ./rel/path
+ $a = "./".$a;
+ }
+ }
+ return Q($a);
+}
+
+sub shell_words(@) {
+ # Input:
+ # $string = shell line
+ # Returns:
+ # @shell_words = $string split into words as shell would do
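+    # Example: shell_words(q(grep -E "foo bar")) returns
+    # ("grep", "-E", "foo bar")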
+ $Global::use{"Text::ParseWords"} ||= eval "use Text::ParseWords; 1;";
+ return Text::ParseWords::shellwords(@_);
+}
+
+sub perl_quote_scalar($) {
+ # Quote the string so perl's eval will not expand any special chars
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $perl_quoted = string quoted with \ as needed by perl's eval
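+    # Example: perl_quote_scalar(q(say "$x")) eq q(say \"\$x\")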
+ my $a = $_[0];
+ if(defined $a) {
+ $a =~ s/[\\\"\$\@]/\\$&/go;
+ }
+ return $a;
+}
+
+# -w complains about prototype
+sub pQ($) {
+ # pQ alias for ::perl_quote_scalar
+ my $ret = perl_quote_scalar($_[0]);
+ *pQ = \&::perl_quote_scalar;
+ return $ret;
+}
+
+sub unquote_printf() {
+ # Convert \t \n \r \000 \0
+ # Inputs:
+ # $string = string with \t \n \r \num \0
+ # Returns:
+ # $replaced = string with TAB NEWLINE CR <ascii-num> NUL
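+    # Example: unquote_printf('a\tb\065') eq "a\tb5" ('\065' = octal 53 = "5")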
+    local $_ = shift; # localize $_ so the caller's $_ is untouched
+ s/\\t/\t/g;
+ s/\\n/\n/g;
+ s/\\r/\r/g;
+ s/\\(\d\d\d)/eval 'sprintf "\\'.$1.'"'/ge;
+ s/\\(\d)/eval 'sprintf "\\'.$1.'"'/ge;
+ return $_;
+}
+
+
+sub __FILEHANDLES__() {}
+
+
+sub save_stdin_stdout_stderr() {
+ # Remember the original STDIN, STDOUT and STDERR
+ # and file descriptors opened by the shell (e.g. 3>/tmp/foo)
+ # Uses:
+ # %Global::fh
+ # $Global::original_stderr
+ # $Global::original_stdin
+ # Returns: N/A
+
+ # TODO Disabled until we have an open3 that will take n filehandles
+ # for my $fdno (1..61) {
+ # # /dev/fd/62 and above are used by bash for <(cmd)
+ # # Find file descriptors that are already opened (by the shell)
+ # Only focus on stdout+stderr for now
+ for my $fdno (1..2) {
+ my $fh;
+ # 2-argument-open is used to be compatible with old perl 5.8.0
+ # bug #43570: Perl 5.8.0 creates 61 files
+ if(open($fh,">&=$fdno")) {
+ $Global::fh{$fdno}=$fh;
+ }
+ }
+ open $Global::original_stderr, ">&", "STDERR" or
+ ::die_bug("Can't dup STDERR: $!");
+ open $Global::status_fd, ">&", "STDERR" or
+ ::die_bug("Can't dup STDERR: $!");
+ open $Global::original_stdin, "<&", "STDIN" or
+ ::die_bug("Can't dup STDIN: $!");
+}
+
+sub enough_file_handles() {
+ # Check that we have enough filehandles available for starting
+ # another job
+ # Uses:
+ # $opt::ungroup
+ # %Global::fh
+ # Returns:
+ # 1 if ungrouped (thus not needing extra filehandles)
+ # 0 if too few filehandles
+ # 1 if enough filehandles
+ if(not $opt::ungroup) {
+ my %fh;
+ my $enough_filehandles = 1;
+ # perl uses 7 filehandles for something?
+ # open3 uses 2 extra filehandles temporarily
+ # We need a filehandle for each redirected file descriptor
+ # (normally just STDOUT and STDERR)
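+	# E.g. with only STDOUT+STDERR saved in %Global::fh this opens
+	# 7+2+2 = 11 dummy handles to /dev/null as the test.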
+ for my $i (1..(7+2+keys %Global::fh)) {
+ $enough_filehandles &&= open($fh{$i}, "<", "/dev/null");
+ }
+ for (values %fh) { close $_; }
+ return $enough_filehandles;
+ } else {
+ # Ungrouped does not need extra file handles
+ return 1;
+ }
+}
+
+sub open_or_exit($$) {
+ # Open a file name or exit if the file cannot be opened
+ # Inputs:
+    #   $mode = "<" (read), ">" (write) or ">>" (append)
+ # $file = filehandle or filename to open
+ # Uses:
+ # $Global::original_stdin
+ # Returns:
+ # $fh = file handle to opened file
+ my $mode = shift;
+ my $file = shift;
+ if($file eq "-") {
+ if($mode eq "<") {
+ return ($Global::original_stdin || *STDIN);
+ } else {
+ return ($Global::original_stderr || *STDERR);
+ }
+ }
+ if(ref $file eq "GLOB") {
+ # This is an open filehandle
+ return $file;
+ }
+ my $fh = gensym;
+ open($fh, $mode, $file) || ::error(255,"Cannot open `$file': $!");
+ return $fh;
+}
+
+sub slurp_or_exit($) {
+ # Read content of a file or exit if the file cannot be opened
+ # Inputs:
+ # $file = filehandle or filename to open
+ # Returns:
+ # $content = content as scalar
+ my $fh = open_or_exit("<",shift);
+ # $/ = undef => slurp whole file
+ local $/;
+ my $content = <$fh>;
+ close $fh;
+ return $content;
+}
+
+sub write_or_exit(@) {
+ # Write content to a file or exit if the file cannot be opened
+ # Inputs:
+ # $file = filehandle or filename to open
+ # @content = content to be written
+ # Returns:
+ # N/A
+ my $file = shift;
+    # Use a lexical closure so $file is captured on every call
+    # (a named sub here would only capture the first $file)
+    my $failed = sub { error(255,"Cannot write to `$file': $!"); };
+    my $fh = open_or_exit(">",$file);
+    print($fh @_) or $failed->();
+    close($fh) or $failed->();
+}
+
+sub set_fh_blocking($) {
+ # Set filehandle as blocking
+ # Inputs:
+ # $fh = filehandle to be blocking
+ # Returns:
+ # N/A
+ my $fh = shift;
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+    # Get the current flags on the filehandle
+    # (for F_GETFL the flags are the return value of fcntl)
+    my $flags = fcntl($fh, &F_GETFL, 0) || die $!;
+ # Remove non-blocking from the flags
+ $flags &= ~&O_NONBLOCK;
+ # Set the flags on the filehandle
+ fcntl($fh, &F_SETFL, $flags) || die $!;
+}
+
+sub set_fh_non_blocking($) {
+ # Set filehandle as non-blocking
+ # Inputs:
+ # $fh = filehandle to be blocking
+ # Returns:
+ # N/A
+ my $fh = shift;
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+    # Get the current flags on the filehandle
+    # (for F_GETFL the flags are the return value of fcntl)
+    my $flags = fcntl($fh, &F_GETFL, 0) || die $!;
+ # Add non-blocking to the flags
+ $flags |= &O_NONBLOCK;
+ # Set the flags on the filehandle
+ fcntl($fh, &F_SETFL, $flags) || die $!;
+}
+
+
+sub __RUNNING_THE_JOBS_AND_PRINTING_PROGRESS__() {}
+
+
+# Variable structure:
+#
+# $Global::running{$pid} = Pointer to Job-object
+# @Global::virgin_jobs = Pointer to Job-object that have received no input
+# $Global::host{$sshlogin} = Pointer to SSHLogin-object
+# $Global::total_running = total number of running jobs
+# $Global::total_started = total jobs started
+# $Global::max_procs_file = filename if --jobs is given a filename
+# $Global::JobQueue = JobQueue object for the queue of jobs
+# $Global::timeoutq = queue of times where jobs timeout
+# $Global::newest_job = Job object of the most recent job started
+# $Global::newest_starttime = timestamp of $Global::newest_job
+# @Global::sshlogin
+# $Global::minimal_command_line_length = min len supported by all sshlogins
+# $Global::start_no_new_jobs = should more jobs be started?
+# $Global::original_stderr = file handle for STDERR when the program started
+# $Global::joblog = filehandle of joblog
+# $Global::debug = Is debugging on?
+# $Global::exitstatus = status code of GNU Parallel
+# $Global::quoting = quote the command to run
+
+sub init_run_jobs() {
+ # Set Global variables and progress signal handlers
+ # Do the copying of basefiles
+ # Returns: N/A
+ $Global::total_running = 0;
+ $Global::total_started = 0;
+ $SIG{USR1} = \&list_running_jobs;
+ $SIG{USR2} = \&toggle_progress;
+ if(@opt::basefile) { setup_basefile(); }
+}
+
+{
+ my $last_time;
+ my %last_mtime;
+ my $max_procs_file_last_mod;
+
+ sub changed_procs_file {
+	# If --jobs is a file and it is modified:
+ # Force recomputing of max_jobs_running for each $sshlogin
+ # Uses:
+ # $Global::max_procs_file
+ # %Global::host
+ # Returns: N/A
+ if($Global::max_procs_file) {
+ # --jobs filename
+ my $mtime = (stat($Global::max_procs_file))[9];
+ $max_procs_file_last_mod ||= 0;
+ if($mtime > $max_procs_file_last_mod) {
+ # file changed: Force re-computing max_jobs_running
+ $max_procs_file_last_mod = $mtime;
+ for my $sshlogin (values %Global::host) {
+ $sshlogin->set_max_jobs_running(undef);
+ }
+ }
+ }
+ }
+
+ sub changed_sshloginfile {
+ # If --slf is changed:
+ # reload --slf
+ # filter_hosts
+ # setup_basefile
+ # Uses:
+ # @opt::sshloginfile
+ # @Global::sshlogin
+ # %Global::host
+ # $opt::filter_hosts
+ # Returns: N/A
+ if(@opt::sshloginfile) {
+ # Is --sshloginfile changed?
+ for my $slf (@opt::sshloginfile) {
+ my $actual_file = expand_slf_shorthand($slf);
+ my $mtime = (stat($actual_file))[9];
+ $last_mtime{$actual_file} ||= $mtime;
+ if($mtime - $last_mtime{$actual_file} > 1) {
+ ::debug("run",
+ "--sshloginfile $actual_file changed. reload\n");
+ $last_mtime{$actual_file} = $mtime;
+ # Reload $slf
+ # Empty sshlogins
+ @Global::sshlogin = ();
+ for (values %Global::host) {
+ # Don't start new jobs on any host
+ # except the ones added back later
+ $_->set_max_jobs_running(0);
+ }
+ # This will set max_jobs_running on the SSHlogins
+ read_sshloginfile($actual_file);
+ parse_sshlogin();
+ $opt::filter_hosts and filter_hosts();
+ setup_basefile();
+ }
+ }
+ }
+ }
+
+ sub start_more_jobs {
+ # Run start_another_job() but only if:
+ # * not $Global::start_no_new_jobs set
+ # * not JobQueue is empty
+ # * not load on server is too high
+ # * not server swapping
+ # * not too short time since last remote login
+ # Uses:
+ # %Global::host
+ # $Global::start_no_new_jobs
+ # $Global::JobQueue
+ # $opt::pipe
+ # $opt::load
+ # $opt::noswap
+ # $opt::delay
+ # $Global::newest_starttime
+ # Returns:
+ # $jobs_started = number of jobs started
+ my $jobs_started = 0;
+ if($Global::start_no_new_jobs) {
+ return $jobs_started;
+ }
+ if(time - ($last_time||0) > 1) {
+ # At most do this every second
+ $last_time = time;
+ changed_procs_file();
+ changed_sshloginfile();
+ }
+ # This will start 1 job on each --sshlogin (if possible)
+ # thus distribute the jobs on the --sshlogins round robin
+ for my $sshlogin (values %Global::host) {
+ if($Global::JobQueue->empty() and not $opt::pipe) {
+ # No more jobs in the queue
+ last;
+ }
+ debug("run", "Running jobs before on ", $sshlogin->string(), ": ",
+ $sshlogin->jobs_running(), "\n");
+ if ($sshlogin->jobs_running() < $sshlogin->max_jobs_running()) {
+ if($opt::delay
+ and
+ $opt::delay-0.008 > ::now()-$Global::newest_starttime) {
+ # It has been too short since last start
+ next;
+ }
+ if($opt::load and $sshlogin->loadavg_too_high()) {
+ # The load is too high or unknown
+ next;
+ }
+ if($opt::noswap and $sshlogin->swapping()) {
+ # The server is swapping
+ next;
+ }
+ if($opt::limit and $sshlogin->limit()) {
+ # Over limit
+ next;
+ }
+ if(($opt::memfree or $opt::memsuspend)
+ and
+ $sshlogin->memfree() < $Global::memlimit) {
+ # The server has not enough mem free
+ ::debug("mem", "Not starting job: not enough mem\n");
+ next;
+ }
+ if($sshlogin->too_fast_remote_login()) {
+ # It has been too short since last login
+ next;
+ }
+ debug("run", $sshlogin->string(),
+ " has ", $sshlogin->jobs_running(),
+ " out of ", $sshlogin->max_jobs_running(),
+ " jobs running. Start another.\n");
+ if(start_another_job($sshlogin) == 0) {
+ # No more jobs to start on this $sshlogin
+ debug("run","No jobs started on ",
+ $sshlogin->string(), "\n");
+ next;
+ }
+ $sshlogin->inc_jobs_running();
+ $sshlogin->set_last_login_at(::now());
+ $jobs_started++;
+ }
+ debug("run","Running jobs after on ", $sshlogin->string(), ": ",
+ $sshlogin->jobs_running(), " of ",
+ $sshlogin->max_jobs_running(), "\n");
+ }
+
+ return $jobs_started;
+ }
+}
+
+{
+ my $no_more_file_handles_warned;
+
+ sub start_another_job() {
+ # If there are enough filehandles
+ # and JobQueue not empty
+ # and not $job is in joblog
+ # Then grab a job from Global::JobQueue,
+ # start it at sshlogin
+ # mark it as virgin_job
+ # Inputs:
+ # $sshlogin = the SSHLogin to start the job on
+ # Uses:
+ # $Global::JobQueue
+ # $opt::pipe
+ # $opt::results
+ # $opt::resume
+ # @Global::virgin_jobs
+ # Returns:
+	#   1 if another job was started
+ # 0 otherwise
+ my $sshlogin = shift;
+ # Do we have enough file handles to start another job?
+ if(enough_file_handles()) {
+ if($Global::JobQueue->empty() and not $opt::pipe) {
+ # No more commands to run
+ debug("start", "Not starting: JobQueue empty\n");
+ return 0;
+ } else {
+ my $job;
+ # Skip jobs already in job log
+ # Skip jobs already in results
+ do {
+ $job = get_job_with_sshlogin($sshlogin);
+ if(not defined $job) {
+ # No command available for that sshlogin
+ debug("start", "Not starting: no jobs available for ",
+ $sshlogin->string(), "\n");
+ return 0;
+ }
+ if($job->is_already_in_joblog()) {
+ $job->free_slot();
+ }
+ } while ($job->is_already_in_joblog()
+ or
+ ($opt::results and $opt::resume
+ and $job->is_already_in_results()));
+ debug("start", "Command to run on '",
+ $job->sshlogin()->string(), "': '",
+ $job->replaced(),"'\n");
+ if($job->start()) {
+ if($opt::pipe) {
+ if($job->virgin()) {
+ push(@Global::virgin_jobs,$job);
+ } else {
+ # Block already set: This is a retry
+ $job->write_block();
+ }
+ }
+ debug("start", "Started as seq ", $job->seq(),
+ " pid:", $job->pid(), "\n");
+ return 1;
+ } else {
+ # Not enough processes to run the job.
+ # Put it back on the queue.
+ $Global::JobQueue->unget($job);
+ # Count down the number of jobs to run for this SSHLogin.
+ my $max = $sshlogin->max_jobs_running();
+ if($max > 1) { $max--; } else {
+ my @arg;
+ for my $record (@{$job->{'commandline'}{'arg_list'}}) {
+ push @arg, map { $_->orig() } @$record;
+ }
+ ::error(255,"No more processes: ".
+ "cannot run a single job. ".
+ "Something is wrong at @arg.");
+ }
+ $sshlogin->set_max_jobs_running($max);
+ # Sleep up to 300 ms to give other processes time to die
+ ::usleep(rand()*300);
+ ::warning("No more processes: ".
+ "Decreasing number of running jobs to $max.",
+ "Try increasing 'ulimit -u' (try: ulimit -u `ulimit -Hu`)",
+ "or increasing 'nproc' in /etc/security/limits.conf",
+ "or increasing /proc/sys/kernel/pid_max");
+ return 0;
+ }
+ }
+ } else {
+ # No more file handles
+ $no_more_file_handles_warned++ or
+ ::warning("No more file handles. ",
+ "Try running 'parallel -j0 -N 100 --pipe parallel -j0'",
+ "or increasing 'ulimit -n' (try: ulimit -n `ulimit -Hn`)",
+ "or increasing 'nofile' in /etc/security/limits.conf",
+ "or increasing /proc/sys/fs/file-max");
+ debug("start", "No more file handles. ");
+ return 0;
+ }
+ }
+}
+
+sub init_progress() {
+ # Uses:
+ # $opt::bar
+ # Returns:
+ # list of computers for progress output
+ $|=1;
+ if($opt::bar) {
+ return("","");
+ }
+ my $progress = progress();
+ my $cpu_units = $opt::use_sockets_instead_of_threads ? "CPU sockets" :
+ ($opt::use_cores_instead_of_threads ? "CPU cores" : "CPU threads");
+ return ("\nComputers / $cpu_units / Max jobs to run\n",
+ $progress->{'workerlist'},"\n",$progress->{'header'});
+}
+
+sub drain_job_queue(@) {
+ # Uses:
+ # $opt::progress
+ # $Global::total_running
+ # $Global::max_jobs_running
+ # %Global::running
+ # $Global::JobQueue
+ # %Global::host
+ # $Global::start_no_new_jobs
+ # Returns: N/A
+ my @command = @_;
+ my $sleep = 0.2;
+ my $sleepsum = 0;
+ do {
+ while($Global::total_running > 0) {
+ debug("init",$Global::total_running, "==", scalar
+ keys %Global::running," slots: ", $Global::max_jobs_running);
+ if($opt::pipe) {
+ # When using --pipe sometimes file handles are not
+ # closed properly
+ for my $job (values %Global::running) {
+ close $job->fh(0,"w");
+ }
+ }
+ if($opt::progress) {
+ my $progress = progress();
+ ::status_no_nl("\r",$progress->{'status'});
+ }
+ if($Global::total_running < $Global::max_jobs_running
+ and not $Global::JobQueue->empty()) {
+ # These jobs may not be started because of loadavg
+ # or too little time between each ssh login.
+ if(start_more_jobs() > 0) {
+		    # Jobs were started: ramp the sleep back down exponentially
+ $sleep = $sleep/2+0.001;
+ }
+ }
+ # Exponential back-off sleeping
+ $sleep = ::reap_usleep($sleep);
+ $sleepsum += $sleep;
+ if($sleepsum >= 1000) {
+ # At most do this every second
+ $sleepsum = 0;
+ changed_procs_file();
+ changed_sshloginfile();
+ start_more_jobs();
+ }
+ }
+ if(not $Global::JobQueue->empty()) {
+ # These jobs may not be started:
+	    # * because --filter-hosts has removed all hosts
+ %Global::host or
+ ::error(255,"There are no hosts left to run on.");
+ # * because of loadavg
+ # * because of too little time between each ssh login.
+ $sleep = ::reap_usleep($sleep);
+ start_more_jobs();
+ if($Global::max_jobs_running == 0) {
+ ::warning("There are no job slots available. Increase --jobs.");
+ }
+ }
+ while($opt::sqlmaster and not $Global::sql->finished()) {
+ # SQL master
+ $sleep = ::reap_usleep($sleep);
+ start_more_jobs();
+ if($Global::start_sqlworker) {
+ # Start an SQL worker as we are now sure there is work to do
+ $Global::start_sqlworker = 0;
+ if(my $pid = fork()) {
+ $Global::unkilled_sqlworker = $pid;
+ } else {
+ # Replace --sql/--sqlandworker with --sqlworker
+ my @ARGV = (map { s/^--sql(andworker)?$/--sqlworker/; $_ }
+ @Global::options_in_argv);
+ # exec the --sqlworker
+ exec($0,@ARGV,@command);
+ }
+ }
+ }
+ } while ($Global::total_running > 0
+ or
+ not $Global::start_no_new_jobs and not $Global::JobQueue->empty()
+ or
+ $opt::sqlmaster and not $Global::sql->finished());
+ $Global::all_jobs_done = 1;
+ if($opt::progress) {
+ my $progress = progress();
+ ::status("\r".$progress->{'status'});
+ }
+}
+
+sub toggle_progress() {
+ # Turn on/off progress view
+ # Uses:
+ # $opt::progress
+ # Returns: N/A
+ $opt::progress = not $opt::progress;
+ if($opt::progress) {
+ ::status_no_nl(init_progress());
+ }
+}
+
+{
+ my $last_header;
+ my $eol;
+
+ sub progress() {
+ # Uses:
+ # $opt::bar
+ # $opt::eta
+ # %Global::host
+ # $Global::total_started
+ # Returns:
+ # $workerlist = list of workers
+ # $header = that will fit on the screen
+ # $status = message that will fit on the screen
+ if($opt::bar) {
+ return {"workerlist" => "", "header" => "", "status" => bar()};
+ }
+ my $eta = "";
+ my ($status,$header)=("","");
+ if($opt::eta) {
+ my($total, $completed, $left, $pctcomplete, $avgtime, $this_eta) =
+ compute_eta();
+ $eta = sprintf("ETA: %ds Left: %d AVG: %.2fs ",
+ $this_eta, $left, $avgtime);
+ }
+ my $termcols = terminal_columns();
+ my @workers = sort keys %Global::host;
+ my $workerno = 1;
+ my %wrk;
+ for my $w (@workers) {
+ my %i;
+ $i{'sshlogin'} = $w eq ":" ? "local" : $w;
+ $i{'no'} = $workerno++;
+ $i{'ncpu'} = ($Global::host{$w}->ncpus() || "-");
+ $i{'jobslots'} = $Global::host{$w}->max_jobs_running();
+ $i{'completed'} = ($Global::host{$w}->jobs_completed() || 0);
+ $i{'running'} = $Global::host{$w}->jobs_running();
+ $i{'pct'} = $Global::total_started ?
+ (($i{'running'}+$i{'completed'})*100 /
+ $Global::total_started) : 0;
+ $i{'time'} = $i{'completed'} ? (time-$^T)/($i{'completed'}) : 0;
+ $wrk{$w} = \%i;
+ }
+
+ my $workerlist = "";
+ for my $w (@workers) {
+ $workerlist .=
+ $wrk{$w}{'no'}.":".$wrk{$w}{'sshlogin'} ." / ".
+ $wrk{$w}{'ncpu'}." / ".
+ $wrk{$w}{'jobslots'}."\n";
+ }
+ # Force $status to select one of the below formats
+ $status = "c"x($termcols+1);
+ # Select an output format that will fit on a single line
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX%/XX.Xs s2:XX/XX/XX%/XX.Xs s3:XX/XX/XX%/XX.Xs
+ $header = "Computer:jobs running/jobs completed/".
+ "%of started jobs/Average seconds to complete";
+ $status = $eta . join(" ",map {
+ sprintf("%s:%d/%d/%d%%/%.1fs ",
+ @{$wrk{$_}}
+ {'sshlogin','running','completed','pct','time'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX/XX%/X.Xs 2:XX/XX/XX%/X.Xs 3:XX/XX/XX%/X.Xs
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta . join(" ",map {
+ sprintf("%s:%d/%d/%d%%/%.1fs ",
+ @{$wrk{$_}}
+ {'no','running','completed','pct','time'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX% sshlogin2:XX/XX/XX% sshlogin3:XX/XX/XX%
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta . join(" ",map {
+ sprintf("%s:%d/%d/%d%%",
+ @{$wrk{$_}}
+ {'sshlogin','running','completed','pct'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX/XX% 2:XX/XX/XX% 3:XX/XX/XX% 4:XX/XX/XX% 5:XX/XX/XX%
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta . join(" ",map {
+ sprintf("%s:%d/%d/%d%%",
+ @{$wrk{$_}}
+ {'no','running','completed','pct'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX sshlogin2:XX/XX sshlogin3:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta . join(" ", map {
+ sprintf("%s:%d/%d",
+ @{$wrk{$_}}
+ {'sshlogin','running','completed'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX 2:XX/XX 3:XX/XX 4:XX/XX 5:XX/XX 6:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta . join(" ", map {
+ sprintf("%s:%d/%d",
+ @{$wrk{$_}}
+ {'no','running','completed'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX sshlogin2:XX sshlogin3:XX sshlogin4:XX sshlogin5:XX
+ $header = "Computer:jobs completed";
+ $status = $eta . join(" ", map {
+ sprintf("%s:%d",
+ @{$wrk{$_}}
+ {'sshlogin','completed'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX 2:XX 3:XX 4:XX 5:XX 6:XX
+ $header = "Computer:jobs completed";
+ $status = $eta . join(" ", map {
+ sprintf("%s:%d",
+ @{$wrk{$_}}
+ {'no','completed'}
+ ); } @workers);
+ }
+	if(($last_header||"") ne $header) {
+ $header .= "\n";
+ $last_header = $header;
+ } else {
+ $header = "";
+ }
+ if(not $eol) {
+ $eol = `sh -c "tput el </dev/tty" 2>/dev/null`;
+ chomp($eol);
+ if($eol eq "") { $eol = "\033[K"; }
+ }
+
+ return {"workerlist" => $workerlist, "header" => $header,
+ "status" => $status.$eol};
+ }
+}
+
+{
+
+ my ($first_completed, $smoothed_avg_time, $last_eta);
+
+ sub compute_eta {
+ # Calculate important numbers for ETA
+ # Returns:
+ # $total = number of jobs in total
+ # $completed = number of jobs completed
+ # $left = number of jobs left
+ # $pctcomplete = percent of jobs completed
+ # $avgtime = averaged time
+ # $eta = smoothed eta
+ my $completed = $Global::total_completed;
+	# In rare cases with -X, $completed may exceed total_jobs()
+ my $total = ::max($Global::JobQueue->total_jobs(),$completed);
+ my $left = $total - $completed;
+ if(not $completed) {
+ return($total, $completed, $left, 0, 0, 0);
+ }
+ my $pctcomplete = ::min($completed / $total,100);
+ $first_completed ||= time;
+ my $timepassed = (time - $first_completed);
+ my $avgtime = $timepassed / $completed;
+ $smoothed_avg_time ||= $avgtime;
+ # Smooth the eta so it does not jump wildly
+ $smoothed_avg_time = (1 - $pctcomplete) * $smoothed_avg_time +
+ $pctcomplete * $avgtime;
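+	# Worked example (illustrative): at 25% done with $avgtime = 2.0s
+	# and a previous smoothed value of 4.0s, the new value is
+	# 0.75*4.0 + 0.25*2.0 = 3.5s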
+ my $eta = int($left * $smoothed_avg_time);
+ if($eta*0.90 < $last_eta and $last_eta < $eta) {
+	    # ETA increased by less than ~10%: keep the last ETA instead
+ $eta = $last_eta;
+ } else {
+ $last_eta = $eta;
+ }
+ return($total, $completed, $left, $pctcomplete, $avgtime, $eta);
+ }
+}
+
+{
+ my ($rev,$reset);
+
+ sub bar() {
+ # Return:
+ # $status = bar with eta, completed jobs, arg and pct
+ $rev ||= "\033[7m";
+ $reset ||= "\033[0m";
+ my($total, $completed, $left, $pctcomplete, $avgtime, $eta) =
+ compute_eta();
+ if($Global::all_jobs_done) { $eta = now()-$Global::start_time; }
+ my $arg = $Global::newest_job ?
+ $Global::newest_job->{'commandline'}->
+ replace_placeholders(["\257<\257>"],0,0) : "";
+ $arg = decode_utf8($arg);
+ my $eta_dhms = ::seconds_to_time_units($eta);
+ my $bar_text =
+ sprintf("%d%% %d:%d=%s %s",
+ $pctcomplete*100, $completed, $left, $eta_dhms, $arg);
+ my $terminal_width = terminal_columns();
+ my $s = sprintf("%-${terminal_width}s",
+ substr($bar_text." "x$terminal_width,
+ 0,$terminal_width));
+ my $width = int($terminal_width * $pctcomplete);
+ substr($s,$width,0) = $reset;
+ my $zenity = sprintf("%-${terminal_width}s",
+ substr("# $eta sec $arg",
+ 0,$terminal_width));
+ # Prefix with zenity header
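+	# ('zenity --progress' shows '#'-prefixed lines as text and treats
+	# bare numbers as the percentage, so --bar output can feed it)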
+ $s = "\r" . $zenity . "\r" . $pctcomplete*100 .
+ "\r" . $rev . $s . $reset;
+ return $s;
+ }
+}
+
+{
+ my ($rows,$columns,$last_update_time);
+
+ sub compute_terminal_size() {
+ # && true is to force spawning a shell and not just exec'ing
+ my @tput = qx{ tput lines cols </dev/tty 2>/dev/null && true };
+ $rows = 0 + $tput[0];
+ $columns = 0 + $tput[1];
+ if(not ($rows && $columns)) {
+ # && true is to force spawning a shell and not just exec'ing
+ my $stty = qx{ stty -a </dev/tty 2>/dev/null && true };
+ # FreeBSD/OpenBSD/NetBSD/Dragonfly/MirOS
+ # MacOSX/IRIX/AIX/Tru64
+ $stty =~ /(\d+) columns/ and do { $columns = $1; };
+ $stty =~ /(\d+) rows/ and do { $rows = $1; };
+ # GNU/Linux/Solaris
+ $stty =~ /columns (\d+)/ and do { $columns = $1; };
+ $stty =~ /rows (\d+)/ and do { $rows = $1; };
+ # Solaris-x86/HPUX/SCOsysV/UnixWare/OpenIndiana
+ $stty =~ /columns = (\d+)/ and do { $columns = $1; };
+ $stty =~ /rows = (\d+)/ and do { $rows = $1; };
+ # QNX
+ $stty =~ /rows=(\d+),(\d+)/ and do { ($rows,$columns) = ($1,$2); };
+ }
+ if(not ($rows && $columns)) {
+ # && true is to force spawning a shell and not just exec'ing
+ my $resize = qx{ resize 2>/dev/null && true };
+ $resize =~ /COLUMNS=(\d+);/ and do { $columns ||= $1; };
+ $resize =~ /LINES=(\d+);/ and do { $rows ||= $1; };
+ }
+ $rows ||= 24;
+ $columns ||= 80;
+ }
+
+ sub update_terminal_size() {
+ # Only update once per second.
+	if(($last_update_time||0) < time) {
+ $last_update_time = time;
+ compute_terminal_size();
+ # Set signal WINdow CHange to force recompute
+ $SIG{WINCH} = \&compute_terminal_size;
+ }
+ }
+
+ sub terminal_rows() {
+ # Get the number of rows of the terminal.
+ # Returns:
+ # number of rows of the screen
+ update_terminal_size();
+ return $rows;
+ }
+
+ sub terminal_columns() {
+ # Get the number of columns of the terminal.
+ # Returns:
+ # number of columns of the screen
+ update_terminal_size();
+ return $columns;
+ }
+}
+
+sub untabify($) {
+ # Convert \t into spaces
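+    # Example: untabify("ab\tc") pads each chunk to the next 8-column
+    # tab stop: "ab" . " "x6 . "c" . " "x7 (the last chunk is padded too)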
+ my @out;
+ # Deal with multi-byte characters
+ for my $src (split("\t",$_[0])) {
+ push @out, $src. " "x(8-mbswidth($src)%8);
+ }
+ return join "",@out;
+}
+
+# Prototype forwarding
+sub get_job_with_sshlogin($);
+sub get_job_with_sshlogin($) {
+ # Input:
+ # $sshlogin = which host should the job be run on?
+ # Uses:
+ # $opt::hostgroups
+ # $Global::JobQueue
+ # Returns:
+ # $job = next job object for $sshlogin if any available
+ my $sshlogin = shift;
+ my $job;
+
+ if ($opt::hostgroups) {
+ my @other_hostgroup_jobs = ();
+
+ while($job = $Global::JobQueue->get()) {
+ if($sshlogin->in_hostgroups($job->hostgroups())) {
+ # Found a job to be run on a hostgroup of this
+ # $sshlogin
+ last;
+ } else {
+ # This job was not in the hostgroups of $sshlogin
+ push @other_hostgroup_jobs, $job;
+ }
+ }
+ $Global::JobQueue->unget(@other_hostgroup_jobs);
+ if(not defined $job) {
+ # No more jobs
+ return undef;
+ }
+ } else {
+ $job = $Global::JobQueue->get();
+ if(not defined $job) {
+ # No more jobs
+ ::debug("start", "No more jobs: JobQueue empty\n");
+ return undef;
+ }
+ }
+ if(not $job->suspended()) {
+ $job->set_sshlogin($sshlogin);
+ }
+ if(defined $opt::retries and $job->failed_here()) {
+ # This command with these args failed for this sshlogin
+ my ($no_of_failed_sshlogins,$min_failures) = $job->min_failed();
+ # Only look at the Global::host that have > 0 jobslots
+ if($no_of_failed_sshlogins ==
+ grep { $_->max_jobs_running() > 0 } values %Global::host
+ and $job->failed_here() == $min_failures) {
+ # It failed the same or more times on another host:
+ # run it on this host
+ } else {
+ # If it failed fewer times on another host:
+ # Find another job to run
+ my $nextjob;
+ if(not $Global::JobQueue->empty()) {
+ # This can potentially recurse for all args
+ no warnings 'recursion';
+ $nextjob = get_job_with_sshlogin($sshlogin);
+ }
+ # Push the command back on the queue
+ $Global::JobQueue->unget($job);
+ return $nextjob;
+ }
+ }
+ return $job;
+}
+
+
+sub __REMOTE_SSH__() {}
+
+
+sub read_sshloginfiles(@) {
+ # Read a list of --slf's
+ # Input:
+ # @files = files or symbolic file names to read
+ # Returns: N/A
+ for my $s (@_) {
+ read_sshloginfile(expand_slf_shorthand($s));
+ }
+}
+
+sub expand_slf_shorthand($) {
+ # Expand --slf shorthand into a read file name
+ # Input:
+ # $file = file or symbolic file name to read
+ # Returns:
+ # $file = actual file name to read
+ my $file = shift;
+ if($file eq "-") {
+ # skip: It is stdin
+ } elsif($file eq "..") {
+ $file = $Global::config_dir."/sshloginfile";
+ } elsif($file eq ".") {
+ $file = "/etc/parallel/sshloginfile";
+    } elsif(not -r $file) {
+	# Try prepending each dir in @Global::config_dirs
+	my $found;
+	for(@Global::config_dirs) {
+	    if(-r $_."/".$file) {
+		$file = $_."/".$file;
+		$found = 1;
+		last;
+	    }
+	}
+	# Fail only if no dir in @Global::config_dirs has $file
+	$found or ::error(255,"Cannot open $file.");
+    }
+ return $file;
+}
+
+sub read_sshloginfile($) {
+ # Read sshloginfile into @Global::sshlogin
+ # Input:
+ # $file = file to read
+ # Uses:
+ # @Global::sshlogin
+ # Returns: N/A
+ local $/ = "\n";
+ my $file = shift;
+ my $close = 1;
+ my $in_fh;
+ ::debug("init","--slf ",$file);
+ if($file eq "-") {
+ $in_fh = *STDIN;
+ $close = 0;
+ } else {
+ $in_fh = open_or_exit("<", $file);
+ }
+ while(<$in_fh>) {
+ chomp;
+ /^\s*#/ and next;
+ /^\s*$/ and next;
+ push @Global::sshlogin, $_;
+ }
+ if($close) {
+ close $in_fh;
+ }
+}
+
+sub parse_sshlogin() {
+ # Parse @Global::sshlogin into %Global::host.
+ # Keep only hosts that are in one of the given ssh hostgroups.
+ # Uses:
+ # @Global::sshlogin
+ # $Global::minimal_command_line_length
+ # %Global::host
+ # $opt::transfer
+ # @opt::return
+ # $opt::cleanup
+ # @opt::basefile
+ # @opt::trc
+ # Returns: N/A
+ sub expand_range($) {
+ # Expand host[9-11,15]a[09-11]b
+ # [9-11,15] => 9 10 11 15
+ # [09-11] => 09 10 11
+ my ($in) = @_;
+ my ($prefix, $range, $suffix);
+ if(($prefix, $range, $suffix) = $in =~ /^(.*?)\[([-0-9,]*)\](.*)$/) {
+ my @res;
+ while(length $range) {
+ if($range =~ s/^,//) {
+ # skip
+ } elsif($range =~ s/^(\d+)-(\d+)//) {
+ my ($start, $end) = ($1, $2);
+ push @res, map { $prefix . $_ . $suffix } $start..$end;
+ } elsif($range =~ s/^(\d+)//) {
+ push @res, map { $prefix . $_ . $suffix } $1;
+ } else {
+ die "Cannot parse $in (at $range)";
+ }
+ }
+ return map { expand_range($_) } @res;
+ } else {
+ return $in;
+ }
+ }
+ my @login;
+ if(not @Global::sshlogin) { @Global::sshlogin = (":"); }
+ for my $sshlogin (@Global::sshlogin) {
+ # Split up -S sshlogin,sshlogin
+ # Parse ,, and \, as , but do not split on that
+ # -S "ssh -J jump1,,jump2 host1,host2" =>
+ # ssh -J jump1,jump2 host1
+ # host2
+ # Protect \, and ,, as \0
+ $sshlogin =~ s/\\,|,,/\0/g;
+ # Protect , in ranges: [___,___] => [___\0___]
+ while($sshlogin =~ s/(\[[-0-9\0]*),(.*\])/$1\0$2/g) {}
+ for my $s (split /,|\n/, $sshlogin) {
+ # Replace \0 => ,
+ $s =~ s/\0/,/g;
+ if ($s eq ".." or $s eq "-") {
+ # This may add to @Global::sshlogin - possibly bug
+ read_sshloginfile(expand_slf_shorthand($s));
+ } else {
+ $s =~ s/\s*$//;
+ # Expand host[1-12,15]a[01-10]b
+ push @login, expand_range($s);
+ }
+ }
+ }
+ $Global::minimal_command_line_length = 100_000_000;
+ my @allowed_hostgroups;
+ for my $ncpu_sshlogin_string (::uniq(@login)) {
+ my $sshlogin = SSHLogin->new($ncpu_sshlogin_string);
+ my $sshlogin_string = $sshlogin->string();
+ if($sshlogin_string eq "") {
+ # This is an ssh group: -S @webservers
+ push @allowed_hostgroups, $sshlogin->hostgroups();
+ next;
+ }
+ if($Global::host{$sshlogin_string}) {
+ # This sshlogin has already been added:
+ # It is probably a host that has come back
+ # Set the max_jobs_running back to the original
+ debug("run","Already seen $sshlogin_string\n");
+ if($sshlogin->{'ncpus'}) {
+ # If ncpus set by '#/' of the sshlogin, overwrite it:
+ $Global::host{$sshlogin_string}->set_ncpus($sshlogin->ncpus());
+ }
+ $Global::host{$sshlogin_string}->set_max_jobs_running(undef);
+ next;
+ }
+ $sshlogin->set_maxlength(Limits::Command::max_length());
+
+ $Global::minimal_command_line_length =
+ ::min($Global::minimal_command_line_length, $sshlogin->maxlength());
+ $Global::host{$sshlogin_string} = $sshlogin;
+ }
+ $Global::usable_command_line_length =
+ # Usable len = maxlen - 3000 for wrapping, div 2 for hexing
+ int(($Global::minimal_command_line_length - 3000)/2);
+ if($opt::max_chars) {
+ if($opt::max_chars <= $Global::usable_command_line_length) {
+ $Global::usable_command_line_length = $opt::max_chars;
+ } else {
+ ::warning("Value for option -s should be < ".
+ $Global::usable_command_line_length.".");
+ }
+ }
+ if(@allowed_hostgroups) {
+ # Remove hosts that are not in these groups
+ while (my ($string, $sshlogin) = each %Global::host) {
+ if(not $sshlogin->in_hostgroups(@allowed_hostgroups)) {
+ delete $Global::host{$string};
+ }
+ }
+ }
+
+ # debug("start", "sshlogin: ", my_dump(%Global::host),"\n");
+ if(@Global::transfer_files or @opt::return
+ or $opt::cleanup or @opt::basefile) {
+ if(not remote_hosts()) {
+ # There are no remote hosts
+ if(@opt::trc) {
+ ::warning("--trc ignored as there are no remote --sshlogin.");
+ } elsif (defined $opt::transfer) {
+ ::warning("--transfer ignored as there are ".
+ "no remote --sshlogin.");
+ } elsif (@opt::transfer_files) {
+ ::warning("--transferfile ignored as there ".
+ "are no remote --sshlogin.");
+ } elsif (@opt::return) {
+ ::warning("--return ignored as there are no remote --sshlogin.");
+ } elsif (defined $opt::cleanup and not %opt::template) {
+ ::warning("--cleanup ignored as there ".
+ "are no remote --sshlogin.");
+ } elsif (@opt::basefile) {
+ ::warning("--basefile ignored as there ".
+ "are no remote --sshlogin.");
+ }
+ }
+ }
+}
+
+sub remote_hosts() {
+ # Return sshlogins that are not ':'
+ # Uses:
+ # %Global::host
+ # Returns:
+ # list of sshlogins with ':' removed
+ return grep !/^:$/, keys %Global::host;
+}
+
+sub setup_basefile() {
+ # Transfer basefiles to each $sshlogin
+ # This needs to be done before first jobs on $sshlogin is run
+ # Uses:
+ # %Global::host
+ # @opt::basefile
+ # Returns: N/A
+ my @cmd;
+ my $rsync_destdir;
+ my $workdir;
+ for my $sshlogin (values %Global::host) {
+ if($sshlogin->local()) { next }
+ for my $file (@opt::basefile) {
+ if($file !~ m:^/: and $opt::workdir eq "...") {
+ ::error(255,"Work dir '...' will not work with ".
+ "relative basefiles.");
+ }
+ if(not $workdir) {
+ my $dummycmdline =
+ CommandLine->new(1,["true"],{},0,0,[],[],[],[],{},{});
+ my $dummyjob = Job->new($dummycmdline);
+ $workdir = $dummyjob->workdir();
+ }
+ push @cmd, $sshlogin->rsync_transfer_cmd($file,$workdir);
+ }
+ }
+ debug("init", "basesetup: @cmd\n");
+ my ($exitstatus,$stdout_ref,$stderr_ref) =
+ run_gnu_parallel((join "\n",@cmd),"-j0","--retries",5);
+ if($exitstatus) {
+ my @stdout = @$stdout_ref;
+ my @stderr = @$stderr_ref;
+ ::error(255,"Copying of --basefile failed: @stdout@stderr");
+ }
+}
+
+sub cleanup_basefile() {
+ # Remove the basefiles transferred
+ # Uses:
+ # %Global::host
+ # @opt::basefile
+ # Returns: N/A
+ my @cmd;
+ my $workdir;
+ if(not $workdir) {
+ my $dummycmdline = CommandLine->new(1,["true"],{},0,0,[],[],[],[],{},{});
+ my $dummyjob = Job->new($dummycmdline);
+ $workdir = $dummyjob->workdir();
+ }
+ for my $sshlogin (values %Global::host) {
+ if($sshlogin->local()) { next }
+ for my $file (@opt::basefile) {
+ push @cmd, $sshlogin->cleanup_cmd($file,$workdir);
+ }
+ }
+ debug("init", "basecleanup: @cmd\n");
+ my ($exitstatus,$stdout_ref,$stderr_ref) =
+ run_gnu_parallel(join("\n",@cmd),"-j0","--retries",5);
+ if($exitstatus) {
+ my @stdout = @$stdout_ref;
+ my @stderr = @$stderr_ref;
+ ::error(255,"Cleanup of --basefile failed: @stdout@stderr");
+ }
+}
+
+sub run_gnu_parallel() {
+    # Simple version of _run_gnu_parallel() below: background each
+    # line of $stdin in the shell, wait for all of them, and send the
+    # output to the saved STDERR. @args are ignored here.
+    my ($stdin,@args) = @_;
+    my $cmd = join "",map { " $_ & " } split /\n/, $stdin;
+    print $Global::original_stderr `$cmd wait`;
+    return 0;
+}
+
+sub _run_gnu_parallel() {
+ # Run GNU Parallel
+ # This should ideally just fork an internal copy
+ # and not start it through a shell
+ # Input:
+ # $stdin = data to provide on stdin for GNU Parallel
+ # @args = command line arguments
+ # Returns:
+ # $exitstatus = exitcode of GNU Parallel run
+ # \@stdout = standard output
+ # \@stderr = standard error
+ my ($stdin,@args) = @_;
+ my ($exitstatus,@stdout,@stderr);
+ my ($stdin_fh,$stdout_fh)=(gensym(),gensym());
+ my ($stderr_fh, $stderrname) = ::tmpfile(SUFFIX => ".par");
+ unlink $stderrname;
+
+ my $pid = ::open3($stdin_fh,$stdout_fh,$stderr_fh,
+ $0,qw(--plain --shell /bin/sh --will-cite), @args);
+ if(my $writerpid = fork()) {
+ close $stdin_fh;
+ @stdout = <$stdout_fh>;
+	# Now stdout is closed:
+	# These pids should be dead or die very soon
+	while(kill 0, $writerpid) { ::usleep(1); }
+	# Reap the children to get the exit status of GNU Parallel
+	waitpid($writerpid, 0);
+	waitpid($pid, 0);
+	$exitstatus = $?;
+ seek $stderr_fh, 0, 0;
+ @stderr = <$stderr_fh>;
+ close $stdout_fh;
+ close $stderr_fh;
+ } else {
+ close $stdout_fh;
+ close $stderr_fh;
+ print $stdin_fh $stdin;
+ close $stdin_fh;
+ exit(0);
+ }
+ return ($exitstatus,\@stdout,\@stderr);
+}
+
+sub filter_hosts() {
+ # Remove down --sshlogins from active duty.
+ # Find ncpus, ncores, maxlen, time-to-login for each host.
+ # Uses:
+ # %Global::host
+ # $Global::minimal_command_line_length
+ # $opt::use_sockets_instead_of_threads
+ # $opt::use_cores_instead_of_threads
+ # $opt::use_cpus_instead_of_cores
+ # Returns: N/A
+
+ my ($nsockets_ref,$ncores_ref, $nthreads_ref, $time_to_login_ref,
+ $maxlen_ref, $echo_ref, $down_hosts_ref) =
+ parse_host_filtering(parallelized_host_filtering());
+
+ delete @Global::host{@$down_hosts_ref};
+ @$down_hosts_ref and ::warning("Removed @$down_hosts_ref.");
+
+ $Global::minimal_command_line_length = 100_000_000;
+ while (my ($string, $sshlogin) = each %Global::host) {
+ if($sshlogin->local()) { next }
+ my ($nsockets,$ncores,$nthreads,$time_to_login,$maxlen) =
+ ($nsockets_ref->{$string},$ncores_ref->{$string},
+ $nthreads_ref->{$string},$time_to_login_ref->{$string},
+ $maxlen_ref->{$string});
+ defined $nsockets or ::die_bug("nsockets missing: $string");
+ defined $ncores or ::die_bug("ncores missing: $string");
+ defined $nthreads or ::die_bug("nthreads missing: $string");
+ defined $time_to_login or ::die_bug("time_to_login missing: $string");
+ defined $maxlen or ::die_bug("maxlen missing: $string");
+ # ncpus may be set by 4/hostname or may be undefined yet
+ my $ncpus = $sshlogin->{'ncpus'};
+ # $nthreads may be 0 if GNU Parallel is not installed remotely
+ $ncpus = $nthreads || $ncpus || $sshlogin->ncpus();
+ if($opt::use_cpus_instead_of_cores) {
+ $ncpus = $ncores || $ncpus;
+ } elsif($opt::use_sockets_instead_of_threads) {
+ $ncpus = $nsockets || $ncpus;
+ } elsif($opt::use_cores_instead_of_threads) {
+ $ncpus = $ncores || $ncpus;
+ }
+ $sshlogin->set_ncpus($ncpus);
+ $sshlogin->set_time_to_login($time_to_login);
+ $maxlen = $maxlen || Limits::Command::max_length();
+ $sshlogin->set_maxlength($maxlen);
+ ::debug("init", "Timing from -S:$string ",
+ " ncpus:", $ncpus,
+ " nsockets:",$nsockets,
+ " ncores:", $ncores,
+ " nthreads:",$nthreads,
+ " time_to_login:", $time_to_login,
+ " maxlen:", $maxlen,
+ " min_max_len:", $Global::minimal_command_line_length,"\n");
+ }
+}
+
+sub parse_host_filtering() {
+ # Input:
+ # @lines = output from parallelized_host_filtering()
+ # Returns:
+ # \%nsockets = number of sockets of {host}
+ # \%ncores = number of cores of {host}
+ # \%nthreads = number of hyperthreaded cores of {host}
+ # \%time_to_login = time_to_login on {host}
+ # \%maxlen = max command len on {host}
+ # \%echo = echo received from {host}
+ # \@down_hosts = list of hosts with no answer
+ local $/ = "\n";
+ my (%nsockets, %ncores, %nthreads, %time_to_login, %maxlen, %echo,
+ @down_hosts);
+ for (@_) {
+ ::debug("init","Read: ",$_);
+ chomp;
+ my @col = split /\t/, $_;
+ if($col[0] =~ /^parallel: Warning:/) {
+ # Timed out job: Ignore it
+ next;
+ } elsif(defined $col[6]) {
+ # This is a line from --joblog
+ # seq host time spent sent received exit signal command
+ # 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ parallel\ --number-of-cores
+ if($col[0] eq "Seq" and $col[1] eq "Host" and
+ $col[2] eq "Starttime") {
+ # Header => skip
+ next;
+ }
+ # Get server from: eval true server\;
+ $col[8] =~ /eval .?true.?\s([^\;]+);/ or
+ ::die_bug("col8 does not contain host: $col[8] in $_");
+ my $host = $1;
+ $host =~ tr/\\//d;
+ $Global::host{$host} or next;
+ if($col[6] eq "255" or $col[6] eq "-1" or $col[6] eq "1") {
+		# exit == 255 or exit == timeout (-1): ssh failed/timed out
+ # exit == 1: lsh failed
+ # Remove sshlogin
+ ::debug("init", "--filtered $host\n");
+ push(@down_hosts, $host);
+ } elsif($col[6] eq "127") {
+		# exit == 127: parallel not installed remotely
+ # Set nsockets, ncores, nthreads = 1
+ ::warning("Could not figure out ".
+ "number of cpus on $host. Using 1.");
+ $nsockets{$host} = 1;
+ $ncores{$host} = 1;
+ $nthreads{$host} = 1;
+ $maxlen{$host} = Limits::Command::max_length();
+ } elsif($col[0] =~ /^\d+$/ and $Global::host{$host}) {
+		# Remember how long it took to log in
+ # 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ echo
+ $time_to_login{$host} = ::min($time_to_login{$host},$col[3]);
+ } else {
+ ::die_bug("host check unmatched long jobline: $_");
+ }
+ } elsif($Global::host{$col[0]}) {
+	    # This is output from --number-of-cores, --number-of-cpus,
+ # --max-line-length-allowed
+ # ncores: server 8
+ # ncpus: server 2
+ # maxlen: server 131071
+ if(/parallel: Warning: Cannot figure out number of/) {
+ next;
+ }
+ if(/\t(perl: warning:|LANGUAGE =|LC_ALL =|LANG =|are supported and installed|Disconnected from|Received disconnect from)/
+ or
+ /\tWarning: /
+ or
+ /\t(Host key fingerprint is|\+-.*-\+|\|.*\|)/
+ or
+ /\t\S+: Undefined variable./
+ ) {
+ # Skip these (from perl):
+ # perl: warning: Setting locale failed.
+ # perl: warning: Please check that your locale settings:
+ # LANGUAGE = (unset),
+ # LC_ALL = (unset),
+ # LANG = "en_US.UTF-8"
+ # are supported and installed on your system.
+ # perl: warning: Falling back to the standard locale ("C").
+ # Disconnected from 127.0.0.1 port 22
+ #
+ # Skip these (from ssh):
+ # Warning: Permanently added * to the list of known hosts.
+ # Warning: Identity file * not accessible: *
+ # (VisualHostKey=yes)
+ # Host key fingerprint is SHA256:...
+ # +--[ED25519 256]--+
+ # | o |
+ # +----[SHA256]-----+
+ #
+ # Skip these (from csh):
+ # MANPATH: Undefined variable.
+ } elsif(not defined $nsockets{$col[0]}) {
+ $nsockets{$col[0]} = $col[1];
+ } elsif(not defined $ncores{$col[0]}) {
+ $ncores{$col[0]} = $col[1];
+ } elsif(not defined $nthreads{$col[0]}) {
+ $nthreads{$col[0]} = $col[1];
+ } elsif(not defined $maxlen{$col[0]}) {
+ $maxlen{$col[0]} = $col[1];
+ } elsif(not defined $echo{$col[0]}) {
+ $echo{$col[0]} = $col[1];
+ } else {
+ ::die_bug("host check too many col0: $_");
+ }
+ } else {
+ ::die_bug("host check unmatched short jobline ($col[0]): $_");
+ }
+ }
+ @down_hosts = uniq(@down_hosts);
+ return(\%nsockets, \%ncores, \%nthreads, \%time_to_login,
+ \%maxlen, \%echo, \@down_hosts);
+}
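+# Illustrative sketch (not part of the upstream code path): the two line
+# shapes parse_host_filtering() accepts. A short "host\tvalue" line fills
+# the first still-undefined slot for that host, in the order the queries
+# were sent:
+#   server\t1      => %nsockets  (parallel --number-of-sockets)
+#   server\t8      => %ncores    (parallel --number-of-cores)
+#   server\t16     => %nthreads  (parallel --number-of-threads)
+#   server\t131071 => %maxlen    (parallel --max-line-length-allowed)
+#   server\tserver => %echo      (echo $host)
+# A line with 7+ tab-separated columns is treated as a --joblog entry.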
+
+sub parallelized_host_filtering() {
+ # Uses:
+ # %Global::host
+ # Returns:
+ # text entries with:
+ # * joblog line
+ # * hostname \t number of cores
+ # * hostname \t number of cpus
+ # * hostname \t max-line-length-allowed
+ # * hostname \t empty
+
+ sub sshwrapped {
+ # Wrap with ssh and --env
+	# Return "0\n" if the command fails
+ my $sshlogin = shift;
+ my $command = shift;
+ # wrapper that returns output "0\n" if the command fails
+ # E.g. parallel not installed => "0\n"
+ my $wcmd = q(perl -e '$a=`).$command.q(`; print $? ? "0".v010 : $a');
+ my $commandline = CommandLine->new(1,[$wcmd],{},0,0,[],[],[],[],{},{});
+ my $job = Job->new($commandline);
+ $job->set_sshlogin($sshlogin);
+ $job->wrapped();
+ return($job->{'wrapped'});
+ }
+
+ my(@sockets, @cores, @threads, @maxline, @echo);
+ while (my ($host, $sshlogin) = each %Global::host) {
+ if($host eq ":") { next }
+ # The 'true' is used to get the $host out later
+ push(@sockets, $host."\t"."true $host; ".
+ sshwrapped($sshlogin,"parallel --number-of-sockets")."\n\0");
+ push(@cores, $host."\t"."true $host; ".
+ sshwrapped($sshlogin,"parallel --number-of-cores")."\n\0");
+ push(@threads, $host."\t"."true $host; ".
+ sshwrapped($sshlogin,"parallel --number-of-threads")."\n\0");
+ push(@maxline, $host."\t"."true $host; ".
+ sshwrapped($sshlogin,
+ "parallel --max-line-length-allowed")."\n\0");
+ # 'echo' is used to get the fastest possible ssh login time
+ push(@echo, $host."\t"."true $host; ".
+ $sshlogin->wrap("echo $host")."\n\0");
+ }
+ # --timeout 10: Setting up an SSH connection and running a simple
+ # command should never take > 10 sec.
+ # --delay 0.1: If multiple sshlogins use the same proxy the delay
+ # will make it less likely to overload the ssh daemon.
+ # --retries 3: If the ssh daemon is overloaded, try 3 times
+ my $cmd =
+ "$0 -j0 --timeout 10 --joblog - --plain --delay 0.1 --retries 3 ".
+ "--tag --tagstring '{1}' -0 --colsep '\t' -k eval '{2}' && true ";
+ $cmd = $Global::shell." -c ".Q($cmd);
+ ::debug("init", $cmd, "\n");
+ my @out;
+ my $prepend = "";
+
+ my ($host_fh,$in,$err);
+ open3($in, $host_fh, $err, $cmd) || ::die_bug("parallel host check: $cmd");
+ ::debug("init", map { $_,"\n" } @sockets, @cores, @threads, @maxline, @echo);
+
+ if(not fork()) {
+ # Give the commands to run to the $cmd
+ close $host_fh;
+ print $in @sockets, @cores, @threads, @maxline, @echo;
+ close $in;
+ exit();
+ }
+ close $in;
+ # If -0: $/ must be \n
+ local $/ = "\n";
+ for(<$host_fh>) {
+ # TODO incompatible with '-quoting. Needs to be fixed differently
+ #if(/\'$/) {
+ # # if last char = ' then append next line
+ # # This may be due to quoting of \n in environment var
+ # $prepend .= $_;
+ # next;
+ #}
+ $_ = $prepend . $_;
+ $prepend = "";
+ push @out, $_;
+ }
+ close $host_fh;
+ return @out;
+}
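+# Usage sketch (illustrative): the two subs are paired as shown earlier:
+#   parse_host_filtering(parallelized_host_filtering())
+# returns the per-host refs that are used to set ncpus, time_to_login,
+# and maxlength on each SSHLogin.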
+
+sub onall($@) {
+ # Runs @command on all hosts.
+ # Uses parallel to run @command on each host.
+ # --jobs = number of hosts to run on simultaneously.
+ # For each host a parallel command with the args will be running.
+ # Uses:
+ # $Global::debug
+ # $Global::exitstatus
+ # $Global::joblog
+ # $Global::quoting
+ # $opt::D
+ # $opt::arg_file_sep
+ # $opt::arg_sep
+ # $opt::colsep
+ # $opt::files
+ # $opt::files0
+ # $opt::group
+ # $opt::joblog
+ # $opt::jobs
+ # $opt::keeporder
+ # $opt::linebuffer
+ # $opt::max_chars
+ # $opt::plain
+ # $opt::retries
+ # $opt::tag
+ # $opt::tee
+ # $opt::timeout
+ # $opt::ungroup
+ # %Global::host
+ # @opt::basefile
+ # @opt::env
+ # @opt::v
+ # Input:
+ # @command = command to run on all hosts
+ # Returns: N/A
+ sub tmp_joblog {
+ # Input:
+ # $joblog = filename of joblog - undef if none
+ # Returns:
+ # $tmpfile = temp file for joblog - undef if none
+ my $joblog = shift;
+ if(not defined $joblog) {
+ return undef;
+ }
+ my ($fh, $tmpfile) = ::tmpfile(SUFFIX => ".log");
+ close $fh;
+ return $tmpfile;
+ }
+ my ($input_source_fh_ref,@command) = @_;
+ if($Global::quoting) {
+ @command = shell_quote(@command);
+ }
+
+ # Copy all @input_source_fh (-a and :::) into tempfiles
+ my @argfiles = ();
+ for my $fh (@$input_source_fh_ref) {
+ my ($outfh, $name) = ::tmpfile(SUFFIX => ".all", UNLINK => not $opt::D);
+ print $outfh (<$fh>);
+ close $outfh;
+ push @argfiles, $name;
+ }
+ if(@opt::basefile) { setup_basefile(); }
+ # for each sshlogin do:
+ # parallel -S $sshlogin $command :::: @argfiles
+ #
+ # Pass some of the options to the sub-parallels, not all of them as
+ # -P should only go to the first, and -S should not be copied at all.
+ my $options =
+ join(" ",
+ ((defined $opt::sshdelay) ? "--delay ".$opt::sshdelay : ""),
+ ((defined $opt::memfree) ? "--memfree ".$opt::memfree : ""),
+	     ((defined $opt::memsuspend) ? "--memsuspend ".$opt::memsuspend : ""),
+ ((defined $opt::D) ? "-D $opt::D" : ""),
+ ((defined $opt::group) ? "--group" : ""),
+ ((defined $opt::jobs) ? "-P $opt::jobs" : ""),
+ ((defined $opt::keeporder) ? "--keeporder" : ""),
+ ((defined $opt::linebuffer) ? "--linebuffer" : ""),
+ ((defined $opt::max_chars) ? "--max-chars ".$opt::max_chars : ""),
+ ((defined $opt::plain) ? "--plain" : ""),
+ (($opt::ungroup == 1) ? "-u" : ""),
+ ((defined $opt::tee) ? "--tee" : ""),
+ );
+ my $suboptions =
+ join(" ",
+ ((defined $opt::sshdelay) ? "--delay ".$opt::sshdelay : ""),
+ ((defined $opt::D) ? "-D $opt::D" : ""),
+ ((defined $opt::arg_file_sep) ? "--arg-file-sep ".$opt::arg_file_sep : ""),
+ ((defined $opt::arg_sep) ? "--arg-sep ".$opt::arg_sep : ""),
+ ((defined $opt::colsep) ? "--colsep ".shell_quote($opt::colsep) : ""),
+ ((defined $opt::files) ? "--files" : ""),
+ ((defined $opt::files0) ? "--files0" : ""),
+ ((defined $opt::group) ? "--group" : ""),
+ ((defined $opt::cleanup) ? "--cleanup" : ""),
+ ((defined $opt::keeporder) ? "--keeporder" : ""),
+ ((defined $opt::linebuffer) ? "--linebuffer" : ""),
+ ((defined $opt::max_chars) ? "--max-chars ".$opt::max_chars : ""),
+ ((defined $opt::plain) ? "--plain" : ""),
+ ((defined $opt::plus) ? "--plus" : ""),
+ ((defined $opt::retries) ? "--retries ".$opt::retries : ""),
+ ((defined $opt::timeout) ? "--timeout ".$opt::timeout : ""),
+ (($opt::ungroup == 1) ? "-u" : ""),
+ ((defined $opt::ssh) ? "--ssh '".$opt::ssh."'" : ""),
+ ((defined $opt::tee) ? "--tee" : ""),
+ ((defined $opt::workdir) ? "--wd ".Q($opt::workdir) : ""),
+ (@Global::transfer_files ? map { "--tf ".Q($_) }
+ @Global::transfer_files : ""),
+ (@Global::ret_files ? map { "--return ".Q($_) }
+ @Global::ret_files : ""),
+ (@opt::env ? map { "--env ".Q($_) } @opt::env : ""),
+ (map { "-v" } @opt::v),
+ );
+ ::debug("init", "| $0 $options\n");
+ open(my $parallel_fh, "|-", "$0 -0 --will-cite -j0 $options") ||
+ ::die_bug("This does not run GNU Parallel: $0 $options");
+ my @joblogs;
+ for my $host (sort keys %Global::host) {
+ my $sshlogin = $Global::host{$host};
+ my $qsshlogin = Q($sshlogin->string());
+ my $joblog = tmp_joblog($opt::joblog);
+ if($joblog) {
+ push @joblogs, $joblog;
+ $joblog = "--joblog ".::Q($joblog);
+ }
+ my $quad = $opt::arg_file_sep || "::::";
+ # If PARALLEL_ENV is set: Pass it on
+ my $penv=$Global::parallel_env ?
+ "PARALLEL_ENV=".Q($Global::parallel_env) :
+ '';
+ my $results;
+ if(defined $opt::results) {
+ $results = Q($opt::results) . $qsshlogin;
+ }
+ ::debug("init", "$penv $0 $suboptions -j1 $joblog ",
+ ((defined $opt::tag) ? "--tagstring ".$qsshlogin : ""),
+ ((defined $opt::results) ? "--results ".$results : ""),
+ " -S $qsshlogin ",
+ join(" ",shell_quote(@command,$quad,@argfiles)),"\n");
+ print $parallel_fh "$penv $0 $suboptions -j1 $joblog ",
+ ((defined $opt::tag) ? "--tagstring ".$qsshlogin : ""),
+ ((defined $opt::results) ? "--results ".$results : ""),
+ " -S $qsshlogin ",
+ join(" ",shell_quote(@command,$quad,@argfiles)),"\0";
+ }
+ close $parallel_fh;
+ $Global::exitstatus = $? >> 8;
+ debug("init", "--onall exitvalue ", $?);
+ if(@opt::basefile and $opt::cleanup) { cleanup_basefile(); }
+ $Global::debug or unlink(@argfiles);
+ my %seen;
+ for my $joblog (@joblogs) {
+	# Append this joblog to the main --joblog
+	my $fh = open_or_exit("<", $joblog);
+	# Skip the first line (the header)
+ <$fh>;
+ print $Global::joblog (<$fh>);
+ close $fh;
+ unlink($joblog);
+ }
+}
+
+
+sub __SIGNAL_HANDLING__() {}
+
+
+sub sigtstp() {
+ # Send TSTP signal (Ctrl-Z) to all children process groups
+ # Uses:
+ # %SIG
+ # Returns: N/A
+ signal_children("TSTP");
+}
+
+sub sigpipe() {
+ # Send SIGPIPE signal to all children process groups
+ # Uses:
+ # %SIG
+ # Returns: N/A
+ signal_children("PIPE");
+}
+
+sub signal_children() {
+ # Send signal to all children process groups
+ # and GNU Parallel itself
+ # Uses:
+ # %SIG
+ # Returns: N/A
+ my $signal = shift;
+ debug("run", "Sending $signal ");
+ kill $signal, map { -$_ } keys %Global::running;
+ # Use default signal handler for GNU Parallel itself
+ $SIG{$signal} = undef;
+ kill $signal, $$;
+}
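+# Usage sketch (illustrative): signal_children("TSTP") forwards Ctrl-Z to
+# every running job's process group (negative pid = process group), then
+# re-raises TSTP on GNU Parallel itself with the default handler restored.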
+
+sub save_original_signal_handler() {
+ # Remember the original signal handler
+ # Uses:
+ # %Global::original_sig
+ # Returns: N/A
+ $SIG{INT} = sub {
+ if($opt::tmux) { ::qqx("tmux kill-session -t p$$"); }
+ wait_and_exit(255);
+ };
+ $SIG{TERM} = sub {
+ if($opt::tmux) { ::qqx("tmux kill-session -t p$$"); }
+ wait_and_exit(255);
+ };
+ %Global::original_sig = %SIG;
+ $SIG{TERM} = sub {}; # Dummy until jobs really start
+ $SIG{ALRM} = 'IGNORE';
+ # Allow Ctrl-Z to suspend and `fg` to continue
+ $SIG{TSTP} = \&sigtstp;
+ $SIG{PIPE} = \&sigpipe;
+ $SIG{CONT} = sub {
+ # Set $SIG{TSTP} again (it is undef'ed in sigtstp() )
+ $SIG{TSTP} = \&sigtstp;
+ for my $job (values %Global::running) {
+ if($job->suspended()) {
+ # Force jobs to suspend, if they are marked as suspended.
+		# --memsuspend can suspend a job that will be resumed
+ # if the user presses CTRL-Z followed by `fg`.
+ $job->suspend();
+ } else {
+ # Resume the rest of the jobs
+ $job->resume();
+ }
+ }
+ };
+}
+
+sub list_running_jobs() {
+ # Print running jobs on tty
+ # Uses:
+ # %Global::running
+ # Returns: N/A
+ for my $job (values %Global::running) {
+ ::status("$Global::progname: ".$job->replaced());
+ }
+}
+
+sub start_no_new_jobs() {
+ # Start no more jobs
+ # Uses:
+ # %Global::original_sig
+ # %Global::unlink
+ # $Global::start_no_new_jobs
+ # Returns: N/A
+ unlink keys %Global::unlink;
+ ::status
+ ("$Global::progname: SIGHUP received. No new jobs will be started.",
+ "$Global::progname: Waiting for these ".(keys %Global::running).
+ " jobs to finish. Send SIGTERM to stop now.");
+ list_running_jobs();
+ $Global::start_no_new_jobs ||= 1;
+}
+
+sub reapers() {
+ # Run reaper until there are no more left
+ # Returns:
+ # @pids_reaped = pids of reaped processes
+ my @pids_reaped;
+ my $pid;
+ while($pid = reaper()) {
+ push @pids_reaped, $pid;
+ }
+ return @pids_reaped;
+}
+
+sub reaper() {
+ # A job finished:
+ # * Set exitstatus, exitsignal, endtime.
+    # * Free resources for new job
+ # * Update median runtime
+ # * Print output
+ # * If --halt = now: Kill children
+ # * Print progress
+ # Uses:
+ # %Global::running
+ # $opt::timeout
+ # $Global::timeoutq
+ # $opt::keeporder
+ # $Global::total_running
+ # Returns:
+ # $stiff = PID of child finished
+ my $stiff;
+ debug("run", "Reaper ");
+ if(($stiff = waitpid(-1, &WNOHANG)) <= 0) {
+ # No jobs waiting to be reaped
+ return 0;
+ }
+
+ # $stiff = pid of dead process
+ my $job = $Global::running{$stiff};
+
+ # '-a <(seq 10)' will give us a pid not in %Global::running
+    # The same goes for the 'ssh -M' master process: ignore it
+ $job or return 0;
+ delete $Global::running{$stiff};
+ $Global::total_running--;
+ if($job->{'commandline'}{'skip'}) {
+ # $job->skip() was called
+ $job->set_exitstatus(-2);
+ $job->set_exitsignal(0);
+ } else {
+ $job->set_exitsignal($? & 127);
+ if($job->exitstatus()) {
+ # Exit status already set - probably by --timeout
+ } elsif($? & 127) {
+ # Killed by signal. Many shells return: 128 | $signal
+ $job->set_exitstatus(128 | $?);
+ } else {
+ # Normal exit
+ $job->set_exitstatus($? >> 8);
+ }
+ }
+
+ debug("run", "\nseq ",$job->seq()," died (", $job->exitstatus(), ")");
+ if($Global::delayauto or $Global::sshdelayauto) {
+ if($job->exitstatus()) {
+ # Job failed: Increase delay (if $opt::(ssh)delay set)
+ $opt::delay &&= $opt::delay * 1.3;
+ $opt::sshdelay &&= $opt::sshdelay * 1.3;
+ } else {
+ # Job succeeded: Decrease delay (if $opt::(ssh)delay set)
+ $opt::delay &&= $opt::delay * 0.9;
+ $opt::sshdelay &&= $opt::sshdelay * 0.9;
+ }
+ debug("run", "delay:$opt::delay ssh:$opt::sshdelay ");
+ }
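+    # Worked example (illustrative): with --delay 1 and delayauto on,
+    # two failures followed by one success give
+    #   1.0 * 1.3 * 1.3 = 1.69, then 1.69 * 0.9 = 1.521 seconds.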
+ $job->set_endtime(::now());
+ my $sshlogin = $job->sshlogin();
+ $sshlogin->dec_jobs_running();
+ if($job->should_be_retried()) {
+ # Free up file handles
+ $job->free_ressources();
+ } else {
+ # The job is done
+ $sshlogin->inc_jobs_completed();
+ # Free the jobslot
+ $job->free_slot();
+ if($opt::timeout and not $job->exitstatus()) {
+ # Update average runtime for timeout only for successful jobs
+ $Global::timeoutq->update_median_runtime($job->runtime());
+ }
+ if($opt::keeporder and not $opt::latestline) {
+ # --latestline fixes --keeporder in Job::row()
+ $job->print_earlier_jobs();
+ } else {
+ $job->print();
+ }
+ if($job->should_we_halt() eq "now") {
+ # Kill children
+ ::kill_sleep_seq($job->pid());
+ ::killall();
+ ::wait_and_exit($Global::halt_exitstatus);
+ }
+ }
+ $job->cleanup();
+
+ if($opt::progress) {
+ my $progress = progress();
+ ::status_no_nl("\r",$progress->{'status'});
+ }
+
+ debug("run", "jobdone \n");
+ return $stiff;
+}
+
+
+sub __USAGE__() {}
+
+
+sub killall() {
+ # Kill all jobs by killing their process groups
+ # Uses:
+ # $Global::start_no_new_jobs = we are stopping
+ # $Global::killall = Flag to not run reaper
+ $Global::start_no_new_jobs ||= 1;
+ # Do not reap killed children: Ignore them instead
+ $Global::killall ||= 1;
+ kill_sleep_seq(keys %Global::running);
+}
+
+sub kill_sleep_seq(@) {
+ # Send jobs TERM,TERM,KILL to processgroups
+ # Input:
+ # @pids = list of pids that are also processgroups
+ # Convert pids to process groups ($processgroup = -$pid)
+ my @pgrps = map { -$_ } @_;
+ my @term_seq = split/,/,$opt::termseq;
+ if(not @term_seq) {
+ @term_seq = ("TERM",200,"TERM",100,"TERM",50,"KILL",25);
+ }
+ # for each signal+waittime: kill process groups still not dead
+ while(@term_seq) {
+ @pgrps = kill_sleep(shift @term_seq, shift @term_seq, @pgrps);
+ }
+}
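+# Illustrative reading of the default sequence above: send TERM, wait up
+# to 200 ms; TERM again, wait 100 ms; TERM again, wait 50 ms; finally
+# KILL and wait 25 ms. --termseq overrides follow the same
+# signal,ms,signal,ms,... pattern.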
+
+sub kill_sleep() {
+ # Kill pids with a signal and wait a while for them to die
+ # Input:
+ # $signal = signal to send to @pids
+ # $sleep_max = number of ms to sleep at most before returning
+ # @pids = pids to kill (actually process groups)
+ # Uses:
+ # $Global::killall = set by killall() to avoid calling reaper
+ # Returns:
+ # @pids = pids still alive
+ my ($signal, $sleep_max, @pids) = @_;
+ ::debug("kill","kill_sleep $signal ",(join " ",sort @pids),"\n");
+ kill $signal, @pids;
+ my $sleepsum = 0;
+ my $sleep = 0.001;
+
+ while(@pids and $sleepsum < $sleep_max) {
+ if($Global::killall) {
+ # Killall => don't run reaper
+ while(waitpid(-1, &WNOHANG) > 0) {
+ $sleep = $sleep/2+0.001;
+ }
+ } elsif(reapers()) {
+ $sleep = $sleep/2+0.001;
+ }
+ $sleep *= 1.1;
+ ::usleep($sleep);
+ $sleepsum += $sleep;
+ # Keep only living children
+ @pids = grep { kill(0, $_) } @pids;
+ }
+ return @pids;
+}
+
+sub wait_and_exit($) {
+    # If we do not wait, we sometimes get a segfault
+ # Returns: N/A
+ my $error = shift;
+ unlink keys %Global::unlink;
+ if($error) {
+ # Kill all jobs without printing
+ killall();
+ }
+ for (keys %Global::unkilled_children) {
+ # Kill any (non-jobs) children (e.g. reserved processes)
+ kill 9, $_;
+ waitpid($_,0);
+ delete $Global::unkilled_children{$_};
+ }
+ if($Global::unkilled_sqlworker) {
+ waitpid($Global::unkilled_sqlworker,0);
+ }
+ # Avoid: Warning: unable to close filehandle properly: No space
+ # left on device during global destruction.
+ $SIG{__WARN__} = sub {};
+ if($opt::_parset) {
+ # Make the shell script return $error
+ print "$Global::parset_endstring\nreturn $error";
+ }
+ exit($error);
+}
+
+sub die_usage() {
+ # Returns: N/A
+ usage();
+ wait_and_exit(255);
+}
+
+sub usage() {
+ # Returns: N/A
+ print join
+ ("\n",
+ "Usage:",
+ "",
+ "$Global::progname [options] [command [arguments]] < list_of_arguments",
+ "$Global::progname [options] [command [arguments]] (::: arguments|:::: argfile(s))...",
+ "cat ... | $Global::progname --pipe [options] [command [arguments]]",
+ "",
+ "-j n Run n jobs in parallel",
+ "-k Keep same order",
+ "-X Multiple arguments with context replace",
+ "--colsep regexp Split input on regexp for positional replacements",
+ "{} {.} {/} {/.} {#} {%} {= perl code =} Replacement strings",
+ "{3} {3.} {3/} {3/.} {=3 perl code =} Positional replacement strings",
+ "With --plus: {} = {+/}/{/} = {.}.{+.} = {+/}/{/.}.{+.} = {..}.{+..} =",
+ " {+/}/{/..}.{+..} = {...}.{+...} = {+/}/{/...}.{+...}",
+ "",
+ "-S sshlogin Example: foo\@server.example.com",
+ "--slf .. Use ~/.parallel/sshloginfile as the list of sshlogins",
+ "--trc {}.bar Shorthand for --transfer --return {}.bar --cleanup",
+ "--onall Run the given command with argument on all sshlogins",
+ "--nonall Run the given command with no arguments on all sshlogins",
+ "",
+ "--pipe Split stdin (standard input) to multiple jobs.",
+ "--recend str Record end separator for --pipe.",
+ "--recstart str Record start separator for --pipe.",
+ "",
+ "GNU Parallel can do much more. See 'man $Global::progname' for details",
+ "",
+ "Academic tradition requires you to cite works you base your article on.",
+ "If you use programs that use GNU Parallel to process data for an article in a",
+ "scientific publication, please cite:",
+ "",
+ " Tange, O. (2024, February 22). GNU Parallel 20240222 ('Навальный').",
+ " Zenodo. https://doi.org/10.5281/zenodo.10719803",
+ "",
+ # Before changing these lines, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be put in a public hall of shame by removing
+ # these lines
+ "This helps funding further development; AND IT WON'T COST YOU A CENT.",
+ "If you pay 10000 EUR you should feel free to use GNU Parallel without citing.",
+ "",
+ "",);
+}
+
+sub citation_notice() {
+ # if --will-cite or --plain: do nothing
+ # if stderr redirected: do nothing
+ # if $PARALLEL_HOME/will-cite: do nothing
+ # else: print citation notice to stderr
+ if($opt::willcite
+ or
+ $opt::plain
+ or
+ not -t $Global::original_stderr
+ or
+ grep { -e "$_/will-cite" } @Global::config_dirs) {
+ # skip
+ } else {
+ ::status
+ ("Academic tradition requires you to cite works you base your article on.",
+ "If you use programs that use GNU Parallel to process data for an article in a",
+ "scientific publication, please cite:",
+ "",
+ " Tange, O. (2024, February 22). GNU Parallel 20240222 ('Навальный').",
+ " Zenodo. https://doi.org/10.5281/zenodo.10719803",
+ "",
+ # Before changing these line, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice and
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be put in a public hall of shame by
+ # removing these lines
+ "This helps funding further development; AND IT WON'T COST YOU A CENT.",
+ "If you pay 10000 EUR you should feel free to use GNU Parallel without citing.",
+ "",
+ "More about funding GNU Parallel and the citation notice:",
+ "https://www.gnu.org/software/parallel/parallel_design.html#citation-notice",
+ "",
+ "To silence this citation notice: run 'parallel --citation' once.",
+ ""
+ );
+ mkdir $Global::config_dir;
+ # Number of times the user has run GNU Parallel without showing
+ # willingness to cite
+ my $runs = 0;
+ if(open (my $fh, "<", $Global::config_dir.
+ "/runs-without-willing-to-cite")) {
+ $runs = <$fh>;
+ close $fh;
+ }
+ $runs++;
+ if(open (my $fh, ">", $Global::config_dir.
+ "/runs-without-willing-to-cite")) {
+ print $fh $runs;
+ close $fh;
+ if($runs >= 10) {
+ ::status("Come on: You have run parallel $runs times. ".
+ "Isn't it about time ",
+ "you run 'parallel --citation' once to silence ".
+ "the citation notice?",
+ "");
+ }
+ }
+ }
+}
+
+sub status(@) {
+ my @w = @_;
+ my $fh = $Global::status_fd || *STDERR;
+ print $fh map { ($_, "\n") } @w;
+ flush $fh;
+}
+
+sub status_no_nl(@) {
+ my @w = @_;
+ my $fh = $Global::status_fd || *STDERR;
+ print $fh @w;
+ flush $fh;
+}
+
+sub warning(@) {
+ my @w = @_;
+ my $prog = $Global::progname || "parallel";
+ status_no_nl(map { ($prog, ": Warning: ", $_, "\n"); } @w);
+}
+
+{
+ my %warnings;
+ sub warning_once(@) {
+ my @w = @_;
+ my $prog = $Global::progname || "parallel";
+	# Key on the joined message so each distinct warning prints once
+	$warnings{"@w"}++ or
+	    status_no_nl(map { ($prog, ": Warning: ", $_, "\n"); } @w);
+ }
+}
+
+sub error(@) {
+ my ($exitcode,@w) = @_;
+ my $prog = $Global::progname || "parallel";
+ if($exitcode =~ /^\d+$/) {
+ # If $exitcode is set, exit the program
+ status(map { ($prog.": Error: ". $_); } @w);
+ wait_and_exit($exitcode);
+ } else {
+ # Otherwise $exitcode is just a string
+ status(map { ($prog.": Error: ". $_); } $exitcode, @w);
+ }
+}
+
+sub die_bug($) {
+ my $bugid = shift;
+ print STDERR
+ ("$Global::progname: This should not happen. You have found a bug. ",
+ "Please follow\n",
+ "https://www.gnu.org/software/parallel/man.html#reporting-bugs\n",
+ "\n",
+ "Include this in the report:\n",
+ "* The version number: $Global::version\n",
+ "* The bugid: $bugid\n",
+ "* The command line being run\n",
+ "* The files being read (put the files on a webserver if they are big)\n",
+ "\n",
+ "If you get the error on smaller/fewer files, please include those instead.\n");
+ ::wait_and_exit(255);
+}
+
+sub version() {
+ # Returns: N/A
+ print join
+ ("\n",
+ "GNU $Global::progname $Global::version",
+ "Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software",
+ "Foundation, Inc.",
+ "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>",
+ "This is free software: you are free to change and redistribute it.",
+ "GNU $Global::progname comes with no warranty.",
+ "",
+ "Web site: https://www.gnu.org/software/${Global::progname}\n",
+ "When using programs that use GNU Parallel to process data for publication",
+ "please cite as described in 'parallel --citation'.\n",
+ );
+}
+
+sub citation() {
+ # Returns: N/A
+ my ($all_argv_ref,$argv_options_removed_ref) = @_;
+ my $all_argv = "@$all_argv_ref";
+ my $no_opts = "@$argv_options_removed_ref";
+ $all_argv=~s/--citation//;
+ if($all_argv ne $no_opts) {
+ ::warning("--citation ignores all other options and arguments.");
+ ::status("");
+ }
+
+ ::status(
+ "Academic tradition requires you to cite works you base your article on.",
+ "If you use programs that use GNU Parallel to process data for an article in a",
+ "scientific publication, please cite:",
+ "",
+ "\@software{tange_2024_10719803,",
+ " author = {Tange, Ole},",
+ " title = {GNU Parallel 20240222 ('Навальный')},",
+ " month = Feb,",
+ " year = 2024,",
+ " note = {{GNU Parallel is a general parallelizer to run",
+ " multiple serial command line programs in parallel",
+ " without changing them.}},",
+ " publisher = {Zenodo},",
+ " doi = {10.5281/zenodo.10719803},",
+ " url = {https://doi.org/10.5281/zenodo.10719803}",
+ "}",
+ "",
+ "(Feel free to use \\nocite{tange_2024_10719803})",
+ "",
+ # Before changing these lines, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice and
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be put in a public hall of shame by removing
+ # these lines
+ "This helps funding further development; AND IT WON'T COST YOU A CENT.",
+ "If you pay 10000 EUR you should feel free to use GNU Parallel without citing.",
+ "",
+ "More about funding GNU Parallel and the citation notice:",
+ "https://lists.gnu.org/archive/html/parallel/2013-11/msg00006.html",
+ "https://www.gnu.org/software/parallel/parallel_design.html#citation-notice",
+ "https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt",
+ ""
+ );
+ while(not grep { -e "$_/will-cite" } @Global::config_dirs) {
+ print "\nType: 'will cite' and press enter.\n> ";
+ my $input = <STDIN>;
+ if(not defined $input) {
+ exit(255);
+ }
+ if($input =~ /will cite/i) {
+ if(mkdir $Global::config_dir) {
+ # Recompute @Global::config_dirs so we can break out of the loop.
+ init_globals();
+ }
+ if(open (my $fh, ">", $Global::config_dir."/will-cite")) {
+ close $fh;
+ ::status(
+ "",
+ "Thank you for your support: You are the reason why there is funding to",
+ "continue maintaining GNU Parallel. On behalf of future versions of",
+ "GNU Parallel, which would not exist without your support:",
+ "",
+ " THANK YOU SO MUCH",
+ "",
+ "It is really appreciated. The citation notice is now silenced.",
+ "");
+ } else {
+ ::status(
+ "",
+ "Thank you for your support. It is much appreciated. The citation",
+ "cannot permanently be silenced. Use '--will-cite' instead.",
+ "",
+ "If you use '--will-cite' in scripts to be run by others you are making",
+ "it harder for others to see the citation notice. The development of",
+ "GNU Parallel is indirectly financed through citations, so if users",
+ "do not know they should cite then you are making it harder to finance",
+ "development. However, if you pay 10000 EUR, you should feel free to",
+ "use '--will-cite' in scripts.",
+ "");
+ last;
+ }
+ }
+ }
+}
+
+sub show_limits() {
+ # Returns: N/A
+ print("Maximal size of command: ",Limits::Command::real_max_length(),"\n",
+ "Maximal usable size of command: ",
+ $Global::usable_command_line_length,"\n",
+ "\n",
+ "Execution will continue now, ",
+ "and it will try to read its input\n",
+ "and run commands; if this is not ",
+ "what you wanted to happen, please\n",
+ "press CTRL-D or CTRL-C\n");
+}
+
+sub embed() {
+ # Give an embeddable version of GNU Parallel
+ # Tested with: bash, zsh, ksh, ash, dash, sh
+ my $randomstring = "cut-here-".join"",
+ map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..20);
+ if(not -f $0 or not -r $0) {
+ ::error(255,"--embed only works if parallel is a readable file");
+ }
+ # Read the source from $0
+ my $source = slurp_or_exit($0);
+ my $user = $ENV{LOGNAME} || $ENV{USERNAME} || $ENV{USER};
+ my $env_parallel_source;
+ my $shell = $Global::shell;
+ $shell =~ s:.*/::;
+ for(which("env_parallel.$shell")) {
+ -r $_ or next;
+ # Read the source of env_parallel.shellname
+ $env_parallel_source .= slurp_or_exit($_);
+ last;
+ }
+ print "#!$Global::shell
+
+# Copyright (C) 2007-2024 $user, Ole Tange, http://ole.tange.dk
+# and Free Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+";
+
+ print q!
+# Embedded GNU Parallel created with --embed
+parallel() {
+ # Start GNU Parallel without leaving temporary files
+ #
+ # Not all shells support 'perl <(cat ...)'
+ # This is a complex way of doing:
+ # perl <(cat <<'cut-here'
+ # [...]
+ # ) "$@"
+ # and also avoiding:
+ # [1]+ Done cat
+
+ # Make a temporary fifo that perl can read from
+ _fifo_with_GNU_Parallel_source=`perl -e 'use POSIX qw(mkfifo);
+ do {
+ $f = "/tmp/parallel-".join"",
+ map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $f);
+ mkfifo($f,0600);
+ print $f;'`
+ # Put source code into temporary file
+ # so it is easy to copy to the fifo
+ _file_with_GNU_Parallel_source=`mktemp`;
+!,
+ "cat <<'$randomstring' > \$_file_with_GNU_Parallel_source\n",
+ $source,
+ $randomstring,"\n",
+ q!
+ # Copy the source code from the file to the fifo
+ # and remove the file and fifo ASAP
+ # 'sh -c' is needed to avoid
+ # [1]+ Done cat
+ sh -c "(rm $_file_with_GNU_Parallel_source; cat >$_fifo_with_GNU_Parallel_source; rm $_fifo_with_GNU_Parallel_source) < $_file_with_GNU_Parallel_source &"
+
+ # Read the source from the fifo
+ perl $_fifo_with_GNU_Parallel_source "$@"
+}
+!,
+ $env_parallel_source,
+ q!
+
+# This will call the functions above
+parallel -k echo ::: Put your code here
+env_parallel --session
+env_parallel -k echo ::: Put your code here
+parset p,y,c,h -k echo ::: Put your code here
+echo $p $y $c $h
+echo You can also activate GNU Parallel for interactive use by:
+echo . "$0"
+!;
+ ::status("Redirect the output to a file and add your changes at the end:",
+ " $0 --embed > new_script");
+}
+
+sub pack_combined_executable {
+ my ($before_ref,$with_argsep_ref,$argv_ref) = @_;
+ my @parallelopts;
+ my $skip_next;
+ # Remove '--combine-exec file' from options
+ for(@{$before_ref}[0..(arrayindex($before_ref,$with_argsep_ref))-1]) {
+ if (/^--combine-?exec(utable)?$/ || $skip_next) {
+ # Also skip the filename given to --combine-exec
+ $skip_next = !$skip_next;
+ next;
+ }
+ push @parallelopts, $_;
+ }
+ # From ::: and to end
+    # From the first ::: to the end
+ # The executable is now the first in @ARGV
+ my $execname = shift @ARGV;
+ # The rest of @ARGV are options for $execname
+ my @execopts = @ARGV;
+ debug("combine",
+ "Parallel opts: @parallelopts ",
+ "Executable: $execname ",
+ "Execopts: @execopts ",
+ "Argsep: @argsep\n");
+    # Read the executable
+ my $exec = slurp_or_exit(which($execname));
+ # Read the source of GNU Parallel and the executable
+ my $parallel = slurp_or_exit($0);
+ # Remove possibly __END__ from GNU Parallel
+ $parallel =~ s/^__END__.*//s;
+ if(-t $Global::original_stderr) {
+ ::status(
+ "Please be aware that combining GNU Parallel and '$execname'",
+ "into a combined executable will make the whole executable",
+ "licensed under GPLv3 (section 5.c).",
+ "",
+ "If the license of '$execname' is incompatible with GPLv3,",
+ "you cannot legally convey copies of the combined executable",
+ "to others. You can, however, still run them yourself.",
+ "",
+ "The combined executable will not have a citation notice,",
+	    "so it is your responsibility to advise that academic tradition",
+ "requires the users to cite GNU Parallel.",
+ ""
+ );
+ my $input;
+ do {
+ ::status_no_nl("\nType: 'I agree' and press enter.\n> ");
+ $input = <STDIN>;
+ if(not defined $input) {
+ exit(255);
+ }
+ } until($input =~ /I agree/i);
+ }
+ write_or_exit($opt::combineexec,
+ $parallel,
+ "\n__END__\n",
+ (map { "$_\0\n" } @parallelopts), "\0\0\n",
+ $execname, "\0\0\n",
+ (map { "$_\0\n" } @execopts), "\0\0\n",
+ (map { "$_\0\n" } @argsep), "\0\0\n",
+ $exec);
+ # Set +x permission
+ chmod 0700, $opt::combineexec;
+ exit(0);
+}
+
+sub unpack_combined_executable {
+ # If the script is a combined executable,
+    # it will have stuff in <DATA> (i.e. after __END__)
+ my $combine_exec = join("",<DATA>);
+ if(length $combine_exec) {
+ # Parse the <DATA>
+ #
+ # __END__
+ # Option for GNU Parallel\0\n
+ # Option for GNU Parallel\0\n
+ # \0\0\n
+ # Name of executable\0\0\n
+ # Option for executable\0\n
+ # Option for executable\0\n
+ # \0\0\n
+ # argsep + args if any\0\n
+ # argsep + args if any\0\n
+ # \0\0\n
+ # <<binary of exec>>
+ #
+ # parallel --combine --pipe -j10% --recend '' myscript --myopt myval
+ # __END__
+ # --pipe\0\n --pipe
+ # -j10%\0\n -j10%
+ # --recend\0\n --recend
+ # \0\n ''
+ # \0\0\n end-of-parallel-options
+ # myscript\0\0\n myscript
+ # --myopt\0\n --myopt
+ # myval\0\n myval
+ # \0\0\n end-of-myscript-options
+ # \0\0\n no argsep
+ # <<binary of myscript>>
+ #
+ # parallel --combine -j10% myscript :::
+ # __END__
+ # -j10%\0\n
+ # \0\0\n end-of-parallel-options
+ # myscript\0\0\n
+ # \0\0\n end-of-myscript-options
+ # :::\0\n
+ # \0\0\n
+ # <<binary of myscript>>
+
+ my ($opts,$execname,$execopts,$argsep,$exec) =
+ split /\0\0\n/,$combine_exec,5;
+ # Make a tmpdir with a file called $execname
+ local %ENV;
+ $ENV{TMPDIR} ||= "/tmp";
+ my $dir = File::Temp::tempdir($ENV{'TMPDIR'} . "/parXXXXX", CLEANUP => 1);
+ my $script = $dir."/".$execname;
+ write_or_exit($script,$exec);
+ # Set +x permission
+ chmod 0700, $script;
+ # Mark it for unlinking later
+ $Global::unlink{$script}++;
+ $Global::unlink{$dir}++;
+ # pass the options for GNU Parallel
+ my @opts = split /\0\n/, $opts;
+ my @execopts = split /\0\n/, $execopts;
+ if(length $argsep) {
+ # Only add argsep if set
+ unshift(@ARGV, split(/\0\n/,$argsep));
+ }
+ unshift(@ARGV,@opts,$script,@execopts);
+ }
+}
+
+
+sub __GENERIC_COMMON_FUNCTION__() {}
+
+
+sub mkdir_or_die($) {
+    # Create the dir; die if it is not executable afterwards
+    my $dir = shift;
+    # The eval is needed to catch exceptions from mkdir
+ eval { File::Path::mkpath($dir); };
+ if(not -x $dir) {
+ ::error(255,"Cannot change into non-executable dir $dir: $!");
+ }
+}
+
+sub tmpfile(@) {
+ # Create tempfile as $TMPDIR/parXXXXX
+ # Returns:
+ # $filehandle = opened file handle
+ # $filename = file name created
+ my($filehandle,$filename) =
+ ::tempfile(DIR=>$ENV{'TMPDIR'}, TEMPLATE => 'parXXXXX', @_);
+ if(wantarray) {
+ return($filehandle,$filename);
+ } else {
+ # Separate unlink due to NFS dealing badly with File::Temp
+ unlink $filename;
+ return $filehandle;
+ }
+}
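+# Usage sketch (illustrative):
+#   my ($fh, $name) = ::tmpfile(SUFFIX => ".log"); # keep file, get its name
+#   my $anon_fh = ::tmpfile(SUFFIX => ".df"); # scalar context: unlinked now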
+
+sub tmpname($) {
+ # Select a name that does not exist
+ # Do not create the file as it may be used for creating a socket (by tmux)
+ # Remember the name in $Global::unlink to avoid hitting the same name twice
+ my $name = shift;
+ my($tmpname);
+ if(not -w $ENV{'TMPDIR'}) {
+ my $qtmp = ::Q($ENV{'TMPDIR'});
+ if(not -e $ENV{'TMPDIR'}) {
+ ::error(255,"Tmpdir $qtmp does not exist.",
+ "Try: mkdir -p $qtmp");
+ } else {
+ ::error(255,"Tmpdir $qtmp is not writable.",
+ "Try: chmod +w $qtmp");
+ }
+ }
+ do {
+ $tmpname = $ENV{'TMPDIR'}."/".$name.
+ join"", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $tmpname or $Global::unlink{$tmpname}++);
+ return $tmpname;
+}
+
+sub tmpfifo() {
+ # Find an unused name and mkfifo on it
+ my $tmpfifo = tmpname("fif");
+ mkfifo($tmpfifo,0600);
+ return $tmpfifo;
+}
+
+sub rm(@) {
+ # Remove file and remove it from %Global::unlink
+ # Uses:
+ # %Global::unlink
+ delete @Global::unlink{@_};
+ unlink @_;
+}
+
+sub size_of_block_dev() {
+ # Like -s but for block devices
+ # Input:
+ # $blockdev = file name of block device
+ # Returns:
+ # $size = in bytes, undef if error
+ my $blockdev = shift;
+ my $fh = open_or_exit("<", $blockdev);
+ seek($fh,0,2) || ::die_bug("cannot seek $blockdev");
+ my $size = tell($fh);
+ close $fh;
+ return $size;
+}
+
+sub qqx(@) {
+ # Like qx but with clean environment (except for @keep)
+ # and STDERR ignored
+ # This is needed if the environment contains functions
+ # that /bin/sh does not understand
+ my %env;
+ # ssh with ssh-agent needs PATH SSH_AUTH_SOCK SSH_AGENT_PID
+ # ssh with Kerberos needs KRB5CCNAME
+ # sshpass needs SSHPASS
+ # tmux needs LC_CTYPE
+ # lsh needs HOME LOGNAME
+ my @keep = qw(PATH SSH_AUTH_SOCK SSH_AGENT_PID KRB5CCNAME LC_CTYPE
+ HOME LOGNAME SSHPASS);
+ @env{@keep} = @ENV{@keep};
+ local %ENV;
+ %ENV = %env;
+ if($Global::debug) {
+ # && true is to force spawning a shell and not just exec'ing
+ return qx{ @_ && true };
+ } else {
+ # CygWin does not respect 2>/dev/null
+ # so we do that by hand
+ # This trick does not work:
+ # https://stackoverflow.com/q/13833088/363028
+ # local *STDERR;
+ # open(STDERR, ">", "/dev/null");
+ open(local *CHILD_STDIN, '<', '/dev/null') or die $!;
+ open(local *CHILD_STDERR, '>', '/dev/null') or die $!;
+ my $out;
+ # eval is needed if open3 fails (e.g. command line too long)
+ eval {
+ my $pid = open3(
+ '<&CHILD_STDIN',
+ $out,
+ '>&CHILD_STDERR',
+ # && true is to force spawning a shell and not just exec'ing
+ "@_ && true");
+ my @arr = <$out>;
+ close $out;
+ # Make sure $? is set
+ waitpid($pid, 0);
+ return wantarray ? @arr : join "",@arr;
+ } or do {
+ # If eval fails, force $?=false
+ `false`;
+ };
+ }
+}
+
+sub uniq(@) {
+ # Remove duplicates and return unique values
+ return keys %{{ map { $_ => 1 } @_ }};
+}
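+# Note: hash-based, so the order of the returned values is unspecified.
+# Example (illustrative): uniq(1,2,2,1) => (1,2) in some order.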
+
+sub min(@) {
+ # Returns:
+ # Minimum value of array
+ my $min;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+	defined $min or do { $min = $_; next; }; # Set $min to the first non-undef
+ $min = ($min < $_) ? $min : $_;
+ }
+ return $min;
+}
+
+sub max(@) {
+ # Returns:
+ # Maximum value of array
+ my $max;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+	defined $max or do { $max = $_; next; }; # Set $max to the first non-undef
+ $max = ($max > $_) ? $max : $_;
+ }
+ return $max;
+}
+
+sub sum(@) {
+ # Returns:
+ # Sum of values of array
+ my @args = @_;
+ my $sum = 0;
+ for (@args) {
+	# Skip undefs (and zeros, which do not change the sum)
+	$_ and do { $sum += $_; }
+ }
+ return $sum;
+}
+
+sub undef_as_zero($) {
+ my $a = shift;
+ return $a ? $a : 0;
+}
+
+sub undef_as_empty($) {
+ my $a = shift;
+ return $a ? $a : "";
+}
+
+sub undef_if_empty($) {
+ if(defined($_[0]) and $_[0] eq "") {
+ return undef;
+ }
+ return $_[0];
+}
+
+sub multiply_binary_prefix(@) {
+    # Evaluate numbers with binary prefix
+    # Ki=2^10, Mi=2^20, Gi=2^30, Ti=2^40, Pi=2^50, Ei=2^60, Zi=2^70, Yi=2^80, Xi=2^90
+    # ki=2^10, mi=2^20, gi=2^30, ti=2^40, pi=2^50, ei=2^60, zi=2^70, yi=2^80, xi=2^90
+    # K =2^10, M =2^20, G =2^30, T =2^40, P =2^50, E =2^60, Z =2^70, Y =2^80, X =2^90
+    # k =10^3, m =10^6, g =10^9, t=10^12, p=10^15, e=10^18, z=10^21, y=10^24, x=10^27
+ # 13G = 13*1024*1024*1024 = 13958643712
+ # Input:
+ # $s = string with prefixes
+ # Returns:
+ # $value = int with prefixes multiplied
+ my @v = @_;
+ for(@v) {
+ defined $_ or next;
+ s/ki/*1024/gi;
+ s/mi/*1024*1024/gi;
+ s/gi/*1024*1024*1024/gi;
+ s/ti/*1024*1024*1024*1024/gi;
+ s/pi/*1024*1024*1024*1024*1024/gi;
+ s/ei/*1024*1024*1024*1024*1024*1024/gi;
+ s/zi/*1024*1024*1024*1024*1024*1024*1024/gi;
+ s/yi/*1024*1024*1024*1024*1024*1024*1024*1024/gi;
+ s/xi/*1024*1024*1024*1024*1024*1024*1024*1024*1024/gi;
+
+ s/K/*1024/g;
+ s/M/*1024*1024/g;
+ s/G/*1024*1024*1024/g;
+ s/T/*1024*1024*1024*1024/g;
+ s/P/*1024*1024*1024*1024*1024/g;
+ s/E/*1024*1024*1024*1024*1024*1024/g;
+ s/Z/*1024*1024*1024*1024*1024*1024*1024/g;
+ s/Y/*1024*1024*1024*1024*1024*1024*1024*1024/g;
+ s/X/*1024*1024*1024*1024*1024*1024*1024*1024*1024/g;
+
+ s/k/*1000/g;
+ s/m/*1000*1000/g;
+ s/g/*1000*1000*1000/g;
+ s/t/*1000*1000*1000*1000/g;
+ s/p/*1000*1000*1000*1000*1000/g;
+ s/e/*1000*1000*1000*1000*1000*1000/g;
+ s/z/*1000*1000*1000*1000*1000*1000*1000/g;
+ s/y/*1000*1000*1000*1000*1000*1000*1000*1000/g;
+ s/x/*1000*1000*1000*1000*1000*1000*1000*1000*1000/g;
+
+ $_ = eval $_;
+ }
+ return wantarray ? @v : $v[0];
+}
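+# Examples (illustrative):
+#   multiply_binary_prefix("10k")  => 10*1000       = 10000
+#   multiply_binary_prefix("1Mi")  => 1*1024*1024   = 1048576
+#   multiply_binary_prefix("500M") => 500*1024*1024 = 524288000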
+
+sub multiply_time_units($) {
+    # Evaluate numbers with time units
+ # s=1, m=60, h=3600, d=86400
+ # Input:
+ # $s = string time units
+ # Returns:
+ # $value = int in seconds
+ my @v = @_;
+ for(@v) {
+ defined $_ or next;
+ if(/[dhms]/i) {
+ s/s/*1+/gi;
+ s/m/*60+/gi;
+ s/h/*3600+/gi;
+ s/d/*86400+/gi;
+ # 1m/3 => 1*60+/3 => 1*60/3
+ s/\+(\D)/$1/gi;
+ }
+ $_ = eval $_."-0";
+ }
+ return wantarray ? @v : $v[0];
+}
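+# Example (illustrative): multiply_time_units("2h30m") is rewritten to
+# "2*3600+30*60+" and evaluated (with "-0" appended) to 9000 seconds.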
+
+sub seconds_to_time_units() {
+ # Convert seconds into ??d??h??m??s
+ # s=1, m=60, h=3600, d=86400
+ # Input:
+ # $s = int in seconds
+ # Returns:
+ # $str = string time units
+ my $s = shift;
+ my $str;
+ my $d = int($s/86400);
+ $s -= $d * 86400;
+ my $h = int($s/3600);
+ $s -= $h * 3600;
+ my $m = int($s/60);
+ $s -= $m * 60;
+ if($d) {
+ $str = sprintf("%dd%02dh%02dm%02ds",$d,$h,$m,$s);
+ } elsif($h) {
+ $str = sprintf("%dh%02dm%02ds",$h,$m,$s);
+ } elsif($m) {
+ $str = sprintf("%dm%02ds",$m,$s);
+ } else {
+ $str = sprintf("%ds",$s);
+ }
+ return $str;
+}
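+# Example (illustrative): seconds_to_time_units(90061) => "1d01h01m01s"
+# (86400 + 3600 + 60 + 1 seconds).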
+
+{
+ my ($disk_full_fh, $b8193);
+ sub exit_if_disk_full() {
+	# Checks if $TMPDIR is full by writing 8193 bytes to a tmpfile
+ # If the disk is full: Exit immediately.
+ # Returns:
+ # N/A
+ if(not $disk_full_fh) {
+ $disk_full_fh = ::tmpfile(SUFFIX => ".df");
+ $b8193 = "b"x8193;
+ }
+	# Linux does not discover that a disk is full when writing <= 8192 bytes
+ # Tested on:
+ # bfs btrfs cramfs ext2 ext3 ext4 ext4dev jffs2 jfs minix msdos
+ # ntfs reiserfs tmpfs ubifs vfat xfs
+ # TODO this should be tested on different OS similar to this:
+ #
+ # doit() {
+ # sudo mount /dev/ram0 /mnt/loop; sudo chmod 1777 /mnt/loop
+ # seq 100000 | parallel --tmpdir /mnt/loop/ true &
+ # seq 6900000 > /mnt/loop/i && echo seq OK
+ # seq 6980868 > /mnt/loop/i
+ # seq 10000 > /mnt/loop/ii
+ # sleep 3
+ # sudo umount /mnt/loop/ || sudo umount -l /mnt/loop/
+ # echo >&2
+ # }
+ print $disk_full_fh $b8193;
+ if(not $disk_full_fh
+ or
+ tell $disk_full_fh != 8193) {
+	    # On Raspbian the disk can be full with ~10 chars still writable.
+ ::error(255,"Output is incomplete.",
+ "Cannot append to buffer file in $ENV{'TMPDIR'}.",
+ "Is the disk full?",
+ "Change \$TMPDIR with --tmpdir or use --compress.");
+ }
+ truncate $disk_full_fh, 0;
+ seek($disk_full_fh, 0, 0) || die;
+ }
+}
+
+sub spacefree($$) {
+ # Remove comments and spaces
+ # Inputs:
+ # $spaces = keep 1 space?
+ # $s = string to remove spaces from
+ # Returns:
+ # $s = with spaces removed
+ my $spaces = shift;
+ my $s = shift;
+ $s =~ s/#.*//mg;
+ if(1 == $spaces) {
+ $s =~ s/\s+/ /mg;
+ } elsif(2 == $spaces) {
+ # Keep newlines
+ $s =~ s/\n\n+/\n/sg;
+ $s =~ s/[ \t]+/ /mg;
+ } elsif(3 == $spaces) {
+ # Keep perl code required space
+ $s =~ s{([^a-zA-Z0-9/])\s+}{$1}sg;
+ $s =~ s{([a-zA-Z0-9/])\s+([^:a-zA-Z0-9/])}{$1$2}sg;
+ } else {
+ $s =~ s/\s//mg;
+ }
+ return $s;
+}
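+# Examples (illustrative):
+#   spacefree(0, "a =  1 # note") => "a=1"
+#   spacefree(1, "a =  1 # note") => "a = 1 "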
+
+{
+ my $hostname;
+ sub hostname() {
+ local $/ = "\n";
+ if(not $hostname) {
+ $hostname = `hostname`;
+ chomp($hostname);
+ $hostname ||= "nohostname";
+ }
+ return $hostname;
+ }
+}
+
+sub which(@) {
+ # Input:
+ # @programs = programs to find the path to
+ # Returns:
+ # @full_path = full paths to @programs. Nothing if not found
+ my @which;
+ for my $prg (@_) {
+ push(@which, grep { not -d $_ and -x $_ }
+ map { $_."/".$prg } split(":",$ENV{'PATH'}));
+ if($prg =~ m:/:) {
+ # Test if program with full path exists
+ push(@which, grep { not -d $_ and -x $_ } $prg);
+ }
+ }
+ ::debug("which", "$which[0] in $ENV{'PATH'}\n");
+ return wantarray ? @which : $which[0];
+}
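+# Usage sketch (illustrative):
+#   my $ssh = ::which("ssh");        # first match in $PATH, or undef
+#   my @all = ::which("ssh", "lsh"); # every match for every program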
+
+{
+ my ($regexp,$shell,%fakename);
+
+ sub parent_shell {
+ # Input:
+ # $pid = pid to see if (grand)*parent is a shell
+ # Returns:
+ # $shellpath = path to shell - undef if no shell found
+ my $pid = shift;
+ ::debug("init","Parent of $pid\n");
+ if(not $regexp) {
+ # All shells known to mankind
+ #
+ # ash bash csh dash fdsh fish fizsh ksh ksh93 mksh pdksh
+ # posh rbash rc rush rzsh sash sh static-sh tcsh yash zsh
+
+ my @shells = (qw(ash bash bsd-csh csh dash fdsh fish fizsh ksh
+ ksh93 lksh mksh pdksh posh rbash rc rush rzsh sash sh
+ static-sh tcsh yash zsh -sh -csh -bash),
+ '-sh (sh)' # sh on FreeBSD
+ );
+ # Can be formatted as:
+ # [sh] -sh sh busybox sh -sh (sh)
+ # /bin/sh /sbin/sh /opt/csw/sh
+ # But not: foo.sh sshd crash flush pdflush scosh fsflush ssh
+ $shell = "(?:".join("|",map { "\Q$_\E" } @shells).")";
+ $regexp = '^((\[)(-?)('. $shell. ')(\])|(|\S+/|busybox )'.
+ '(-?)('. $shell. '))( *$| [^(])';
+ %fakename = (
+ # sh disguises itself as -sh (sh) on FreeBSD
+ "-sh (sh)" => ["sh"],
+ # csh and tcsh disguise themselves as -sh/-csh
+ # E.g.: ssh -tt csh@lo 'ps aux;true' |egrep ^csh
+ # but sh also disguises itself as -sh
+ # (TODO When does that happen?)
+ "-sh" => ["sh"],
+ "-csh" => ["tcsh", "csh"],
+ # ash disguises itself as -ash
+ "-ash" => ["ash", "dash", "sh"],
+ # dash disguises itself as -dash
+ "-dash" => ["dash", "ash", "sh"],
+ # bash disguises itself as -bash
+ "-bash" => ["bash", "sh"],
+ # ksh disguises itself as -ksh
+ "-ksh" => ["ksh", "sh"],
+ # zsh disguises itself as -zsh
+ "-zsh" => ["zsh", "sh"],
+ );
+ }
+ if($^O eq "linux") {
+ # Optimized for GNU/Linux
+ my $testpid = $pid;
+ my $shellpath;
+ my $shellline;
+ while($testpid) {
+ if(open(my $fd, "<", "/proc/$testpid/cmdline")) {
+ local $/="\0";
+ chomp($shellline = <$fd>);
+ if($shellline =~ /$regexp/o) {
+ my $shellname = $4 || $8;
+ my $dash = $3 || $7;
+ if($shellname eq "sh" and $dash) {
+ # -sh => csh or sh
+ if($shellpath = readlink "/proc/$testpid/exe") {
+ ::debug("init","procpath $shellpath\n");
+ if($shellpath =~ m:/$shell$:o) {
+ ::debug("init",
+ "proc which ".$shellpath." => ");
+ return $shellpath;
+ }
+ }
+ }
+ ::debug("init", "which ".$shellname." => ");
+ $shellpath = (which($shellname,
+ @{$fakename{$shellname}}))[0];
+ ::debug("init", "shell path $shellpath\n");
+ return $shellpath;
+ }
+ }
+ # Get parent pid
+ if(open(my $fd, "<", "/proc/$testpid/stat")) {
+ my $line = <$fd>;
+ close $fd;
+ # Parent pid is field 4
+ $testpid = (split /\s+/, $line)[3];
+ } else {
+ # Something is wrong: fall back to old method
+ last;
+ }
+ }
+ }
+ # if -sh or -csh try readlink /proc/$$/exe
+ my ($children_of_ref, $parent_of_ref, $name_of_ref) = pid_table();
+ my $shellpath;
+ my $testpid = $pid;
+ while($testpid) {
+ if($name_of_ref->{$testpid} =~ /$regexp/o) {
+ my $shellname = $4 || $8;
+ my $dash = $3 || $7;
+ if($shellname eq "sh" and $dash) {
+ # -sh => csh or sh
+ if($shellpath = readlink "/proc/$testpid/exe") {
+ ::debug("init","procpath $shellpath\n");
+ if($shellpath =~ m:/$shell$:o) {
+ ::debug("init", "proc which ".$shellpath." => ");
+ return $shellpath;
+ }
+ }
+ }
+ ::debug("init", "which ".$shellname." => ");
+ $shellpath = (which($shellname,@{$fakename{$shellname}}))[0];
+ ::debug("init", "shell path $shellpath\n");
+ $shellpath and last;
+ }
+ if($testpid == $parent_of_ref->{$testpid}) {
+ # In Solaris zones, the PPID of the zsched process is itself
+ last;
+ }
+ $testpid = $parent_of_ref->{$testpid};
+ }
+ return $shellpath;
+ }
+}
+
+{
+ my %pid_parentpid_cmd;
+
+ sub pid_table() {
+ # Returns:
+ # %children_of = { pid -> children of pid }
+ # %parent_of = { pid -> pid of parent }
+ # %name_of = { pid -> commandname }
+
+ if(not %pid_parentpid_cmd) {
+ # Filter for SysV-style `ps`
+ my $sysv = q( ps -ef |).
+ q(perl -ane '1..1 and /^(.*)CO?MM?A?N?D/ and $s=length $1;).
+ q(s/^.{$s}//; print "@F[1,2] $_"' );
+ # Minix uses cols 2,3 and can have newlines in the command
+ # so lines not having numbers in cols 2,3 must be ignored
+ my $minix = q( ps -ef |).
+ q(perl -ane '1..1 and /^(.*)CO?MM?A?N?D/ and $s=length $1;).
+ q(s/^.{$s}// and $F[2]>0 and $F[3]>0 and print "@F[2,3] $_"' );
+ # BSD-style `ps`
+ my $bsd = q(ps -o pid,ppid,command -ax);
+ %pid_parentpid_cmd =
+ (
+ 'aix' => $sysv,
+ 'android' => $sysv,
+ 'cygwin' => $sysv,
+ 'darwin' => $bsd,
+ 'dec_osf' => $sysv,
+ 'dragonfly' => $bsd,
+ 'freebsd' => $bsd,
+ 'gnu' => $sysv,
+ 'hpux' => $sysv,
+ 'linux' => $sysv,
+ 'mirbsd' => $bsd,
+ 'minix' => $minix,
+ 'msys' => $sysv,
+ 'MSWin32' => $sysv,
+ 'netbsd' => $bsd,
+ 'nto' => $sysv,
+ 'openbsd' => $bsd,
+ 'solaris' => $sysv,
+ 'svr5' => $sysv,
+ 'syllable' => "echo ps not supported",
+ );
+ }
+ $pid_parentpid_cmd{$^O} or
+ ::die_bug("pid_parentpid_cmd for $^O missing");
+
+ my (@pidtable,%parent_of,%children_of,%name_of);
+ # Table with pid -> children of pid
+ @pidtable = `$pid_parentpid_cmd{$^O}`;
+ my $p=$$;
+ for (@pidtable) {
+ # must match: 24436 21224 busybox ash
+ # must match: 24436 21224 <<empty on MacOSX running cubase>>
+ # must match: 24436 21224 <<empty on system running Viber>>
+ # or: perl -e 'while($0=" "){}'
+ if(/^\s*(\S+)\s+(\S+)\s+(\S+.*)/
+ or
+ /^\s*(\S+)\s+(\S+)\s+()$/) {
+ $parent_of{$1} = $2;
+ push @{$children_of{$2}}, $1;
+ $name_of{$1} = $3;
+ } else {
+ ::die_bug("pidtable format: $_");
+ }
+ }
+ return(\%children_of, \%parent_of, \%name_of);
+ }
+}
+
+sub now() {
+    # Returns time since epoch in seconds with 3 decimals
+ # Uses:
+ # @Global::use
+ # Returns:
+ # $time = time now with millisecond accuracy
+ if(not $Global::use{"Time::HiRes"}) {
+ if(eval "use Time::HiRes qw ( time );") {
+ eval "sub TimeHiRestime { return Time::HiRes::time };";
+ } else {
+ eval "sub TimeHiRestime { return time() };";
+ }
+ $Global::use{"Time::HiRes"} = 1;
+ }
+
+ return (int(TimeHiRestime()*1000))/1000;
+}
+
+sub usleep($) {
+ # Sleep this many milliseconds.
+ # Input:
+ # $ms = milliseconds to sleep
+ my $ms = shift;
+ ::debug("timing",int($ms),"ms ");
+ select(undef, undef, undef, $ms/1000);
+}
+
+sub make_regexp_ungreedy {
+ my $regexp = shift;
+ my $class_state = 0;
+ my $escape_state = 0;
+ my $found = 0;
+ my $ungreedy = "";
+ my $c;
+
+ for $c (split (//, $regexp)) {
+ if ($found) {
+ if($c ne "?") { $ungreedy .= "?"; }
+ $found = 0;
+ }
+ $ungreedy .= $c;
+
+ if ($escape_state) { $escape_state = 0; next; }
+ if ($c eq "\\") { $escape_state = 1; next; }
+ if ($c eq '[') { $class_state = 1; next; }
+ if ($class_state) {
+ if($c eq ']') { $class_state = 0; }
+ next;
+ }
+ # Quantifiers: + * {...}
+ if ($c =~ /[*}+]/) { $found = 1; }
+ }
+ if($found) { $ungreedy .= '?'; }
+ return $ungreedy;
+}
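+# Example (illustrative): make_regexp_ungreedy("a+b*") => "a+?b*?".
+# Quantifiers inside [...] classes or after a backslash are left greedy.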
+
+
+sub __KILLER_REAPER__() {}
+
+sub reap_usleep() {
+ # Reap dead children.
+ # If no dead children: Sleep specified amount with exponential backoff
+ # Input:
+ # $ms = milliseconds to sleep
+ # Returns:
+ # $ms/2+0.001 if children reaped
+ # $ms*1.1 if no children reaped
+ my $ms = shift;
+ if(reapers()) {
+ if(not $Global::total_completed % 100) {
+ if($opt::timeout) {
+ # Force cleaning the timeout queue for every 100 jobs
+ # Fixes potential memleak
+ $Global::timeoutq->process_timeouts();
+ }
+ }
+ # Sleep exponentially shorter (1/2^n) if a job finished
+ return $ms/2+0.001;
+ } else {
+ if($opt::timeout) {
+ $Global::timeoutq->process_timeouts();
+ }
+ if($opt::memfree) {
+ kill_youngster_if_not_enough_mem($opt::memfree*0.5);
+ }
+ if($opt::memsuspend) {
+ suspend_young_if_not_enough_mem($opt::memsuspend);
+ }
+ if($opt::limit) {
+ kill_youngest_if_over_limit();
+ }
+ exit_if_disk_full();
+ if($Global::linebuffer) {
+ my $something_printed = 0;
+ if($opt::keeporder and not $opt::latestline) {
+ for my $job (values %Global::running) {
+ $something_printed += $job->print_earlier_jobs();
+ }
+ } else {
+ for my $job (values %Global::running) {
+ $something_printed += $job->print();
+ }
+ }
+ if($something_printed) { $ms = $ms/2+0.001; }
+ }
+ if($ms > 0.002) {
+ # When a child dies, wake up from sleep (or select(,,,))
+ $SIG{CHLD} = sub { kill "ALRM", $$ };
+ if($opt::delay and not $Global::linebuffer) {
+ # The 0.004s is approximately the time it takes for one round
+ my $next_earliest_start =
+ $Global::newest_starttime + $opt::delay - 0.004;
+ my $remaining_ms = 1000 * ($next_earliest_start - ::now());
+ # The next job can only start at $next_earliest_start
+ # so sleep until then (but sleep at least $ms)
+ usleep(::max($ms,$remaining_ms));
+ } else {
+ usleep($ms);
+ }
+ # --compress needs $SIG{CHLD} unset
+ $SIG{CHLD} = 'DEFAULT';
+ }
+ # Sleep exponentially longer (1.1^n) if a job did not finish,
+ # though at most 1000 ms.
+ return (($ms < 1000) ? ($ms * 1.1) : ($ms));
+ }
+}
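+# Backoff sketch (illustrative): starting from 1 ms, consecutive idle
+# rounds sleep 1 -> 1.1 -> 1.21 -> ... ms, capped at 1000 ms; each round
+# that reaps a child halves the next sleep instead (ms/2 + 0.001).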
+
+sub kill_youngest_if_over_limit() {
+    # Check for each $sshlogin whether we are over the limit
+ # If over limit: kill off the youngest child
+ # Put the child back in the queue.
+ # Uses:
+ # %Global::running
+ my %jobs_of;
+ my @sshlogins;
+
+ for my $job (values %Global::running) {
+ if(not $jobs_of{$job->sshlogin()}) {
+ push @sshlogins, $job->sshlogin();
+ }
+ push @{$jobs_of{$job->sshlogin()}}, $job;
+ }
+ for my $sshlogin (@sshlogins) {
+ for my $job (sort { $b->seq() <=> $a->seq() }
+ @{$jobs_of{$sshlogin}}) {
+ if($sshlogin->limit() == 2) {
+ $job->kill();
+ last;
+ }
+ }
+ }
+}
+
+sub suspend_young_if_not_enough_mem() {
+ # Check each $sshlogin if there is enough mem.
+ # If less than $limit free mem: suspend some of the young children
+ # Else: Resume all jobs
+ # Uses:
+ # %Global::running
+ my $limit = shift;
+ my %jobs_of;
+ my @sshlogins;
+
+ for my $job (values %Global::running) {
+ if(not $jobs_of{$job->sshlogin()}) {
+ push @sshlogins, $job->sshlogin();
+ }
+ push @{$jobs_of{$job->sshlogin()}}, $job;
+ }
+ for my $sshlogin (@sshlogins) {
+ my $free = $sshlogin->memfree();
+ if($free < 2*$limit) {
+ # Suspend all jobs (resume some of them later)
+ map { $_->suspended() or $_->suspend(); } @{$jobs_of{$sshlogin}};
+ my @jobs = (sort { $b->seq() <=> $a->seq() }
+ @{$jobs_of{$sshlogin}});
+ # how many should be running?
+ # limit*1 => 1;
+ # limit*1.5 => 2;
+ # limit*1.75 => 4;
+ # free < limit*(2-1/2^n);
+ # =>
+ # 1/(2-free/limit) < 2^n;
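+	    # e.g. $free == 1.5*$limit => $run = int(1/(2-1.5)) = 2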
+ my $run = int(1/(2-$free/$limit));
+ $run = ::min($run,$#jobs);
+ # Resume the oldest running
+ for my $job ((sort { $a->seq() <=> $b->seq() } @jobs)[0..$run]) {
+ ::debug("mem","\nResume ",$run+1, " jobs. Seq ",
+ $job->seq(), " resumed ",
+ $sshlogin->memfree()," < ",2*$limit);
+ $job->resume();
+ }
+ } else {
+ for my $job (@{$jobs_of{$sshlogin}}) {
+ if($job->suspended()) {
+ $job->resume();
+ ::debug("mem","\nResume ",$#{$jobs_of{$sshlogin}}+1,
+ " jobs. Seq ", $job->seq(), " resumed ",
+ $sshlogin->memfree()," > ",2*$limit);
+ last;
+ }
+ }
+ }
+ }
+}
+
+sub kill_youngster_if_not_enough_mem() {
+ # Check each $sshlogin if there is enough mem.
+    # If free mem is less than 50% of the limit: kill off the youngest child
+ # Put the child back in the queue.
+ # Uses:
+ # %Global::running
+ my $limit = shift;
+ my %jobs_of;
+ my @sshlogins;
+
+ for my $job (values %Global::running) {
+ if(not $jobs_of{$job->sshlogin()}) {
+ push @sshlogins, $job->sshlogin();
+ }
+ push @{$jobs_of{$job->sshlogin()}}, $job;
+ }
+ for my $sshlogin (@sshlogins) {
+ for my $job (sort { $b->seq() <=> $a->seq() }
+ @{$jobs_of{$sshlogin}}) {
+ if($sshlogin->memfree() < $limit) {
+ ::debug("mem","\n",map { $_->seq()." " }
+ (sort { $b->seq() <=> $a->seq() }
+ @{$jobs_of{$sshlogin}}));
+ ::debug("mem","\n", $job->seq(), "killed ",
+ $sshlogin->memfree()," < ",$limit);
+ $job->kill();
+ $sshlogin->memfree_recompute();
+ } else {
+ last;
+ }
+ }
+ ::debug("mem","Free mem OK? ",
+ $sshlogin->memfree()," > ",$limit);
+ }
+}
+
+
+sub __DEBUGGING__() {}
+
+
+sub debug(@) {
+ # Uses:
+ # $Global::debug
+ # %Global::fh
+ # Returns: N/A
+ $Global::debug or return;
+    # map (not grep): replace undefs with "", do not filter values out
+    @_ = map { defined $_ ? $_ : "" } @_;
+ if($Global::debug eq "all" or $Global::debug eq $_[0]) {
+ if($Global::fh{2}) {
+ # Original stderr was saved
+ my $stderr = $Global::fh{2};
+ print $stderr @_[1..$#_];
+ } else {
+ print STDERR @_[1..$#_];
+ }
+ }
+}
+
+sub my_memory_usage() {
+ # Returns:
+ # memory usage if found
+ # 0 otherwise
+ use strict;
+ use FileHandle;
+
+ local $/ = "\n";
+ my $pid = $$;
+ if(-e "/proc/$pid/stat") {
+ my $fh = FileHandle->new("</proc/$pid/stat");
+
+ my $data = <$fh>;
+ chomp $data;
+ $fh->close;
+
+ my @procinfo = split(/\s+/,$data);
+
+ return undef_as_zero($procinfo[22]);
+ } else {
+ return 0;
+ }
+}
+
+sub my_size() {
+ # Returns:
+ # $size = size of object if Devel::Size is installed
+ # -1 otherwise
+ my @size_this = (@_);
+ eval "use Devel::Size qw(size total_size)";
+ if ($@) {
+ return -1;
+ } else {
+ return total_size(@_);
+ }
+}
+
+sub my_dump(@) {
+ # Returns:
+ # ascii expression of object if Data::Dump(er) is installed
+ # error code otherwise
+ my @dump_this = (@_);
+ eval "use Data::Dump qw(dump);";
+ if ($@) {
+ # Data::Dump not installed
+ eval "use Data::Dumper;";
+ if ($@) {
+ my $err = "Neither Data::Dump nor Data::Dumper is installed\n".
+ "Not dumping output\n";
+ ::status($err);
+ return $err;
+ } else {
+ return Dumper(@dump_this);
+ }
+ } else {
+	# Create a dummy Data::Dump::dump as Hans Schou sometimes has
+	# it undefined
+	eval "sub Data::Dump::dump {}";
+ eval "use Data::Dump qw(dump);";
+ return (Data::Dump::dump(@dump_this));
+ }
+}
+
+sub my_croak(@) {
+ eval "use Carp; 1";
+ $Carp::Verbose = 1;
+ croak(@_);
+}
+
+sub my_carp(@) {
+ eval "use Carp; 1";
+ $Carp::Verbose = 1;
+ carp(@_);
+}
+
+
+sub __OBJECT_ORIENTED_PARTS__() {}
+
+
+package SSHLogin;
+
+sub new($$) {
+ my $class = shift;
+ my $s = shift;
+ my $origs = $s;
+ my %hostgroups;
+ my $ncpus;
+ my $sshcommand;
+ my $user;
+ my $password;
+ my $host;
+ my $port;
+ my $local;
+ my $string;
+ # SSHLogins can have these formats:
+ # @grp+grp/ncpu//usr/bin/ssh user@server
+ # ncpu//usr/bin/ssh user@server
+ # /usr/bin/ssh user@server
+ # user@server
+ # ncpu/user@server
+ # @grp+grp/user@server
+ # above with: user:password@server
+ # above with: user@server:port
+ # So:
+ # [@grp+grp][ncpu/][ssh command ][[user][:password]@][server[:port]]
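+    # e.g. (illustrative) '8//usr/bin/ssh joe:secret@example.com:2222'
+    # parses to: ncpus=8, sshcommand='/usr/bin/ssh', user='joe',
+    # password='secret', host='example.com', port='2222'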
+
+ # [@grp+grp]/ncpu//usr/bin/ssh user:pass@server:port
+ if($s =~ s:^\@([^/]+)/?::) {
+ # Look for SSHLogin hostgroups
+ %hostgroups = map { $_ => 1 } split(/\+|,/, $1);
+ }
+ # An SSHLogin is always in the hostgroup of its "numcpu/host"
+ $hostgroups{$s} = 1;
+
+ # [ncpu/]/usr/bin/ssh user:pass@server:port
+ if ($s =~ s:^(\d+)/::) { $ncpus = $1; }
+
+ # [/usr/bin/ssh ]user:pass@server:port
+ if($s =~ s/^(.*) //) { $sshcommand = $1; }
+
+ # [user:pass@]server:port
+ if($s =~ s/^([^@]+)@//) {
+ my $userpw = $1;
+ # user[:pass]
+ if($userpw =~ s/:(.*)//) {
+ $password = $1;
+ if($password eq "") { $password = $ENV{'SSHPASS'} }
+ if(not ::which("sshpass")) {
+ ::error(255,"--sshlogin with password requires ".
+ "sshpass installed");
+ }
+ }
+ $user = $userpw;
+ }
+ # [server]:port
+ if(not $s =~ /:.*:/
+ and
+ $s =~ s/^([-a-z0-9._]+)//i) {
+ # Not IPv6 (IPv6 has 2 or more ':')
+ $host = $1;
+ } elsif($s =~ s/^(\\[\[\]box0-9a-f.]+)//i) {
+ # RFC2673 allows for:
+ # \[b11010000011101] \[o64072/14] \[xd074/14] \[208.116.0.0/14]
+ $host = $1;
+ } elsif($s =~ s/^\[([0-9a-f:]+)\]//i
+ or
+ $s =~ s/^([0-9a-f:]+)//i) {
+ # RFC5952
+ # [2001:db8::1]:80
+ # 2001:db8::1.80
+ # 2001:db8::1p80
+ # 2001:db8::1#80
+ # 2001:db8::1:80 - not supported
+ # 2001:db8::1 port 80 - not supported
+ $host = $1;
+ }
+
+ # [:port]
+ if($s =~ s/^:(\w+)//i) {
+ $port = $1;
+ } elsif($s =~ s/^[p\.\#](\w+)//i) {
+ # RFC5952
+ # 2001:db8::1.80
+ # 2001:db8::1p80
+ # 2001:db8::1#80
+ $port = $1;
+ }
+
+ if($s and $s ne ':') {
+ ::die_bug("SSHLogin parser failed on '$origs' => '$s'");
+ }
+
+ $string =
+ # Only include the sshcommand in $string if it is set by user
+ ($sshcommand && $sshcommand." ").
+ ($user && $user."@").
+ ($host && $host).
+ ($port && ":$port");
+ if($host eq ':') {
+ $local = 1;
+ $string = ":";
+ } else {
+ $sshcommand ||= $opt::ssh || $ENV{'PARALLEL_SSH'} || "ssh";
+ }
+ # An SSHLogin is always in the hostgroup of its $string-name
+ $hostgroups{$string} = 1;
+ @Global::hostgroups{keys %hostgroups} = values %hostgroups;
+ # Used for file names for loadavg
+ my $no_slash_string = $string;
+ $no_slash_string =~ s/[^-a-z0-9:]/_/gi;
+ return bless {
+ 'string' => $string,
+ 'jobs_running' => 0,
+ 'jobs_completed' => 0,
+ 'maxlength' => undef,
+ 'max_jobs_running' => undef,
+ 'orig_max_jobs_running' => undef,
+ 'ncpus' => $ncpus,
+ 'sshcommand' => $sshcommand,
+ 'user' => $user,
+ 'password' => $password,
+ 'host' => $host,
+ 'port' => $port,
+ 'hostgroups' => \%hostgroups,
+ 'local' => $local,
+ 'control_path_dir' => undef,
+ 'control_path' => undef,
+ 'time_to_login' => undef,
+ 'last_login_at' => undef,
+ 'loadavg_file' => $Global::cache_dir . "/tmp/sshlogin/" .
+ $no_slash_string . "/loadavg",
+ 'loadavg' => undef,
+ 'last_loadavg_update' => 0,
+ 'swap_activity_file' => $Global::cache_dir . "/tmp/sshlogin/" .
+ $no_slash_string . "/swap_activity",
+ 'swap_activity' => undef,
+ }, ref($class) || $class;
+}
+
+sub DESTROY($) {
+ my $self = shift;
+ # Remove temporary files if they are created.
+ ::rm($self->{'loadavg_file'});
+ ::rm($self->{'swap_activity_file'});
+}
+
+sub string($) {
+ my $self = shift;
+ return $self->{'string'};
+}
+
+sub host($) {
+ my $self = shift;
+ return $self->{'host'};
+}
+
+sub sshcmd($) {
+    # Return the ssh command without the hostname
+ # Returns:
+ # "sshpass -e ssh -p port -l user"
+ my $self = shift;
+ my @local;
+ # [sshpass -e] ssh -p port -l user
+ if($self->{'password'}) { push @local, "sshpass -e"; }
+ # [ssh] -p port -l user
+ # TODO sshpass + space
+ push @local, $self->{'sshcommand'};
+ # [-p port] -l user
+ if($self->{'port'}) { push @local, '-p',$self->{'port'}; }
+ # [-l user]
+ if($self->{'user'}) { push @local, '-l',$self->{'user'}; }
+ if($opt::controlmaster) {
+ # Use control_path to make ssh faster
+ my $control_path = $self->control_path_dir()."/ssh-%r@%h:%p";
+
+ if(not $self->{'control_path'}{$control_path}++) {
+ # Master is not running for this control_path
+ # Start it
+ my $pid = fork();
+ if($pid) {
+ $Global::sshmaster{$pid} ||= 1;
+ } else {
+ push @local, "-S", $control_path;
+ $SIG{'TERM'} = undef;
+ # Run a sleep that outputs data, so it will discover
+ # if the ssh connection closes.
+ my $sleep = ::Q('$|=1;while(1){sleep 1;print "foo\n"}');
+ # Ignore the 'foo' being printed
+ open(STDOUT,">","/dev/null");
+ # STDERR >/dev/null to ignore
+ open(STDERR,">","/dev/null");
+ open(STDIN,"<","/dev/null");
+ exec(@local, "-MT", $self->{'host'}, "--",
+ "perl", "-e", $sleep);
+ }
+ }
+ push @local, "-S", ::Q($control_path);
+ }
+
+ return "@local";
+}
+
+sub wrap($@) {
+ # Input:
+ # @cmd = shell command to run on remote
+ # Returns:
+ # $sshwrapped = ssh remote @cmd
+ my $self = shift;
+ my @remote = @_;
+ return(join " ",
+ $self->sshcmd(), $self->{'host'}, "--", "exec", @remote);
+}
+
+sub hexwrap($@) {
+ # Input:
+    #	@cmd = perl expression to eval
+ # Returns:
+ # $hexencoded = perl command that decodes hex and evals @cmd
+ my $self = shift;
+ my $cmd = join("",@_);
+
+ # "#" is needed because Perl on MacOS X adds NULs
+ # when running pack q/H10000000/
+ my $hex = unpack "H*", $cmd."#";
+ # csh does not deal well with > 1000 chars in one word
+ # Insert space every 1000 char
+ $hex =~ s/\G.{1000}\K/ /sg;
+ # Explanation:
+ # Write this without special chars: eval pack 'H*', join '',@ARGV
+ # GNU_Parallel_worker = String so people can see this is from GNU Parallel
+ # eval+ = way to write 'eval ' without space (gives warning)
+ # pack+ = way to write 'pack ' without space
+ # q/H10000000/, = almost the same as "H*" but does not use *
+ # join+q//, = join '',
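+    # Illustrative example: hexwrap('print "x"') returns roughly
+    #	perl -X -e GNU_Parallel_worker,eval+pack+q/H10000000/,\
+    #	  join+q//,@ARGV 7072696e742022782223
+    # where 7072696e7420227822 is hex for 'print "x"' and 23 is the '#'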
+ return('perl -X -e '.
+ 'GNU_Parallel_worker,eval+pack+q/H10000000/,join+q//,@ARGV '.
+ $hex);
+}
+
+sub jobs_running($) {
+ my $self = shift;
+ return ($self->{'jobs_running'} || "0");
+}
+
+sub inc_jobs_running($) {
+ my $self = shift;
+ $self->{'jobs_running'}++;
+}
+
+sub dec_jobs_running($) {
+ my $self = shift;
+ $self->{'jobs_running'}--;
+}
+
+sub set_maxlength($$) {
+ my $self = shift;
+ $self->{'maxlength'} = shift;
+}
+
+sub maxlength($) {
+ my $self = shift;
+ return $self->{'maxlength'};
+}
+
+sub jobs_completed() {
+ my $self = shift;
+ return $self->{'jobs_completed'};
+}
+
+sub in_hostgroups() {
+ # Input:
+ # @hostgroups = the hostgroups to look for
+ # Returns:
+ # true if intersection of @hostgroups and the hostgroups of this
+ # SSHLogin is non-empty
+ my $self = shift;
+ return grep { defined $self->{'hostgroups'}{$_} } @_;
+}
+
+sub hostgroups() {
+ my $self = shift;
+ return keys %{$self->{'hostgroups'}};
+}
+
+sub inc_jobs_completed($) {
+ my $self = shift;
+ $self->{'jobs_completed'}++;
+ $Global::total_completed++;
+}
+
+sub set_max_jobs_running($$) {
+ my $self = shift;
+ if(defined $self->{'max_jobs_running'}) {
+ $Global::max_jobs_running -= $self->{'max_jobs_running'};
+ }
+ $self->{'max_jobs_running'} = shift;
+
+ if(defined $self->{'max_jobs_running'}) {
+	# max_jobs_running may be reset if -j is a file that has changed
+ $Global::max_jobs_running += $self->{'max_jobs_running'};
+ }
+ # Initialize orig to the first non-zero value that comes around
+ $self->{'orig_max_jobs_running'} ||= $self->{'max_jobs_running'};
+}
+
+sub memfree() {
+ # Returns:
+ # $memfree in bytes
+ my $self = shift;
+ $self->memfree_recompute();
+ # Return 1 if not defined.
+ return (not defined $self->{'memfree'} or $self->{'memfree'})
+}
+
+sub memfree_recompute() {
+ my $self = shift;
+ my $script = memfreescript();
+
+ # TODO add sshlogin and backgrounding
+ # Run the script twice if it gives 0 (typically intermittent error)
+ $self->{'memfree'} = ::qqx($script) || ::qqx($script);
+ if(not $self->{'memfree'}) {
+ ::die_bug("Less than 1 byte memory free");
+ }
+ #::debug("mem","New free:",$self->{'memfree'}," ");
+}
+
+{
+ my $script;
+
+ sub memfreescript() {
+ # Returns:
+ # shellscript for giving available memory in bytes
+ if(not $script) {
+ my %script_of = (
+ # /proc/meminfo
+ # MemFree: 7012 kB
+ # Buffers: 19876 kB
+ # Cached: 431192 kB
+ # SwapCached: 0 kB
+ "linux" => (
+ q{
+ print 1024 * qx{
+ awk '/^((Swap)?Cached|MemFree|Buffers):/
+ { sum += \$2} END { print sum }'
+ /proc/meminfo }
+ }),
+ # Android uses same code as GNU/Linux
+ "android" => (
+ q{
+ print 1024 * qx{
+ awk '/^((Swap)?Cached|MemFree|Buffers):/
+ { sum += \$2} END { print sum }'
+ /proc/meminfo }
+ }),
+ # $ vmstat 1 1
+ # procs memory page faults cpu
+ # r b w avm free re at pi po fr de sr in sy cs us sy id
+ # 1 0 0 242793 389737 5 1 0 0 0 0 0 107 978 60 1 1 99
+ "hpux" => (
+ q{
+ print (((reverse `vmstat 1 1`)[0]
+ =~ /(?:\d+\D+){4}(\d+)/)[0]*1024)
+ }),
+ # $ vmstat 1 2
+ # kthr memory page disk faults cpu
+ # r b w swap free re mf pi po fr de sr s3 s4 -- -- in sy cs us sy id
+ # 0 0 0 6496720 5170320 68 260 8 2 1 0 0 -0 3 0 0 309 1371 255 1 2 97
+ # 0 0 0 6434088 5072656 7 15 8 0 0 0 0 0 261 0 0 1889 1899 3222 0 8 92
+ #
+ # The second free value is correct
+ "solaris" => (
+ q{
+ print (((reverse `vmstat 1 2`)[0]
+ =~ /(?:\d+\D+){4}(\d+)/)[0]*1024)
+ }),
+ # hw.pagesize: 4096
+ # vm.stats.vm.v_cache_count: 0
+ # vm.stats.vm.v_inactive_count: 79574
+ # vm.stats.vm.v_free_count: 4507
+ "freebsd" => (
+ q{
+ for(qx{/sbin/sysctl -a}) {
+ if (/^([^:]+):\s+(.+)\s*$/s) {
+ $sysctl->{$1} = $2;
+ }
+ }
+ print $sysctl->{"hw.pagesize"} *
+ ($sysctl->{"vm.stats.vm.v_cache_count"}
+ + $sysctl->{"vm.stats.vm.v_inactive_count"}
+ + $sysctl->{"vm.stats.vm.v_free_count"});
+ }),
+ # Mach Virtual Memory Statistics: (page size of 4096 bytes)
+ # Pages free: 198061.
+ # Pages active: 159701.
+ # Pages inactive: 47378.
+ # Pages speculative: 29707.
+ # Pages wired down: 89231.
+ # "Translation faults": 928901425.
+ # Pages copy-on-write: 156988239.
+ # Pages zero filled: 271267894.
+ # Pages reactivated: 48895.
+ # Pageins: 1798068.
+ # Pageouts: 257.
+ # Object cache: 6603 hits of 1713223 lookups (0% hit rate)
+ 'darwin' => (
+ q{
+ $vm = `vm_stat`;
+ print (($vm =~ /page size of (\d+)/)[0] *
+ (($vm =~ /Pages free:\s+(\d+)/)[0] +
+ ($vm =~ /Pages inactive:\s+(\d+)/)[0]));
+ }),
+ );
+ my $perlscript = "";
+ # Make a perl script that detects the OS ($^O) and runs
+ # the appropriate command
+ for my $os (keys %script_of) {
+ $perlscript .= 'if($^O eq "'.$os.'") { '.$script_of{$os}.'}';
+ }
+ $script = "perl -e " . ::Q(::spacefree(1,$perlscript));
+ }
+ return $script;
+ }
+}
+
+sub limit($) {
+ # Returns:
+ # 0 = Below limit. Start another job.
+ # 1 = Over limit. Start no jobs.
+ # 2 = Kill youngest job
+ my $self = shift;
+
+ if(not defined $self->{'limitscript'}) {
+ my %limitscripts =
+ ("io" => q!
+ io() {
+ limit=$1;
+ io_file=$2;
+ # Do the measurement in the background
+ ((tmp=$(tempfile);
+ LANG=C iostat -x 1 2 > $tmp;
+ mv $tmp $io_file) </dev/null >/dev/null & );
+ perl -e '-e $ARGV[0] or exit(1);
+ for(reverse <>) {
+ /Device/ and last;
+ /(\S+)$/ and $max = $max > $1 ? $max : $1; }
+ exit ('$limit' < $max)' $io_file;
+ };
+ io %s %s
+ !,
+ "mem" => q!
+ mem() {
+ limit=$1;
+ awk '/^((Swap)?Cached|MemFree|Buffers):/{ sum += $2}
+ END {
+ if (sum*1024 < '$limit'/2) { exit 2; }
+ else { exit (sum*1024 < '$limit') }
+ }' /proc/meminfo;
+ };
+ mem %s;
+ !,
+ "load" => q!
+ load() {
+ limit=$1;
+ ps ax -o state,command |
+ grep -E '^[DOR].[^[]' |
+ wc -l |
+ perl -ne 'exit ('$limit' < $_)';
+ };
+ load %s
+ !,
+ );
+ my ($cmd,@args) = split /\s+/,$opt::limit;
+ if($limitscripts{$cmd}) {
+ my $tmpfile = ::tmpname("parlmt");
+ ++$Global::unlink{$tmpfile};
+ $self->{'limitscript'} =
+ ::spacefree(1, sprintf($limitscripts{$cmd},
+ ::multiply_binary_prefix(@args),$tmpfile));
+ } else {
+ $self->{'limitscript'} = $opt::limit;
+ }
+ }
+
+ my %env = %ENV;
+ local %ENV = %env;
+ $ENV{'SSHLOGIN'} = $self->string();
+ system($Global::shell,"-c",$self->{'limitscript'});
+ #::qqx($self->{'limitscript'});
+ ::debug("limit","limit `".$self->{'limitscript'}."` result ".($?>>8)."\n");
+ return $?>>8;
+}
+
+
+sub swapping($) {
+ my $self = shift;
+ my $swapping = $self->swap_activity();
+ return (not defined $swapping or $swapping)
+}
+
+sub swap_activity($) {
+ # If the currently known swap activity is too old:
+ # Recompute a new one in the background
+ # Returns:
+ # last swap activity computed
+ my $self = shift;
+ # Should we update the swap_activity file?
+ my $update_swap_activity_file = 0;
+ # Test with (on 64 core machine):
+ # seq 100 | parallel --lb -j100 'seq 1000 | parallel --noswap -j 1 true'
+ if(open(my $swap_fh, "<", $self->{'swap_activity_file'})) {
+ my $swap_out = <$swap_fh>;
+ close $swap_fh;
+ if($swap_out =~ /^(\d+)$/) {
+ $self->{'swap_activity'} = $1;
+ ::debug("swap", "New swap_activity: ", $self->{'swap_activity'});
+ }
+ ::debug("swap", "Last update: ", $self->{'last_swap_activity_update'});
+ if(time - $self->{'last_swap_activity_update'} > 10) {
+	    # last swap activity update was started more than 10 seconds ago
+ ::debug("swap", "Older than 10 sec: ", $self->{'swap_activity_file'});
+ $update_swap_activity_file = 1;
+ }
+ } else {
+ ::debug("swap", "No swap_activity file: ", $self->{'swap_activity_file'});
+ $self->{'swap_activity'} = undef;
+ $update_swap_activity_file = 1;
+ }
+ if($update_swap_activity_file) {
+ ::debug("swap", "Updating swap_activity file ", $self->{'swap_activity_file'});
+ $self->{'last_swap_activity_update'} = time;
+ my $dir = ::dirname($self->{'swap_activity_file'});
+ -d $dir or eval { File::Path::mkpath($dir); };
+ my $swap_activity;
+ $swap_activity = swapactivityscript();
+ if(not $self->local()) {
+ $swap_activity = $self->wrap($swap_activity);
+ }
+ # Run swap_activity measuring.
+ # As the command can take long to run if run remote
+ # save it to a tmp file before moving it to the correct file
+ my $file = $self->{'swap_activity_file'};
+ my ($dummy_fh, $tmpfile) = ::tmpfile(SUFFIX => ".swp");
+ ::debug("swap", "\n", $swap_activity, "\n");
+ my $qtmp = ::Q($tmpfile);
+ my $qfile = ::Q($file);
+ ::qqx("($swap_activity > $qtmp && mv $qtmp $qfile || rm $qtmp &)");
+ }
+ return $self->{'swap_activity'};
+}
+
+{
+ my $script;
+
+ sub swapactivityscript() {
+ # Returns:
+ # shellscript for detecting swap activity
+ #
+    # arguments for vmstat are OS dependent
+ # swap_in and swap_out are in different columns depending on OS
+ #
+ if(not $script) {
+ my %vmstat = (
+ # linux: $7*$8
+ # $ vmstat 1 2
+ # procs -----------memory---------- ---swap-- -----io---- -system-- ----cpu----
+ # r b swpd free buff cache si so bi bo in cs us sy id wa
+ # 5 0 51208 1701096 198012 18857888 0 0 37 153 28 19 56 11 33 1
+ # 3 0 51208 1701288 198012 18857972 0 0 0 0 3638 10412 15 3 82 0
+ 'linux' => ['vmstat 1 2 | tail -n1', '$7*$8'],
+
+ # solaris: $6*$7
+ # $ vmstat -S 1 2
+ # kthr memory page disk faults cpu
+ # r b w swap free si so pi po fr de sr s3 s4 -- -- in sy cs us sy id
+ # 0 0 0 4628952 3208408 0 0 3 1 1 0 0 -0 2 0 0 263 613 246 1 2 97
+ # 0 0 0 4552504 3166360 0 0 0 0 0 0 0 0 0 0 0 246 213 240 1 1 98
+ 'solaris' => ['vmstat -S 1 2 | tail -1', '$6*$7'],
+
+ # darwin (macosx): $21*$22
+ # $ vm_stat -c 2 1
+ # Mach Virtual Memory Statistics: (page size of 4096 bytes)
+ # free active specul inactive throttle wired prgable faults copy 0fill reactive purged file-backed anonymous cmprssed cmprssor dcomprs comprs pageins pageout swapins swapouts
+ # 346306 829050 74871 606027 0 240231 90367 544858K 62343596 270837K 14178 415070 570102 939846 356 370 116 922 4019813 4 0 0
+ # 345740 830383 74875 606031 0 239234 90369 2696 359 553 0 0 570110 941179 356 370 0 0 0 0 0 0
+ 'darwin' => ['vm_stat -c 2 1 | tail -n1', '$21*$22'],
+
+ # ultrix: $12*$13
+ # $ vmstat -S 1 2
+ # procs faults cpu memory page disk
+ # r b w in sy cs us sy id avm fre si so pi po fr de sr s0
+ # 1 0 0 4 23 2 3 0 97 7743 217k 0 0 0 0 0 0 0 0
+ # 1 0 0 6 40 8 0 1 99 7743 217k 0 0 3 0 0 0 0 0
+ 'ultrix' => ['vmstat -S 1 2 | tail -1', '$12*$13'],
+
+ # aix: $6*$7
+ # $ vmstat 1 2
+ # System configuration: lcpu=1 mem=2048MB
+ #
+ # kthr memory page faults cpu
+ # ----- ----------- ------------------------ ------------ -----------
+ # r b avm fre re pi po fr sr cy in sy cs us sy id wa
+ # 0 0 333933 241803 0 0 0 0 0 0 10 143 90 0 0 99 0
+ # 0 0 334125 241569 0 0 0 0 0 0 37 5368 184 0 9 86 5
+ 'aix' => ['vmstat 1 2 | tail -n1', '$6*$7'],
+
+ # freebsd: $8*$9
+ # $ vmstat -H 1 2
+ # procs memory page disks faults cpu
+ # r b w avm fre flt re pi po fr sr ad0 ad1 in sy cs us sy id
+ # 1 0 0 596716 19560 32 0 0 0 33 8 0 0 11 220 277 0 0 99
+ # 0 0 0 596716 19560 2 0 0 0 0 0 0 0 11 144 263 0 1 99
+ 'freebsd' => ['vmstat -H 1 2 | tail -n1', '$8*$9'],
+
+ # mirbsd: $8*$9
+ # $ vmstat 1 2
+ # procs memory page disks traps cpu
+ # r b w avm fre flt re pi po fr sr wd0 cd0 int sys cs us sy id
+ # 0 0 0 25776 164968 34 0 0 0 0 0 0 0 230 259 38 4 0 96
+ # 0 0 0 25776 164968 24 0 0 0 0 0 0 0 237 275 37 0 0 100
+ 'mirbsd' => ['vmstat 1 2 | tail -n1', '$8*$9'],
+
+ # netbsd: $7*$8
+ # $ vmstat 1 2
+ # procs memory page disks faults cpu
+ # r b avm fre flt re pi po fr sr w0 w1 in sy cs us sy id
+ # 0 0 138452 6012 54 0 0 0 1 2 3 0 4 100 23 0 0 100
+ # 0 0 138456 6008 1 0 0 0 0 0 0 0 7 26 19 0 0 100
+ 'netbsd' => ['vmstat 1 2 | tail -n1', '$7*$8'],
+
+ # openbsd: $8*$9
+ # $ vmstat 1 2
+ # procs memory page disks traps cpu
+ # r b w avm fre flt re pi po fr sr wd0 wd1 int sys cs us sy id
+ # 0 0 0 76596 109944 73 0 0 0 0 0 0 1 5 259 22 0 1 99
+ # 0 0 0 76604 109936 24 0 0 0 0 0 0 0 7 114 20 0 1 99
+ 'openbsd' => ['vmstat 1 2 | tail -n1', '$8*$9'],
+
+ # hpux: $8*$9
+ # $ vmstat 1 2
+ # procs memory page faults cpu
+ # r b w avm free re at pi po fr de sr in sy cs us sy id
+ # 1 0 0 247211 216476 4 1 0 0 0 0 0 102 73005 54 6 11 83
+ # 1 0 0 247211 216421 43 9 0 0 0 0 0 144 1675 96 25269512791222387000 25269512791222387000 105
+ 'hpux' => ['vmstat 1 2 | tail -n1', '$8*$9'],
+
+ # dec_osf (tru64): $11*$12
+ # $ vmstat 1 2
+ # Virtual Memory Statistics: (pagesize = 8192)
+ # procs memory pages intr cpu
+ # r w u act free wire fault cow zero react pin pout in sy cs us sy id
+ # 3 181 36 51K 1895 8696 348M 59M 122M 259 79M 0 5 218 302 4 1 94
+ # 3 181 36 51K 1893 8696 3 15 21 0 28 0 4 81 321 1 1 98
+ 'dec_osf' => ['vmstat 1 2 | tail -n1', '$11*$12'],
+
+ # gnu (hurd): $7*$8
+ # $ vmstat -k 1 2
+ # (pagesize: 4, size: 512288, swap size: 894972)
+ # free actv inact wired zeroed react pgins pgouts pfaults cowpfs hrat caobj cache swfree
+ # 371940 30844 89228 20276 298348 0 48192 19016 756105 99808 98% 876 20628 894972
+ # 371940 30844 89228 20276 +0 +0 +0 +0 +42 +2 98% 876 20628 894972
+ 'gnu' => ['vmstat -k 1 2 | tail -n1', '$7*$8'],
+
+	      # -nto (qnx has no swap)
+	      # -irix
+	      # -svr5 (scosysv)
+ );
+ my $perlscript = "";
+ # Make a perl script that detects the OS ($^O) and runs
+ # the appropriate vmstat command
+ for my $os (keys %vmstat) {
+ $vmstat{$os}[1] =~ s/\$/\\\\\\\$/g; # $ => \\\$
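+		# perl's backtick interpolation and the shell's double
+		# quotes each strip one level, so awk ends up seeing $7*$8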
+ $perlscript .= 'if($^O eq "'.$os.'") { print `'.$vmstat{$os}[0].' | awk "{print ' .
+ $vmstat{$os}[1] . '}"` }';
+ }
+ $script = "perl -e " . ::Q($perlscript);
+ }
+ return $script;
+ }
+}
+
+sub too_fast_remote_login($) {
+ my $self = shift;
+ if($self->{'last_login_at'} and $self->{'time_to_login'}) {
+ # sshd normally allows 10 simultaneous logins
+ # A login takes time_to_login
+ # So time_to_login/5 should be safe
+ # If now <= last_login + time_to_login/5: Then it is too soon.
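+	# e.g. time_to_login = 2 sec => at most one new login every 0.4 sec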
+ my $too_fast = (::now() <= $self->{'last_login_at'}
+ + $self->{'time_to_login'}/5);
+ ::debug("run", "Too fast? $too_fast ");
+ return $too_fast;
+ } else {
+ # No logins so far (or time_to_login not computed): it is not too fast
+ return 0;
+ }
+}
+
+sub last_login_at($) {
+ my $self = shift;
+ return $self->{'last_login_at'};
+}
+
+sub set_last_login_at($$) {
+ my $self = shift;
+ $self->{'last_login_at'} = shift;
+}
+
+sub loadavg_too_high($) {
+ my $self = shift;
+ my $loadavg = $self->loadavg();
+ if(defined $loadavg) {
+ ::debug("load", "Load $loadavg > ",$self->max_loadavg());
+ return $loadavg >= $self->max_loadavg();
+ } else {
+ # Unknown load: Assume load is too high
+ return 1;
+ }
+}
+
+
+
+sub loadavg($) {
+    # If the currently known loadavg is too old:
+ # Recompute a new one in the background
+ # The load average is computed as the number of processes waiting
+ # for disk or CPU right now. So it is the server load this instant
+ # and not averaged over several minutes. This is needed so GNU
+ # Parallel will at most start one job that will push the load over
+ # the limit.
+ #
+ # Returns:
+ # $last_loadavg = last load average computed (undef if none)
+
+ my $self = shift;
+ sub loadavg_cmd() {
+ if(not $Global::loadavg_cmd) {
+ # aix => "ps -ae -o state,command" # state wrong
+ # bsd => "ps ax -o state,command"
+ # sysv => "ps -ef -o s -o comm"
+ # cygwin => perl -ne 'close STDERR; /Name/ and print"\n"; \
+ # /(Name|Pid|Ppid|State):\s+(\S+)/ and print "$2\t";' /proc/*/status |
+ # awk '{print $2,$1}'
+ # dec_osf => bsd
+ # dragonfly => bsd
+ # freebsd => bsd
+ # gnu => bsd
+ # hpux => ps -el|awk '{print $2,$14,$15}'
+ # irix => ps -ef -o state -o comm
+ # linux => bsd
+ # minix => ps el|awk '{print \$1,\$11}'
+ # mirbsd => bsd
+ # netbsd => bsd
+ # openbsd => bsd
+ # solaris => sysv
+ # svr5 => sysv
+ # ultrix => ps -ax | awk '{print $3,$5}'
+ # unixware => ps -el|awk '{print $2,$14,$15}'
+ my $ps = ::spacefree(1,q{
+ $sysv="ps -ef -o s -o comm";
+ $sysv2="ps -ef -o state -o comm";
+ $bsd="ps ax -o state,command";
+ # Treat threads as processes
+ $bsd2="ps axH -o state,command";
+ $psel="ps -el|awk '{ print \$2,\$14,\$15 }'";
+ $cygwin=q{ perl -ne 'close STDERR; /Name/ and print"\n";
+ /(Name|Pid|Ppid|State):\s+(\S+)/ and print "$2\t";' /proc/*/status |
+ awk '{print $2,$1}' };
+ $dummy="echo S COMMAND;echo R dummy";
+ %ps=(
+ # TODO Find better code for AIX/Android
+ 'aix' => "uptime",
+ 'android' => "uptime",
+ 'cygwin' => $cygwin,
+ 'darwin' => $bsd,
+ 'dec_osf' => $sysv2,
+ 'dragonfly' => $bsd,
+ 'freebsd' => $bsd2,
+ 'gnu' => $bsd,
+ 'hpux' => $psel,
+ 'irix' => $sysv2,
+ 'linux' => $bsd2,
+ 'minix' => "ps el|awk '{print \$1,\$11}'",
+ 'mirbsd' => $bsd,
+ 'msys' => $cygwin,
+ 'netbsd' => $bsd,
+ 'nto' => $dummy,
+ 'openbsd' => $bsd,
+ 'solaris' => $sysv,
+ 'svr5' => $psel,
+ 'ultrix' => "ps -ax | awk '{print \$3,\$5}'",
+ 'MSWin32' => $sysv,
+ );
+ print `$ps{$^O}`;
+ });
+	    # The command is too long for csh, so hexwrap the command
+ $Global::loadavg_cmd = $self->hexwrap($ps);
+ }
+ return $Global::loadavg_cmd;
+ }
+ # Should we update the loadavg file?
+ my $update_loadavg_file = 0;
+ if(open(my $load_fh, "<", $self->{'loadavg_file'})) {
+ local $/; # $/ = undef => slurp whole file
+ my $load_out = <$load_fh>;
+ close $load_fh;
+ if($load_out =~ /\S/) {
+ # Content can be empty if ~/ is on NFS
+ # due to reading being non-atomic.
+ #
+ # Count lines starting with D,O,R but command does not start with [
+ my $load =()= ($load_out=~/(^\s?[DOR]\S* +(?=[^\[])\S)/gm);
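+	    # ( =()= forces list context, so $load is the match count)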
+ if($load > 0) {
+ # load is overestimated by 1
+ $self->{'loadavg'} = $load - 1;
+ ::debug("load", "New loadavg: ", $self->{'loadavg'},"\n");
+ } elsif ($load_out=~/average: (\d+.\d+)/) {
+ # AIX does not support instant load average
+ # 04:11AM up 21 days, 12:55, 1 user, load average: 1.85, 1.57, 1.55
+ $self->{'loadavg'} = $1;
+ } else {
+ ::die_bug("loadavg_invalid_content: " .
+ $self->{'loadavg_file'} . "\n$load_out");
+ }
+ }
+ $update_loadavg_file = 1;
+ } else {
+ ::debug("load", "No loadavg file: ", $self->{'loadavg_file'});
+ $self->{'loadavg'} = undef;
+ $update_loadavg_file = 1;
+ }
+ if($update_loadavg_file) {
+ ::debug("load", "Updating loadavg file", $self->{'loadavg_file'}, "\n");
+ $self->{'last_loadavg_update'} = time;
+	my $dir = ::dirname($self->{'loadavg_file'});
+ -d $dir or eval { File::Path::mkpath($dir); };
+ -w $dir or ::die_bug("Cannot write to $dir");
+ my $cmd = "";
+ if($self->{'string'} ne ":") {
+ $cmd = $self->wrap(loadavg_cmd());
+ } else {
+ $cmd .= loadavg_cmd();
+ }
+ # As the command can take long to run if run remote
+ # save it to a tmp file before moving it to the correct file
+ ::debug("load", "Update load\n");
+ my $file = ::Q($self->{'loadavg_file'});
+ # tmpfile on same filesystem as $file
+ my $tmpfile = $file.$$;
+ $ENV{'SSHPASS'} = $self->{'password'};
+ ::qqx("($cmd > $tmpfile && mv $tmpfile $file || rm $tmpfile & )");
+ }
+ return $self->{'loadavg'};
+}
+
+sub max_loadavg($) {
+ my $self = shift;
+ # If --load is a file it might be changed
+ if($Global::max_load_file) {
+ my $mtime = (stat($Global::max_load_file))[9];
+ if($mtime > $Global::max_load_file_last_mod) {
+ $Global::max_load_file_last_mod = $mtime;
+ for my $sshlogin (values %Global::host) {
+ $sshlogin->set_max_loadavg(undef);
+ }
+ }
+ }
+ if(not defined $self->{'max_loadavg'}) {
+ $self->{'max_loadavg'} =
+ $self->compute_max_loadavg($opt::load);
+ }
+ ::debug("load", "max_loadavg: ", $self->string(), " ", $self->{'max_loadavg'});
+ return $self->{'max_loadavg'};
+}
+
+sub set_max_loadavg($$) {
+ my $self = shift;
+ $self->{'max_loadavg'} = shift;
+}
+
+sub compute_max_loadavg($) {
+ # Parse the max loadaverage that the user asked for using --load
+ # Returns:
+ # max loadaverage
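+    # e.g. with ncpus = 8: '+2' => 10, '-2' => 6, '50%' => 4, '3.5' => 3.5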
+ my $self = shift;
+ my $loadspec = shift;
+ my $load;
+ if(defined $loadspec) {
+ if($loadspec =~ /^\+(\d+)$/) {
+ # E.g. --load +2
+ my $j = $1;
+ $load =
+ $self->ncpus() + $j;
+ } elsif ($loadspec =~ /^-(\d+)$/) {
+ # E.g. --load -2
+ my $j = $1;
+ $load =
+ $self->ncpus() - $j;
+ } elsif ($loadspec =~ /^(\d+)\%$/) {
+ my $j = $1;
+ $load =
+ $self->ncpus() * $j / 100;
+ } elsif ($loadspec =~ /^(\d+(\.\d+)?)$/) {
+ $load = $1;
+ } elsif (-f $loadspec) {
+ $Global::max_load_file = $loadspec;
+ $Global::max_load_file_last_mod = (stat($Global::max_load_file))[9];
+ $load = $self->compute_max_loadavg(
+ ::slurp_or_exit($Global::max_load_file)
+ );
+ } else {
+ ::error("Parsing of --load failed.");
+ ::die_usage();
+ }
+ if($load < 0.01) {
+ $load = 0.01;
+ }
+ }
+ return $load;
+}
+
+sub time_to_login($) {
+ my $self = shift;
+ return $self->{'time_to_login'};
+}
+
+sub set_time_to_login($$) {
+ my $self = shift;
+ $self->{'time_to_login'} = shift;
+}
+
+sub max_jobs_running($) {
+ my $self = shift;
+ if(not defined $self->{'max_jobs_running'}) {
+ my $nproc = $self->compute_number_of_processes($opt::jobs);
+ $self->set_max_jobs_running($nproc);
+ }
+ return $self->{'max_jobs_running'};
+}
+
+sub orig_max_jobs_running($) {
+ my $self = shift;
+ return $self->{'orig_max_jobs_running'};
+}
+
+sub compute_number_of_processes($) {
+ # Number of processes wanted and limited by system resources
+ # Returns:
+ # Number of processes
+ my $self = shift;
+ my $opt_P = shift;
+ my $wanted_processes = $self->user_requested_processes($opt_P);
+ if(not defined $wanted_processes) {
+ $wanted_processes = $Global::default_simultaneous_sshlogins;
+ }
+ ::debug("load", "Wanted procs: $wanted_processes\n");
+ my $system_limit =
+ $self->processes_available_by_system_limit($wanted_processes);
+ ::debug("load", "Limited to procs: $system_limit\n");
+ return $system_limit;
+}
+
+{
+ my @children;
+ my $max_system_proc_reached;
+ my $more_filehandles;
+ my %fh;
+ my $tmpfhname;
+ my $count_jobs_already_read;
+ my @jobs;
+ my $job;
+ my @args;
+ my $arg;
+
+ sub reserve_filehandles($) {
+ # Reserves filehandle
+ my $n = shift;
+ for (1..$n) {
+ $more_filehandles &&= open($fh{$tmpfhname++}, "<", "/dev/null");
+ }
+ }
+
+ sub reserve_process() {
+ # Spawn a dummy process
+ my $child;
+ if($child = fork()) {
+ push @children, $child;
+ $Global::unkilled_children{$child} = 1;
+ } elsif(defined $child) {
+ # This is the child
+ # The child takes one process slot
+ # It will be killed later
+ $SIG{'TERM'} = $Global::original_sig{'TERM'};
+ if($^O eq "cygwin" or $^O eq "msys" or $^O eq "nto") {
+ # The exec does not work on Cygwin and QNX
+ sleep 10101010;
+ } else {
+ # 'exec sleep' takes less RAM than sleeping in perl
+ exec 'sleep', 10101;
+ }
+ exit(0);
+ } else {
+ # Failed to spawn
+ $max_system_proc_reached = 1;
+ }
+ }
+
+ sub get_args_or_jobs() {
+ # Get an arg or a job (depending on mode)
+ if($Global::semaphore or ($opt::pipe and not $opt::tee)) {
+ # Skip: No need to get args
+ return 1;
+ } elsif(defined $opt::retries and $count_jobs_already_read) {
+ # For retries we may need to run all jobs on this sshlogin
+ # so include the already read jobs for this sshlogin
+ $count_jobs_already_read--;
+ return 1;
+ } else {
+ if($opt::X or $opt::m) {
+ # The arguments may have to be re-spread over several jobslots
+ # So pessimistically only read one arg per jobslot
+ # instead of a full commandline
+ if($Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->empty()) {
+ if($Global::JobQueue->empty()) {
+ return 0;
+ } else {
+ $job = $Global::JobQueue->get();
+ push(@jobs, $job);
+ return 1;
+ }
+ } else {
+ $arg = $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->get();
+ push(@args, $arg);
+ return 1;
+ }
+ } else {
+ # If there are no more command lines, then we have a process
+ # per command line, so no need to go further
+ if($Global::JobQueue->empty()) {
+ return 0;
+ } else {
+ $job = $Global::JobQueue->get();
+ # Replacement must happen here due to seq()
+ $job and $job->replaced();
+ push(@jobs, $job);
+ return 1;
+ }
+ }
+ }
+ }
+
+ sub cleanup() {
+ # Cleanup: Close the files
+ for (values %fh) { close $_ }
+ # Cleanup: Kill the children
+ for my $pid (@children) {
+ kill 9, $pid;
+ waitpid($pid,0);
+ delete $Global::unkilled_children{$pid};
+ }
+ # Cleanup: Unget the command_lines or the @args
+ $Global::JobQueue->{'commandlinequeue'}{'arg_queue'}->unget(@args);
+ @args = ();
+ $Global::JobQueue->unget(@jobs);
+ @jobs = ();
+ }
+
+ sub processes_available_by_system_limit($) {
+ # If the wanted number of processes is bigger than the system limits:
+ # Limit them to the system limits
+ # Limits are: File handles, number of input lines, processes,
+ # and taking > 1 second to spawn 10 extra processes
+ # Returns:
+ # Number of processes
+ my $self = shift;
+ my $wanted_processes = shift;
+ my $system_limit = 0;
+ my $slow_spawning_warning_printed = 0;
+ my $time = time;
+ $more_filehandles = 1;
+ $tmpfhname = "TmpFhNamE";
+
+ # perl uses 7 filehandles for something?
+ # parallel uses 1 for memory_usage
+ # parallel uses 4 for ?
+ reserve_filehandles(12);
+ # Two processes for load avg and ?
+ reserve_process();
+ reserve_process();
+
+ # For --retries count also jobs already run
+ $count_jobs_already_read = $Global::JobQueue->next_seq();
+ my $wait_time_for_getting_args = 0;
+ my $start_time = time;
+ if($wanted_processes < $Global::infinity) {
+ $Global::dummy_jobs = 1;
+ }
+ while(1) {
+ $system_limit >= $wanted_processes and last;
+ not $more_filehandles and last;
+ $max_system_proc_reached and last;
+
+ my $before_getting_arg = time;
+ if(!$Global::dummy_jobs) {
+ get_args_or_jobs() or last;
+ }
+ $wait_time_for_getting_args += time - $before_getting_arg;
+ $system_limit++;
+
+ # Every simultaneous process uses 2 filehandles to write to
+ # and 2 filehandles to read from
+ reserve_filehandles(4);
+
+ # System process limit
+ reserve_process();
+
+ my $forktime = time - $time - $wait_time_for_getting_args;
+ ::debug("run", "Time to fork $system_limit procs: ".
+ $wait_time_for_getting_args, " ", $forktime,
+ " (processes so far: ", $system_limit,")\n");
+ if($system_limit > 10 and
+ $forktime > 1 and
+ $forktime > $system_limit * 0.01) {
+		# It took more than 0.01 second on average to fork a process.
+		# Warn the user; they can press Ctrl-C to stop.
+ ::warning_once(
+ "Starting $system_limit processes took > $forktime sec.",
+ "Consider adjusting -j. Press CTRL-C to stop.");
+ }
+ }
+ cleanup();
+
+ if($system_limit < $wanted_processes) {
+ # The system_limit is less than the wanted_processes
+ if($system_limit < 1 and not $Global::JobQueue->empty()) {
+ ::error(255,"Cannot spawn any jobs.",
+ "Try increasing 'ulimit -u' (try: ulimit -u `ulimit -Hu`)",
+ "or increasing 'nproc' in /etc/security/limits.conf",
+ "or increasing /proc/sys/kernel/pid_max");
+ }
+ if(not $more_filehandles) {
+ ::warning("Only enough file handles to run ".
+ $system_limit. " jobs in parallel.",
+ "Try running 'parallel -j0 -N $system_limit --pipe parallel -j0'",
+ "or increasing 'ulimit -n' (try: ulimit -n `ulimit -Hn`)",
+ "or increasing 'nofile' in /etc/security/limits.conf",
+ "or increasing /proc/sys/fs/file-max");
+ }
+ if($max_system_proc_reached) {
+ ::warning("Only enough available processes to run ".
+ $system_limit. " jobs in parallel.",
+ "Try increasing 'ulimit -u' (try: ulimit -u `ulimit -Hu`)",
+ "or increasing 'nproc' in /etc/security/limits.conf",
+ "or increasing /proc/sys/kernel/pid_max");
+ }
+ }
+ if($] == 5.008008 and $system_limit > 1000) {
+ # https://savannah.gnu.org/bugs/?36942
+ $system_limit = 1000;
+ }
+ if($Global::JobQueue->empty()) {
+ $system_limit ||= 1;
+ }
+ if($self->string() ne ":" and
+ $system_limit > $Global::default_simultaneous_sshlogins) {
+ $system_limit =
+ $self->simultaneous_sshlogin_limit($system_limit);
+ }
+ return $system_limit;
+ }
+}
+
+sub simultaneous_sshlogin_limit($) {
+ # Test by logging in wanted number of times simultaneously
+ # Returns:
+ # min($wanted_processes,$working_simultaneous_ssh_logins-1)
+ my $self = shift;
+ my $wanted_processes = shift;
+ if($self->{'time_to_login'}) {
+ return $wanted_processes;
+ }
+
+ # Try twice because it guesses wrong sometimes
+ # Choose the minimal
+ my $ssh_limit =
+ ::min($self->simultaneous_sshlogin($wanted_processes),
+ $self->simultaneous_sshlogin($wanted_processes));
+ if($ssh_limit < $wanted_processes) {
+ my $serverlogin = $self->string();
+ ::warning("ssh to $serverlogin only allows ".
+ "for $ssh_limit simultaneous logins.",
+ "You may raise this by changing",
+ "/etc/ssh/sshd_config:MaxStartups and MaxSessions on $serverlogin.",
+ "You can also try --sshdelay 0.1",
+ "Using only ".($ssh_limit-1)." connections ".
+ "to avoid race conditions.");
+ # Race condition can cause problem if using all sshs.
+ if($ssh_limit > 1) { $ssh_limit -= 1; }
+ }
+ return $ssh_limit;
+}
+
+sub simultaneous_sshlogin($) {
+ # Using $sshlogin try to see if we can do $wanted_processes
+ # simultaneous logins
+ # (ssh host echo simul-login & ssh host echo simul-login & ...) |
+ # grep simul|wc -l
+ # Input:
+ # $wanted_processes = Try for this many logins in parallel
+ # Returns:
+    #	$ssh_limit = Number of successful parallel logins
+ local $/ = "\n";
+ my $self = shift;
+ my $wanted_processes = shift;
+ my $sshdelay = $opt::sshdelay ? "sleep $opt::sshdelay;" : "";
+ # TODO sh -c wrapper to work for csh
+ my $cmd = ($sshdelay.$self->wrap("echo simultaneouslogin").
+ "</dev/null 2>&1 &")x$wanted_processes;
+ ::debug("init","Trying $wanted_processes logins at ".$self->string()."\n");
+ open (my $simul_fh, "-|", "($cmd)|grep simultaneouslogin | wc -l") or
+ ::die_bug("simultaneouslogin");
+ my $ssh_limit = <$simul_fh>;
+ close $simul_fh;
+ chomp $ssh_limit;
+ return $ssh_limit;
+}
+
+sub set_ncpus($$) {
+ my $self = shift;
+ $self->{'ncpus'} = shift;
+}
+
+sub user_requested_processes($) {
+ # Parse the number of processes that the user asked for using -j
+ # Input:
+ # $opt_P = string formatted as for -P
+ # Returns:
+ # $processes = the number of processes to run on this sshlogin
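+    # e.g. with 8 cpu threads: '-P 0' => infinity,
+    #	'-P +3' => 11, '-P 40%' => ceil(3.2) = 4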
+ my $self = shift;
+ my $opt_P = shift;
+ my $processes;
+ if(defined $opt_P) {
+ if (-f $opt_P) {
+ $Global::max_procs_file = $opt_P;
+ my $opt_P_file = ::slurp_or_exit($Global::max_procs_file);
+ if($opt_P_file !~ /\S/) {
+ ::warning_once("$Global::max_procs_file is empty. ".
+ "Treated as 100%");
+ $opt_P_file = "100%";
+ }
+ $processes = $self->user_requested_processes($opt_P_file);
+ } else {
+ if($opt_P eq "0") {
+ # -P 0 = infinity (or at least close)
+ $processes = $Global::infinity;
+ } else {
+ # -P +3 and -P -1
+ $opt_P =~ s/^([-+])/\$self->ncpus()$1/;
+ # -P 40%
+ $opt_P =~ s:%$:*\$self->ncpus()/100:;
+ $processes = eval $opt_P;
+ if($processes <= 0) {
+ # Do not go below 1
+ $processes = 1;
+ }
+ }
+ }
+ $processes = ::ceil($processes);
+ }
+ return $processes;
+}
+
+sub ncpus($) {
+ # Number of CPU threads
+ # --use_sockets_instead_of_threads = count socket instead
+ # --use_cores_instead_of_threads = count physical cores instead
+ # Returns:
+ # $ncpus = number of cpu (threads) on this sshlogin
+ local $/ = "\n";
+ my $self = shift;
+ if(not defined $self->{'ncpus'}) {
+ if($self->local()) {
+ if($opt::use_sockets_instead_of_threads) {
+ $self->{'ncpus'} = socket_core_thread()->{'sockets'};
+ } elsif($opt::use_cores_instead_of_threads) {
+ $self->{'ncpus'} = socket_core_thread()->{'cores'};
+ } else {
+ $self->{'ncpus'} = socket_core_thread()->{'threads'};
+ }
+ } else {
+ my $ncpu;
+ $ENV{'SSHPASS'} = $self->{'password'};
+ ::debug("init",("echo | ".$self->wrap("parallel --number-of-sockets")));
+ if($opt::use_sockets_instead_of_threads
+ or
+ $opt::use_cpus_instead_of_cores) {
+ $ncpu = ::qqx("echo | ".$self->wrap("parallel --number-of-sockets"));
+ } elsif($opt::use_cores_instead_of_threads) {
+ $ncpu = ::qqx("echo | ".$self->wrap("parallel --number-of-cores"));
+ } else {
+ $ncpu = ::qqx("echo | ".$self->wrap("parallel --number-of-threads"));
+ }
+ chomp $ncpu;
+ if($ncpu =~ /^\s*[0-9]+\s*$/s) {
+ $self->{'ncpus'} = $ncpu;
+ } else {
+ ::warning("Could not figure out ".
+ "number of cpus on ".$self->string." ($ncpu). Using 1.");
+ $self->{'ncpus'} = 1;
+ }
+ }
+ }
+ return $self->{'ncpus'};
+}
+
+
+sub nproc() {
+ # Returns:
+ # Number of threads using `nproc`
+ my $no_of_threads = ::qqx("nproc");
+ chomp $no_of_threads;
+ return $no_of_threads;
+}
+
+sub no_of_sockets() {
+ return socket_core_thread()->{'sockets'};
+}
+
+sub no_of_cores() {
+ return socket_core_thread()->{'cores'};
+}
+
+sub no_of_threads() {
+ return socket_core_thread()->{'threads'};
+}
+
+sub socket_core_thread() {
+ # Returns:
+ # {
+ # 'sockets' => #sockets = number of socket with CPU present
+ # 'cores' => #cores = number of physical cores
+ # 'threads' => #threads = number of compute cores (hyperthreading)
+ # 'active' => #taskset_threads = number of taskset limited cores
+ # }
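+    # e.g. a 2-socket, 8-core, 16-thread machine run under
+    # 'taskset -c 0-3' gives active=4, so the returned values
+    # are capped at: sockets=2, cores=4, threads=4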
+ my $cpu;
+ if ($^O eq 'linux') {
+ $cpu = sct_gnu_linux($cpu);
+ } elsif ($^O eq 'android') {
+ $cpu = sct_android($cpu);
+ } elsif ($^O eq 'freebsd') {
+ $cpu = sct_freebsd($cpu);
+ } elsif ($^O eq 'netbsd') {
+ $cpu = sct_netbsd($cpu);
+ } elsif ($^O eq 'openbsd') {
+ $cpu = sct_openbsd($cpu);
+ } elsif ($^O eq 'gnu') {
+ $cpu = sct_hurd($cpu);
+ } elsif ($^O eq 'darwin') {
+ $cpu = sct_darwin($cpu);
+ } elsif ($^O eq 'solaris') {
+ $cpu = sct_solaris($cpu);
+ } elsif ($^O eq 'aix') {
+ $cpu = sct_aix($cpu);
+ } elsif ($^O eq 'hpux') {
+ $cpu = sct_hpux($cpu);
+ } elsif ($^O eq 'nto') {
+ $cpu = sct_qnx($cpu);
+ } elsif ($^O eq 'svr5') {
+ $cpu = sct_openserver($cpu);
+ } elsif ($^O eq 'irix') {
+ $cpu = sct_irix($cpu);
+ } elsif ($^O eq 'dec_osf') {
+ $cpu = sct_tru64($cpu);
+ } else {
+ # Try all methods until we find something that works
+ $cpu = (sct_gnu_linux($cpu)
+ || sct_android($cpu)
+ || sct_freebsd($cpu)
+ || sct_netbsd($cpu)
+ || sct_openbsd($cpu)
+ || sct_hurd($cpu)
+ || sct_darwin($cpu)
+ || sct_solaris($cpu)
+ || sct_aix($cpu)
+ || sct_hpux($cpu)
+ || sct_qnx($cpu)
+ || sct_openserver($cpu)
+ || sct_irix($cpu)
+ || sct_tru64($cpu)
+ );
+ }
+ if(not $cpu) {
+ # Fall back: Set all to nproc
+ my $nproc = nproc();
+ if($nproc) {
+ $cpu->{'sockets'} =
+ $cpu->{'cores'} =
+ $cpu->{'threads'} =
+ $cpu->{'active'} =
+ $nproc;
+ }
+ }
+ if(not $cpu) {
+ ::warning("Cannot figure out number of cpus. Using 1.");
+ $cpu->{'sockets'} =
+ $cpu->{'cores'} =
+ $cpu->{'threads'} =
+ $cpu->{'active'} =
+ 1
+ }
+ $cpu->{'sockets'} ||= 1;
+ $cpu->{'threads'} ||= $cpu->{'cores'};
+ $cpu->{'active'} ||= $cpu->{'threads'};
+ chomp($cpu->{'sockets'},
+ $cpu->{'cores'},
+ $cpu->{'threads'},
+ $cpu->{'active'});
+ # Choose minimum of active and actual
+ my $mincpu;
+ $mincpu->{'sockets'} = ::min($cpu->{'sockets'},$cpu->{'active'});
+ $mincpu->{'cores'} = ::min($cpu->{'cores'},$cpu->{'active'});
+ $mincpu->{'threads'} = ::min($cpu->{'threads'},$cpu->{'active'});
+ return $mincpu;
+}
+
+sub sct_gnu_linux($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ my $cpu = shift;
+
+ sub read_topology($) {
+ my $prefix = shift;
+	my %sibling;
+ my %socket;
+ my $thread;
+ for($thread = 0;
+ -r "$prefix/cpu$thread/topology/physical_package_id";
+ $thread++) {
+ $socket{::slurp_or_exit(
+ "$prefix/cpu$thread/topology/physical_package_id")}++;
+ }
+ for($thread = 0;
+ -r "$prefix/cpu$thread/topology/thread_siblings";
+ $thread++) {
+	    $sibling{::slurp_or_exit(
+		"$prefix/cpu$thread/topology/thread_siblings")}++;
+ }
+ $cpu->{'sockets'} = keys %socket;
+	$cpu->{'cores'} = keys %sibling;
+ $cpu->{'threads'} = $thread;
+ }
+
+ sub read_cpuinfo(@) {
+ my @cpuinfo = @_;
+ $cpu->{'sockets'} = 0;
+ $cpu->{'cores'} = 0;
+ $cpu->{'threads'} = 0;
+ my %seen;
+ my %phy_seen;
+ my $physicalid;
+ for(@cpuinfo) {
+ # physical id : 0
+ if(/^physical id.*[:](.*)/) {
+ $physicalid = $1;
+ if(not $phy_seen{$1}++) {
+ $cpu->{'sockets'}++;
+ }
+ }
+ # core id : 3
+ if(/^core id.*[:](.*)/ and not $seen{$physicalid,$1}++) {
+ $cpu->{'cores'}++;
+ }
+ # processor : 2
+ /^processor.*[:]\s*\d/i and $cpu->{'threads'}++;
+ }
+ $cpu->{'cores'} ||= $cpu->{'threads'};
+ $cpu->{'cpus'} ||= $cpu->{'threads'};
+ $cpu->{'sockets'} ||= 1;
+ }
+
+ sub read_lscpu(@) {
+ my @lscpu = @_;
+ my $threads_per_core;
+ my $cores_per_socket;
+ for(@lscpu) {
+ /^CPU.s.:\s*(\d+)/ and $cpu->{'threads'} = $1;
+ /^Thread.s. per core:\s*(\d+)/ and $threads_per_core = $1;
+ /^Core.s. per socket:\s*(\d+)/ and $cores_per_socket = $1;
+ /^(CPU )?Socket.s.:\s*(\d+)/i and $cpu->{'sockets'} = $2;
+ }
+ if($cores_per_socket and $cpu->{'sockets'}) {
+ $cpu->{'cores'} = $cores_per_socket * $cpu->{'sockets'};
+ }
+ if($threads_per_core and $cpu->{'cores'}) {
+ $cpu->{'threads'} = $threads_per_core * $cpu->{'cores'};
+ }
+ if($threads_per_core and $cpu->{'threads'}) {
+ $cpu->{'cores'} = $cpu->{'threads'} / $threads_per_core;
+ }
+ $cpu->{'cpus'} ||= $cpu->{'threads'};
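+	# e.g. lscpu saying: CPU(s): 16, Thread(s) per core: 2,
+	# Core(s) per socket: 4, Socket(s): 2
+	# => sockets=2, cores=4*2=8, threads=2*8=16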
+ }
+
+ local $/ = "\n"; # If delimiter is set, then $/ will be wrong
+ my @cpuinfo;
+ my @lscpu;
+ if($ENV{'PARALLEL_CPUINFO'}) {
+ # Use CPUINFO from environment - used for testing only
+ read_cpuinfo(split/(?<=\n)/,$ENV{'PARALLEL_CPUINFO'});
+ } elsif($ENV{'PARALLEL_LSCPU'}) {
+ # Use LSCPU from environment - used for testing only
+ read_lscpu(split/(?<=\n)/,$ENV{'PARALLEL_LSCPU'});
+ } elsif(-r "$ENV{'PARALLEL_CPUPREFIX'}/cpu0/topology/thread_siblings") {
+ # Use CPUPREFIX from environment - used for testing only
+ read_topology($ENV{'PARALLEL_CPUPREFIX'});
+ } elsif($cpu->{'sockets'} and $cpu->{'cores'} and $cpu->{'threads'}) {
+ # Skip /proc/cpuinfo - already set
+ } else {
+ # Not debugging: Look at this computer
+ if(!($cpu->{'sockets'} and $cpu->{'cores'} and $cpu->{'threads'})
+ and
+ open(my $in_fh, "-|", "lscpu")) {
+ # Parse output from lscpu
+ read_lscpu(<$in_fh>);
+ close $in_fh;
+ }
+ if(!($cpu->{'sockets'} and $cpu->{'cores'} and $cpu->{'threads'})
+ and
+ -r "/sys/devices/system/cpu/cpu0/topology/thread_siblings") {
+ read_topology("/sys/devices/system/cpu");
+ }
+ if(!($cpu->{'sockets'} and $cpu->{'cores'} and $cpu->{'threads'})
+ and
+ open(my $in_fh, "<", "/proc/cpuinfo")) {
+ # Read /proc/cpuinfo
+ read_cpuinfo(<$in_fh>);
+ close $in_fh;
+ }
+ }
+ if(-e "/proc/self/status"
+ and not $ENV{'PARALLEL_CPUINFO'}
+ and not $ENV{'PARALLEL_LSCPU'}) {
+ # if 'taskset' is used to limit number of threads
+ if(open(my $in_fh, "<", "/proc/self/status")) {
+ while(<$in_fh>) {
+ if(/^Cpus_allowed:\s*(\S+)/) {
+ my $a = $1;
+ $a =~ tr/,//d;
+ $cpu->{'active'} = unpack ("%32b*", pack ("H*",$a));
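+		    # %32b* adds up the set bits (popcount),
+		    # e.g. Cpus_allowed: ff => 8 usable cpu threads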
+ }
+ }
+ close $in_fh;
+ }
+ }
+ return $cpu;
+}
+
+sub sct_android($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ # Use GNU/Linux
+ return sct_gnu_linux($_[0]);
+}
+
+sub sct_freebsd($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||=
+ (::qqx(qq{ sysctl -a dev.cpu | grep \%parent | awk '{ print \$2 }' | uniq | wc -l | awk '{ print \$1 }' })
+ or
+ ::qqx(qq{ sysctl hw.ncpu | awk '{ print \$2 }' }));
+ $cpu->{'threads'} ||=
+ (::qqx(qq{ sysctl hw.ncpu | awk '{ print \$2 }' })
+ or
+ ::qqx(qq{ sysctl -a dev.cpu | grep \%parent | awk '{ print \$2 }' | uniq | wc -l | awk '{ print \$1 }' }));
+ return $cpu;
+}
+
+sub sct_netbsd($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||= ::qqx("sysctl -n hw.ncpu");
+ return $cpu;
+}
+
+sub sct_openbsd($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||= ::qqx('sysctl -n hw.ncpu');
+ return $cpu;
+}
+
+sub sct_hurd($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||= ::qqx("nproc");
+ return $cpu;
+}
+
+sub sct_darwin($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||=
+ (::qqx('sysctl -n hw.physicalcpu')
+ or
+ ::qqx(qq{ sysctl -a hw | grep [^a-z]physicalcpu[^a-z] | awk '{ print \$2 }' }));
+ $cpu->{'threads'} ||=
+ (::qqx('sysctl -n hw.logicalcpu')
+ or
+ ::qqx(qq{ sysctl -a hw | grep [^a-z]logicalcpu[^a-z] | awk '{ print \$2 }' }));
+ return $cpu;
+}
+
+sub sct_solaris($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ if(not $cpu->{'cores'}) {
+ if(-x "/usr/bin/kstat") {
+ my @chip_id = ::qqx("/usr/bin/kstat cpu_info|grep chip_id");
+ if($#chip_id >= 0) {
+ $cpu->{'sockets'} ||= $#chip_id +1;
+ }
+ my @core_id = ::qqx("/usr/bin/kstat -m cpu_info|grep -w core_id|uniq");
+ if($#core_id >= 0) {
+ $cpu->{'cores'} ||= $#core_id +1;
+ }
+ }
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = ::qqx("/usr/sbin/psrinfo -p");
+ if($#psrinfo >= 0) {
+ $cpu->{'sockets'} ||= $psrinfo[0];
+ }
+ }
+ if(-x "/usr/sbin/prtconf") {
+ my @prtconf = ::qqx("/usr/sbin/prtconf | grep cpu..instance");
+ if($#prtconf >= 0) {
+ $cpu->{'cores'} ||= $#prtconf +1;
+ }
+ }
+ }
+ return $cpu;
+}
+
+sub sct_aix($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ if(not $cpu->{'cores'}) {
+ if(-x "/usr/sbin/lscfg") {
+ if(open(my $in_fh, "-|",
+ "/usr/sbin/lscfg -vs |grep proc | wc -l|tr -d ' '")) {
+ $cpu->{'cores'} = <$in_fh>;
+ close $in_fh;
+ }
+ }
+ }
+ if(not $cpu->{'threads'}) {
+ if(-x "/usr/bin/vmstat") {
+ if(open(my $in_fh, "-|", "/usr/bin/vmstat 1 1")) {
+ while(<$in_fh>) {
+ /lcpu=([0-9]*) / and $cpu->{'threads'} = $1;
+ }
+ close $in_fh;
+ }
+ }
+ }
+ return $cpu;
+}
+
+sub sct_hpux($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||=
+ ::qqx(qq{ /usr/bin/mpsched -s 2>&1 | grep 'Locality Domain Count' | awk '{ print \$4 }'});
+ $cpu->{'threads'} ||=
+ ::qqx(qq{ /usr/bin/mpsched -s 2>&1 | perl -ne '/Processor Count\\D+(\\d+)/ and print "\$1"'});
+ return $cpu;
+}
+
+sub sct_qnx($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ # BUG: It is not known how to calculate this.
+
+ return $cpu;
+}
+
+sub sct_openserver($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ if(not $cpu->{'cores'}) {
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = ::qqx("/usr/sbin/psrinfo");
+ if($#psrinfo >= 0) {
+ $cpu->{'cores'} = $#psrinfo +1;
+ }
+ }
+ }
+ $cpu->{'sockets'} ||= $cpu->{'cores'};
+ return $cpu;
+}
+
+sub sct_irix($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||=
+ ::qqx(qq{ hinv | grep HZ | grep Processor | awk '{print \$1}' });
+ return $cpu;
+}
+
+sub sct_tru64($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||= ::qqx("sizer -pr");
+ $cpu->{'sockets'} ||= $cpu->{'cores'};
+ $cpu->{'threads'} ||= $cpu->{'cores'};
+
+ return $cpu;
+}
+
+sub sshcommand($) {
+ # Returns:
+ # $sshcommand = the command (incl options) to run when using ssh
+ my $self = shift;
+ if (not defined $self->{'sshcommand'}) {
+ ::die_bug("sshcommand not set");
+ }
+ return $self->{'sshcommand'};
+}
+
+sub local($) {
+ my $self = shift;
+ return $self->{'local'};
+}
+
+sub control_path_dir($) {
+ # Returns:
+ # $control_path_dir = dir of control path (for -M)
+ my $self = shift;
+ if(not defined $self->{'control_path_dir'}) {
+ $self->{'control_path_dir'} =
+ # Use $ENV{'TMPDIR'} as that is typically not
+ # NFS mounted.
+ # The file system must support UNIX domain sockets
+ File::Temp::tempdir($ENV{'TMPDIR'}
+ . "/ctrlpath-XXXX",
+ CLEANUP => 1);
+ }
+ return $self->{'control_path_dir'};
+}
+
+sub rsync_transfer_cmd($) {
+ # Command to run to transfer a file
+ # Input:
+ # $file = filename of file to transfer
+ # $workdir = destination dir
+ # Returns:
+ # $cmd = rsync command to run to transfer $file ("" if unreadable)
+ my $self = shift;
+ my $file = shift;
+ my $workdir = shift;
+ if(not -r $file) {
+ ::warning($file. " is not readable and will not be transferred.");
+ return "true";
+ }
+ my $rsync_destdir;
+ my $relpath = ($file !~ m:^/:) || ($file =~ m:/\./:); # Is the path relative or /./?
+ if($relpath) {
+ $rsync_destdir = ::shell_quote_file($workdir);
+ } else {
+ # rsync /foo/bar /
+ $rsync_destdir = "/";
+ }
+ $file = ::shell_quote_file($file);
+ # Make dir if it does not exist
+ return($self->wrap("mkdir -p $rsync_destdir") . " && " .
+ $self->rsync()." $file ".$self->{'host'}.":$rsync_destdir");
+}
+
+{
+ my $rsync_fix;
+ my $rsync_version;
+
+ sub rsync($) {
+ # rsync 3.1.x uses protocol 31 which is unsupported by 2.5.7.
+ # If the version >= 3.1.0: downgrade to protocol 30
+ # rsync 3.2.4 introduces a quoting bug: Add --old-args for that
+ # Returns:
+ # $rsync = "rsync" or "rsync --protocol 30 --old-args"
+ sub rsync_version {
+ if(not $rsync_version) {
+ my @out = `rsync --version`;
+ if(not @out) {
+ if(::which("rsync")) {
+ ::die_bug("'rsync --version' gave no output.");
+ } else {
+ ::error(255,"'rsync' is not in \$PATH.");
+ }
+ }
+ for (@out) {
+ # rsync version 3.1.3 protocol version 31
+ # rsync version v3.2.3 protocol version 31
+ if(/version v?(\d+)\.(\d+)(\.(\d+))?/) {
+ # 3.2.27 => 03.0227
+ $rsync_version = sprintf "%02d.%02d%02d",$1,$2,$4;
+ }
+ }
+ $rsync_version or
+ ::die_bug("Cannot figure out version of rsync: @out");
+ }
+ }
+
+ sub rsync_fixup {
+ # rsync 3.1.x uses protocol 31 which is unsupported by 2.5.7.
+ # If the version >= 3.1.0: downgrade to protocol 30
+ # Returns:
+ # $rsync = "rsync" or "rsync --protocol 30"
+ if(not $rsync_fix) {
+ rsync_version();
+ if($rsync_version >= 3.01) {
+ # Version 3.1.0 or later: Downgrade to protocol 30
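+	    # (e.g. 3.1.3 => "03.0103" and 3.0103 >= 3.01 numerically)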
+ $rsync_fix .= " --protocol 30";
+ }
+ if($rsync_version >= 3.0204) {
+ # Version 3.2.4 .. 3.2.8: --old-args
+ $rsync_fix .= " --old-args";
+ }
+ }
+ return $rsync_fix;
+ }
+ my $self = shift;
+
+ return "rsync".rsync_fixup()." ".$ENV{'PARALLEL_RSYNC_OPTS'}.
+ " -e".::Q($self->sshcmd());
+ }
+}
+
+sub cleanup_cmd($$$) {
+ # Command to run to remove the remote file
+ # Input:
+ # $file = filename to remove
+ # $workdir = destination dir
+ # Returns:
+ # $cmd = ssh command to run to remove $file and empty parent dirs
+ my $self = shift;
+ my $file = shift;
+ my $workdir = shift;
+ my $f = $file;
+ if($f =~ m:/\./:) {
+ # foo/bar/./baz/quux => workdir/baz/quux
+ # /foo/bar/./baz/quux => workdir/baz/quux
+ $f =~ s:.*/\./:$workdir/:;
+ } elsif($f =~ m:^[^/]:) {
+ # foo/bar => workdir/foo/bar
+ $f = $workdir."/".$f;
+ }
+ my @subdirs = split m:/:, ::dirname($f);
+ my @rmdir;
+ my $dir = "";
+ for(@subdirs) {
+ $dir .= $_."/";
+ unshift @rmdir, ::shell_quote_file($dir);
+ }
+ my $rmdir = @rmdir ? "rmdir @rmdir 2>/dev/null;" : "";
+ if(defined $opt::workdir and $opt::workdir eq "...") {
+ $rmdir .= "rm -rf " . ::shell_quote_file($workdir).';';
+ }
+ my $rmf = "sh -c ".
+ ::Q("rm -f ".::shell_quote_file($f)." 2>/dev/null;".$rmdir);
+ return $self->wrap(::Q($rmf));
+}
+
+package JobQueue;
+
+sub new($) {
+ my $class = shift;
+ my $commandref = shift;
+ my $read_from = shift;
+ my $context_replace = shift;
+ my $max_number_of_args = shift;
+ my $transfer_files = shift;
+ my $return_files = shift;
+ my $template_names = shift;
+ my $template_contents = shift;
+ my $commandlinequeue = CommandLineQueue->new
+ ($commandref, $read_from, $context_replace, $max_number_of_args,
+ $transfer_files, $return_files, $template_names, $template_contents);
+ my @unget = ();
+ return bless {
+ 'unget' => \@unget,
+ 'commandlinequeue' => $commandlinequeue,
+ 'this_job_no' => 0,
+ 'total_jobs' => undef,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ my $self = shift;
+
+ $self->{'this_job_no'}++;
+ if(@{$self->{'unget'}}) {
+ my $job = shift @{$self->{'unget'}};
+ # {%} may have changed, so flush computed values
+ $job && $job->flush_cache();
+ return $job;
+ } else {
+ my $commandline = $self->{'commandlinequeue'}->get();
+ if(defined $commandline) {
+ return Job->new($commandline);
+ } else {
+ $self->{'this_job_no'}--;
+ return undef;
+ }
+ }
+}
+
+sub unget($) {
+ my $self = shift;
+ unshift @{$self->{'unget'}}, @_;
+ $self->{'this_job_no'} -= @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}}) &&
+ $self->{'commandlinequeue'}->empty();
+ ::debug("run", "JobQueue->empty $empty ");
+ return $empty;
+}
+
+sub total_jobs($) {
+ my $self = shift;
+ if(not defined $self->{'total_jobs'}) {
+ if($opt::pipe and not $opt::tee) {
+ ::error(255,"--pipe is incompatible with --eta/--bar/--shuf");
+ }
+ if($opt::totaljobs) {
+ $self->{'total_jobs'} = $opt::totaljobs;
+ } elsif($opt::sqlworker) {
+ $self->{'total_jobs'} = $Global::sql->total_jobs();
+ } else {
+ my $record;
+ my @arg_records;
+ my $record_queue = $self->{'commandlinequeue'}{'arg_queue'};
+ my $start = time;
+ while($record = $record_queue->get()) {
+ push @arg_records, $record;
+ if(time - $start > 10) {
+ ::warning("Reading ".scalar(@arg_records).
+ " arguments took longer than 10 seconds.");
+ $opt::eta && ::warning("Consider removing --eta.");
+ $opt::bar && ::warning("Consider removing --bar.");
+ $opt::shuf && ::warning("Consider removing --shuf.");
+ last;
+ }
+ }
+ while($record = $record_queue->get()) {
+ push @arg_records, $record;
+ }
+ if($opt::shuf and @arg_records) {
+ my $i = @arg_records;
+ while (--$i) {
+ my $j = int rand($i+1);
+ @arg_records[$i,$j] = @arg_records[$j,$i];
+ }
+ }
+ $record_queue->unget(@arg_records);
+ # $#arg_records = number of remaining args - 1
+ # We have already read one arg record for this job (so add 1 more)
+ my $num_args = $#arg_records + 2;
+ # This job is not started yet, so subtract 1
+ my $started_jobs = $self->{'this_job_no'} - 1;
+ my $max_args = ::max($Global::max_number_of_args,1);
+ $self->{'total_jobs'} = ::ceil($num_args / $max_args)
+ + $started_jobs;
+ ::debug("init","Total jobs: ".$self->{'total_jobs'}.
+ " ($num_args/$max_args + $started_jobs)\n");
+ }
+ }
+ return $self->{'total_jobs'};
+}
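+
+# Worked example (illustrative, made-up numbers): if 7 argument records
+# remain in the queue after this job's record was consumed, then
+# $num_args = 7 + 1 = 8. With --max-args 3 that is ceil(8/3) = 3 jobs
+# still to run; with 1 job already started the total is 3 + 1 = 4:
+#
+#   perl -e 'use POSIX "ceil"; print ceil((7+1)/3) + 1, "\n"'   # => 4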
+
+sub flush_total_jobs($) {
+ # Unset total_jobs to force recomputing
+ my $self = shift;
+ ::debug("init","flush Total jobs: ");
+ $self->{'total_jobs'} = undef;
+}
+
+sub next_seq($) {
+ my $self = shift;
+
+ return $self->{'commandlinequeue'}->seq();
+}
+
+sub quote_args($) {
+ my $self = shift;
+ return $self->{'commandlinequeue'}->quote_args();
+}
+
+
+package Job;
+
+sub new($) {
+ my $class = shift;
+ my $commandlineref = shift;
+ return bless {
+ 'commandline' => $commandlineref, # CommandLine object
+ 'workdir' => undef, # --workdir
+ # filehandle for stdin (used for --pipe)
+ # filename for writing stdout to (used for --files)
+ # remaining data not sent to stdin (used for --pipe)
+ # tmpfiles to cleanup when job is done
+ 'unlink' => [],
+ # amount of data sent via stdin (used for --pipe)
+ 'transfersize' => 0, # size of files using --transfer
+ 'returnsize' => 0, # size of files using --return
+ 'pid' => undef,
+ # hash of { SSHLogins => number of times the command failed there }
+ 'failed' => undef,
+ 'sshlogin' => undef,
+ # The commandline wrapped with rsync and ssh
+ 'sshlogin_wrap' => undef,
+ 'exitstatus' => undef,
+ 'exitsignal' => undef,
+ # Timestamp for timeout if any
+ 'timeout' => undef,
+ 'virgin' => 1,
+ # Output used for SQL and CSV-output
+ 'output' => { 1 => [], 2 => [] },
+ 'halfline' => { 1 => [], 2 => [] },
+ }, ref($class) || $class;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ $self->{'commandline'}->flush_cache();
+}
+
+sub replaced($) {
+ my $self = shift;
+ $self->{'commandline'} or ::die_bug("commandline empty");
+ return $self->{'commandline'}->replaced();
+}
+
+{
+ my $next_available_row;
+
+ sub row($) {
+ my $self = shift;
+ if(not defined $self->{'row'}) {
+ if($opt::keeporder) {
+ $self->{'row'} = $self->seq();
+ } else {
+ $self->{'row'} = ++$next_available_row;
+ }
+ }
+ return $self->{'row'};
+ }
+}
+
+sub seq($) {
+ my $self = shift;
+ return $self->{'commandline'}->seq();
+}
+
+sub set_seq($$) {
+ my $self = shift;
+ return $self->{'commandline'}->set_seq(shift);
+}
+
+sub slot($) {
+ my $self = shift;
+ return $self->{'commandline'}->slot();
+}
+
+sub free_slot($) {
+ my $self = shift;
+ push @Global::slots, $self->slot();
+}
+
+{
+ my($cattail);
+
+ sub cattail() {
+ # Returns:
+ # $cattail = perl program for:
+ # cattail comfile "decomp-prg" wpid [file_stdin] [file_to_unlink]
+ # comfile = communication file: non-empty when the writer has
+ # opened the output file
+ # decomp-prg = decompress program
+ # wpid = pid of writer program
+ # file_stdin = file to decompress
+ # file_to_unlink = unlink this file
+ if(not $cattail) {
+ $cattail = q{
+ # cat followed by tail (possibly with rm as soon as the file is opened)
+ # If $writerpid dead: finish after this round
+ use Fcntl;
+ $|=1;
+
+ my ($comfile, $cmd, $writerpid, $read_file, $unlink_file) = @ARGV;
+ if($read_file) {
+ open(IN,"<",$read_file) || die("cattail: Cannot open $read_file");
+ } else {
+ *IN = *STDIN;
+ }
+ while(! -s $comfile) {
+ # Writer has not opened the buffer file, so we cannot remove it yet
+ $sleep = ($sleep < 30) ? ($sleep * 1.001 + 0.01) : ($sleep);
+ usleep($sleep);
+ }
+ # The writer and we have both opened the file, so it is safe to unlink it
+ unlink $unlink_file;
+ unlink $comfile;
+
+ my $first_round = 1;
+ my $flags;
+ fcntl(IN, F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
+ $flags |= O_NONBLOCK; # Add non-blocking to the flags
+ fcntl(IN, F_SETFL, $flags) || die $!; # Set the flags on the filehandle
+
+ while(1) {
+ # clear EOF
+ seek(IN,0,1);
+ my $writer_running = kill 0, $writerpid;
+ $read = sysread(IN,$buf,131072);
+ if($read) {
+ if($first_round) {
+ # Only start the command if there is any input to process
+ $first_round = 0;
+ open(OUT,"|-",$cmd) || die("cattail: Cannot run $cmd");
+ }
+
+ # Blocking print
+ while($buf) {
+ my $bytes_written = syswrite(OUT,$buf);
+ # syswrite may be interrupted by SIGHUP
+ substr($buf,0,$bytes_written) = "";
+ }
+ # Something printed: Wait less next time
+ $sleep /= 2;
+ } else {
+ if(eof(IN) and not $writer_running) {
+ # Writer dead: nothing more will ever be sent to the decompressor
+ close OUT;
+ exit;
+ }
+ # TODO This could probably be done more efficiently using select(2)
+ # Nothing read: Wait longer before next read
+ # Up to 100 milliseconds
+ $sleep = ($sleep < 100) ? ($sleep * 1.001 + 0.01) : ($sleep);
+ usleep($sleep);
+ }
+ }
+
+ sub usleep {
+ # Sleep this many milliseconds.
+ my $secs = shift;
+ select(undef, undef, undef, $secs/1000);
+ }
+ };
+ $cattail =~ s/#.*//mg;
+ $cattail =~ s/\s+/ /g;
+ }
+ return $cattail;
+ }
+}
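+
+# Invocation sketch (illustrative; matches the call in
+# filter_through_compress()): the generated one-liner is started as the
+# reading end of a pipe:
+#
+#   perl -e $cattail comfile decomp-prg writerpid [file] [file_to_unlink]
+#
+# It tails the still-growing buffer file and feeds it to the
+# decompressor until the writer pid is dead and EOF is reached.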
+
+sub openoutputfiles($) {
+ # Open files for STDOUT and STDERR
+ # Set file handles in $self->fh
+ my $self = shift;
+ my ($outfhw, $errfhw, $outname, $errname);
+
+ if($opt::latestline) {
+ # Do not save to files: Use non-blocking pipe
+ my ($outfhr, $errfhr);
+ pipe($outfhr, $outfhw) || die;
+ $self->set_fh(1,'w',$outfhw);
+ $self->set_fh(2,'w',$outfhw);
+ $self->set_fh(1,'r',$outfhr);
+ $self->set_fh(2,'r',$outfhr);
+ # Make it possible to read non-blocking from the pipe
+ for my $fdno (1,2) {
+ ::set_fh_non_blocking($self->fh($fdno,'r'));
+ }
+ # Return immediately because we do not need to set filenames
+ return;
+ } elsif($Global::linebuffer and not
+ ($opt::keeporder or $Global::files or $opt::results or
+ $opt::compress or $opt::compress_program or
+ $opt::decompress_program)) {
+ # Do not save to files: Use non-blocking pipe
+ my ($outfhr, $errfhr);
+ pipe($outfhr, $outfhw) || die;
+ pipe($errfhr, $errfhw) || die;
+ $self->set_fh(1,'w',$outfhw);
+ $self->set_fh(2,'w',$errfhw);
+ $self->set_fh(1,'r',$outfhr);
+ $self->set_fh(2,'r',$errfhr);
+ # Make it possible to read non-blocking from the pipe
+ for my $fdno (1,2) {
+ ::set_fh_non_blocking($self->fh($fdno,'r'));
+ }
+ # Return immediately because we do not need to set filenames
+ return;
+ } elsif($opt::results and not $Global::csvsep and not $Global::jsonout) {
+ # If --results, but not --results *.csv/*.tsv
+ my $out = $self->{'commandline'}->results_out();
+ my $seqname;
+ if($out eq $opt::results or $out =~ m:/$:) {
+ # $opt::results = simple string or ending in /
+ # => $out is a dir/
+ # prefix/name1/val1/name2/val2/seq
+ $seqname = $out."seq";
+ # prefix/name1/val1/name2/val2/stdout
+ $outname = $out."stdout";
+ # prefix/name1/val1/name2/val2/stderr
+ $errname = $out."stderr";
+ } else {
+ # $opt::results = replacement string not ending in /
+ # => $out is a file
+ $outname = $out;
+ $errname = "$out.err";
+ $seqname = "$out.seq";
+ }
+ ::write_or_exit($seqname, $self->seq());
+ $outfhw = ::open_or_exit("+>", $outname);
+ $errfhw = ::open_or_exit("+>", $errname);
+ $self->set_fh(1,"unlink","");
+ $self->set_fh(2,"unlink","");
+ if($opt::sqlworker) {
+ # Save the filenames in SQL table
+ $Global::sql->update("SET Stdout = ?, Stderr = ? ".
+ "WHERE Seq = ". $self->seq(),
+ $outname, $errname);
+ }
+ } elsif(not $opt::ungroup) {
+ # To group we create temporary files for STDOUT and STDERR
+ # To avoid cleanup later, unlink the files immediately (but keep them open)
+ if($Global::files) {
+ ($outfhw, $outname) = ::tmpfile(SUFFIX => ".par");
+ ($errfhw, $errname) = ::tmpfile(SUFFIX => ".par");
+ # --files => only remove stderr
+ $self->set_fh(1,"unlink","");
+ $self->set_fh(2,"unlink",$errname);
+ } else {
+ ($outfhw, $outname) = ::tmpfile(SUFFIX => ".par");
+ ($errfhw, $errname) = ::tmpfile(SUFFIX => ".par");
+ $self->set_fh(1,"unlink",$outname);
+ $self->set_fh(2,"unlink",$errname);
+ }
+ } else {
+ # --ungroup
+ open($outfhw,">&",$Global::fh{1}) || die;
+ open($errfhw,">&",$Global::fh{2}) || die;
+ # File name must be empty as it will otherwise be printed
+ $outname = "";
+ $errname = "";
+ $self->set_fh(1,"unlink",$outname);
+ $self->set_fh(2,"unlink",$errname);
+ }
+ # Set writing FD
+ $self->set_fh(1,'w',$outfhw);
+ $self->set_fh(2,'w',$errfhw);
+ $self->set_fh(1,'name',$outname);
+ $self->set_fh(2,'name',$errname);
+ if($opt::compress) {
+ $self->filter_through_compress();
+ } elsif(not $opt::ungroup) {
+ $self->grouped();
+ }
+ if($Global::linebuffer) {
+ # Make it possible to read non-blocking from
+ # the buffer files
+ # Used for --linebuffer with -k, --files, --res, --compress*
+ for my $fdno (1,2) {
+ ::set_fh_non_blocking($self->fh($fdno,'r'));
+ }
+ }
+}
+
+sub print_verbose_dryrun($) {
+ # If -v set: print command to stdout (possibly buffered)
+ # This must be done before starting the command
+ my $self = shift;
+ if($Global::verbose or $opt::dryrun) {
+ my $fh = $self->fh(1,"w");
+ if($Global::verbose <= 1) {
+ print $fh $self->replaced(),"\n";
+ } else {
+ # Verbose level > 1: Print the fully wrapped command (incl. ssh/rsync)
+ print $fh $self->wrapped(),"\n";
+ }
+ }
+ if($opt::sqlworker) {
+ $Global::sql->update("SET Command = ? WHERE Seq = ".$self->seq(),
+ $self->replaced());
+ }
+}
+
+sub add_rm($) {
+ # Files to remove when job is done
+ my $self = shift;
+ push @{$self->{'unlink'}}, @_;
+}
+
+sub get_rm($) {
+ # Files to remove when job is done
+ my $self = shift;
+ return @{$self->{'unlink'}};
+}
+
+sub cleanup($) {
+ # Remove files when job is done
+ my $self = shift;
+ unlink $self->get_rm();
+ delete @Global::unlink{$self->get_rm()};
+}
+
+sub grouped($) {
+ my $self = shift;
+ # Set reading FD if using --group (--ungroup does not need)
+ for my $fdno (1,2) {
+ # Re-open the file for reading
+ # so fdw can be closed separately
+ # and fdr can be seeked separately (for --line-buffer)
+ my $fdr = ::open_or_exit("<", $self->fh($fdno,'name'));
+ $self->set_fh($fdno,'r',$fdr);
+ # Unlink if not debugging
+ $Global::debug or ::rm($self->fh($fdno,"unlink"));
+ }
+}
+
+sub empty_input_wrapper($) {
+ # If no input: exit(0)
+ # If some input: Pass input as input to command on STDIN
+ # This avoids starting the command if there is no input.
+ # Input:
+ # $command = command to pipe data to
+ # Returns:
+ # $wrapped_command = the wrapped command
+ my $command = shift;
+ # The optimal block size differs
+ # It has been measured on:
+ # AMD 6376: 59000
+ # <big ppar --pipe --block 100M --test $1 -j1 'cat >/dev/null';
+ my $script =
+ ::spacefree(0,q{
+ if(sysread(STDIN, $buf, 1)) {
+ open($fh, "|-", @ARGV) || die;
+ syswrite($fh, $buf);
+ while($read = sysread(STDIN, $buf, 59000)) {
+ syswrite($fh, $buf);
+ }
+ close $fh;
+ exit ($?&127 ? 128+($?&127) : 1+$?>>8)
+ }
+ });
+ ::debug("run",'Empty wrap: perl -e '.::Q($script)."\n");
+ if($Global::cshell
+ and
+ length $command > 499) {
+ # csh does not like words longer than 1000 (499 quoted)
+ # $command = "perl -e '".base64_zip_eval()."' ".
+ # join" ",string_zip_base64(
+ # 'exec "'.::perl_quote_scalar($command).'"');
+ return 'perl -e '.::Q($script)." ".
+ base64_wrap("exec \"$Global::shell\",'-c',\"".
+ ::perl_quote_scalar($command).'"');
+ } else {
+ return 'perl -e '.::Q($script)." ".
+ $Global::shell." -c ".::Q($command);
+ }
+}
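+
+# Minimal sketch (illustrative) of the embedded script's core idea:
+# read 1 byte first; only if that succeeds is the real command started
+# and the byte plus the rest of STDIN forwarded to it ("wc -c" stands
+# in for the real command):
+#
+#   if(sysread(STDIN, $buf, 1)) {
+#       open(my $fh, "|-", "wc", "-c") || die;
+#       syswrite($fh, $buf);
+#       syswrite($fh, $buf) while sysread(STDIN, $buf, 59000);
+#       close $fh;
+#   }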
+
+sub filter_through_compress($) {
+ my $self = shift;
+ # Send stdout to stdin for $opt::compress_program(1)
+ # Send stderr to stdin for $opt::compress_program(2)
+ # cattail get pid: $pid = $self->fh($fdno,'rpid');
+ my $cattail = cattail();
+
+ for my $fdno (1,2) {
+ # Make a communication file.
+ my ($fh, $comfile) = ::tmpfile(SUFFIX => ".pac");
+ close $fh;
+ # Compressor: (echo > $comfile; compress pipe) > output
+ # When the echo is written to $comfile,
+ # it is known that the output file has been opened,
+ # and the output file can then be removed by the decompressor.
+ # empty_input_wrapper is needed for plzip
+ my $qcom = ::Q($comfile);
+ my $wpid = open(my $fdw,"|-", "(echo > $qcom; ".
+ empty_input_wrapper($opt::compress_program).") >".
+ ::Q($self->fh($fdno,'name'))) || die $?;
+ $self->set_fh($fdno,'w',$fdw);
+ $self->set_fh($fdno,'wpid',$wpid);
+ # Decompressor: open output; -s $comfile > 0: rm $comfile output;
+ # decompress output > stdout
+ my $rpid = open(my $fdr, "-|", "perl", "-e", $cattail, $comfile,
+ $opt::decompress_program, $wpid,
+ $self->fh($fdno,'name'),$self->fh($fdno,'unlink'))
+ || die $?;
+ $self->set_fh($fdno,'r',$fdr);
+ $self->set_fh($fdno,'rpid',$rpid);
+ }
+}
+
+sub set_fh($$$$) {
+ # Set file handle
+ my ($self, $fd_no, $key, $fh) = @_;
+ $self->{'fd'}{$fd_no,$key} = $fh;
+}
+
+sub fh($) {
+ # Get file handle
+ my ($self, $fd_no, $key) = @_;
+ return $self->{'fd'}{$fd_no,$key};
+}
+
+sub write_block($) {
+ my $self = shift;
+ my $stdin_fh = $self->fh(0,"w");
+ if(fork()) {
+ # Close in parent
+ close $stdin_fh;
+ } else {
+ # If writing is to a closed pipe:
+ # Do not call signal handler, but let nothing be written
+ local $SIG{PIPE} = undef;
+
+ for my $part (
+ grep { defined $_ }
+ $self->{'header'},$self->{'block'}) {
+ # syswrite may not write all in one go,
+ # so make sure everything is written.
+ my $written;
+ while($written = syswrite($stdin_fh,$$part)) {
+ substr($$part,0,$written) = "";
+ }
+ }
+ close $stdin_fh;
+ exit(0);
+ }
+}
+
+sub write($) {
+ my $self = shift;
+ my $remaining_ref = shift;
+ my $stdin_fh = $self->fh(0,"w");
+
+ my $len = length $$remaining_ref;
+ # syswrite may not write all in one go,
+ # so make sure everything is written.
+ my $written;
+
+ # If writing is to a closed pipe:
+ # Do not call signal handler, but let nothing be written
+ local $SIG{PIPE} = undef;
+ while($written = syswrite($stdin_fh,$$remaining_ref)){
+ substr($$remaining_ref,0,$written) = "";
+ }
+}
+
+sub set_block($$$$$$) {
+ # Copy stdin buffer from $block_ref up to $endpos
+ # Prepend with $header_ref if virgin (i.e. not --roundrobin)
+ # Remove $recstart and $recend if needed
+ # Input:
+ # $header_ref = ref to $header to prepend
+ # $buffer_ref = ref to $buffer containing the block
+ # $endpos = length of $block to pass on
+ # $recstart = --recstart regexp
+ # $recend = --recend regexp
+ # Returns:
+ # N/A
+ my $self = shift;
+ my ($header_ref,$buffer_ref,$endpos,$recstart,$recend) = @_;
+ $self->{'header'} = $header_ref;
+ if($opt::roundrobin or $opt::remove_rec_sep or defined $opt::retries) {
+ my $a = "";
+ if(($opt::roundrobin or defined $opt::retries) and $self->virgin()) {
+ $a .= $$header_ref;
+ }
+ # Job is no longer virgin
+ $self->set_virgin(0);
+ # Make a full copy because $buffer will change
+ $a .= substr($$buffer_ref,0,$endpos);
+ $self->{'block'} = \$a;
+ if($opt::remove_rec_sep) {
+ remove_rec_sep($self->{'block'},$recstart,$recend);
+ }
+ $self->{'block_length'} = length ${$self->{'block'}};
+ } else {
+ $self->set_virgin(0);
+ for(substr($$buffer_ref,0,$endpos)) {
+ $self->{'block'} = \$_;
+ }
+ $self->{'block_length'} = $endpos + length ${$self->{'header'}};
+ }
+ $self->{'block_pos'} = 0;
+ $self->add_transfersize($self->{'block_length'});
+}
+
+sub block_ref($) {
+ my $self = shift;
+ return $self->{'block'};
+}
+
+sub block_length($) {
+ my $self = shift;
+ return $self->{'block_length'};
+}
+
+sub remove_rec_sep($) {
+ # Remove --recstart and --recend from $block
+ # Input:
+ # $block_ref = reference to $block to be modified
+ # $recstart = --recstart
+ # $recend = --recend
+ # Uses:
+ # $opt::regexp = Are --recstart/--recend regexp?
+ # Returns:
+ # N/A
+ my ($block_ref,$recstart,$recend) = @_;
+ # Remove record separator
+ if($opt::regexp) {
+ $$block_ref =~ s/$recend$recstart//gom;
+ $$block_ref =~ s/^$recstart//os;
+ $$block_ref =~ s/$recend$//os;
+ } else {
+ $$block_ref =~ s/\Q$recend$recstart\E//gom;
+ $$block_ref =~ s/^\Q$recstart\E//os;
+ $$block_ref =~ s/\Q$recend\E$//os;
+ }
+}
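+
+# Illustrative example (non-regexp branch): with --recstart ">" and
+# --recend "\n" the separators between and around records are removed:
+#
+#   my $block = ">a\n>b\n";
+#   remove_rec_sep(\$block, ">", "\n");   # $block is now "ab"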
+
+sub non_blocking_write($) {
+ my $self = shift;
+ my $something_written = 0;
+
+ my $in = $self->fh(0,"w");
+ my $rv = syswrite($in,
+ substr(${$self->{'block'}},$self->{'block_pos'}));
+ if (!defined($rv) && $! == ::EAGAIN()) {
+ # write would block: nothing was written
+ $something_written = 0;
+ # avoid triggering auto expanding block size
+ $Global::no_autoexpand_block ||= 1;
+ } elsif ($self->{'block_pos'}+$rv != $self->{'block_length'}) {
+ # incomplete write
+ # Remove the written part
+ $self->{'block_pos'} += $rv;
+ $something_written = $rv;
+ } else {
+ # successfully wrote everything
+ # Empty block to free memory
+ my $a = "";
+ $self->set_block(\$a,\$a,0,"","");
+ $something_written = $rv;
+ }
+ ::debug("pipe", "Non-block: ", $something_written);
+ return $something_written;
+}
+
+
+sub virgin($) {
+ my $self = shift;
+ return $self->{'virgin'};
+}
+
+sub set_virgin($$) {
+ my $self = shift;
+ $self->{'virgin'} = shift;
+}
+
+sub pid($) {
+ my $self = shift;
+ return $self->{'pid'};
+}
+
+sub set_pid($$) {
+ my $self = shift;
+ $self->{'pid'} = shift;
+}
+
+sub starttime($) {
+ # Returns:
+ # UNIX-timestamp this job started
+ my $self = shift;
+ return sprintf("%.3f",$self->{'starttime'});
+}
+
+sub set_starttime($@) {
+ my $self = shift;
+ my $starttime = shift || ::now();
+ $self->{'starttime'} = $starttime;
+ $opt::sqlworker and
+ $Global::sql->update("SET Starttime = ? WHERE Seq = ".$self->seq(),
+ $starttime);
+}
+
+sub runtime($) {
+ # Returns:
+ # Run time in seconds with 3 decimals
+ my $self = shift;
+ return sprintf("%.3f",
+ int(($self->endtime() - $self->starttime())*1000)/1000);
+}
+
+sub endtime($) {
+ # Returns:
+ # UNIX-timestamp this job ended
+ # 0 if not ended yet
+ my $self = shift;
+ return ($self->{'endtime'} || 0);
+}
+
+sub set_endtime($$) {
+ my $self = shift;
+ my $endtime = shift;
+ $self->{'endtime'} = $endtime;
+ $opt::sqlworker and
+ $Global::sql->update("SET JobRuntime = ? WHERE Seq = ".$self->seq(),
+ $self->runtime());
+}
+
+sub is_timedout($) {
+ # Is the job timedout?
+ # Input:
+ # $delta_time = time that the job may run
+ # Returns:
+ # True or false
+ my $self = shift;
+ my $delta_time = shift;
+ return time > $self->{'starttime'} + $delta_time;
+}
+
+sub kill($) {
+ my $self = shift;
+ $self->set_exitstatus(-1);
+ ::kill_sleep_seq($self->pid());
+}
+
+sub suspend($) {
+ my $self = shift;
+ my @pgrps = map { -$_ } $self->pid();
+ kill "STOP", @pgrps;
+ $self->set_suspended(1);
+}
+
+sub set_suspended($$) {
+ my $self = shift;
+ $self->{'suspended'} = shift;
+}
+
+sub suspended($) {
+ my $self = shift;
+ return $self->{'suspended'};
+}
+
+sub resume($) {
+ my $self = shift;
+ my @pgrps = map { -$_ } $self->pid();
+ kill "CONT", @pgrps;
+ $self->set_suspended(0);
+}
+
+sub failed($) {
+ # return number of times failed for this $sshlogin
+ # Input:
+ # $sshlogin
+ # Returns:
+ # Number of times failed for $sshlogin
+ my $self = shift;
+ my $sshlogin = shift;
+ return $self->{'failed'}{$sshlogin};
+}
+
+sub failed_here($) {
+ # return number of times failed for the current $sshlogin
+ # Returns:
+ # Number of times failed for this sshlogin
+ my $self = shift;
+ return $self->{'failed'}{$self->sshlogin()};
+}
+
+sub add_failed($) {
+ # increase the number of times failed for this $sshlogin
+ my $self = shift;
+ my $sshlogin = shift;
+ $self->{'failed'}{$sshlogin}++;
+}
+
+sub add_failed_here($) {
+ # increase the number of times failed for the current $sshlogin
+ my $self = shift;
+ $self->{'failed'}{$self->sshlogin()}++;
+}
+
+sub reset_failed($) {
+ # reset (forget) the number of times failed for this $sshlogin
+ my $self = shift;
+ my $sshlogin = shift;
+ delete $self->{'failed'}{$sshlogin};
+}
+
+sub reset_failed_here($) {
+ # reset (forget) the number of times failed for the current $sshlogin
+ my $self = shift;
+ delete $self->{'failed'}{$self->sshlogin()};
+}
+
+sub min_failed($) {
+ # Returns:
+ # the number of sshlogins this command has failed on
+ # the minimal number of times this command has failed
+ my $self = shift;
+ my $min_failures =
+ ::min(map { $self->{'failed'}{$_} } keys %{$self->{'failed'}});
+ my $number_of_sshlogins_failed_on = scalar keys %{$self->{'failed'}};
+ return ($number_of_sshlogins_failed_on,$min_failures);
+}
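+
+# Illustrative example: if the job failed 2 times on host A and 1 time
+# on host B:
+#
+#   my ($hosts, $min) = $job->min_failed();   # ($hosts, $min) == (2, 1)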
+
+sub total_failed($) {
+ # Returns:
+ # $total_failures = the number of times this command has failed
+ my $self = shift;
+ my $total_failures = 0;
+ for (values %{$self->{'failed'}}) {
+ $total_failures += $_;
+ }
+ return $total_failures;
+}
+
+{
+ my $script;
+
+ sub postpone_exit_and_cleanup {
+ # Command to remove files and dirs (given as args) without
+ # affecting the exit value in $?/$status.
+ if(not $script) {
+ $script = "perl -e '".
+ ::spacefree(0,q{
+ $bash=shift;
+ $csh=shift;
+ for(@ARGV){
+ unlink;
+ rmdir;
+ }
+ if($bash=~s/(\d+)h/$1/) {
+ exit $bash;
+ }
+ exit $csh;
+ }).
+ # `echo \$?h` is needed to make fish not complain
+ "' ".'"`echo \\\\\\\\\$?h`" "$status" ';
+ }
+ return $script
+ }
+}
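+
+# Worked example (illustrative): bash expands `echo $?h` to e.g. "1h",
+# which matches /(\d+)h/, so the wrapper exits 1 no matter whether the
+# rm/rmdir calls succeeded. csh expands $?h to "0" or "1" (is $h set?)
+# with no trailing "h", so the match fails and the csh $status argument
+# is used as the exit value instead.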
+
+{
+ my $script;
+
+ sub fifo_wrap() {
+ # Script to create a fifo, run a command on the fifo
+ # while copying STDIN to the fifo, and finally
+ # remove the fifo and return the exit code of the command.
+ if(not $script) {
+ # {} == $PARALLEL_TMP for --fifo
+ # To make it csh compatible a wrapper needs to:
+ # * mkfifo
+ # * spawn $command &
+ # * cat > fifo
+ # * waitpid to get the exit code from $command
+ # * be less than 1000 chars long
+
+ # The optimal block size differs
+ # It has been measured on:
+ # AMD 6376: 4095
+ # ppar -a big --pipepart --block -1 --test $1 --fifo 'cat {} >/dev/null';
+ $script = "perl -e '".
+ (::spacefree
+ (0, q{
+ ($s,$c,$f) = @ARGV;
+ # mkfifo $PARALLEL_TMP
+ system "mkfifo", $f;
+ # spawn $shell -c $command &
+ $pid = fork || exec $s, "-c", $c;
+ open($o,">",$f) || die $!;
+ # cat > $PARALLEL_TMP
+ while(sysread(STDIN,$buf,4095)){
+ syswrite $o, $buf;
+ }
+ close $o;
+ # waitpid to get the exit code from $command
+ waitpid $pid,0;
+ # Cleanup
+ unlink $f;
+ exit $?/256;
+ }))."'";
+ }
+ return $script;
+ }
+}
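+
+# Usage sketch (illustrative; matches the call in wrapped()): the
+# generated one-liner is invoked as
+#
+#   <fifo_wrap()> $shell <quoted command> "$PARALLEL_TMP"
+#
+# i.e. $s = the shell, $c = the command reading the fifo, and
+# $f = the fifo path computed on the (possibly remote) machine.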
+
+sub wrapped($) {
+ # Wrap command with:
+ # * --shellquote
+ # * --nice
+ # * --cat
+ # * --fifo
+ # * --sshlogin
+ # * --pipepart (@Global::cat_prepends)
+ # * --tee (@Global::cat_prepends)
+ # * --pipe
+ # * --tmux
+ # The ordering of the wrapping is important:
+ # * --nice/--cat/--fifo should be done on the remote machine
+ # * --pipepart/--pipe should be done on the local machine inside --tmux
+ # Uses:
+ # @opt::shellquote
+ # $opt::nice
+ # $Global::shell
+ # $opt::cat
+ # $opt::fifo
+ # @Global::cat_prepends
+ # $opt::pipe
+ # $opt::tmux
+ # Returns:
+ # $self->{'wrapped'} = the command wrapped with the above
+ my $self = shift;
+ if(not defined $self->{'wrapped'}) {
+ my $command = $self->replaced();
+ # Bug in Bash and Ksh when running multiline aliases
+ # This will force them to run correctly, but will fail in
+ # tcsh so we do not do it.
+ # $command .= "\n\n";
+ if(@opt::shellquote) {
+ # Quote one time for each --shellquote
+ my $c = $command;
+ for(@opt::shellquote) {
+ $c = ::Q($c);
+ }
+ # Prepend "echo" (it is written in perl because
+ # quoting '-e' causes problem in some versions and
+ # csh's version does something wrong)
+ $command = q(perl -e '$,=" "; print "@ARGV\n";' -- ) . ::Q($c);
+ }
+ if($Global::parallel_env) {
+ # If $PARALLEL_ENV set, put that in front of the command
+ # Used for env_parallel.*
+ if($Global::shell =~ /zsh/) {
+ # The extra 'eval' will make aliases work, too
+ $command = $Global::parallel_env."\n".
+ "eval ".::Q($command);
+ } else {
+ $command = $Global::parallel_env."\n".$command;
+ }
+ }
+ if($opt::cat) {
+ # In '--cat' and '--fifo' {} == $PARALLEL_TMP.
+ # This is to make it possible to compute $PARALLEL_TMP on
+ # the fly when running remotely.
+ # $ENV{PARALLEL_TMP} is set in the remote wrapper before
+ # the command is run.
+ #
+ # Prepend 'cat > $PARALLEL_TMP;'
+ # Append 'unlink $PARALLEL_TMP' without affecting $?
+ $command =
+ 'cat > "$PARALLEL_TMP";'.
+ $command.";". postpone_exit_and_cleanup().
+ '"$PARALLEL_TMP"';
+ } elsif($opt::fifo) {
+ # Prepend fifo-wrapper. In essence:
+ # mkfifo {}
+ # ( $command ) &
+ # # $command must read {}, otherwise this 'cat' will block
+ # cat > {};
+ # wait; rm {}
+ # without affecting $?
+ $command = fifo_wrap(). " ".
+ $Global::shell. " ". ::Q($command). ' "$PARALLEL_TMP"'. ';';
+ }
+ # Wrap with ssh + transferring of files
+ $command = $self->sshlogin_wrap($command);
+ if(@Global::cat_prepends) {
+ # --pipepart: prepend:
+ # < /tmp/foo perl -e 'while(@ARGV) {
+ # sysseek(STDIN,shift,0) || die; $left = shift;
+ # while($read = sysread(STDIN,$buf, ($left > 60800 ? 60800 : $left))){
+ # $left -= $read; syswrite(STDOUT,$buf);
+ # }
+ # }' 0 0 0 11 |
+ #
+ # --pipepart --tee: prepend:
+ # < dash-a-file
+ #
+ # --pipe --tee: wrap:
+ # (rm fifo; ... ) < fifo
+ #
+ # --pipe --shard X:
+ # (rm fifo; ... ) < fifo
+ $command = (shift @Global::cat_prepends). "($command)".
+ (shift @Global::cat_appends);
+ } elsif($opt::pipe and not $opt::roundrobin) {
+ # Wrap with EOF-detector to avoid starting $command if EOF.
+ $command = empty_input_wrapper($command);
+ }
+ if($opt::tmux) {
+ # Wrap command with 'tmux'
+ $command = $self->tmux_wrap($command);
+ }
+ if($Global::cshell
+ and
+ length $command > 499) {
+ # csh does not like words longer than 1000 (499 quoted)
+ # $command = "perl -e '".base64_zip_eval()."' ".
+ # join" ",string_zip_base64(
+ # 'exec "'.::perl_quote_scalar($command).'"');
+ $command = base64_wrap("exec \"$Global::shell\",'-c',\"".
+ ::perl_quote_scalar($command).'"');
+ }
+ $self->{'wrapped'} = $command;
+ }
+ return $self->{'wrapped'};
+}
+
+sub set_sshlogin($$) {
+ my $self = shift;
+ my $sshlogin = shift;
+ $self->{'sshlogin'} = $sshlogin;
+ delete $self->{'sshlogin_wrap'}; # If sshlogin is changed the wrap is wrong
+ delete $self->{'wrapped'};
+
+ if($opt::sqlworker) {
+ # Identify worker as --sqlworker often runs on different machines
+ # If local: Use hostname
+ my $host = $sshlogin->local() ? ::hostname() : $sshlogin->host();
+ $Global::sql->update("SET Host = ? WHERE Seq = ".$self->seq(), $host);
+ }
+}
+
+sub sshlogin($) {
+ my $self = shift;
+ return $self->{'sshlogin'};
+}
+
+sub string_base64($) {
+ # Base64 encode strings into 1000 byte blocks.
+ # 1000 bytes is the largest word size csh supports
+ # Input:
+ # @strings = to be encoded
+ # Returns:
+ # @base64 = 1000 byte block
+ $Global::use{"MIME::Base64"} ||= eval "use MIME::Base64; 1;";
+ my @base64 = unpack("(A1000)*",encode_base64((join"",@_),""));
+ return @base64;
+}
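+
+# Illustrative example: 1500 input bytes base64-encode to 2000 bytes,
+# which unpack("(A1000)*") splits into two 1000-byte words - the
+# longest word csh accepts:
+#
+#   my @blocks = string_base64("x" x 1500);   # scalar(@blocks) == 2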
+
+sub string_zip_base64($) {
+ # Pipe string through 'bzip2 -9' and base64 encode it into 1000
+ # byte blocks.
+ # 1000 bytes is the largest word size csh supports
+ # Zipping will make exporting big environments work, too
+ # Input:
+ # @strings = to be encoded
+ # Returns:
+ # @base64 = 1000 byte block
+ my($zipin_fh, $zipout_fh,@base64);
+ ::open3($zipin_fh,$zipout_fh,">&STDERR","bzip2 -9");
+ if(fork) {
+ close $zipin_fh;
+ $Global::use{"MIME::Base64"} ||= eval "use MIME::Base64; 1;";
+ # Split base64 encoded into 1000 byte blocks
+ @base64 = unpack("(A1000)*",encode_base64((join"",<$zipout_fh>),""));
+ close $zipout_fh;
+ } else {
+ close $zipout_fh;
+ print $zipin_fh @_;
+ close $zipin_fh;
+ exit;
+ }
+ ::debug("base64","Orig:@_\nAs bzip2 base64:@base64\n");
+ return @base64;
+}
+
+sub base64_zip_eval() {
+ # Script that:
+ # * reads base64 strings from @ARGV
+ # * decodes them
+ # * pipes through 'bzip2 -dc'
+ # * evals the result
+ # Reverse of string_zip_base64 + eval
+ # Will be wrapped in ' so single quote is forbidden
+ # Returns:
+ # $script = 1-liner for perl -e
+ my $script = ::spacefree(0,q{
+ @GNU_Parallel = split /_/, "use_IPC::Open3;_use_MIME::Base64";
+ eval"@GNU_Parallel";
+ $chld = $SIG{CHLD};
+ $SIG{CHLD} = "IGNORE";
+ # Search for bzip2. Not found => use default path
+ my $zip = (grep { -x $_ } "/usr/local/bin/bzip2")[0] || "bzip2";
+ # $in = stdin on $zip, $out = stdout from $zip
+ # Forget my() to save chars for csh
+ # my($in, $out,$eval);
+ open3($in,$out,">&STDERR",$zip,"-dc");
+ if(my $perlpid = fork) {
+ close $in;
+ $eval = join "", <$out>;
+ close $out;
+ } else {
+ close $out;
+ # Pipe decoded base64 into 'bzip2 -dc'
+ print $in (decode_base64(join"",@ARGV));
+ close $in;
+ exit;
+ }
+ wait;
+ $SIG{CHLD} = $chld;
+ eval $eval;
+ });
+ ::debug("base64",$script,"\n");
+ return $script;
+}
+
+sub base64_wrap($) {
+ # base64 encode Perl code
+ # Split it into chunks of < 1000 bytes
+ # Prepend it with a decoder that eval's it
+ # Input:
+ # $eval_string = Perl code to run
+ # Returns:
+ # $shell_command = shell command that runs $eval_string
+ my $eval_string = shift;
+ return
+ "perl -e ".
+ ::Q(base64_zip_eval())." ".
+ join" ",::shell_quote(string_zip_base64($eval_string));
+}
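+
+# Usage sketch (illustrative): wrap a Perl snippet that would exceed
+# csh's 1000-char word limit:
+#
+#   my $cmd = base64_wrap('print "hello\n";' x 100);
+#   # $cmd = perl -e '<decoder>' <base64 blocks of at most 1000 bytes>
+#   # system($cmd) prints "hello" 100 times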
+
+sub base64_eval($) {
+ # Script that:
+ # * reads base64 strings from @ARGV
+ # * decodes them
+ # * evals the result
+ # Reverse of string_base64 + eval
+ # Will be wrapped in ' so single quote is forbidden.
+ # Spaces are stripped so spaces cannot be significant.
+ # The funny 'use IPC::Open3'-syntax is to avoid spaces and
+ # to make it clear that this is a GNU Parallel command
+ # when looking at the process table.
+ # Returns:
+ # $script = 1-liner for perl -e
+ my $script = ::spacefree(0,q{
+ @GNU_Parallel=("use","IPC::Open3;","use","MIME::Base64");
+ eval "@GNU_Parallel";
+ my $eval = decode_base64(join"",@ARGV);
+ eval $eval;
+ });
+ ::debug("base64",$script,"\n");
+ return $script;
+}
+
+sub sshlogin_wrap($) {
+ # Wrap the command with the commands needed to run remotely
+ # Input:
+ # $command = command to run
+ # Returns:
+ # $self->{'sshlogin_wrap'} = command wrapped with ssh+transfer commands
+ sub monitor_parent_sshd_script {
+ # This script is to solve the problem of
+ # * not mixing STDERR and STDOUT
+ # * terminating with ctrl-c
+ # If its parent is ssh: all good
+ # If its parent is init(1): ssh died, so kill children
+ my $monitor_parent_sshd_script;
+
+ if(not $monitor_parent_sshd_script) {
+ $monitor_parent_sshd_script =
+ # This will be packed in ', so only use "
+ ::spacefree
+ (0,'$shell = "'.($ENV{'PARALLEL_SHELL'} || '$ENV{SHELL}').'";'.
+ '$tmpdir = $ENV{"TMPDIR"} || "'.
+ ::perl_quote_scalar($ENV{'PARALLEL_REMOTE_TMPDIR'}).'";'.
+ '$nice = '.$opt::nice.';'.
+ '$termseq = "'.$opt::termseq.'";'.
+ # }
+ q{
+ # Check that $tmpdir is writable
+ -w $tmpdir ||
+ die("$tmpdir\040is\040not\040writable.".
+ "\040Set\040PARALLEL_REMOTE_TMPDIR");
+ # Set $PARALLEL_TMP to a non-existent file name in $TMPDIR
+ do {
+ $ENV{PARALLEL_TMP} = $tmpdir."/par".
+ join"", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $ENV{PARALLEL_TMP});
+ # Set $script to a non-existent file name in $TMPDIR
+ do {
+ $script = $tmpdir."/parallel\043$ENV{PARALLEL_SEQ}_".
+ join"", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $script);
+ # Create a script from the hex code
+ # that removes itself and runs the commands
+ open($fh,">",$script) || die;
+ # \040 = space - but we remove spaces in the script
+ # ' needed due to rc-shell
+ print($fh("rm\040\'$script\'\n",$bashfunc.$cmd));
+ close $fh;
+ my $parent = getppid;
+ my $done = 0;
+ $SIG{CHLD} = sub { $done = 1; };
+ $pid = fork;
+ unless($pid) {
+ # Make own process group to be able to kill HUP it later
+ eval { setpgrp };
+ # Set nice value
+ eval { setpriority(0,0,$nice) };
+ # Run the script
+ exec($shell,$script);
+ die("exec\040failed: $!");
+ }
+ while((not $done) and (getppid == $parent)) {
+ # Parent pid is not changed, so sshd is alive
+ # Exponential sleep up to 1 sec
+ $s = $s < 1 ? 0.001 + $s * 1.03 : $s;
+ select(undef, undef, undef, $s);
+ }
+ if(not $done) {
+ # sshd is dead: User pressed Ctrl-C
+ # Kill as per --termseq
+ my @term_seq = split/,/,$termseq;
+ if(not @term_seq) {
+ @term_seq = ("TERM",200,"TERM",100,"TERM",50,"KILL",25);
+ }
+ while(@term_seq && kill(0,-$pid)) {
+ kill(shift @term_seq, -$pid);
+ select(undef, undef, undef, (shift @term_seq)/1000);
+ }
+ }
+ wait;
+ exit ($?&127 ? 128+($?&127) : 1+$?>>8)
+ });
+ }
+ return $monitor_parent_sshd_script;
+ }
+
+ sub vars_to_export {
+ # Uses:
+ # @opt::env
+ my @vars = ("parallel_bash_environment");
+ for my $varstring (@opt::env) {
+ # Split up --env VAR1,VAR2
+ push @vars, split /,/, $varstring;
+ }
+ for (@vars) {
+ if(-r $_ and not -d) {
+ # Read as environment definition; see bug #44041
+ # TODO parse this
+ $Global::envdef = ::slurp_or_exit($_);
+ }
+ }
+ if(grep { /^_$/ } @vars) {
+ local $/ = "\n";
+ # --env _
+ # Include all vars that are not in a clean environment
+ if(open(my $vars_fh, "<", $Global::config_dir . "/ignored_vars")) {
+ my @ignore = <$vars_fh>;
+ chomp @ignore;
+ my %ignore;
+ @ignore{@ignore} = @ignore;
+ close $vars_fh;
+ push @vars, grep { not defined $ignore{$_} } keys %ENV;
+ @vars = grep { not /^_$/ } @vars;
+ } else {
+ ::error(255,"Run '$Global::progname --record-env' ".
+ "in a clean environment first.");
+ }
+ }
+ # Duplicate vars as BASH functions to include post-shellshock functions (v1+v2)
+ # So --env myfunc should look for BASH_FUNC_myfunc() and BASH_FUNC_myfunc%%
+
+ push(@vars, "PARALLEL_PID", "PARALLEL_SEQ",
+ "PARALLEL_SSHLOGIN", "PARALLEL_SSHHOST",
+ "PARALLEL_HOSTGROUPS", "PARALLEL_ARGHOSTGROUPS",
+ "PARALLEL_JOBSLOT", $opt::process_slot_var,
+ map { ("BASH_FUNC_$_()", "BASH_FUNC_$_%%") } @vars);
+ # Keep only defined variables
+ return grep { defined($ENV{$_}) } @vars;
+ }
+
+ sub env_as_eval {
+ # Returns:
+ # $eval = '$ENV{"..."}=...; ...'
+ my @vars = vars_to_export();
+ my $csh_friendly = not grep { /\n/ } @ENV{@vars};
+ my @bash_functions = grep { substr($ENV{$_},0,4) eq "() {" } @vars;
+ my @non_functions = (grep { !/PARALLEL_ENV/ }
+ grep { substr($ENV{$_},0,4) ne "() {" } @vars);
+
+ # eval of $envset will set %ENV
+ my $envset = join"", map {
+ '$ENV{"'.::perl_quote_scalar($_).'"}="'.
+ ::perl_quote_scalar($ENV{$_}).'";'; } @non_functions;
+
+ # running @bashfunc on the command line will set the functions
+ my @bashfunc = map {
+ my $v=$_;
+ s/BASH_FUNC_(.*)(\(\)|%%)/$1/;
+ "$_$ENV{$v};\nexport -f $_ 2> /dev/null;\n" } @bash_functions;
+ # eval $bashfuncset will set $bashfunc
+ my $bashfuncset;
+ if(@bashfunc) {
+ # Functions are not supported for all shells
+ if($Global::shell !~ m:(^|/)(ash|bash|rbash|zsh|rzsh|dash|ksh):) {
+ ::warning("Shell functions may not be supported in $Global::shell.");
+ }
+ $bashfuncset =
+ '@bash_functions=qw('."@bash_functions".");".
+ ::spacefree(1,'$shell="'.($ENV{'PARALLEL_SHELL'} || '$ENV{SHELL}').'";'.q{
+ if($shell=~/csh/) {
+ print STDERR "CSH/TCSH DO NOT SUPPORT newlines IN VARIABLES/FUNCTIONS. Unset @bash_functions\n";
+ exec "false";
+ }
+ }).
+ "\n".'$bashfunc = "'.::perl_quote_scalar("@bashfunc").'";';
+ } else {
+ $bashfuncset = '$bashfunc = "";'
+ }
+ if($ENV{'parallel_bash_environment'}) {
+ $bashfuncset .= '$bashfunc .= "eval\ \"\$parallel_bash_environment\"\;";';
+ }
+ ::debug("base64",$envset,$bashfuncset,"\n");
+ return $csh_friendly,$envset,$bashfuncset;
+ }
+
+ my $self = shift;
+ my $command = shift;
+ # TODO test that *sh -c 'parallel --env' uses *sh
+ if(not defined $self->{'sshlogin_wrap'}{$command}) {
+ my $sshlogin = $self->sshlogin();
+ $ENV{'PARALLEL_SEQ'} = $self->seq();
+ $ENV{$opt::process_slot_var} = -1 +
+ ($ENV{'PARALLEL_JOBSLOT'} = $self->slot());
+ $ENV{'PARALLEL_SSHLOGIN'} = $sshlogin->string();
+ $ENV{'PARALLEL_SSHHOST'} = $sshlogin->host();
+ if ($opt::hostgroups) {
+ $ENV{'PARALLEL_HOSTGROUPS'} = join '+', $sshlogin->hostgroups();
+ $ENV{'PARALLEL_ARGHOSTGROUPS'} = join '+', $self->hostgroups();
+ }
+ $ENV{'PARALLEL_PID'} = $$;
+ if($sshlogin->local()) {
+ if($opt::workdir) {
+ # Create workdir if needed. Then cd to it.
+ my $wd = $self->workdir();
+ if($opt::workdir eq "." or $opt::workdir eq "...") {
+ # If $wd does not start with '/': Prepend $HOME
+ $wd =~ s:^([^/]):$ENV{'HOME'}/$1:;
+ }
+ ::mkdir_or_die($wd);
+ my $post = "";
+ if($opt::workdir eq "...") {
+ $post = ";".exitstatuswrapper("rm -rf ".::Q($wd).";");
+
+ }
+ $command = "cd ".::Q($wd)." || exit 255; " .
+ $command . $post;
+ }
+ if(@opt::env) {
+ # Prepend with environment setter, which sets functions in zsh
+ my ($csh_friendly,$envset,$bashfuncset) = env_as_eval();
+ my $perl_code = $envset.$bashfuncset.
+ '@ARGV="'.::perl_quote_scalar($command).'";'.
+ "exec\"$Global::shell\",\"-c\",\(\$bashfunc.\"\@ARGV\"\)\;die\"exec:\$\!\\n\"\;";
+ if(length $perl_code > 999
+ or
+ not $csh_friendly
+ or
+ $command =~ /\n/) {
+ # csh does not deal well with > 1000 chars in one word
+ # csh does not deal well with $ENV with \n
+ $self->{'sshlogin_wrap'}{$command} = base64_wrap($perl_code);
+ } else {
+ $self->{'sshlogin_wrap'}{$command} = "perl -e ".::Q($perl_code);
+ }
+ } else {
+ $self->{'sshlogin_wrap'}{$command} = $command;
+ }
+ } else {
+ my $pwd = "";
+ if($opt::workdir) {
+ # Create remote workdir if needed. Then cd to it.
+ my $wd = ::pQ($self->workdir());
+ $pwd = qq{system("mkdir","-p","--","$wd"); chdir "$wd" ||}.
+ qq{print(STDERR "parallel: Cannot chdir to $wd\\n") &&}.
+ qq{exit 255;};
+ }
+ my ($csh_friendly,$envset,$bashfuncset) = env_as_eval();
+ my $cmd = $command;
+ # q// does not quote \, so we must do that
+ $cmd =~ s/\\/\\\\/g;
+
+ my $remote_command = $sshlogin->hexwrap
+ ($pwd.$envset.$bashfuncset.'$cmd='."q\0".$cmd."\0;".
+ monitor_parent_sshd_script());
+ my ($pre,$post,$cleanup)=("","","");
+ # --transfer
+ $pre .= $self->sshtransfer();
+ # --return
+ $post .= $self->sshreturn();
+ # --cleanup
+ $post .= $self->sshcleanup();
+ if($post) {
+ # We need to save the exit status of the job
+ $post = exitstatuswrapper($post);
+ }
+ $self->{'sshlogin_wrap'}{$command} =
+ ($pre
+ . $sshlogin->wrap($remote_command)
+ . ";"
+ . $post);
+ }
+ }
+ return $self->{'sshlogin_wrap'}{$command};
+}
+
+sub fill_templates($) {
+ # Replace replacement strings in template(s)
+ # Returns:
+ # @templates - File names of replaced templates
+ my $self = shift;
+
+ if(%opt::template) {
+ my @template_name =
+ map { $self->{'commandline'}->replace_placeholders([$_],0,0) }
+ @{$self->{'commandline'}{'template_names'}};
+ ::debug("tmpl","Names: @template_name\n");
+ for(my $i = 0; $i <= $#template_name; $i++) {
+ ::write_or_exit
+ ($template_name[$i],
+ $self->{'commandline'}->
+ replace_placeholders([$self->{'commandline'}
+ {'template_contents'}[$i]],0,0));
+ }
+ if($opt::cleanup) {
+ $self->add_rm(@template_name);
+ }
+ }
+}
+
+sub filter($) {
+ # Replace replacement strings in filter(s) and evaluate them
+ # Returns:
+ # $run - 1=yes, undef=no
+ my $self = shift;
+ my $run = 1;
+ if(@opt::filter) {
+ for my $eval ($self->{'commandline'}->
+ replace_placeholders(\@opt::filter,0,0)) {
+ $run &&= eval $eval;
+ }
+ $self->{'commandline'}{'skip'} ||= not $run;
+ }
+ return $run;
+}
+
+sub transfer($) {
+ # Files to transfer
+ # Non-quoted and with {...} substituted
+ # Returns:
+ # @transfer - File names of files to transfer
+ my $self = shift;
+
+ my $transfersize = 0;
+ my @transfer = $self->{'commandline'}->
+ replace_placeholders($self->{'commandline'}{'transfer_files'},0,0);
+ for(@transfer) {
+ # filesize
+ if(-e $_) {
+ $transfersize += (stat($_))[7];
+ }
+ }
+ $self->add_transfersize($transfersize);
+ return @transfer;
+}
+
+sub transfersize($) {
+ my $self = shift;
+ return $self->{'transfersize'};
+}
+
+sub add_transfersize($) {
+ my $self = shift;
+ my $transfersize = shift;
+ $self->{'transfersize'} += $transfersize;
+ $opt::sqlworker and
+ $Global::sql->update("SET Send = ? WHERE Seq = ".$self->seq(),
+ $self->{'transfersize'});
+}
+
+sub sshtransfer($) {
+ # Returns for each transfer file:
+ # rsync $file remote:$workdir
+ my $self = shift;
+ my @pre;
+ my $sshlogin = $self->sshlogin();
+ my $workdir = $self->workdir();
+ for my $file ($self->transfer()) {
+ push @pre, $sshlogin->rsync_transfer_cmd($file,$workdir).";";
+ }
+ return join("",@pre);
+}
+
+sub return($) {
+ # Files to return
+ # Non-quoted and with {...} substituted
+ # Returns:
+ # @non_quoted_filenames
+ my $self = shift;
+ return $self->{'commandline'}->
+ replace_placeholders($self->{'commandline'}{'return_files'},0,0);
+}
+
+sub returnsize($) {
+ # This is called after the job has finished
+ # Returns:
+ # $number_of_bytes transferred in return
+ my $self = shift;
+ for my $file ($self->return()) {
+ if(-e $file) {
+ $self->{'returnsize'} += (stat($file))[7];
+ }
+ }
+ return $self->{'returnsize'};
+}
+
+sub add_returnsize($) {
+ my $self = shift;
+ my $returnsize = shift;
+ $self->{'returnsize'} += $returnsize;
+ $opt::sqlworker and
+ $Global::sql->update("SET Receive = ? WHERE Seq = ".$self->seq(),
+ $self->{'returnsize'});
+}
+
+sub sshreturn($) {
+ # Returns for each return-file:
+ # rsync remote:$workdir/$file .
+ my $self = shift;
+ my $sshlogin = $self->sshlogin();
+ my $pre = "";
+ for my $file ($self->return()) {
+ $file =~ s:^\./::g; # Remove ./ if any
+ my $relpath = ($file !~ m:^/:) ||
+ ($file =~ m:/\./:); # Is the path relative or /./?
+ my $cd = "";
+ my $wd = "";
+ if($relpath) {
+ # rsync -avR /foo/./bar/baz.c remote:/tmp/
+ # == (on old systems)
+ # rsync -avR --rsync-path="cd /foo; rsync" remote:bar/baz.c /tmp/
+ $wd = ::shell_quote_file($self->workdir()."/");
+ }
+ # Only load File::Basename if actually needed
+ $Global::use{"File::Basename"} ||= eval "use File::Basename; 1;";
+ # dir/./file means relative to dir, so remove dir on remote
+ $file =~ m:(.*)/\./:;
+ my $basedir = $1 ? ::shell_quote_file($1."/") : "";
+ my $nobasedir = $file;
+ $nobasedir =~ s:.*/\./::;
+ $cd = ::shell_quote_file(::dirname($nobasedir));
+ my $rsync_cd = '--rsync-path='.::Q("cd $wd$cd; rsync");
+ my $basename = ::Q(::shell_quote_file(::basename($file)));
+ # --return
+ # mkdir -p /home/tange/dir/subdir/;
+ # rsync (--protocol 30) -rlDzR
+ # --rsync-path="cd /home/tange/dir/subdir/; rsync"
+ # server:file.gz /home/tange/dir/subdir/
+ $pre .= "mkdir -p $basedir$cd" . " && " .
+ $sshlogin->rsync(). " $rsync_cd -- ".$sshlogin->host().':'.
+ $basename . " ".$basedir.$cd.";";
+ }
+ return $pre;
+}
+
+sub sshcleanup($) {
+ # Return the sshcommand needed to remove the file
+ # Returns:
+ # ssh command needed to remove files from sshlogin
+ my $self = shift;
+ my $sshlogin = $self->sshlogin();
+ my $workdir = $self->workdir();
+ my $cleancmd = "";
+
+ for my $file ($self->remote_cleanup()) {
+ my @subworkdirs = parentdirs_of($file);
+ $cleancmd .= $sshlogin->cleanup_cmd($file,$workdir).";";
+ }
+ if(defined $opt::workdir and $opt::workdir eq "...") {
+ $cleancmd .= $sshlogin->wrap("rm -rf " . ::Q($workdir).';');
+ }
+ return $cleancmd;
+}
+
+sub remote_cleanup($) {
+ # Returns:
+ # Files to remove at cleanup
+ my $self = shift;
+ if($opt::cleanup) {
+ my @transfer = $self->transfer();
+ my @return = $self->return();
+ return (@transfer,@return);
+ } else {
+ return ();
+ }
+}
+
+sub exitstatuswrapper(@) {
+ # Input:
+ # @shellcode = shell code to execute
+ # Returns:
+ # shell script that saves the exit status, runs @shellcode,
+ # and exits with the saved status
+ if($Global::cshell) {
+ return ('set _EXIT_status=$status; ' .
+ join(" ",@_).
+ 'exit $_EXIT_status;');
+ } elsif($Global::fish) {
+ return ('export _EXIT_status=$status; ' .
+ join(" ",@_).
+ 'exit $_EXIT_status;');
+ } else {
+ return ('_EXIT_status=$?; ' .
+ join(" ",@_).
+ 'exit $_EXIT_status;');
+ }
+}
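+
+# Illustrative example (POSIX-shell branch):
+#
+#   exitstatuswrapper("rm -f /tmp/f;")
+#   # => _EXIT_status=$?; rm -f /tmp/f;exit $_EXIT_status;
+#
+# The cleanup runs, but the caller still sees the job's own exit value.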
+
+sub workdir($) {
+ # Returns:
+ # the workdir on a remote machine
+ my $self = shift;
+ if(not defined $self->{'workdir'}) {
+ my $workdir;
+ if(defined $opt::workdir) {
+ if($opt::workdir eq ".") {
+ # . means current dir
+ my $home = $ENV{'HOME'};
+ eval 'use Cwd';
+ my $cwd = cwd();
+ $workdir = $cwd;
+ if($home) {
+ # If homedir exists: remove the homedir from
+ # workdir if cwd starts with homedir
+ # E.g. /home/foo/my/dir => my/dir
+ # E.g. /tmp/my/dir => /tmp/my/dir
+ my ($home_dev, $home_ino) = (stat($home))[0,1];
+ my $parent = "";
+ my @dir_parts = split(m:/:,$cwd);
+ my $part;
+ while(defined ($part = shift @dir_parts)) {
+ $part eq "" and next;
+ $parent .= "/".$part;
+ my ($parent_dev, $parent_ino) = (stat($parent))[0,1];
+ if($parent_dev == $home_dev and $parent_ino == $home_ino) {
+ # dev and ino is the same: We found the homedir.
+ $workdir = join("/",@dir_parts);
+ last;
+ }
+ }
+ }
+ if($workdir eq "") {
+ $workdir = ".";
+ }
+ } elsif($opt::workdir eq "...") {
+ $workdir = ".parallel/tmp/" . ::hostname() . "-" . $$
+ . "-" . $self->seq();
+ } else {
+ $workdir = $self->{'commandline'}->
+ replace_placeholders([$opt::workdir],0,0);
+ #$workdir = $opt::workdir;
+ # Rsync treats /./ specially. We don't want that.
+ $workdir =~ s:/\./:/:g; # Remove /./
+ $workdir =~ s:(.)/+$:$1:; # Remove ending / if any
+ $workdir =~ s:^\./::g; # Remove starting ./ if any
+ }
+ } else {
+ $workdir = ".";
+ }
+ $self->{'workdir'} = $workdir;
+ }
+ return $self->{'workdir'};
+}
+
+sub parentdirs_of($) {
+ # Return:
+ # all parentdirs except . of this dir or file - sorted desc by length
+ my $d = shift;
+ my @parents = ();
+ while($d =~ s:/[^/]+$::) {
+ if($d ne ".") {
+ push @parents, $d;
+ }
+ }
+ return @parents;
+}
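+
+# Illustrative example:
+#
+#   parentdirs_of("a/b/c/file");   # returns ("a/b/c", "a/b", "a")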
+
+sub start($) {
+ # Setup STDOUT and STDERR for a job and start it.
+ # Returns:
+ # job-object or undef if job not to run
+
+ sub open3_setpgrp_internal {
+ # Run open3+setpgrp followed by the command
+ # Input:
+ # $stdin_fh = Filehandle to use as STDIN
+ # $stdout_fh = Filehandle to use as STDOUT
+ # $stderr_fh = Filehandle to use as STDERR
+ # $command = Command to run
+ # Returns:
+ # $pid = Process group of job started
+ my ($stdin_fh,$stdout_fh,$stderr_fh,$command) = @_;
+ my $pid;
+ local (*OUT,*ERR);
+ open OUT, '>&', $stdout_fh or ::die_bug("Can't dup STDOUT: $!");
+ open ERR, '>&', $stderr_fh or ::die_bug("Can't dup STDERR: $!");
+ # The eval is needed to catch exception from open3
+ eval {
+ if(not $pid = ::open3($stdin_fh, ">&OUT", ">&ERR", "-")) {
+ # Each child gets its own process group to make it safe to killall
+ eval{ setpgrp(0,0) };
+ eval{ setpriority(0,0,$opt::nice) };
+ exec($Global::shell,"-c",$command)
+ || ::die_bug("open3-$stdin_fh ".substr($command,0,200));
+ }
+ };
+ return $pid;
+ }
+
+ sub open3_setpgrp_external {
+ # Run open3 on $command wrapped with a perl script doing setpgrp
+ # Works on systems that do not support open3(,,,"-")
+ # Input:
+ # $stdin_fh = Filehandle to use as STDIN
+ # $stdout_fh = Filehandle to use as STDOUT
+ # $stderr_fh = Filehandle to use as STDERR
+ # $command = Command to run
+ # Returns:
+ # $pid = Process group of job started
+ my ($stdin_fh,$stdout_fh,$stderr_fh,$command) = @_;
+ local (*OUT,*ERR);
+ open OUT, '>&', $stdout_fh or ::die_bug("Can't dup STDOUT: $!");
+ open ERR, '>&', $stderr_fh or ::die_bug("Can't dup STDERR: $!");
+
+ my $pid;
+ my @setpgrp_wrap =
+ ('perl','-e',
+ "eval\{setpgrp\}\;eval\{setpriority\(0,0,$opt::nice\)\}\;".
+ "exec '$Global::shell', '-c', \@ARGV");
+ # The eval is needed to catch exception from open3
+ eval {
+ $pid = ::open3($stdin_fh, ">&OUT", ">&ERR", @setpgrp_wrap, $command)
+ || ::die_bug("open3-$stdin_fh");
+ 1;
+ };
+ return $pid;
+ }
+
+ sub redefine_open3_setpgrp {
+ my $setgprp_cache = shift;
+ # Select and run open3_setpgrp_internal/open3_setpgrp_external
+ no warnings 'redefine';
+ my ($outfh,$name) = ::tmpfile(SUFFIX => ".tst");
+ # Test to see if open3(x,x,x,"-") is fully supported
+ # Can an exported bash function be called via open3?
+ my $script = 'if($pid=::open3($i,$o,$e,"-")) { wait; } '.
+ 'else { exec("bash","-c","testfun && true"); }';
+ my $bash =
+ ::shell_quote_scalar_default(
+ "testfun() { rm $name; }; export -f testfun; ".
+ "perl -MIPC::Open3 -e ".
+ ::Q(::Q($script))
+ );
+ my $redefine_eval;
+ # Redirect STDERR temporarily,
+ # so errors on MacOS X are ignored.
+ open my $saveerr, ">&STDERR";
+ open STDERR, '>', "/dev/null";
+ # Run the test
+ ::debug("init",qq{bash -c $bash 2>/dev/null});
+ qx{ bash -c $bash 2>/dev/null };
+ open STDERR, ">&", $saveerr;
+
+ if(-e $name) {
+ # Does not support open3(x,x,x,"-")
+ # or does not have bash:
+ # Use (slow) external version
+ unlink($name);
+ $redefine_eval = '*open3_setpgrp = \&open3_setpgrp_external';
+ ::debug("init","open3_setpgrp_external chosen\n");
+ } else {
+ # Supports open3(x,x,x,"-")
+ # This is 0.5 ms faster to run
+ $redefine_eval = '*open3_setpgrp = \&open3_setpgrp_internal';
+ ::debug("init","open3_setpgrp_internal chosen\n");
+ }
+ if(open(my $fh, ">", $setgprp_cache)) {
+ print $fh $redefine_eval;
+ close $fh;
+ } else {
+ ::debug("init","Cannot write to $setgprp_cache");
+ }
+ eval $redefine_eval;
+ }
+
+ sub open3_setpgrp {
+ my $setgprp_cache = $Global::cache_dir . "/tmp/sshlogin/" .
+ ::hostname() . "/setpgrp_func";
+ sub read_cache() {
+ -e $setgprp_cache || return 0;
+ local $/ = undef;
+ open(my $fh, "<", $setgprp_cache) || return 0;
+ eval <$fh> || return 0;
+ close $fh;
+ return 1;
+ }
+ if(not read_cache()) {
+ redefine_open3_setpgrp($setgprp_cache);
+ }
+ # The sub is now redefined. Call it
+ return open3_setpgrp(@_);
+ }
+
+ my $job = shift;
+ # Get the shell command to be executed (possibly with ssh in front).
+ my $command = $job->wrapped();
+ my $pid;
+
+ if($Global::interactive or $Global::stderr_verbose) {
+ $job->interactive_start();
+ }
+ # Must be run after $job->interactive_start():
+ # $job->interactive_start() may call $job->skip()
+ if($job->{'commandline'}{'skip'}
+ or
+ not $job->filter()) {
+ # $job->skip() was called or job filtered
+ $command = "true";
+ }
+ $job->openoutputfiles();
+ $job->print_verbose_dryrun();
+ my($stdout_fh,$stderr_fh) = ($job->fh(1,"w"),$job->fh(2,"w"));
+ if($opt::dryrun or $opt::sqlmaster) { $command = "true"; }
+ $ENV{'PARALLEL_SEQ'} = $job->seq();
+ $ENV{'PARALLEL_PID'} = $$;
+ $ENV{$opt::process_slot_var} = -1 +
+ ($ENV{'PARALLEL_JOBSLOT'} = $job->slot());
+ $ENV{'PARALLEL_TMP'} = ::tmpname("par");
+ $job->add_rm($ENV{'PARALLEL_TMP'});
+ $job->fill_templates();
+ $ENV{'SSHPASS'} = $job->{'sshlogin'}->{'password'};
+ ::debug("run", $Global::total_running, " processes . Starting (",
+ $job->seq(), "): $command\n");
+ if($opt::pipe) {
+ my ($stdin_fh) = ::gensym();
+ $pid = open3_setpgrp($stdin_fh,$stdout_fh,$stderr_fh,$command);
+ if($opt::roundrobin and not $opt::keeporder) {
+ # --keep-order will make sure the order will be reproducible
+ ::set_fh_non_blocking($stdin_fh);
+ }
+ $job->set_fh(0,"w",$stdin_fh);
+ if($opt::tee or $opt::shard or $opt::bin) { $job->set_virgin(0); }
+ } elsif(($opt::tty or $opt::open_tty) and -c "/dev/tty" and
+ open(my $devtty_fh, "<", "/dev/tty")) {
+ # Give /dev/tty to the command if no one else is using it
+ # The eval is needed to catch exception from open3
+ local (*IN,*OUT,*ERR);
+ open OUT, '>&', $stdout_fh or ::die_bug("Can't dup STDOUT: $!");
+ open ERR, '>&', $stderr_fh or ::die_bug("Can't dup STDERR: $!");
+ *IN = $devtty_fh;
+ # The eval is needed to catch exception from open3
+ my @wrap = ('perl','-e',
+ "eval\{setpriority\(0,0,$opt::nice\)\}\;".
+ "exec '$Global::shell', '-c', \@ARGV");
+ eval {
+ $pid = ::open3("<&IN", ">&OUT", ">&ERR", @wrap, $command)
+ || ::die_bug("open3-/dev/tty");
+ 1;
+ };
+ close $devtty_fh;
+ $job->set_virgin(0);
+ } elsif($Global::semaphore) {
+ # Allow sem to read from stdin
+ $pid = open3_setpgrp("<&STDIN",$stdout_fh,$stderr_fh,$command);
+ $job->set_virgin(0);
+ } else {
+ $pid = open3_setpgrp(::gensym(),$stdout_fh,$stderr_fh,$command);
+ $job->set_virgin(0);
+ }
+ if($pid) {
+ # A job was started
+ $Global::total_running++;
+ $Global::total_started++;
+ $job->set_pid($pid);
+ $job->set_starttime();
+ $Global::running{$job->pid()} = $job;
+ if($opt::timeout) {
+ $Global::timeoutq->insert($job);
+ }
+ $Global::newest_job = $job;
+ $Global::newest_starttime = ::now();
+ return $job;
+ } else {
+ # No more processes
+ ::debug("run", "Cannot spawn more jobs.\n");
+ return undef;
+ }
+}
+
+sub interactive_start($) {
+ my $self = shift;
+ my $command = $self->wrapped();
+ if($Global::interactive) {
+ my $answer;
+ ::status_no_nl("$command ?...");
+ do{
+ my $tty_fh = ::open_or_exit("<","/dev/tty");
+ $answer = <$tty_fh>;
+ close $tty_fh;
+ # Sometimes we get an empty string (not even \n).
+ # We do not know why, so just ignore it and try again.
+ } while(length $answer < 1);
+ if (not ($answer =~ /^\s*y/i)) {
+ $self->{'commandline'}->skip();
+ }
+ } else {
+ print $Global::original_stderr "$command\n";
+ }
+}
+
+{
+ my $tmuxsocket;
+ my $qsocket;
+
+ sub tmux_wrap($) {
+ # Wrap command with tmux for session pPID
+ # Input:
+ # $actual_command = the actual command being run (incl ssh wrap)
+ my $self = shift;
+ my $actual_command = shift;
+ # Temporary file name. Used for fifo to communicate exit val
+ my $tmpfifo = ::tmpname("tmx");
+ $self->add_rm($tmpfifo);
+ if(length($tmpfifo) >=100) {
+ ::error(255,"tmux does not support sockets with path > 100.");
+ }
+ if($opt::tmuxpane) {
+ # Move the command into a pane in window 0
+ $actual_command = $ENV{'PARALLEL_TMUX'}.' joinp -t :0 ; '.
+ $ENV{'PARALLEL_TMUX'}.' select-layout -t :0 tiled ; '.
+ $actual_command;
+ }
+ my $visual_command = $self->replaced();
+ my $title = $visual_command;
+ if($visual_command =~ /\0/) {
+ ::error(255,"Command line contains NUL. ".
+ "tmux is confused by NUL.");
+ }
+ # ; causes problems
+ # ascii 194-245 annoys tmux
+ $title =~ tr/[\011-\016;\302-\365]/ /s;
+ $title = ::Q($title);
+
+ my $l_act = length($actual_command);
+ my $l_tit = length($title);
+ my $l_fifo = length($tmpfifo);
+ # The line to run contains 118 chars of extra code + the title twice
+ my $l_tot = 2 * $l_tit + $l_act + $l_fifo;
+
+ my $quoted_space75 = ::Q(" ")x75;
+ while($l_tit < 1000 and
+ (
+ (890 < $l_tot and $l_tot < 1350)
+ or
+ (9250 < $l_tot and $l_tot < 9800)
+ )) {
+ # tmux blocks for certain lengths:
+ # 900 < title + command < 1200
+ # 9250 < title + command < 9800
+ # but only if title < 1000, so expand the title with 75 spaces
+ # The measured lengths are:
+ # 996 < (title + whole command) < 1127
+ # 9331 < (title + whole command) < 9636
+ $title .= $quoted_space75;
+ $l_tit = length($title);
+ $l_tot = 2 * $l_tit + $l_act + $l_fifo;
+ }
+
+ my $tmux;
+ $ENV{'PARALLEL_TMUX'} ||= "tmux";
+ if(not $tmuxsocket) {
+ $tmuxsocket = ::tmpname("tms");
+ $qsocket = ::Q($tmuxsocket);
+ ::debug("tmux", "Start: $ENV{'PARALLEL_TMUX'} -S $qsocket attach");
+ if($opt::fg) {
+ if(not fork) {
+ # Run tmux in the foreground
+ # Wait for the socket to appear
+ while (not -e $tmuxsocket) { }
+ `$ENV{'PARALLEL_TMUX'} -S $qsocket attach`;
+ exit;
+ }
+ }
+ ::status("See output with: $ENV{'PARALLEL_TMUX'} -S $qsocket attach");
+ }
+ $tmux = "sh -c ".::Q(
+ $ENV{'PARALLEL_TMUX'}.
+ " -S $qsocket new-session -s p$$ -d \"sleep .2\" >/dev/null 2>&1").";" .
+ $ENV{'PARALLEL_TMUX'}.
+ " -S $qsocket new-window -t p$$ -n $title";
+
+ ::debug("tmux", "title len:", $l_tit, " act ", $l_act, " max ",
+ $Limits::Command::line_max_len, " tot ",
+ $l_tot, "\n");
+ return "mkfifo ".::Q($tmpfifo)." && $tmux ".
+ # Run in tmux
+ ::Q
+ (
+ "(".$actual_command.');'.
+ # The triple print is needed - otherwise the testsuite fails
+ q[ perl -e 'while($t++<3){ print $ARGV[0],"\n" }' $?h/$status >> ].
+ ::Q($tmpfifo)."&".
+ "echo $title; echo \007Job finished at: `date`;sleep 10"
+ ).
+ # Run outside tmux
+ # Read a / separated line: 0h/2 for csh, 2/0 for bash.
+ # If csh the first will be 0h, so use the second as exit value.
+ # Otherwise just use the first value as exit value.
+ q{; exec perl -e '$/="/";$_=<>;$c=<>;unlink $ARGV; }.
+ q{/(\d+)h/ and exit($1);exit$c' }.::Q($tmpfifo);
+ }
+}
+
+sub is_already_in_results($) {
+ # Do we already have results for this job?
+ # Returns:
+ # $job_already_run = bool whether there is output for this or not
+ my $job = $_[0];
+ if($Global::csvsep) {
+ if($opt::joblog) {
+ # OK: You can look for job run in joblog
+ return 0
+ } else {
+ ::warning_once(
+ "--resume --results .csv/.tsv/.json is not supported yet\n");
+ # TODO read and parse the file
+ return 0
+ }
+ }
+ my $out = $job->{'commandline'}->results_out();
+ ::debug("run", "Test ${out}stdout", -e "${out}stdout", "\n");
+ return(-e $out."stdout" or -f $out);
+}
+
+sub is_already_in_joblog($) {
+ my $job = shift;
+ return vec($Global::job_already_run,$job->seq(),1);
+}
+
+sub set_job_in_joblog($) {
+ my $job = shift;
+ vec($Global::job_already_run,$job->seq(),1) = 1;
+}
+
+sub should_be_retried($) {
+ # Should this job be retried?
+ # Returns
+ # 0 - do not retry
+ # 1 - job queued for retry
+ my $self = shift;
+ if (not defined $opt::retries) { return 0; }
+ if(not $self->exitstatus() and not $self->exitsignal()) {
+ # Completed with success. If there is a recorded failure: forget it
+ $self->reset_failed_here();
+ return 0;
+ } else {
+ # The job failed. Should it be retried?
+ $self->add_failed_here();
+ my $retries = $self->{'commandline'}->
+ replace_placeholders([$opt::retries],0,0);
+ # 0 = Inf
+ if($retries == 0) { $retries = 2**31; }
+ # Ignore files already unlinked to avoid memory leak
+ $self->{'unlink'} = [ grep { -e $_ } @{$self->{'unlink'}} ];
+ map { -e $_ or delete $Global::unlink{$_} } keys %Global::unlink;
+ if($self->total_failed() == $retries) {
+ # This has been retried enough
+ return 0;
+ } else {
+ # This command should be retried
+ $self->set_endtime(undef);
+ $self->reset_exitstatus();
+ $Global::JobQueue->unget($self);
+ ::debug("run", "Retry ", $self->seq(), "\n");
+ return 1;
+ }
+ }
+}
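+# Example of the semantics above: with --retries 3 a failing job is
+# requeued until it has failed 3 times in total; --retries 0 means
+# retry (practically) forever (2**31 attempts).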
+
+{
+ my (%print_later,$job_seq_to_print);
+
+ sub print_earlier_jobs($) {
+ # Print jobs whose output is postponed due to --keep-order
+ # Returns: N/A
+ my $job = shift;
+ $print_later{$job->seq()} = $job;
+ $job_seq_to_print ||= 1;
+ my $returnsize = 0;
+ ::debug("run", "Looking for: $job_seq_to_print ",
+ "This: ", $job->seq(), "\n");
+ for(;vec($Global::job_already_run,$job_seq_to_print,1);
+ $job_seq_to_print++) {}
+ while(my $j = $print_later{$job_seq_to_print}) {
+ $returnsize += $j->print();
+ if($j->endtime()) {
+ # Job finished - look at the next
+ delete $print_later{$job_seq_to_print};
+ $job_seq_to_print++;
+ next;
+ } else {
+ # Job not finished yet - look at it again next round
+ last;
+ }
+ }
+ return $returnsize;
+ }
+}
+
+sub print($) {
+ # Print the output of the jobs
+ # Returns: N/A
+ my $self = shift;
+
+ ::debug("print", ">>joboutput ", $self->replaced(), "\n");
+ if($opt::dryrun) {
+ # Nothing was printed to this job:
+ # cleanup tmp files if --files was set
+ ::rm($self->fh(1,"name"));
+ }
+ if($opt::pipe and $self->virgin() and not $opt::tee) {
+ # Skip --joblog, --dryrun, --verbose
+ } else {
+ if($opt::ungroup) {
+ # An undefined returnsize counts as 0
+ $self->returnsize() or $self->add_returnsize(0);
+ if($Global::joblog and defined $self->{'exitstatus'}) {
+ # Add to joblog when finished
+ $self->print_joblog();
+ # Printing is only relevant for grouped/--line-buffer output.
+ $opt::ungroup and return;
+ }
+ }
+ # Check for disk full
+ ::exit_if_disk_full();
+ }
+
+ my $returnsize = $self->returnsize();
+ my @fdno;
+ if($opt::latestline) {
+ @fdno = (1);
+ } else {
+ @fdno = (sort { $a <=> $b } keys %Global::fh);
+ }
+ for my $fdno (@fdno) {
+ # Sort by file descriptor numerically: 1,2,3,..,9,10,11
+ $fdno == 0 and next;
+ my $out_fh = $Global::fh{$fdno};
+ my $in_fh = $self->fh($fdno,"r");
+ if(not $in_fh) {
+ if(not $Job::file_descriptor_warning_printed{$fdno}++) {
+ # ::warning("File descriptor $fdno not defined\n");
+ }
+ next;
+ }
+ ::debug("print", "File descriptor $fdno (", $self->fh($fdno,"name"), "):\n");
+ if($Global::linebuffer) {
+ # Line buffered print out
+ $self->print_linebuffer($fdno,$in_fh,$out_fh);
+ } elsif($Global::files) {
+ $self->print_files($fdno,$in_fh,$out_fh);
+ } elsif($opt::results) {
+ $self->print_results($fdno,$in_fh,$out_fh);
+ } else {
+ $self->print_normal($fdno,$in_fh,$out_fh);
+ }
+ flush $out_fh;
+ }
+ ::debug("print", "<<joboutput\n");
+ if(defined $self->{'exitstatus'}
+ and not ($self->virgin() and $opt::pipe)) {
+ if($Global::joblog and not $opt::sqlworker) {
+ # Add to joblog when finished
+ $self->print_joblog();
+ }
+ if($opt::sqlworker and not $opt::results) {
+ $Global::sql->output($self);
+ }
+ if($Global::csvsep) {
+ # Add output to CSV when finished
+ $self->print_csv();
+ }
+ if($Global::jsonout) {
+ $self->print_json();
+ }
+ }
+ return $returnsize - $self->returnsize();
+}
+
+{
+ my %jsonmap;
+
+ sub print_json($) {
+ my $self = shift;
+ sub jsonquote($) {
+ my $a = shift;
+ if(not $jsonmap{"\001"}) {
+ map { $jsonmap{sprintf("%c",$_)} =
+ sprintf '\u%04x', $_ } 0..31;
+ }
+ $a =~ s/\\/\\\\/g;
+ $a =~ s/\"/\\"/g;
+ $a =~ s/([\000-\037])/$jsonmap{$1}/g;
+ return $a;
+ }
+
+ my $cmd;
+ if($Global::verbose <= 1) {
+ $cmd = jsonquote($self->replaced());
+ } else {
+ # Verbose level > 1: Print the ssh/rsync wrapping, too
+ $cmd = jsonquote(join " ", @{$self->{'commandline'}});
+ }
+ my $record_ref = $self->{'commandline'}{'arg_list_flat_orig'};
+
+ # Memory optimization: Overwrite with the joined output
+ $self->{'output'}{1} = join("", @{$self->{'output'}{1}});
+ $self->{'output'}{2} = join("", @{$self->{'output'}{2}});
+ # {
+ # "Seq": 12,
+ # "Host": "/usr/bin/ssh foo@lo",
+ # "Starttime": 1608344711.743,
+ # "JobRuntime": 0.01,
+ # "Send": 0,
+ # "Receive": 10,
+ # "Exitval": 0,
+ # "Signal": 0,
+ # "Command": "echo 1",
+ # "V": [
+ # "1"
+ # ],
+ # "Stdout": "1\n",
+ # "Stderr": ""
+ # }
+ #
+ printf($Global::csv_fh
+ q({ "Seq": %s, "Host": "%s", "Starttime": %s, "JobRuntime": %s, ).
+ q("Send": %s, "Receive": %s, "Exitval": %s, "Signal": %s, ).
+ q("Command": "%s", "V": [ %s ], "Stdout": "%s", "Stderr": "%s" }).
+ "\n",
+ $self->seq(),
+ jsonquote($self->sshlogin()->string()),
+ $self->starttime(), sprintf("%0.3f",$self->runtime()),
+ $self->transfersize(), $self->returnsize(),
+ $self->exitstatus(), $self->exitsignal(), $cmd,
+ (join ",",
+ map { '"'.jsonquote($_).'"' } @$record_ref[1..$#$record_ref],
+ ),
+ jsonquote($self->{'output'}{1}),
+ jsonquote($self->{'output'}{2})
+ );
+ }
+}
+
+{
+ my $header_printed;
+
+ sub print_csv($) {
+ my $self = shift;
+ my $cmd;
+ if($Global::verbose <= 1) {
+ $cmd = $self->replaced();
+ } else {
+ # Verbose level > 1: Print the ssh/rsync wrapping, too
+ $cmd = join " ", @{$self->{'commandline'}};
+ }
+ my $record_ref = $self->{'commandline'}{'arg_list_flat_orig'};
+
+ if(not $header_printed) {
+ # Variable headers
+ # Normal => V1..Vn
+ # --header : => first value from column
+ my @V;
+ if($opt::header) {
+ my $i = 1;
+ @V = (map { $Global::input_source_header{$i++} }
+ @$record_ref[1..$#$record_ref]);
+ } else {
+ my $V = "V1";
+ @V = (map { $V++ } @$record_ref[1..$#$record_ref]);
+ }
+ print $Global::csv_fh
+ (map { $$_ }
+ combine_ref("Seq", "Host", "Starttime", "JobRuntime",
+ "Send", "Receive", "Exitval", "Signal", "Command",
+ @V,
+ "Stdout","Stderr"
+ )),"\n";
+ $header_printed++;
+ }
+ # Memory optimization: Overwrite with the joined output
+ $self->{'output'}{1} = join("", @{$self->{'output'}{1}});
+ $self->{'output'}{2} = join("", @{$self->{'output'}{2}});
+ print $Global::csv_fh
+ (map { $$_ }
+ combine_ref
+ ($self->seq(),
+ $self->sshlogin()->string(),
+ $self->starttime(), sprintf("%0.3f",$self->runtime()),
+ $self->transfersize(), $self->returnsize(),
+ $self->exitstatus(), $self->exitsignal(), \$cmd,
+ \@$record_ref[1..$#$record_ref],
+ \$self->{'output'}{1},
+ \$self->{'output'}{2})),"\n";
+ }
+}
+
+sub combine_ref($) {
+ # Inspired by Text::CSV_PP::_combine (by Makamaka Hannyaharamitu)
+ my @part = @_;
+ my $sep = $Global::csvsep;
+ my $quot = '"';
+ my @out = ();
+
+ my $must_be_quoted;
+ for my $column (@part) {
+ # Memory optimization: Content transferred as reference
+ if(ref $column ne "SCALAR") {
+ # Convert all columns to scalar references
+ my $v = $column;
+ $column = \$v;
+ }
+ if(not defined $$column) {
+ $$column = '';
+ next;
+ }
+
+ $must_be_quoted = 0;
+
+ if($$column =~ s/$quot/$quot$quot/go){
+ # Replace " => ""
+ $must_be_quoted ||=1;
+ }
+ if($$column =~ /[\s\Q$sep\E]/o){
+ # Quote if the column contains whitespace or the separator
+ $must_be_quoted ||=1;
+ }
+
+ $Global::use{"bytes"} ||= eval "use bytes; 1;";
+ if ($$column =~ /\0/) {
+ # Contains \0 => put quotes around
+ $must_be_quoted ||=1;
+ }
+ if($must_be_quoted){
+ push @out, \$sep, \$quot, $column, \$quot;
+ } else {
+ push @out, \$sep, $column;
+ }
+ }
+ # Remove the first $sep: ,val,"val" => val,"val"
+ shift @out;
+ return @out;
+}
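+# A sketch of the quoting, assuming $Global::csvsep is ",":
+#   combine_ref("a", "b,c", 'say "hi"')
+# dereferences to:
+#   a,"b,c","say ""hi"""
+# Plain columns stay bare; columns containing the separator,
+# whitespace or \0 are quoted, and an embedded " is doubled.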
+
+sub print_files($) {
+ # Print the name of the file containing stdout on stdout
+ # Uses:
+ # $opt::pipe
+ # $opt::group = Print when job is done
+ # $opt::linebuffer = Print ASAP
+ # Returns: N/A
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+
+ # If the job is dead: close printing fh. Needed for --compress
+ close $self->fh($fdno,"w");
+ if($? and $opt::compress) {
+ ::error($opt::compress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ if($opt::compress) {
+ # Kill the decompressor which will not be needed
+ CORE::kill "TERM", $self->fh($fdno,"rpid");
+ }
+ close $in_fh;
+
+ if($opt::pipe and $self->virgin()) {
+ # Nothing was printed to this job:
+ # cleanup unused tmp files because --files was set
+ for my $fdno (1,2) {
+ ::rm($self->fh($fdno,"name"));
+ ::rm($self->fh($fdno,"unlink"));
+ }
+ } elsif($fdno == 1 and $self->fh($fdno,"name")) {
+ print $out_fh $self->tag(),$self->fh($fdno,"name"), $Global::files_sep;
+ if($Global::membuffer) {
+ push @{$self->{'output'}{$fdno}},
+ $self->tag(), $self->fh($fdno,"name");
+ }
+ $self->add_returnsize(-s $self->fh($fdno,"name"));
+ # Mark as printed - do not print again
+ $self->set_fh($fdno,"name",undef);
+ }
+}
+
+
+# Different print types
+# (--ll | --ll --bar | --lb | --group | --parset | --sql-worker)
+# (--files | --results (.json|.csv|.tsv) )
+# --color-failed
+# --color
+# --keep-order
+# --tag
+# --bar
+{
+ my ($up,$eol,$currow,$maxrow);
+ my ($minvisible,%print_later,%notvisible);
+ my (%binmodeset,%tab);
+
+ sub latestline_init() {
+ # cursor_up cuu1 = up one line
+ $up = `sh -c "tput cuu1 </dev/tty" 2>/dev/null`;
+ chomp($up);
+ $eol = `sh -c "tput el </dev/tty" 2>/dev/null`;
+ chomp($eol);
+ if($eol eq "") { $eol = "\033[K"; }
+ $currow = 1;
+ $maxrow = 1;
+ $minvisible = 1;
+ for(0..8) {
+ $tab{$_} = " "x(8-($_%8));
+ }
+ }
+
+ sub mbtrunc($$) {
+ # Simple mbtrunc to avoid using Text::WideChar::Util
+ my $str = shift;
+ my $len = shift;
+ if(::mbswidth($str) == length($str)) {
+ $str = substr($str,0,$len);
+ } else {
+ # mb chars (ヌー平行) are wider than 1 char on screen
+ # We need at most $len chars - they may be wide
+ $str =~ s/(.{$len}).*/$1/;
+ my $rlen = int((::mbswidth($str) - $len)/2+0.5);
+ do {
+ $str =~ s/.{$rlen}$//;
+ $rlen = int((::mbswidth($str) - $len)/2+0.5);
+ } while($rlen >= 1);
+ }
+ return $str;
+ }
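+ # Example: mbtrunc("abcd",4) returns "abcd" (width == length), while
+ # mbtrunc("ヌー平行",4) returns "ヌー" - two double-width chars that
+ # fill the 4 screen columns.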
+
+ sub print_latest_line($) {
+ my $self = shift;
+ my $out_fh = shift;
+ if(not defined $self->{$out_fh,'latestline'}) { return; }
+ my $row = $self->row();
+ # Is row visible?
+ if(not ($minvisible <= $row
+ and
+ $row < $minvisible + ::terminal_rows() - 1)) {
+ return;
+ }
+ if(not $binmodeset{$out_fh}++) {
+ # Enable utf8 if possible
+ eval q{ binmode $out_fh, "encoding(utf8)"; };
+ }
+ my ($color,$reset_color) = $self->color();
+ my $termcol = ::terminal_columns();
+ my $untabify_tag = ::decode_utf8($self->untabtag());
+ my $untabify_str =
+ ::untabify(::decode_utf8($self->{$out_fh,'latestline'}));
+ # -1 to make space for $truncated_str
+ my $maxtaglen = $termcol - 1;
+ $untabify_tag = mbtrunc($untabify_tag,$maxtaglen);
+ my $taglen = ::mbswidth($untabify_tag);
+ my $maxstrlen = $termcol - $taglen - 1;
+ $untabify_str = mbtrunc($untabify_str,$maxstrlen);
+ my $strlen = ::mbswidth($untabify_str);
+ my $truncated_tag = "";
+ my $truncated_str = "";
+ if($termcol - $taglen < 2) {
+ $truncated_tag = ">";
+ } else {
+ if($termcol - $taglen - $strlen <= 2) {
+ $truncated_str = ">";
+ }
+ }
+ $maxrow = ($row > $maxrow) ? $row : $maxrow;
+ printf($out_fh
+ ("%s%s%s%s". # up down \r eol
+ "%s%s". # tag trunc_tag
+ "%s%s%s%s". # color line trunc reset_color
+ "%s" # down
+ ),
+ "$up"x($currow - $row), "\n"x($row - $currow), "\r", $eol,
+ $untabify_tag,$truncated_tag,
+ $color, $untabify_str, $truncated_str, $reset_color,
+ "\n"x($maxrow - $row + 1));
+ $currow = $maxrow + 1;
+ }
+
+ sub print_linebuffer($) {
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+ if(defined $self->{'exitstatus'}) {
+ # If the job is dead: close printing fh. Needed for --compress
+ close $self->fh($fdno,"w");
+ if($opt::compress) {
+ if($?) {
+ ::error($opt::compress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ # Blocked reading in final round
+ for my $fdno (1,2) { ::set_fh_blocking($self->fh($fdno,'r')); }
+ }
+ if($opt::latestline) { $print_later{$self->row()} = $self; }
+ }
+ if(not $self->virgin()) {
+ if($Global::files or ($opt::results and not $Global::csvsep)) {
+ # Print filename
+ if($fdno == 1 and not $self->fh($fdno,"printed")) {
+ print $out_fh $self->tag(),$self->fh($fdno,"name"),"\n";
+ if($Global::membuffer) {
+ push(@{$self->{'output'}{$fdno}}, $self->tag(),
+ $self->fh($fdno,"name"));
+ }
+ $self->set_fh($fdno,"printed",1);
+ }
+ # No need for reading $in_fh, as it is from "cat >/dev/null"
+ } else {
+ # Read halflines and print full lines
+ my $outputlength = 0;
+ my $halfline_ref = $self->{'halfline'}{$fdno};
+ my ($buf,$i,$rv);
+ # 1310720 gives 1.2 GB/s
+ # 131072 gives 0.9 GB/s
+ # The optimal block size differs
+ # It has been measured on:
+ # AMD 6376: 60800 (>70k is also reasonable)
+ # Intel i7-3632QM: 52-59k, 170-175k
+ # seq 64 | ppar --_test $1 --lb \
+ # 'yes {} `seq 1000`|head -c 10000000' >/dev/null
+ while($rv = sysread($in_fh, $buf, 60800)) {
+ $outputlength += $rv;
+ # TODO --recend
+ # Treat both \n and \r as line end
+ # Only test for \r if there is no \n
+ # Test:
+ # perl -e '$a="x"x1000000;
+ # $b="$a\r$a\n$a\r$a\n";
+ # map { print $b,$_ } 1..10'
+ $i = ((rindex($buf,"\n")+1) || (rindex($buf,"\r")+1));
+ if($i) {
+ if($opt::latestline) {
+ # Keep the latest full line
+ my $l = join('', @$halfline_ref,
+ substr($buf,0,$i-1));
+ # "ab\rb\n" = "bb", but we cannot process that correctly.
+ # Line may be:
+ # foo \r bar \n
+ # foo \r bar \r baz \r
+ # If so: Remove 'foo \r'
+ $l =~ s/.*\r//g;
+ my $j = ((rindex($l,"\n")+1) ||
+ (rindex($l,"\r")+1));
+ $self->{$out_fh,'latestline'} = substr($l,$j);
+ # Remove the processed part
+ # by keeping the unprocessed part
+ @$halfline_ref = (substr($buf,$i));
+ } else {
+ # One or more complete lines were found
+ if($Global::color) {
+ my $print = join("",@$halfline_ref,
+ substr($buf,0,$i));
+ chomp($print);
+ my ($color,$reset_color) = $self->color();
+ my $colortag = $color.$self->tag();
+ # \n => reset \n color tag
+ $print =~ s{([\n\r])(?=.|$)}
+ {$reset_color$1$colortag}gs;
+ print($out_fh $colortag, $print,
+ $reset_color, "\n");
+ } elsif($opt::tag or defined $opt::tagstring) {
+ # Replace ^ with $tag within the full line
+ if($Global::cache_replacement_eval) {
+ # Replace with the same value for tag
+ my $tag = $self->tag();
+ unshift @$halfline_ref, $tag;
+ # TODO --recend that can be partially in
+ # @$halfline_ref
+ substr($buf,0,$i-1) =~
+ s/([\n\r])(?=.|$)/$1$tag/gs;
+ } else {
+ # Replace with freshly computed tag-value
+ unshift @$halfline_ref, $self->tag();
+ substr($buf,0,$i-1) =~
+ s/([\n\r])(?=.|$)/$1.$self->tag()/gse;
+ }
+ # The length changed,
+ # so find the new ending pos
+ $i = ::max((rindex($buf,"\n")+1),
+ (rindex($buf,"\r")+1));
+ # Print the partial line (halfline)
+ # and the last half
+ print $out_fh @$halfline_ref, substr($buf,0,$i);
+ } else {
+ # Print the partial line (halfline)
+ # and the last half
+ print $out_fh @$halfline_ref, substr($buf,0,$i);
+ }
+ # Buffer in memory for SQL and CSV-output
+ if($Global::membuffer) {
+ push(@{$self->{'output'}{$fdno}},
+ @$halfline_ref, substr($buf,0,$i));
+ }
+ # Remove the printed part by keeping the unprinted
+ @$halfline_ref = (substr($buf,$i));
+ }
+ } else {
+ # No newline, so append to the halfline
+ push @$halfline_ref, $buf;
+ }
+ }
+ $self->add_returnsize($outputlength);
+ if($opt::latestline) { $self->print_latest_line($out_fh); }
+ }
+ if(defined $self->{'exitstatus'}) {
+ if($Global::files or ($opt::results and not $Global::csvsep)) {
+ $self->add_returnsize(-s $self->fh($fdno,"name"));
+ } else {
+ if($opt::latestline) {
+ # Force re-computing color if --colorfailed
+ if($opt::colorfailed) { delete $self->{'color'}; }
+ if($self->{$out_fh,'latestline'} ne "") {
+ $self->print_latest_line($out_fh);
+ }
+ if(@{$self->{'halfline'}{$fdno}}) {
+ my $l = join('', @{$self->{'halfline'}{$fdno}});
+ if($l ne "") {
+ $self->{$out_fh,'latestline'} = $l;
+ }
+ } else {
+ $self->{$out_fh,'latestline'} = undef;
+ }
+ # Print latest line from jobs that are already done
+ while($print_later{$minvisible}) {
+ $print_later{$minvisible}->print_latest_line($out_fh);
+ delete $print_later{$minvisible};
+ $minvisible++;
+ }
+ # Print latest line from jobs that are on screen now
+ for(my $row = $minvisible;
+ $row < $minvisible -1 + ::terminal_rows();
+ $row++) {
+ $print_later{$row} and
+ $print_later{$row}->print_latest_line($out_fh);
+ }
+ } else {
+ # If the job is dead: print the remaining partial line
+ # read remaining (already done for $opt::latestline)
+ my $halfline_ref = $self->{'halfline'}{$fdno};
+ if(grep /./, @$halfline_ref) {
+ my $returnsize = 0;
+ for(@{$self->{'halfline'}{$fdno}}) {
+ $returnsize += length $_;
+ }
+ $self->add_returnsize($returnsize);
+ if($opt::tag or defined $opt::tagstring) {
+ # Prepend $tag to the remaining half line
+ unshift @$halfline_ref, $self->tag();
+ }
+ # Print the partial line (halfline)
+ print $out_fh @{$self->{'halfline'}{$fdno}};
+ # Buffer in memory for SQL and CSV-output
+ if($Global::membuffer) {
+ push(@{$self->{'output'}{$fdno}}, @$halfline_ref);
+ }
+ @$halfline_ref = ();
+ }
+ }
+ }
+ if($self->fh($fdno,"rpid") and
+ CORE::kill 0, $self->fh($fdno,"rpid")) {
+ # decompress still running
+ } else {
+ # decompress done: close fh
+ close $in_fh;
+ if($? and $opt::compress) {
+ ::error($opt::decompress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ }
+ }
+ }
+ }
+}
+
+sub free_ressources() {
+ my $self = shift;
+ if(not $opt::ungroup) {
+ my $fh;
+ for my $fdno (sort { $a <=> $b } keys %Global::fh) {
+ $fh = $self->fh($fdno,"w");
+ $fh and close $fh;
+ $fh = $self->fh($fdno,"r");
+ $fh and close $fh;
+ }
+ }
+}
+
+sub print_parset($) {
+ # Wrap output with shell script code to set as variables
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+ my $outputlength = 0;
+
+ ::debug("parset","print $Global::parset");
+ if($Global::parset eq "assoc") {
+ # Start: (done in parse_parset())
+ # eval "`echo 'declare -A myassoc; myassoc=(
+ # Each: (done here)
+ # [$'a\tb']=$'a\tb\tc ddd'
+ # End: (done in wait_and_exit())
+ # )'`"
+ print '[',::Q($self->{'commandline'}->
+ replace_placeholders(["\257<\257>"],0,0)),']=';
+ } elsif($Global::parset eq "array") {
+ # Start: (done in parse_parset())
+ # eval "`echo 'myassoc=(
+ # Each: (done here)
+ # $'a\tb\tc ddd'
+ # End: (done in wait_and_exit())
+ # )'`"
+ } elsif($Global::parset eq "var") {
+ # Start: (done in parse_parset())
+ # <empty>
+ # Each: (done here)
+ # var=$'a\tb\tc ddd'
+ # End: (done in wait_and_exit())
+ # <empty>
+ if(not @Global::parset_vars) {
+ ::error(255,"Too few named destination variables");
+ }
+ print shift @Global::parset_vars,"=";
+ }
+ local $/ = "\n";
+ my $tag = $self->tag();
+ my @out;
+ while(<$in_fh>) {
+ $outputlength += length $_;
+ # Tag lines with \r, too
+ $_ =~ s/(?<=[\r])(?=.|$)/$tag/gs;
+ push @out, $tag,$_;
+ }
+ # Remove last newline
+ # This often makes it easier to use the output in shell
+ @out and ${out[$#out]} =~ s/\n$//s;
+ print ::Q(join("",@out)),"\n";
+ return $outputlength;
+}
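+# Example (hypothetical run): with "parset a,b" a job printing "1\n"
+# makes this emit the shell fragment a=1 (quoted with ::Q, trailing
+# newline stripped), which the parset wrapper later evals in the
+# calling shell.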
+
+sub print_normal($) {
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+ my $buf;
+ close $self->fh($fdno,"w");
+ if($? and $opt::compress) {
+ ::error($opt::compress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ if(not $self->virgin()) {
+ seek $in_fh, 0, 0;
+ # $in_fh is now ready for reading at position 0
+ my $outputlength = 0;
+ my @output;
+
+ if($Global::parset and $fdno == 1) {
+ $outputlength += $self->print_parset($fdno,$in_fh,$out_fh);
+ } elsif(defined $opt::tag or defined $opt::tagstring
+ or $Global::color or $opt::colorfailed) {
+ if($Global::color or $opt::colorfailed) {
+ my ($color,$reset_color) = $self->color();
+ my $colortag = $color.$self->tag();
+ # Read line by line
+ local $/ = "\n";
+ while(<$in_fh>) {
+ $outputlength += length $_;
+ # Tag lines with \r, too
+ chomp;
+ s{([\n\r])(?=.|$)}{$reset_color$1$colortag}gs;
+ print $out_fh $colortag,$_,$reset_color,"\n";
+ }
+ } else {
+ my $tag = $self->tag();
+ my $pretag = 1;
+ my $s;
+ while(sysread($in_fh,$buf,32767)) {
+ $outputlength += length $buf;
+ $buf =~ s/(?<=[\r\n])(?=.)/$tag/gs;
+ print $out_fh ($pretag ? $tag : ""),$buf;
+ if($Global::membuffer) {
+ push @{$self->{'output'}{$fdno}},
+ ($pretag ? $tag : ""),$buf;
+ }
+ # Should next print start with a tag?
+ $s = substr($buf, -1);
+ # This is faster than ($s eq "\n") || ($s eq "\r")
+ $pretag = ($s eq "\n") ? 1 : ($s eq "\r");
+ }
+ }
+ } else {
+ # Most efficient way of copying data from $in_fh to $out_fh
+ # Intel i7-3632QM: 25k-
+ while(sysread($in_fh,$buf,32767)) {
+ print $out_fh $buf;
+ $outputlength += length $buf;
+ if($Global::membuffer) {
+ push @{$self->{'output'}{$fdno}}, $buf;
+ }
+ }
+ }
+ if($fdno == 1) {
+ $self->add_returnsize($outputlength);
+ }
+ close $in_fh;
+ if($? and $opt::compress) {
+ ::error($opt::decompress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ }
+}
+
+sub print_results($) {
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+ my $buf;
+ close $self->fh($fdno,"w");
+ if($? and $opt::compress) {
+ ::error($opt::compress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ if(not $self->virgin()) {
+ seek $in_fh, 0, 0;
+ # $in_fh is now ready for reading at position 0
+ my $outputlength = 0;
+ my @output;
+
+ if($Global::membuffer) {
+ # Read data into membuffer
+ if($opt::tag or $opt::tagstring) {
+ # Read line by line
+ local $/ = "\n";
+ my $tag = $self->tag();
+ while(<$in_fh>) {
+ $outputlength += length $_;
+ # Tag lines with \r, too
+ $_ =~ s/(?<=[\r])(?=.|$)/$tag/gs;
+ push @{$self->{'output'}{$fdno}}, $tag, $_;
+ }
+ } else {
+ # Most efficient way of reading $in_fh into the membuffer
+ while(sysread($in_fh,$buf,60000)) {
+ $outputlength += length $buf;
+ push @{$self->{'output'}{$fdno}}, $buf;
+ }
+ }
+ } else {
+ # Not membuffer: No need to read the file
+ if($opt::compress) {
+ $outputlength = -1;
+ } else {
+ # Determine $outputlength = file length
+ seek($in_fh, 0, 2) || ::die_bug("cannot seek result");
+ $outputlength = tell($in_fh);
+ }
+ }
+ if($fdno == 1) { $self->add_returnsize($outputlength); }
+ close $in_fh;
+ if($? and $opt::compress) {
+ ::error($opt::decompress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ }
+}
+
+sub print_joblog($) {
+ my $self = shift;
+ my $cmd;
+ if($Global::verbose <= 1) {
+ $cmd = $self->replaced();
+ } else {
+ # Verbose level > 1: Print the ssh/rsync wrapping, too
+ $cmd = $self->wrapped();
+ }
+ # Newlines make it hard to parse the joblog
+ $cmd =~ s/\n/\0/g;
+ print $Global::joblog
+ join("\t", $self->seq(), $self->sshlogin()->string(),
+ $self->starttime(), sprintf("%10.3f",$self->runtime()),
+ $self->transfersize(), $self->returnsize(),
+ $self->exitstatus(), $self->exitsignal(), $cmd
+ ). "\n";
+ flush $Global::joblog;
+ $self->set_job_in_joblog();
+}
+
+sub tag($) {
+ my $self = shift;
+ if(not defined $self->{'tag'} or not $Global::cache_replacement_eval) {
+ if(defined $opt::tag or defined $opt::tagstring) {
+ $self->{'tag'} =
+ ($self->{'commandline'}->
+ replace_placeholders([$opt::tagstring],0,0)).
+ "\t";
+ } else {
+ # No tag
+ $self->{'tag'} = "";
+ }
+ }
+ return $self->{'tag'};
+}
+
+sub untabtag($) {
+ # tag with \t replaced with spaces
+ my $self = shift;
+ my $tag = $self->tag();
+ if(not defined $self->{'untab'}{$tag}) {
+ $self->{'untab'}{$tag} = ::untabify($tag);
+ }
+ return $self->{'untab'}{$tag};
+}
+
+{
+ my (@color,$eol,$reset_color,$init);
+
+ sub init_color() {
+ if(not $init) {
+ $init = 1;
+ # color combinations that are readable: black/white text
+ # on colored background, but not white on yellow
+ my @color_combinations =
+ # Force each color code to have the same length in chars
+ # This will make \t work as expected
+ ((map { [sprintf("%03d",$_),"000"] }
+ 6..7,9..11,13..15,40..51,75..87,113..123,147..159,
+ 171..182,185..231,249..254),
+ (map { [sprintf("%03d",$_),231] }
+ 1..9,12..13,16..45,52..81,88..114,124..149,
+ 160..178,180,182..184,196..214,232..250));
+ # reorder list so adjacent colors are dissimilar
+ # %23 and %7 were found experimentally
+ my @order = reverse sort {
+ (($a%23) <=> ($b%23))
+ or
+ (($b%7) <=> ($a%7));
+ } 0..$#color_combinations;
+ @order = @order[54 .. $#color_combinations, 0 .. 53];
+ @color = map {
+ # TODO Can this be done with `tput` codes?
+ "\033[48;5;".$_->[0].";38;5;".$_->[1]."m"
+ } @color_combinations[ @order ];
+
+ # clr_eol el = clear to end of line
+ $eol = `sh -c "tput el </dev/tty" 2>/dev/null`;
+ chomp($eol);
+ if($eol eq "") { $eol = "\033[K"; }
+ # exit_attribute_mode sgr0 = turn off all attributes
+ $reset_color = `sh -c "tput sgr0 </dev/tty" 2>/dev/null`;
+ chomp($reset_color);
+ if($reset_color eq "") { $reset_color = "\033[m"; }
+ }
+ }
+
+ sub color($) {
+ my $self = shift;
+ if(not defined $self->{'color'}) {
+ if($Global::color) {
+ # Choose a value based on the seq
+ $self->{'color'} = $color[$self->seq() % ($#color+1)].$eol;
+ $self->{'reset_color'} = $reset_color;
+ } else {
+ $self->{'color'} = "";
+ $self->{'reset_color'} = "";
+ }
+ if($opt::colorfailed) {
+ if($self->exitstatus()) {
+ # White on Red
+ # Can this be done more generally?
+ $self->{'color'} =
+ "\033[48;5;"."196".";38;5;"."231"."m".$eol;
+ $self->{'reset_color'} = $reset_color;
+ }
+ }
+ }
+ return ($self->{'color'},$self->{'reset_color'});
+ }
+}
+
+sub hostgroups($) {
+ my $self = shift;
+ if(not defined $self->{'hostgroups'}) {
+ $self->{'hostgroups'} =
+ $self->{'commandline'}->{'arg_list'}[0][0]->{'hostgroups'};
+ }
+ return @{$self->{'hostgroups'}};
+}
+
+sub exitstatus($) {
+ my $self = shift;
+ return $self->{'exitstatus'};
+}
+
+sub set_exitstatus($$) {
+ my $self = shift;
+ my $exitstatus = shift;
+ if($exitstatus) {
+ # Overwrite status if non-zero
+ $self->{'exitstatus'} = $exitstatus;
+ } else {
+ # Set status but do not overwrite
+ # Status may have been set by --timeout
+ $self->{'exitstatus'} ||= $exitstatus;
+ }
+ $opt::sqlworker and
+ $Global::sql->update("SET Exitval = ? WHERE Seq = ".$self->seq(),
+ $exitstatus);
+}
+
+sub reset_exitstatus($) {
+ my $self = shift;
+ undef $self->{'exitstatus'};
+}
+
+sub exitsignal($) {
+ my $self = shift;
+ return $self->{'exitsignal'};
+}
+
+sub set_exitsignal($$) {
+ my $self = shift;
+ my $exitsignal = shift;
+ $self->{'exitsignal'} = $exitsignal;
+ $opt::sqlworker and
+ $Global::sql->update("SET _Signal = ? WHERE Seq = ".$self->seq(),
+ $exitsignal);
+}
+
+{
+ my $total_jobs;
+
+ sub should_we_halt {
+ # Should we halt? Immediately? Gracefully?
+ # Returns: N/A
+ my $job = shift;
+ my $limit;
+ if($Global::semaphore) {
+ # Emulate Bash's +128 if there is a signal
+ $Global::halt_exitstatus =
+ ($job->exitstatus()
+ or
+ $job->exitsignal() ? $job->exitsignal() + 128 : 0);
+ }
+ if($job->exitstatus() or $job->exitsignal()) {
+ # Job failed
+ $Global::exitstatus++;
+ $Global::total_failed++;
+ if($Global::halt_fail) {
+ ::status("$Global::progname: This job failed:",
+ $job->replaced());
+ $limit = $Global::total_failed;
+ }
+ } elsif($Global::halt_success) {
+ ::status("$Global::progname: This job succeeded:",
+ $job->replaced());
+ $limit = $Global::total_completed - $Global::total_failed;
+ }
+ if($Global::halt_done) {
+ ::status("$Global::progname: This job finished:",
+ $job->replaced());
+ $limit = $Global::total_completed;
+ }
+ if(not defined $limit) {
+ return ""
+ }
+ # --halt # => 1..100 (number of jobs failed, 101 means > 100)
+ # --halt % => 1..100 (pct of jobs failed)
+ if($Global::halt_pct and not $Global::halt_count) {
+ $total_jobs ||= $Global::JobQueue->total_jobs();
+ # From the pct compute the number of jobs that must fail/succeed
+ $Global::halt_count = $total_jobs * $Global::halt_pct;
+ }
+ if($limit >= $Global::halt_count) {
+ # At least N jobs have failed/succeeded/completed
+ # or at least N% have failed/succeeded/completed
+ # So we should prepare for exit
+ if($Global::halt_fail or $Global::halt_done) {
+ # Set exit status
+ if(not defined $Global::halt_exitstatus) {
+ if($Global::halt_pct) {
+ # --halt now,fail=X% or soon,fail=X%
+ # --halt now,done=X% or soon,done=X%
+ $Global::halt_exitstatus =
+ ::ceil($Global::total_failed / $total_jobs * 100);
+ } elsif($Global::halt_count) {
+ # --halt now,fail=X or soon,fail=X
+ # --halt now,done=X or soon,done=X
+ $Global::halt_exitstatus =
+ ::min($Global::total_failed,101);
+ }
+ if($Global::halt_count and $Global::halt_count == 1) {
+ # --halt now,fail=1 or soon,fail=1
+ # --halt now,done=1 or soon,done=1
+ # Emulate Bash's +128 if there is a signal
+ $Global::halt_exitstatus =
+ ($job->exitstatus()
+ or
+ $job->exitsignal() ? $job->exitsignal() + 128 : 0);
+ }
+ }
+ ::debug("halt","Pct: ",$Global::halt_pct,
+ " count: ",$Global::halt_count,
+ " status: ",$Global::halt_exitstatus,"\n");
+ } elsif($Global::halt_success) {
+ $Global::halt_exitstatus = 0;
+ }
+ if($Global::halt_when eq "soon") {
+ $Global::start_no_new_jobs ||= 1;
+ if(scalar(keys %Global::running) > 0) {
+ # Only warn if there are more jobs running
+ ::status
+ ("$Global::progname: Starting no more jobs. ".
+ "Waiting for ". (keys %Global::running).
+ " jobs to finish.");
+ }
+ }
+ return($Global::halt_when);
+ }
+ return "";
+ }
+}
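+# Example (assuming the pct is stored as the fraction 0.10):
+# --halt soon,fail=10% over 200 jobs gives
+# $Global::halt_count = 200 * 0.10 = 20; after the 20th failure no
+# new jobs are started and the exit status is ceil(20/200*100) = 10.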
+
+
+package CommandLine;
+
+sub new($) {
+ my $class = shift;
+ my $seq = shift;
+ my $commandref = shift;
+ $commandref || die;
+ my $arg_queue = shift;
+ my $context_replace = shift;
+ my $max_number_of_args = shift; # for -N and normal (-n1)
+ my $transfer_files = shift;
+ my $return_files = shift;
+ my $template_names = shift;
+ my $template_contents = shift;
+ my $replacecount_ref = shift;
+ my $len_ref = shift;
+ my %replacecount = %$replacecount_ref;
+ my %len = %$len_ref;
+ for (keys %$replacecount_ref) {
+ # Total length of this replacement string {} replaced with all args
+ $len{$_} = 0;
+ }
+ return bless {
+ 'command' => $commandref,
+ 'seq' => $seq,
+ 'len' => \%len,
+ 'arg_list' => [],
+ 'arg_list_flat' => [],
+ 'arg_list_flat_orig' => [undef],
+ 'arg_queue' => $arg_queue,
+ 'max_number_of_args' => $max_number_of_args,
+ 'replacecount' => \%replacecount,
+ 'context_replace' => $context_replace,
+ 'transfer_files' => $transfer_files,
+ 'return_files' => $return_files,
+ 'template_names' => $template_names,
+ 'template_contents' => $template_contents,
+ 'replaced' => undef,
+ }, ref($class) || $class;
+}
+
+sub flush_cache() {
+ my $self = shift;
+ for my $arglist (@{$self->{'arg_list'}}) {
+ for my $arg (@$arglist) {
+ $arg->flush_cache();
+ }
+ }
+ $self->{'arg_queue'}->flush_cache();
+ $self->{'replaced'} = undef;
+}
+
+sub seq($) {
+ my $self = shift;
+ return $self->{'seq'};
+}
+
+sub set_seq($$) {
+ my $self = shift;
+ $self->{'seq'} = shift;
+}
+
+sub slot($) {
+ # Find the number of a free job slot and return it
+ # Uses:
+ # @Global::slots - list with free jobslots
+ # Returns:
+ # $jobslot = number of jobslot
+ my $self = shift;
+ if(not $self->{'slot'}) {
+ if(not @Global::slots) {
+ # $max_slot_number will typically be $Global::max_jobs_running
+ push @Global::slots, ++$Global::max_slot_number;
+ }
+ $self->{'slot'} = shift @Global::slots;
+ }
+ return $self->{'slot'};
+}
+
+{
+ my $already_spread;
+ my $darwin_max_len;
+
+ sub populate($) {
+ # Add arguments from arg_queue until the number of arguments or
+ # max line length is reached
+ # Uses:
+ # $Global::usable_command_line_length
+ # $opt::cat
+ # $opt::fifo
+ # $Global::JobQueue
+ # $opt::m
+ # $opt::X
+ # $Global::max_jobs_running
+ # Returns: N/A
+ my $self = shift;
+ my $next_arg;
+ my $max_len = $Global::usable_command_line_length || die;
+ if($^O eq "darwin") {
+ # Darwin's limit is affected by:
+ # * number of environment names (variables+functions)
+ # * size of environment
+ # * the length of arguments:
+ # a one-char argument lowers the limit by 5
+ # To be safe assume all arguments are one-char
+ # The max_len is cached between runs, but if the size of
+ # the environment is different we need to recompute the
+ # usable max length for this run of GNU Parallel
+ # See https://unix.stackexchange.com/a/604943/2972
+ if(not $darwin_max_len) {
+ my $envc = (keys %ENV);
+ my $envn = length join"",(keys %ENV);
+ my $envv = length join"",(values %ENV);
+ $darwin_max_len = -146+($max_len - $envn - $envv) - $envc*10;
+ ::debug("init",
+ "length: $darwin_max_len = ".
+ "-146+($max_len - $envn - $envv) - $envc*10");
+ }
+ $max_len = $darwin_max_len;
+ }
+ if($opt::cat or $opt::fifo) {
+ # Get the empty arg added by --pipepart (if any)
+ $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->get();
+ # $PARALLEL_TMP will point to a tempfile that will be used as {}
+ $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->
+ unget([Arg->new('"$PARALLEL_TMP"')]);
+ }
+ while (not $self->{'arg_queue'}->empty()) {
+ $next_arg = $self->{'arg_queue'}->get();
+ if(not defined $next_arg) {
+ next;
+ }
+ $self->push($next_arg);
+ if($self->len() >= $max_len) {
+ # Command length is now > max_length
+ # If there are arguments: remove the last
+ # If there are no arguments: Error
+ # TODO stuff about -x opt_x
+ if($self->number_of_args() > 1) {
+ # There is something to work on
+ $self->{'arg_queue'}->unget($self->pop());
+ last;
+ } else {
+ my $args = join(" ", map { $_->orig() } @$next_arg);
+ ::error(255,"Command line too long (".
+ $self->len(). " >= ".
+ $max_len.
+ ") at input ".
+ $self->{'arg_queue'}->arg_number().
+ ": ".
+ ((length $args > 50) ?
+ (substr($args,0,50))."..." :
+ $args));
+ }
+ }
+
+ if(defined $self->{'max_number_of_args'}) {
+ if($self->number_of_args() >=
+ $self->{'max_number_of_args'}) {
+ last;
+ }
+ }
+ }
+ if(($opt::m or $opt::X) and not $already_spread
+ and $self->{'arg_queue'}->empty() and $Global::max_jobs_running) {
+ # -m or -X and EOF => Spread the arguments over all jobslots
+ # (unless they are already spread)
+ $already_spread ||= 1;
+ if($self->number_of_args() > 1) {
+ $self->{'max_number_of_args'} =
+ ::ceil($self->number_of_args()/$Global::max_jobs_running);
+ $Global::JobQueue->{'commandlinequeue'}->{'max_number_of_args'} =
+ $self->{'max_number_of_args'};
+ $self->{'arg_queue'}->unget($self->pop_all());
+ while($self->number_of_args() < $self->{'max_number_of_args'}) {
+ $self->push($self->{'arg_queue'}->get());
+ }
+ }
+ $Global::JobQueue->flush_total_jobs();
+ }
+
+ if($opt::sqlmaster) {
+ # Insert the V1..Vn for this $seq in SQL table
+ # instead of generating one
+ $Global::sql->insert_records($self->seq(), $self->{'command'},
+ $self->{'arg_list_flat_orig'});
+ }
+ }
+}
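+# Example of the -m/-X spreading at EOF: if the current command line
+# swallowed the last 100 args and 4 jobslots are running, it is
+# re-split into ceil(100/4) = 25 args per command line.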
+
+sub push($) {
+ # Add one or more records as arguments
+ # Returns: N/A
+ my $self = shift;
+ my $record = shift;
+ push @{$self->{'arg_list_flat_orig'}}, map { $_->orig() } @$record;
+ push @{$self->{'arg_list_flat'}}, @$record;
+ push @{$self->{'arg_list'}}, $record;
+ # Make @arg available for {= =}
+ *Arg::arg = $self->{'arg_list_flat_orig'};
+
+ my $quote_arg = ($Global::quote_replace and not $Global::quoting);
+ my $col;
+ for my $perlexpr (keys %{$self->{'replacecount'}}) {
+ if($perlexpr =~ /^(-?\d+)(?:\D.*|)$/) {
+ # Positional replacement string
+ # Deal with negative positional replacement string
+ $col = ($1 < 0) ? $1 : $1-1;
+ if(defined($record->[$col])) {
+ $self->{'len'}{$perlexpr} +=
+ length $record->[$col]->replace($perlexpr,$quote_arg,$self);
+ }
+ } else {
+ for my $arg (@$record) {
+ if(defined $arg) {
+ $self->{'len'}{$perlexpr} +=
+ length $arg->replace($perlexpr,$quote_arg,$self);
+ }
+ }
+ }
+ }
+}
+
+sub pop($) {
+ # Remove last argument
+ # Returns:
+ # the last record
+ my $self = shift;
+ my $record = pop @{$self->{'arg_list'}};
+ # pop off arguments from @$record
+ splice @{$self->{'arg_list_flat_orig'}}, -($#$record+1), $#$record+1;
+ splice @{$self->{'arg_list_flat'}}, -($#$record+1), $#$record+1;
+ my $quote_arg = ($Global::quote_replace and not $Global::quoting);
+ for my $perlexpr (keys %{$self->{'replacecount'}}) {
+ if($perlexpr =~ /^(\d+) /) {
+ # Positional
+ defined($record->[$1-1]) or next;
+ $self->{'len'}{$perlexpr} -=
+ length $record->[$1-1]->replace($perlexpr,$quote_arg,$self);
+ } else {
+ for my $arg (@$record) {
+ if(defined $arg) {
+ $self->{'len'}{$perlexpr} -=
+ length $arg->replace($perlexpr,$quote_arg,$self);
+ }
+ }
+ }
+ }
+ return $record;
+}
+
+sub pop_all($) {
+ # Remove all arguments and zeros the length of replacement perlexpr
+ # Returns:
+ # all records
+ my $self = shift;
+ my @popped = @{$self->{'arg_list'}};
+ for my $perlexpr (keys %{$self->{'replacecount'}}) {
+ $self->{'len'}{$perlexpr} = 0;
+ }
+ $self->{'arg_list'} = [];
+ $self->{'arg_list_flat_orig'} = [undef];
+ $self->{'arg_list_flat'} = [];
+ return @popped;
+}
+
+sub number_of_args($) {
+ # The number of records
+ # Returns:
+ # number of records
+ my $self = shift;
+ # This is really the number of records
+ return $#{$self->{'arg_list'}}+1;
+}
+
+sub number_of_recargs($) {
+ # The number of args in records
+ # Returns:
+ # number of args records
+ my $self = shift;
+ my $sum = 0;
+ my $nrec = scalar @{$self->{'arg_list'}};
+ if($nrec) {
+ $sum = $nrec * (scalar @{$self->{'arg_list'}[0]});
+ }
+ return $sum;
+}
+
+sub args_as_string($) {
+ # Returns:
+ # all unmodified arguments joined with ' ' (similar to {})
+ my $self = shift;
+ return (join " ", map { $_->orig() }
+ map { @$_ } @{$self->{'arg_list'}});
+}
+
+sub results_out($) {
+ sub max_file_name_length {
+ # Figure out the max length of a subdir
+ # TODO and the max total length
+ # Ext4 = 255,130816
+ # Uses:
+ # $Global::max_file_length is set
+ # Returns:
+ # $Global::max_file_length
+ my $testdir = shift;
+
+ my $upper = 100_000_000;
+ # Dir length of 8 chars is supported everywhere
+ my $len = 8;
+ my $dir = "d"x$len;
+ do {
+ rmdir($testdir."/".$dir);
+ $len *= 16;
+ $dir = "d"x$len;
+ } while ($len < $upper and mkdir $testdir."/".$dir);
+ # Then search for the actual max length between $len/16 and $len
+ my $min = $len/16;
+ my $max = $len;
+ while($max-$min > 5) {
+ # If we are within 5 chars of the exact value:
+ # it is not worth the extra time to find the exact value
+ my $test = int(($min+$max)/2);
+ $dir = "d"x$test;
+ if(mkdir $testdir."/".$dir) {
+ rmdir($testdir."/".$dir);
+ $min = $test;
+ } else {
+ $max = $test;
+ }
+ }
+ $Global::max_file_length = $min;
+ return $min;
+ }
+
+ my $self = shift;
+ my $out = $self->replace_placeholders([$opt::results],0,0);
+ if($out eq $opt::results) {
+ # $opt::results simple string: Append args_as_dirname
+ my $args_as_dirname = $self->args_as_dirname(0);
+ # Output in: prefix/name1/val1/name2/val2/stdout
+ $out = $opt::results."/".$args_as_dirname;
+ if(-d $out or eval{ File::Path::mkpath($out); }) {
+ # OK
+ } else {
+ # mkpath failed: Argument too long or not quoted
+ # Set $Global::max_file_length, which will keep the individual
+ # dir names shorter than the max length
+ max_file_name_length($opt::results);
+ # Quote dirnames with +
+ $args_as_dirname = $self->args_as_dirname(1);
+ # prefix/name1/val1/name2/val2/
+ $out = $opt::results."/".$args_as_dirname;
+ File::Path::mkpath($out);
+ }
+ $out .="/";
+ } else {
+ if($out =~ m:/$:s) {
+ # / = dir
+ if(-d $out or eval{ File::Path::mkpath($out); }) {
+ # OK
+ } else {
+ ::error(255,"Cannot make dir '$out'.");
+ }
+ } else {
+ $out =~ m:(.*)/:s;
+ File::Path::mkpath($1);
+ }
+ }
+ return $out;
+}
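+# Example: --results out with values "a" and "c" from two input
+# sources returns "out/1/a/2/c/" (column numbers stand in for names
+# unless --header : supplies them), so stdout lands in
+# out/1/a/2/c/stdout.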
+
+{
+ my %map;
+ my %stringmap;
+ my $sep;
+
+ # test: '' . .. a. a.. + ++ 0..255 on fat12 ext4
+ sub args_as_dirname($) {
+ # Returns:
+ # all arguments joined with '/' (similar to {})
+ # Chars that are not safe on all file systems are quoted.
+ sub init() {
+ # ext4: / \t \n \0 \\ \r
+ # fat: 0..31 " * / : < > ? \ | Maybe also: # [ ] ; = ,
+ # exfat: 128..255
+ # Other FS: , [ ] { } ( ) ! ; " ' * ? < > |
+ #
+ # Quote these as:
+ # + = ++
+ # \0 = +0
+ # \t = +t
+ # \\ = +b (backslash)
+ # \n = +n
+ # \r = +r
+ # / = +z (zlash)
+ # ? = +y (whY?)
+ # " = +d (double quote)
+ # ' = +q (quote)
+ # * = +a (asterisk)
+ # < = +l (less than)
+ # > = +g (greater than)
+ # : = +k (kolon)
+ # ! = +x (eXclamation)
+ # | = +p (pipe)
+ # # = +h (hash)
+ # ; = +s (semicolon)
+ # = = +e (equal)
+ # , = +c (comma)
+ # 1..32 128..255 = +XX (hex value)
+ # [ ] = +e +f
+ # ( ) = +i +j
+ # { } = +v +w
+ # Quote '' as +m (eMpty)
+ # Quote . as +_
+ # Quote .. as +__
+ # (Unused: ou)
+ %map = qw(
+ + ++
+ \0 +0
+ \t +t
+ \\ +b
+ \n +n
+ \r +r
+ / +z
+ ? +y
+ " +d
+ ' +q
+ * +a
+ < +l
+ > +g
+ : +k
+ ! +x
+ | +p
+ # +h
+ ; +s
+ = +e
+ , +c
+ [ +e
+ ( +i
+ { +v
+ ] +f
+ ) +j
+ } +w
+ );
+ # 1..32 128..255 = +XX (hex value)
+ map { $map{sprintf "%c",$_} = sprintf "+%02x",$_ } 1..32, 128..255;
+ # Default value = itself
+ map { $map{sprintf "%c",$_} ||= sprintf "%c",$_ } 0..255;
+ # Quote '' as +m (eMpty)
+ $stringmap{""} = "+m";
+ # Quote . as +_
+ $stringmap{"."} = "+_";
+ # Quote .. as +__
+ $stringmap{".."} = "+__";
+ # Set dir separator
+ eval 'use File::Spec; $sep = File::Spec->catfile("", "");';
+ $sep ||= '/';
+ }
+ # If $Global::max_file_length: Keep subdirs < $Global::max_file_length
+ my $self = shift;
+ my $quote = shift;
+ my @res = ();
+ if(not $sep) { init(); }
+
+ for my $rec_ref (@{$self->{'arg_list'}}) {
+ # If headers are used, sort by them.
+ # Otherwise keep the order from the command line.
+ my @header_indexes_sorted = header_indexes_sorted($#$rec_ref+1);
+ for my $n (@header_indexes_sorted) {
+ CORE::push(@res,
+ $Global::input_source_header{$n},
+ $quote ?
+ (
+ grep { $_ ne "\0noarg" } map {
+ my $s = $_;
+ # Quote + as ++
+ $s =~ s/(.)/$map{$1}/gs;
+ if($Global::max_file_length) {
+ # Keep each subdir shorter than the longest
+ # allowed file name
+ $s = substr($s,0,$Global::max_file_length);
+ }
+ $s; }
+ $rec_ref->[$n-1]->orig()
+ ) :
+ (
+ grep { $_ ne "\0noarg" } map {
+ my $s = $_;
+ # Quote / as +z and + as ++
+ $s =~ s/($sep|\+)/$map{$1}/gos;
+ if($Global::max_file_length) {
+ # Keep each subdir shorter than the longest
+ # allowed file name
+ $s = substr($s,0,$Global::max_file_length);
+ }
+ $s; }
+ $rec_ref->[$n-1]->orig()
+ )
+ );
+ }
+ }
+ return join $sep, map { $stringmap{$_} || $_ } @res;
+ }
+}
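+# A sketch of the unquoted mapping: args ("a/b", "") with
+# column-number headers become "1/a+zb/2/+m" - the / inside a value
+# turns into +z and the empty string into +m.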
+
+sub header_indexes_sorted($) {
+ # Sort headers first by number then by name.
+ # E.g.: 1a 1b 11a 11b
+ # Returns:
+ # Indexes of %Global::input_source_header sorted
+ my $max_col = shift;
+
+ no warnings 'numeric';
+ for my $col (1 .. $max_col) {
+ # Make sure the header is defined. If it is not: use column number
+ if(not defined $Global::input_source_header{$col}) {
+ $Global::input_source_header{$col} = $col;
+ }
+ }
+ my @header_indexes_sorted = sort {
+ # Sort headers numerically then asciibetically
+ $Global::input_source_header{$a} <=> $Global::input_source_header{$b}
+ or
+ $Global::input_source_header{$a} cmp $Global::input_source_header{$b}
+ } 1 .. $max_col;
+ return @header_indexes_sorted;
+}
+
+sub len($) {
+ # Uses:
+ # @opt::shellquote
+ # The length of the command line with args substituted
+ my $self = shift;
+ my $len = 0;
+ # Add length of the original command with no args
+ # Length of command w/ all replacement args removed
+ $len += $self->{'len'}{'noncontext'} + @{$self->{'command'}} -1;
+ ::debug("length", "noncontext + command: $len\n");
+ # MacOS has an overhead of 8 bytes per argument
+ my $darwin = ($^O eq "darwin") ? 8 : 0;
+ my $recargs = $self->number_of_recargs();
+ if($self->{'context_replace'}) {
+ # Context is duplicated for each arg
+ $len += $recargs * $self->{'len'}{'context'};
+ for my $replstring (keys %{$self->{'replacecount'}}) {
+ # If the replacement string occurs more than once: multiply its length
+ $len += $self->{'len'}{$replstring} *
+ $self->{'replacecount'}{$replstring};
+ ::debug("length", $replstring, " ", $self->{'len'}{$replstring}, "*",
+ $self->{'replacecount'}{$replstring}, "\n");
+ }
+ # echo 11 22 33 44 55 66 77 88 99 1010
+ # echo 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10
+ # 5 + ctxgrp*arg
+ ::debug("length", "Ctxgrp: ", $self->{'len'}{'contextgroups'},
+ " Groups: ", $self->{'len'}{'noncontextgroups'}, "\n");
+ # Add space between context groups
+ $len += ($recargs-1) * ($self->{'len'}{'contextgroups'});
+ if($darwin) {
+ $len += $recargs * $self->{'len'}{'contextgroups'} * $darwin;
+ }
+ } else {
+ # Each replacement string may occur several times
+ # Add the length for each time
+ $len += 1*$self->{'len'}{'context'};
+ ::debug("length", "context+noncontext + command: $len\n");
+ for my $replstring (keys %{$self->{'replacecount'}}) {
+ # (space between recargs + length of replacement)
+ # * number this replacement is used
+ $len += ($recargs -1 + $self->{'len'}{$replstring}) *
+ $self->{'replacecount'}{$replstring};
+ if($darwin) {
+ $len += ($recargs * $self->{'replacecount'}{$replstring}
+ * $darwin);
+ }
+ }
+ }
+ if(defined $Global::parallel_env) {
+ # If we are using --env, add the prefix for that, too.
+ $len += length $Global::parallel_env;
+ }
+ if($Global::quoting) {
+ # Pessimistic length if -q is set
+ # Worse than worst case: ' => "'" + " => '"'
+ # TODO can we count the number of expanding chars?
+ # and count them in arguments, too?
+ $len *= 3;
+ }
+ if(@opt::shellquote) {
+ # Pessimistic length if --shellquote is set
+ # Worse than worst case: ' => "'"
+ for(@opt::shellquote) {
+ $len *= 3;
+ }
+ $len *= 5;
+ }
+ if(@opt::sshlogin) {
+ # Pessimistic length if remote
+ # Worst case is BASE64 encoding 3 bytes -> 4 bytes
+ $len = int($len*4/3);
+ }
+ return $len;
+}
+
+sub replaced($) {
+ # Uses:
+ # $Global::quote_replace
+ # $Global::quoting
+ # Returns:
+ # $replaced = command with place holders replaced and prepended
+ my $self = shift;
+ if(not defined $self->{'replaced'}) {
+ # Don't quote arguments if the input is the full command line
+ my $quote_arg = ($Global::quote_replace and not $Global::quoting);
+ # or if ($opt::cat or $opt::fifo) as they use $PARALLEL_TMP
+ $quote_arg = ($opt::cat || $opt::fifo) ? 0 : $quote_arg;
+ $self->{'replaced'} = $self->
+ replace_placeholders($self->{'command'},$Global::quoting,
+ $quote_arg);
+ my $len = length $self->{'replaced'};
+ if ($len != $self->len()) {
+ ::debug("length", $len, " != ", $self->len(),
+ " ", $self->{'replaced'}, "\n");
+ } else {
+ ::debug("length", $len, " == ", $self->len(),
+ " ", $self->{'replaced'}, "\n");
+ }
+ }
+ return $self->{'replaced'};
+}
+
+sub replace_placeholders($$$$) {
+ # Replace foo{}bar with fooargbar
+ # Input:
+ # $targetref = command as shell words
+ # $quote = should everything be quoted?
+ # $quote_arg = should replaced arguments be quoted?
+ # Uses:
+ # @Arg::arg = arguments as strings to be use in {= =}
+ # Returns:
+ # @target with placeholders replaced
+ my $self = shift;
+ my $targetref = shift;
+ my $quote = shift;
+ my $quote_arg = shift;
+ my %replace;
+
+ # Token description:
+ # \0spc = unquoted space
+ # \0end = last token element
+ # \0ign = dummy token to be ignored
+ # \257<...\257> = replacement expression
+ # " " = quoted space, that splits -X group
+ # text = normal text - possibly part of -X group
+ my $spacer = 0;
+ my @tokens = grep { length $_ > 0 } map {
+ if(/^\257<|^ $/) {
+ # \257<...\257> or space
+ $_
+ } else {
+ # Split each space/tab into a token
+ split /(?=\s)|(?<=\s)/
+ }
+ }
+ # Split \257< ... \257> into own token
+ map { split /(?=\257<)|(?<=\257>)/ }
+ # Insert "\0spc" between every element
+ # This space should never be quoted
+ map { $spacer++ ? ("\0spc",$_) : $_ }
+ map { $_ eq "" ? "\0empty" : $_ }
+ @$targetref;
+
+ if(not @tokens) {
+ # @tokens is empty: Return empty array
+ return @tokens;
+ }
+ ::debug("replace", "Tokens ".join":",@tokens,"\n");
+ # Make it possible to use $arg[2] in {= =}
+ *Arg::arg = $self->{'arg_list_flat_orig'};
+ # Flat list:
+ # $self->{'arg_list'} = [ [Arg11, Arg12], [Arg21, Arg22], [Arg31, Arg32] ]
+ # $self->{'arg_list_flat'} = [ Arg11, Arg12, Arg21, Arg22, Arg31, Arg32 ]
+ if(not @{$self->{'arg_list_flat'}}) {
+ @{$self->{'arg_list_flat'}} = Arg->new("");
+ }
+ my $argref = $self->{'arg_list_flat'};
+ # Number of arguments - used for positional arguments
+ my $n = $#$argref+1;
+
+ # $self is actually a CommandLine-object,
+ # but it looks nice to be able to say {= $job->slot() =}
+ my $job = $self;
+ # @replaced = tokens with \257< \257> replaced
+ my @replaced;
+ if($self->{'context_replace'}) {
+ my @ctxgroup;
+ for my $t (@tokens,"\0end") {
+ # \0end = last token was end of tokens.
+ if($t eq "\t" or $t eq " " or $t eq "\0end" or $t eq "\0spc") {
+ # Context group complete: Replace in it
+ if(grep { /^\257</ } @ctxgroup) {
+ # Context group contains a replacement string:
+ # Copy once per arg
+ my $space = "\0ign";
+ for my $arg (@$argref) {
+ my $normal_replace;
+ # Push output
+ # Put unquoted space before each context group
+ # except the first
+ CORE::push @replaced, $space, map {
+ $a = $_;
+ if($a =~
+ s{\257<(-?\d+)?(.*)\257>}
+ {
+ if($1) {
+ # Positional replace
+ # Find the relevant arg and replace it
+ ($argref->[$1 > 0 ? $1-1 : $n+$1] ? # If defined: replace
+ $argref->[$1 > 0 ? $1-1 : $n+$1]->
+ replace($2,$quote_arg,$self)
+ : "");
+ } else {
+ # Normal replace
+ $normal_replace ||= 1;
+ ($arg ? $arg->replace($2,$quote_arg,$self) : "");
+ }
+ }sgxe) {
+ # Token is \257<..\257>
+ } else {
+ if($Global::escape_string_present) {
+ # Command line contains \257:
+ # Unescape it \257\256 => \257
+ $a =~ s/\257\256/\257/g;
+ }
+ }
+ $a
+ } @ctxgroup;
+ $normal_replace or last;
+ $space = "\0spc";
+ }
+ } else {
+ # Context group has no replacement string: Copy it once
+ CORE::push @replaced, map {
+ $Global::escape_string_present and s/\257\256/\257/g; $_;
+ } @ctxgroup;
+ }
+ # New context group
+ @ctxgroup=();
+ }
+ if($t eq "\0spc" or $t eq " ") {
+ CORE::push @replaced,$t;
+ } else {
+ CORE::push @ctxgroup,$t;
+ }
+ }
+ } else {
+ # @group = @token
+ # Replace in group
+ # Push output
+ # repquote = no if {} first on line, no if $quote, yes otherwise
+ for my $t (@tokens) {
+ if($t =~ /^\257</) {
+ my $space = "\0ign";
+ for my $arg (@$argref) {
+ my $normal_replace;
+ $a = $t;
+ $a =~
+ s{\257<(-?\d+)?(.*)\257>}
+ {
+ if($1) {
+ # Positional replace
+ # Find the relevant arg and replace it
+ ($argref->[$1 > 0 ? $1-1 : $n+$1] ?
+ # If defined: replace
+ $argref->[$1 > 0 ? $1-1 : $n+$1]->
+ replace($2,$quote_arg,$self)
+ : "");
+ } else {
+ # Normal replace
+ $normal_replace ||= 1;
+ ($arg ? $arg->replace($2,$quote_arg,$self) : "");
+ }
+ }sgxe;
+ CORE::push @replaced, $space, $a;
+ $normal_replace or last;
+ $space = "\0spc";
+ }
+ } else {
+ # No replacement
+ CORE::push @replaced, map {
+ $Global::escape_string_present and s/\257\256/\257/g; $_;
+ } $t;
+ }
+ }
+ }
+ *Arg::arg = [];
+ ::debug("replace","Replaced: ".join":",@replaced,"\n");
+
+ # Put tokens into groups that may be quoted.
+ my @quotegroup;
+ my @quoted;
+ for (map { $_ eq "\0empty" ? "" : $_ }
+ grep { $_ ne "\0ign" and $_ ne "\0noarg" and $_ ne "'\0noarg'" }
+ @replaced, "\0end") {
+ if($_ eq "\0spc" or $_ eq "\0end") {
+ # \0spc splits quotable groups
+ if($quote) {
+ if(@quotegroup) {
+ CORE::push @quoted, ::Q(join"",@quotegroup);
+ }
+ } else {
+ CORE::push @quoted, join"",@quotegroup;
+ }
+ @quotegroup = ();
+ } else {
+ CORE::push @quotegroup, $_;
+ }
+ }
+ ::debug("replace","Quoted: ".join":",@quoted,"\n");
+ return wantarray ? @quoted : "@quoted";
+}
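+# Example of the two modes with args 1 and 2: under context replace
+# (-X) the template "pre-{}-post" expands to "pre-1-post pre-2-post";
+# without context replace (-m) the context is kept once and the args
+# are inserted in the middle: "pre-1 2-post".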
+
+sub skip($) {
+ # Skip this job
+ my $self = shift;
+ $self->{'skip'} = 1;
+}
+
+
+package CommandLineQueue;
+
+sub new($) {
+ my $class = shift;
+ my $commandref = shift;
+ my $read_from = shift;
+ my $context_replace = shift || 0;
+ my $max_number_of_args = shift;
+ my $transfer_files = shift;
+ my $return_files = shift;
+ my $template_names = shift;
+ my $template_contents = shift;
+ my @unget = ();
+ my $posrpl;
+ my ($replacecount_ref, $len_ref);
+ my @command = @$commandref;
+ my $seq = 1;
+ # Replace replacement strings with {= perl expr =}
+ # '{=' 'perlexpr' '=}' => '{= perlexpr =}'
+ @command = merge_rpl_parts(@command);
+
+ # Protect matching inside {= perl expr =}
+ # by replacing {= and =} with \257< and \257>
+ # in options that can contain replacement strings:
+ # @command, --transferfile, --return,
+ # --tagstring, --workdir, --results
+ for(@command, @$transfer_files, @$return_files,
+ @$template_names, @$template_contents,
+ $opt::tagstring, $opt::workdir, $opt::results, $opt::retries,
+ @opt::filter) {
+ # Skip if undefined
+ defined($_) or next;
+ # Escape \257 => \257\256
+ $Global::escape_string_present += s/\257/\257\256/g;
+ # Needs to match rightmost left parens (Perl defaults to leftmost)
+ # to deal with: {={==} and {={==}=}
+ # Replace {= -> \257< and =} -> \257>
+ #
+ # Complex way to do:
+ # s/{=(.*)=}/\257<$1\257>/g
+ # which would not work
+ s[\Q$Global::parensleft\E # Match {=
+ # Match . unless the next string is {= or =}
+ # needed to force matching the shortest {= =}
+ ((?:(?! \Q$Global::parensleft\E|\Q$Global::parensright\E ).)*?)
+ \Q$Global::parensright\E ] # Match =}
+ {\257<$1\257>}gxs;
+ for my $rpl (sort { length $b <=> length $a } keys %Global::rpl) {
+ # Replace long --rpl's before short ones, as a short may be a
+ # substring of a long:
+ # --rpl '% s/a/b/' --rpl '%% s/b/a/'
+ #
+ # Replace the shorthand string (--rpl)
+ # with the {= perl expr =}
+ #
+ # Avoid searching for shorthand strings inside existing {= perl expr =}
+ #
+ # Replace $$1 in {= perl expr =} with groupings in shorthand string
+ #
+ # --rpl '{/(\.\S+)/(\.\S+)} s/$$1/$$2/g;'
+ # echo {/.tar/.gz} ::: UU.tar.gz
+ my ($prefix,$grp_regexp,$postfix) =
+ $rpl =~ /^( [^(]* ) # Prefix - e.g. {%%
+ ( \(.*\) )? # Group capture regexp - e.g (.*)
+ ( [^)]* )$ # Postfix - e.g }
+ /xs;
+ $grp_regexp ||= '';
+ my $rplval = $Global::rpl{$rpl};
+ while(s{( (?: ^|\257> ) (?: [^\257]*|[\257][^<>] )*? )
+ # Don't replace after \257 unless \257>
+ \Q$prefix\E $grp_regexp \Q$postfix\E}
+ {
+ # The start remains the same
+ my $unchanged = $1;
+ # Dummy entry to start at 1.
+ my @grp = (1);
+ # $2 = first ()-group in $grp_regexp
+ # Put $2 in $grp[1], Put $3 in $grp[2]
+ # so first ()-group in $grp_regexp is $grp[1];
+ for(my $i = 2; defined $grp[$#grp]; $i++) {
+ push @grp, eval '$'.$i;
+ }
+ my $rv = $rplval;
+ # replace $$1 with $_pAr_gRp1, $$2 with $_pAr_gRp2
+ # in the code to be executed
+ $rv =~ s/\$\$ (\d+)/\$_pAr_gRp$1/gx;
+ # prepend with $_pAr_gRp1 = perlquote($1),
+ my $set_args = "";
+ for(my $i = 1;defined $grp[$i]; $i++) {
+ $set_args .= "\$_pAr_gRp$i = \"" .
+ ::perl_quote_scalar($grp[$i]) . "\";";
+ }
+ $unchanged . "\257<" . $set_args . $rv . "\257>"
+ }gxes) {
+ }
+ # Do the same for the positional replacement strings
+ $posrpl = $rpl;
+ if($posrpl =~ s/^\{//) {
+ # Only do this if the shorthand start with {
+ $prefix=~s/^\{//;
+ # Don't replace after \257 unless \257>
+ while(s{( (?: ^|\257> ) (?: [^\257]*|[\257][^<>] )*? )
+ \{(-?\d+) \s* \Q$prefix\E $grp_regexp \Q$postfix\E}
+ {
+ # The start remains the same
+ my $unchanged = $1;
+ my $position = $2;
+ # Dummy entry to start at 1.
+ my @grp = (1);
+ # $3 = first ()-group in $grp_regexp
+ # Put $3 in $grp[1], Put $4 in $grp[2]
+ # so first ()-group in $grp_regexp is $grp[1];
+ for(my $i = 3; defined $grp[$#grp]; $i++) {
+ push @grp, eval '$'.$i;
+ }
+ my $rv = $rplval;
+ # replace $$1 with $_pAr_gRp1, $$2 with $_pAr_gRp2
+ # in the code to be executed
+ $rv =~ s/\$\$ (\d+)/\$_pAr_gRp$1/gx;
+ # prepend with $_pAr_gRp1 = perlquote($1),
+ my $set_args = "";
+ for(my $i = 1;defined $grp[$i]; $i++) {
+ $set_args .= "\$_pAr_gRp$i = \"" .
+ ::perl_quote_scalar($grp[$i]) . "\";";
+ }
+ $unchanged . "\257<" . $position . $set_args . $rv . "\257>"
+ }gxes) {
+ }
+ }
+ }
+ }
+ # Add {} if no replacement strings in @command
+ ($replacecount_ref, $len_ref, @command) =
+ replacement_counts_and_lengths($transfer_files, $return_files,
+ $template_names, $template_contents,
+ @command);
+ if("@command" =~ /^[^ \t\n=]*\257</) {
+ # Replacement string is (part of) the command (and not just
+ # argument or variable definition V1={})
+ # E.g. parallel {}, parallel my_{= s/_//=}, parallel {2}
+ # Do no quote (Otherwise it will fail if the input contains spaces)
+ $Global::quote_replace = 0;
+ }
+
+ if($opt::sqlmaster and $Global::sql->append()) {
+ $seq = $Global::sql->max_seq() + 1;
+ }
+
+ return bless {
+ ('unget' => \@unget,
+ 'command' => \@command,
+ 'replacecount' => $replacecount_ref,
+ 'arg_queue' => RecordQueue->new($read_from,$opt::colsep),
+ 'context_replace' => $context_replace,
+ 'len' => $len_ref,
+ 'max_number_of_args' => $max_number_of_args,
+ 'size' => undef,
+ 'transfer_files' => $transfer_files,
+ 'return_files' => $return_files,
+ 'template_names' => $template_names,
+ 'template_contents' => $template_contents,
+ 'seq' => $seq,
+ )
+ }, ref($class) || $class;
+}
+
+sub merge_rpl_parts($) {
+ # '{=' 'perlexpr' '=}' => '{= perlexpr =}'
+ # Input:
+ # @in = the @command as given by the user
+ # Uses:
+ # $Global::parensleft
+ # $Global::parensright
+ # Returns:
+ # @command with parts merged to keep {= and =} as one
+ my @in = @_;
+ my @out;
+ my $l = quotemeta($Global::parensleft);
+ my $r = quotemeta($Global::parensright);
+
+ while(@in) {
+ my $s = shift @in;
+ $_ = $s;
+ # Remove matching (right most) parens
+ while(s/(.*)$l.*?$r/$1/os) {}
+ if(/$l/o) {
+ # Missing right parens
+ while(@in) {
+ $s .= " ".shift @in;
+ $_ = $s;
+ while(s/(.*)$l.*?$r/$1/os) {}
+ if(not /$l/o) {
+ last;
+ }
+ }
+ }
+ push @out, $s;
+ }
+ return @out;
+}
+
+sub replacement_counts_and_lengths($$@) {
+ # Count the number of different replacement strings.
+ # Find the lengths of context for context groups and non-context
+ # groups.
+ # If no {} found in @command: add it to @command
+ #
+ # Input:
+ # \@transfer_files = array of filenames to transfer
+ # \@return_files = array of filenames to return
+ # \@template_names = array of names to copy to
+ # \@template_contents = array of contents to write
+ # @command = command template
+ # Output:
+ # \%replacecount, \%len, @command
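+    #
+    # Example (illustrative): @command = ('echo', "\257<\257>") - the
+    # internal encoding of 'echo {}' - gives %replacecount = ("" => 1),
+    # context length 0, non-context length 4 and 1 context group.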
+ my $transfer_files = shift;
+ my $return_files = shift;
+ my $template_names = shift;
+ my $template_contents = shift;
+ my @command = @_;
+ my (%replacecount,%len);
+ my $sum = 0;
+ while($sum == 0) {
+ # Count how many times each replacement string is used
+ my @cmd = @command;
+ my $contextlen = 0;
+ my $noncontextlen = 0;
+ my $contextgroups = 0;
+ for my $c (@cmd) {
+ while($c =~ s/ \257<( (?: [^\257]*|[\257][^<>] )*?)\257> /\000/xs) {
+ # %replacecount = { "perlexpr" => number of times seen }
+ # e.g { "s/a/b/" => 2 }
+ $replacecount{$1}++;
+ $sum++;
+ }
+ # Measure the length of the context around the {= perl expr =}
+ # Use that {=...=} has been replaced with \000 above
+ # So there is no need to deal with \257<
+ while($c =~ s/ (\S*\000\S*) //xs) {
+ my $w = $1;
+ $w =~ tr/\000//d; # Remove all \000's
+ $contextlen += length($w);
+ $contextgroups++;
+ }
+ # All {= perl expr =} have been removed: The rest is non-context
+ $noncontextlen += length $c;
+ }
+ for(@$transfer_files, @$return_files,
+ @$template_names, @$template_contents,
+ @opt::filter,
+ $opt::tagstring, $opt::workdir, $opt::results, $opt::retries) {
+ # Options that can contain replacement strings
+ defined($_) or next;
+ my $t = $_;
+ while($t =~ s/ \257<( (?: [^\257]*|[\257][^<>] )* )\257> //xs) {
+ # %replacecount = { "perlexpr" => number of times seen }
+ # e.g { "$_++" => 2 }
+ # But for tagstring we just need to mark it as seen
+ $replacecount{$1} ||= 1;
+ }
+ }
+ if($opt::bar) {
+	# If the command does not contain {}, force it to be computed
+	# anyway, because it is used by --bar
+ $replacecount{""} ||= 1;
+ }
+
+ $len{'context'} = 0+$contextlen;
+ $len{'noncontext'} = $noncontextlen;
+ $len{'contextgroups'} = $contextgroups;
+ $len{'noncontextgroups'} = @cmd-$contextgroups;
+ ::debug("length", "@command Context: ", $len{'context'},
+ " Non: ", $len{'noncontext'}, " Ctxgrp: ", $len{'contextgroups'},
+ " NonCtxGrp: ", $len{'noncontextgroups'}, "\n");
+ if($sum == 0) {
+ if(not @command) {
+ # Default command = {}
+ @command = ("\257<\257>");
+ } elsif(($opt::pipe or $opt::pipepart)
+ and not $opt::fifo and not $opt::cat) {
+ # With --pipe / --pipe-part you can have no replacement
+ last;
+ } else {
+ # Append {} to the command if there are no {...}'s and no {=...=}
+ push @command, ("\257<\257>");
+ }
+ }
+ }
+ return(\%replacecount,\%len,@command);
+}
+
+sub get($) {
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ my $cmd_line = shift @{$self->{'unget'}};
+ return ($cmd_line);
+ } else {
+ if($opt::sqlworker) {
+ # Get the sequence number from the SQL table
+ $self->set_seq($SQL::next_seq);
+ # Get the command from the SQL table
+ $self->{'command'} = $SQL::command_ref;
+ my @command;
+ # Recompute replace counts based on the read command
+ ($self->{'replacecount'},
+ $self->{'len'}, @command) =
+ replacement_counts_and_lengths($self->{'transfer_files'},
+ $self->{'return_files'},
+ $self->{'template_name'},
+ $self->{'template_contents'},
+ @$SQL::command_ref);
+ if("@command" =~ /^[^ \t\n=]*\257</) {
+ # Replacement string is (part of) the command (and not just
+ # argument or variable definition V1={})
+ # E.g. parallel {}, parallel my_{= s/_//=}, parallel {2}
+	    # Do not quote (otherwise it will fail if the input contains spaces)
+ $Global::quote_replace = 0;
+ }
+ }
+
+ my $cmd_line = CommandLine->new($self->seq(),
+ $self->{'command'},
+ $self->{'arg_queue'},
+ $self->{'context_replace'},
+ $self->{'max_number_of_args'},
+ $self->{'transfer_files'},
+ $self->{'return_files'},
+ $self->{'template_names'},
+ $self->{'template_contents'},
+ $self->{'replacecount'},
+ $self->{'len'},
+ );
+ $cmd_line->populate();
+ ::debug("run","cmd_line->number_of_args ",
+ $cmd_line->number_of_args(), "\n");
+ if(not $Global::no_more_input and ($opt::pipe or $opt::pipepart)) {
+ if($cmd_line->replaced() eq "") {
+ # Empty command - pipe requires a command
+ ::error(255,"--pipe/--pipepart must have a command to pipe ".
+ "into (e.g. 'cat').");
+ }
+ } elsif($cmd_line->number_of_args() == 0) {
+ # We did not get more args - maybe at EOF string?
+ return undef;
+ }
+ $self->set_seq($self->seq()+1);
+ return $cmd_line;
+ }
+}
+
+sub unget($) {
+ my $self = shift;
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}}) &&
+ $self->{'arg_queue'}->empty();
+ ::debug("run", "CommandLineQueue->empty $empty");
+ return $empty;
+}
+
+sub seq($) {
+ my $self = shift;
+ return $self->{'seq'};
+}
+
+sub set_seq($$) {
+ my $self = shift;
+ $self->{'seq'} = shift;
+}
+
+sub quote_args($) {
+ my $self = shift;
+    # If there is no command: emulate |bash
+ return $self->{'command'};
+}
+
+
+package Limits::Command;
+
+# Maximal command line length (for -m and -X)
+sub max_length($) {
+ # Find the max_length of a command line and cache it
+ # Returns:
+ # number of chars on the longest command line allowed
+ if(not $Limits::Command::line_max_len) {
+ # Disk cache of max command line length
+ my $len_cache = $Global::cache_dir . "/tmp/sshlogin/" . ::hostname() .
+ "/linelen";
+ my $cached_limit;
+ local $/ = undef;
+ if(open(my $fh, "<", $len_cache)) {
+ $cached_limit = <$fh>;
+ $cached_limit || ::warning("Invalid content in $len_cache");
+ close $fh;
+ }
+ if(not $cached_limit) {
+ $cached_limit = real_max_length();
+ # If $HOME is write protected: Do not fail
+ my $dir = ::dirname($len_cache);
+ -d $dir or eval { File::Path::mkpath($dir); };
+ open(my $fh, ">", $len_cache.$$);
+ print $fh $cached_limit;
+ close $fh;
+	    rename($len_cache.$$, $len_cache) || ::die_bug("rename cache file");
+ }
+ $Limits::Command::line_max_len = tmux_length($cached_limit);
+ }
+ return int($Limits::Command::line_max_len);
+}
+
+sub real_max_length() {
+ # Find the max_length of a command line
+ # Returns:
+ # The maximal command line length with 1 byte arguments
+ # return find_max(" c");
+ return find_max("c");
+}
+
+sub find_max($) {
+ my $string = shift;
+ # This is slow on Cygwin, so give Cygwin users a warning
+ if($^O eq "cygwin" or $^O eq "msys") {
+ ::warning("Finding the maximal command line length. ".
+ "This may take up to 1 minute.")
+ }
+ # Use an upper bound of 100 MB if the shell allows for infinite
+ # long lengths
+ my $upper = 100_000_000;
+ my $lower;
+ # 1000 is supported everywhere, so the search can start anywhere 1..999
+ # 324 makes the search much faster on Cygwin, so let us use that
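+    # Illustrative run: 324 is quadrupled to 1296, 5184, 20736, ...
+    # until a length is rejected; binary_find_max() then searches
+    # between $len/16 and the rejected $len.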
+ my $len = 324;
+ do {
+ if($len > $upper) { return $len };
+ $lower = $len;
+ $len *= 4;
+ ::debug("init", "Maxlen: $lower<$len<$upper(".($upper-$lower)."): ");
+ } while (is_acceptable_command_line_length($len,$string));
+ # Then search for the actual max length between
+ # last successful length ($len/16) and upper bound
+ return binary_find_max(int($len/16),$len,$string);
+}
+
+
+# Prototype forwarding
+sub binary_find_max($$$);
+sub binary_find_max($$$) {
+ # Given a lower and upper bound find the max (length or args) of a
+ # command line
+ # Returns:
+ # number of chars on the longest command line allowed
+ my ($lower, $upper, $string) = (@_);
+ if($lower == $upper
+ or $lower == $upper-1
+ or $lower/$upper > 0.99) {
+ # $lower is +- 1 or within 1%: Don't search more
+ return $lower;
+ }
+ # Unevenly split binary search which is faster for Microsoft Windows.
+ # Guessing too high is cheap. Guessing too low is expensive.
+ my $split = ($^O eq "cygwin" or $^O eq "msys") ? 0.93 : 0.5;
+ my $middle = int (($upper-$lower)*$split + $lower);
+ ::debug("init", "Maxlen: $lower<$middle<$upper(".($upper-$lower)."): ");
+ if (is_acceptable_command_line_length($middle,$string)) {
+ return binary_find_max($middle,$upper,$string);
+ } else {
+ return binary_find_max($lower,$middle,$string);
+ }
+}
+
+{
+ my $prg;
+
+ sub is_acceptable_command_line_length($$) {
+ # Test if a command line of this length can run
+ # in the current environment
+ # If the string is " x" it tests how many args are allowed
+ # Returns:
+ # 0 if the command line length is too long
+ # 1 otherwise
+ my $len = shift;
+ my $string = shift;
+ if($Global::parallel_env) {
+ $len += length $Global::parallel_env;
+ }
+ # Force using non-built-in command
+ $prg ||= ::which("echo") || ::error(255,'echo must be in $PATH');
+ my $l = length ::qqx("$prg ".${string}x(($len-1-length $prg)/length $string));
+ if($l < $len/2) {
+ # The command returned OK, but did not output $len chars
+ # => this failed (Centos3 does this craziness)
+ return 0
+ }
+ ::debug("init", "$len=$?\n");
+ return not $?;
+ }
+}
+
+sub tmux_length($) {
+ # If $opt::tmux set, find the limit for tmux
+ # tmux 1.8 has a 2kB limit
+ # tmux 1.9 has a 16kB limit
+ # tmux 2.0 has a 16kB limit
+ # tmux 2.1 has a 16kB limit
+ # tmux 2.2 has a 16kB limit
+ # Input:
+ # $len = maximal command line length
+ # Returns:
+ # $tmux_len = maximal length runable in tmux
+ local $/ = "\n";
+ my $len = shift;
+ if($opt::tmux) {
+ $ENV{'PARALLEL_TMUX'} ||= "tmux";
+ if(not ::which($ENV{'PARALLEL_TMUX'})) {
+ ::error(255,$ENV{'PARALLEL_TMUX'}." not found in \$PATH.");
+ }
+ my @out;
+ for my $l (1, 2020, 16320, 30000, $len) {
+ my $tmpfile = ::tmpname("tms");
+ my $qtmp = ::Q($tmpfile);
+ my $tmuxcmd = $ENV{'PARALLEL_TMUX'}.
+ " -S $qtmp new-session -d -n echo $l".
+ ("t"x$l). " && echo $l; rm -f $qtmp";
+ push @out, ::qqx($tmuxcmd);
+ ::rm($tmpfile);
+ }
+ ::debug("tmux","tmux-out ",@out);
+ chomp @out;
+	# The argument is given 3 times on the command line
+ # and the tmux wrapping is around 30 chars
+ # (29 for tmux1.9, 33 for tmux1.8)
+ my $tmux_len = ::max(@out);
+ $len = ::min($len,int($tmux_len/4-33));
+ ::debug("tmux","tmux-length ",$len);
+ }
+ return $len;
+}
+
+
+package RecordQueue;
+
+sub new($) {
+ my $class = shift;
+ my $fhs = shift;
+ my $colsep = shift;
+ my @unget = ();
+ my $arg_sub_queue;
+ if($opt::sqlworker) {
+ # Open SQL table
+ $arg_sub_queue = SQLRecordQueue->new();
+ } elsif(defined $colsep) {
+ # Open one file with colsep or CSV
+ $arg_sub_queue = RecordColQueue->new($fhs);
+ } else {
+ # Open one or more files if multiple -a
+ $arg_sub_queue = MultifileQueue->new($fhs);
+ }
+ return bless {
+ 'unget' => \@unget,
+ 'arg_number' => 0,
+ 'arg_sub_queue' => $arg_sub_queue,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ # Returns:
+ # reference to array of Arg-objects
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ $self->{'arg_number'}++;
+ # Flush cached computed replacements in Arg-objects
+ # To fix: parallel --bar echo {%} ::: a b c ::: d e f
+ my $ret = shift @{$self->{'unget'}};
+ if($ret) {
+ map { $_->flush_cache() } @$ret;
+ }
+ return $ret;
+ }
+ my $ret = $self->{'arg_sub_queue'}->get();
+ if($ret) {
+ if(grep { index($_->orig(),"\0") > 0 } @$ret) {
+ # Allow for \0 in position 0 because GNU Parallel uses "\0noarg"
+ # to mean no-string
+ ::warning("A NUL character in the input was replaced with \\0.",
+ "NUL cannot be passed through in the argument list.",
+ "Did you mean to use the --null option?");
+ for(grep { index($_->orig(),"\0") > 0 } @$ret) {
+ # Replace \0 with \\0
+ my $a = $_->orig();
+ $a =~ s/\0/\\0/g;
+ $_->set_orig($a);
+ }
+ }
+ if(defined $Global::max_number_of_args
+ and $Global::max_number_of_args == 0) {
+ ::debug("run", "Read 1 but return 0 args\n");
+ # \0noarg => nothing (not the empty string)
+ map { $_->set_orig("\0noarg"); } @$ret;
+ }
+ # Flush cached computed replacements in Arg-objects
+ # To fix: parallel --bar echo {%} ::: a b c ::: d e f
+ map { $_->flush_cache() } @$ret;
+ }
+ return $ret;
+}
+
+sub unget($) {
+ my $self = shift;
+ ::debug("run", "RecordQueue-unget\n");
+ $self->{'arg_number'} -= @_;
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}}) &&
+ $self->{'arg_sub_queue'}->empty();
+ ::debug("run", "RecordQueue->empty $empty");
+ return $empty;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ for my $record (@{$self->{'unget'}}) {
+ for my $arg (@$record) {
+ $arg->flush_cache();
+ }
+ }
+ $self->{'arg_sub_queue'}->flush_cache();
+}
+
+sub arg_number($) {
+ my $self = shift;
+ return $self->{'arg_number'};
+}
+
+
+package RecordColQueue;
+
+sub new($) {
+ my $class = shift;
+ my $fhs = shift;
+ my @unget = ();
+ my $arg_sub_queue = MultifileQueue->new($fhs);
+ return bless {
+ 'unget' => \@unget,
+ 'arg_sub_queue' => $arg_sub_queue,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ # Returns:
+ # reference to array of Arg-objects
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ if($self->{'arg_sub_queue'}->empty()) {
+ return undef;
+ }
+ my $in_record = $self->{'arg_sub_queue'}->get();
+ if(defined $in_record) {
+ my @out_record = ();
+ for my $arg (@$in_record) {
+ ::debug("run", "RecordColQueue::arg $arg\n");
+ my $line = $arg->orig();
+ ::debug("run", "line='$line'\n");
+ if($line ne "") {
+ if($opt::csv) {
+ # Parse CSV and put it into a record
+ chomp $line;
+ if(not $Global::csv->parse($line)) {
+ die "CSV has unexpected format: ^$line^";
+ }
+ for($Global::csv->fields()) {
+ push @out_record, Arg->new($_);
+ }
+ } else {
+ # Split --colsep into record
+ for my $s (split /$opt::colsep/o, $line, -1) {
+ push @out_record, Arg->new($s);
+ }
+ }
+ } else {
+ push @out_record, Arg->new("");
+ }
+ }
+ return \@out_record;
+ } else {
+ return undef;
+ }
+}
+
+sub unget($) {
+ my $self = shift;
+ ::debug("run", "RecordColQueue-unget '@_'\n");
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}}) &&
+ $self->{'arg_sub_queue'}->empty();
+ ::debug("run", "RecordColQueue->empty $empty");
+ return $empty;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ for my $arg (@{$self->{'unget'}}) {
+ $arg->flush_cache();
+ }
+ $self->{'arg_sub_queue'}->flush_cache();
+}
+
+
+package SQLRecordQueue;
+
+sub new($) {
+ my $class = shift;
+ my @unget = ();
+ return bless {
+ 'unget' => \@unget,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ # Returns:
+ # reference to array of Arg-objects
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ return $Global::sql->get_record();
+}
+
+sub unget($) {
+ my $self = shift;
+ ::debug("run", "SQLRecordQueue-unget '@_'\n");
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ if(@{$self->{'unget'}}) { return 0; }
+ my $get = $self->get();
+ if(defined $get) {
+ $self->unget($get);
+ }
+ my $empty = not $get;
+ ::debug("run", "SQLRecordQueue->empty $empty");
+ return $empty;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ for my $record (@{$self->{'unget'}}) {
+ for my $arg (@$record) {
+ $arg->flush_cache();
+ }
+ }
+}
+
+
+package MultifileQueue;
+
+@Global::unget_argv=();
+
+sub new($$) {
+ my $class = shift;
+ my $fhs = shift;
+ for my $fh (@$fhs) {
+ if(-t $fh and -t ($Global::status_fd || *STDERR)) {
+ ::warning(
+ "Input is read from the terminal. You are either an expert",
+ "(in which case: YOU ARE AWESOME!) or maybe you forgot",
+ "::: or :::: or -a or to pipe data into parallel. If so",
+ "consider going through the tutorial: man parallel_tutorial",
+ "Press CTRL-D to exit.");
+ }
+ }
+ return bless {
+ 'unget' => \@Global::unget_argv,
+ 'fhs' => $fhs,
+ 'arg_matrix' => undef,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ my $self = shift;
+ if($opt::link) {
+ return $self->link_get();
+ } else {
+ return $self->nest_get();
+ }
+}
+
+sub unget($) {
+ my $self = shift;
+ ::debug("run", "MultifileQueue-unget '@_'\n");
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @Global::unget_argv) &&
+ not @{$self->{'unget'}};
+ for my $fh (@{$self->{'fhs'}}) {
+ $empty &&= eof($fh);
+ }
+ ::debug("run", "MultifileQueue->empty $empty ");
+ return $empty;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ for my $record (@{$self->{'unget'}}, @{$self->{'arg_matrix'}}) {
+ for my $arg (@$record) {
+ $arg->flush_cache();
+ }
+ }
+}
+
+sub link_get($) {
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ my @record = ();
+ my $prepend;
+ my $empty = 1;
+ for my $i (0..$#{$self->{'fhs'}}) {
+ my $fh = $self->{'fhs'}[$i];
+ my $arg = read_arg_from_fh($fh);
+ if(defined $arg) {
+ # Record $arg for recycling at end of file
+ push @{$self->{'arg_matrix'}[$i]}, $arg;
+ push @record, $arg;
+ $empty = 0;
+ } else {
+ ::debug("run", "EOA ");
+ # End of file: Recycle arguments
+ push @{$self->{'arg_matrix'}[$i]}, shift @{$self->{'arg_matrix'}[$i]};
+ # return last @{$args->{'args'}{$fh}};
+ push @record, @{$self->{'arg_matrix'}[$i]}[-1];
+ }
+ }
+ if($empty) {
+ return undef;
+ } else {
+ return \@record;
+ }
+}
+
+sub nest_get($) {
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ my @record = ();
+ my $prepend;
+ my $empty = 1;
+ my $no_of_inputsources = $#{$self->{'fhs'}} + 1;
+ if(not $self->{'arg_matrix'}) {
+ # Initialize @arg_matrix with one arg from each file
+ # read one line from each file
+ my @first_arg_set;
+ my $all_empty = 1;
+ for (my $fhno = 0; $fhno < $no_of_inputsources ; $fhno++) {
+ my $arg = read_arg_from_fh($self->{'fhs'}[$fhno]);
+ if(defined $arg) {
+ $all_empty = 0;
+ }
+ $self->{'arg_matrix'}[$fhno][0] = $arg || Arg->new("");
+ push @first_arg_set, $self->{'arg_matrix'}[$fhno][0];
+ }
+ if($all_empty) {
+ # All filehandles were at eof or eof-string
+ return undef;
+ }
+ return [@first_arg_set];
+ }
+
+    # Treat the case with one input source specially. For multiple
+ # input sources we need to remember all previously read values to
+ # generate all combinations. But for one input source we can
+ # forget the value after first use.
+ if($no_of_inputsources == 1) {
+ my $arg = read_arg_from_fh($self->{'fhs'}[0]);
+ if(defined($arg)) {
+ return [$arg];
+ }
+ return undef;
+ }
+ for (my $fhno = $no_of_inputsources - 1; $fhno >= 0; $fhno--) {
+ if(eof($self->{'fhs'}[$fhno])) {
+ next;
+ } else {
+ # read one
+ my $arg = read_arg_from_fh($self->{'fhs'}[$fhno]);
+ defined($arg) || next; # If we just read an EOF string: Treat this as EOF
+ my $len = $#{$self->{'arg_matrix'}[$fhno]} + 1;
+ $self->{'arg_matrix'}[$fhno][$len] = $arg;
+ # make all new combinations
+ my @combarg = ();
+ for (my $fhn = 0; $fhn < $no_of_inputsources; $fhn++) {
+ push(@combarg, [0, $#{$self->{'arg_matrix'}[$fhn]}],
+ # Is input source --link'ed to the next?
+ $opt::linkinputsource[$fhn+1]);
+ }
+ # Find only combinations with this new entry
+ $combarg[2*$fhno] = [$len,$len];
+ # map combinations
+ # [ 1, 3, 7 ], [ 2, 4, 1 ]
+ # =>
+ # [ m[0][1], m[1][3], m[2][7] ], [ m[0][2], m[1][4], m[2][1] ]
+ my @mapped;
+ for my $c (expand_combinations(@combarg)) {
+ my @a;
+ for my $n (0 .. $no_of_inputsources - 1 ) {
+ push @a, $self->{'arg_matrix'}[$n][$$c[$n]];
+ }
+ push @mapped, \@a;
+ }
+ # append the mapped to the ungotten arguments
+ push @{$self->{'unget'}}, @mapped;
+ # get the first
+ if(@mapped) {
+ return shift @{$self->{'unget'}};
+ }
+ }
+ }
+ # all are eof or at EOF string; return from the unget queue
+ return shift @{$self->{'unget'}};
+}
+
+{
+ my $cr_count = 0;
+ my $nl_count = 0;
+ my $dos_crnl_determined;
+ sub read_arg_from_fh($) {
+ # Read one Arg from filehandle
+ # Returns:
+ # Arg-object with one read line
+ # undef if end of file
+ my $fh = shift;
+ my $prepend;
+ my $arg;
+ my $half_record = 0;
+ do {{
+	    # This construct makes it 10% faster
+ if(not defined ($arg = <$fh>)) {
+ if(defined $prepend) {
+ return Arg->new($prepend);
+ } else {
+ return undef;
+ }
+ }
+ if(not $dos_crnl_determined and not defined $opt::d) {
+ # Warn if input has CR-NL and -d is not set
+ if($arg =~ /\r$/) {
+ $cr_count++;
+ } else {
+ $nl_count++;
+ }
+ if($cr_count == 3 or $nl_count == 3) {
+ $dos_crnl_determined = 1;
+ if($nl_count == 0 and $cr_count == 3) {
+ ::warning('The first three values end in CR-NL. '.
+ 'Consider using -d "\r\n"');
+ }
+ }
+ }
+ if($opt::csv) {
+ # We need to read a full CSV line.
+ if(($arg =~ y/"/"/) % 2 ) {
+ # The number of " on the line is uneven:
+ # If we were in a half_record => we have a full record now
+ # If we were outside a half_record =>
+ # we are in a half record now
+ $half_record = not $half_record;
+ }
+ if($half_record) {
+ # CSV half-record with quoting:
+ # col1,"col2 2""x3"" board newline <-this one
+ # cont",col3
+ $prepend .= $arg;
+ redo;
+ } else {
+ # Now we have a full CSV record
+ }
+ }
+ # Remove delimiter
+ chomp $arg;
+ if($Global::end_of_file_string and
+ $arg eq $Global::end_of_file_string) {
+ # Ignore the rest of input file
+ close $fh;
+ ::debug("run", "EOF-string ($arg) met\n");
+ if(defined $prepend) {
+ return Arg->new($prepend);
+ } else {
+ return undef;
+ }
+ }
+ if(defined $prepend) {
+ $arg = $prepend.$arg; # For line continuation
+ undef $prepend;
+ }
+ if($Global::ignore_empty) {
+ if($arg =~ /^\s*$/) {
+ redo; # Try the next line
+ }
+ }
+ if($Global::max_lines) {
+ if($arg =~ /\s$/) {
+ # Trailing space => continued on next line
+ $prepend = $arg;
+ redo;
+ }
+ }
+ }} while (1 == 0); # Dummy loop {{}} for redo
+ if(defined $arg) {
+ return Arg->new($arg);
+ } else {
+ ::die_bug("multiread arg undefined");
+ }
+ }
+}
+
+# Prototype forwarding
+sub expand_combinations(@);
+sub expand_combinations(@) {
+ # Input:
+ # ([xmin,xmax], [ymin,ymax], ...)
+ # Returns: ([x,y,...],[x,y,...])
+ # where xmin <= x <= xmax and ymin <= y <= ymax
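+    #
+    # Example (illustrative; 0 = the column is not linked):
+    #   expand_combinations([0,1], 0, [0,1])
+    #   => ([0,0], [0,1], [1,0], [1,1])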
+ my $minmax_ref = shift;
+ my $link = shift; # This is linked to the next input source
+ my $xmin = $$minmax_ref[0];
+ my $xmax = $$minmax_ref[1];
+ my @p;
+ if(@_) {
+ my @rest = expand_combinations(@_);
+ if($link) {
+ # Linked to next col with --link/:::+/::::+
+ # TODO BUG does not wrap values if not same number of vals
+ push(@p, map { [$$_[0], @$_] }
+ grep { $xmin <= $$_[0] and $$_[0] <= $xmax } @rest);
+ } else {
+ # If there are more columns: Compute those recursively
+ for(my $x = $xmin; $x <= $xmax; $x++) {
+ push @p, map { [$x, @$_] } @rest;
+ }
+ }
+ } else {
+ for(my $x = $xmin; $x <= $xmax; $x++) {
+ push @p, [$x];
+ }
+ }
+ return @p;
+}
+
+
+package Arg;
+
+sub new($) {
+ my $class = shift;
+ my $orig = shift;
+ my @hostgroups;
+ if($opt::hostgroups) {
+ if($orig =~ s:@(.+)::) {
+ # We found hostgroups on the arg
+ @hostgroups = split(/\+|,/, $1);
+ if(not grep { defined $Global::hostgroups{$_} } @hostgroups) {
+ # This hostgroup is not defined using -S
+ # Add it
+ ::warning("Adding hostgroups: @hostgroups");
+ # Add sshlogin
+ for(grep { not defined $Global::hostgroups{$_} } @hostgroups) {
+ my $sshlogin = SSHLogin->new($_);
+ my $sshlogin_string = $sshlogin->string();
+ $Global::host{$sshlogin_string} = $sshlogin;
+ $Global::hostgroups{$sshlogin_string} = 1;
+ }
+ }
+ } else {
+ # No hostgroup on the arg => any hostgroup
+ @hostgroups = (keys %Global::hostgroups);
+ }
+ }
+ return bless {
+ 'orig' => $orig,
+ 'hostgroups' => \@hostgroups,
+ }, ref($class) || $class;
+}
+
+sub Q($) {
+ # Q alias for ::shell_quote_scalar
+ my $ret = ::Q($_[0]);
+ no warnings 'redefine';
+ *Q = \&::Q;
+ return $ret;
+}
+
+sub pQ($) {
+ # pQ alias for ::perl_quote_scalar
+ my $ret = ::pQ($_[0]);
+ no warnings 'redefine';
+ *pQ = \&::pQ;
+ return $ret;
+}
+
+sub hash($) {
+    $Global::use{"B"} ||= eval "use B; 1;";
+ B::hash(@_);
+}
+
+sub total_jobs() {
+ return $Global::JobQueue->total_jobs();
+}
+
+{
+ my %perleval;
+ my $job;
+ sub skip() {
+ # shorthand for $job->skip();
+ $job->skip();
+ }
+ sub slot() {
+ # shorthand for $job->slot();
+ $job->slot();
+ }
+ sub seq() {
+ # shorthand for $job->seq();
+ $job->seq();
+ }
+ sub uq() {
+ # Do not quote this arg
+ $Global::unquote_arg = 1;
+ }
+ sub yyyy_mm_dd_hh_mm_ss(@) {
+ # ISO8601 2038-01-19T03:14:08
+ ::strftime("%Y-%m-%dT%H:%M:%S", localtime(shift || time()));
+ }
+ sub yyyy_mm_dd_hh_mm(@) {
+ # ISO8601 2038-01-19T03:14
+ ::strftime("%Y-%m-%dT%H:%M", localtime(shift || time()));
+ }
+ sub yyyy_mm_dd(@) {
+ # ISO8601 2038-01-19
+ ::strftime("%Y-%m-%d", localtime(shift || time()));
+ }
+ sub hh_mm_ss(@) {
+ # ISO8601 03:14:08
+ ::strftime("%H:%M:%S", localtime(shift || time()));
+ }
+ sub hh_mm(@) {
+ # ISO8601 03:14
+ ::strftime("%H:%M", localtime(shift || time()));
+ }
+ sub yyyymmddhhmmss(@) {
+ # ISO8601 20380119 + ISO8601 031408
+ ::strftime("%Y%m%d%H%M%S", localtime(shift || time()));
+ }
+ sub yyyymmddhhmm(@) {
+ # ISO8601 20380119 + ISO8601 0314
+ ::strftime("%Y%m%d%H%M", localtime(shift || time()));
+ }
+ sub yyyymmdd(@) {
+ # ISO8601 20380119
+ ::strftime("%Y%m%d", localtime(shift || time()));
+ }
+ sub hhmmss(@) {
+ # ISO8601 031408
+ ::strftime("%H%M%S", localtime(shift || time()));
+ }
+ sub hhmm(@) {
+ # ISO8601 0314
+ ::strftime("%H%M", localtime(shift || time()));
+ }
+
+ sub replace($$$$) {
+ # Calculates the corresponding value for a given perl expression
+ # Returns:
+ # The calculated string (quoted if asked for)
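+    #
+    # Example (illustrative): for an Arg whose orig() is "foo.gz",
+    # replace('s/\.gz$//', 0, $job) evaluates the (compiled and
+    # cached) expression with $_ = "foo.gz" and returns "foo".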
+ my $self = shift;
+ my $perlexpr = shift; # E.g. $_=$_ or s/.gz//
+ my $quote = shift; # should the string be quoted?
+ # This is actually a CommandLine-object,
+ # but it looks nice to be able to say {= $job->slot() =}
+ $job = shift;
+ # Positional replace treated as normal replace
+ $perlexpr =~ s/^(-?\d+)? *//;
+ if(not $Global::cache_replacement_eval
+ or
+ not $self->{'cache'}{$perlexpr}) {
+ # Only compute the value once
+ # Use $_ as the variable to change
+ local $_;
+ if($Global::trim eq "n") {
+ $_ = $self->{'orig'};
+ } else {
+ # Trim the input
+ $_ = trim_of($self->{'orig'});
+ }
+ ::debug("replace", "eval ", $perlexpr, " ", $_, "\n");
+ if(not $perleval{$perlexpr}) {
+ # Make an anonymous function of the $perlexpr
+ # And more importantly: Compile it only once
+ if($perleval{$perlexpr} =
+ eval('sub { no strict; no warnings; my $job = shift; '.
+ $perlexpr.' }')) {
+ # All is good
+ } else {
+ # The eval failed. Maybe $perlexpr is invalid perl?
+ ::error(255,"Cannot use $perlexpr: $@");
+ }
+ }
+ # Execute the function
+ $perleval{$perlexpr}->($job);
+ $self->{'cache'}{$perlexpr} = $_;
+ if($Global::unquote_arg) {
+ # uq() was called in perlexpr
+ $self->{'cache'}{'unquote'}{$perlexpr} = 1;
+ # Reset for next perlexpr
+ $Global::unquote_arg = 0;
+ }
+ }
+ # Return the value quoted if needed
+ if($self->{'cache'}{'unquote'}{$perlexpr}) {
+ return($self->{'cache'}{$perlexpr});
+ } else {
+ return($quote ? Q($self->{'cache'}{$perlexpr})
+ : $self->{'cache'}{$perlexpr});
+ }
+ }
+}
+
+sub flush_cache($) {
+ # Flush cache of computed values
+ my $self = shift;
+ $self->{'cache'} = undef;
+}
+
+sub orig($) {
+ my $self = shift;
+ return $self->{'orig'};
+}
+
+sub set_orig($$) {
+ my $self = shift;
+ $self->{'orig'} = shift;
+}
+
+sub trim_of($) {
+    # Removes white space as specified by --trim:
+ # n = nothing
+ # l = start
+ # r = end
+ # lr|rl = both
+ # Returns:
+ # string with white space removed as needed
+ my @strings = map { defined $_ ? $_ : "" } (@_);
+ my $arg;
+ if($Global::trim eq "n") {
+ # skip
+ } elsif($Global::trim eq "l") {
+ for my $arg (@strings) { $arg =~ s/^\s+//; }
+ } elsif($Global::trim eq "r") {
+ for my $arg (@strings) { $arg =~ s/\s+$//; }
+ } elsif($Global::trim eq "rl" or $Global::trim eq "lr") {
+ for my $arg (@strings) { $arg =~ s/^\s+//; $arg =~ s/\s+$//; }
+ } else {
+	::error(255,"--trim must be one of: n r l rl lr.");
+ }
+ return wantarray ? @strings : "@strings";
+}
+
+
+package TimeoutQueue;
+
+sub new($) {
+ my $class = shift;
+ my $delta_time = shift;
+ my ($pct);
+ if($delta_time =~ /(\d+(\.\d+)?)%/) {
+ # Timeout in percent
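+	# (e.g. --timeout 200% gives $pct = 2: a job is killed once it
+	# runs longer than 2 x the remedian of earlier job runtimes)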
+ $pct = $1/100;
+ $delta_time = 1_000_000;
+ }
+ $delta_time = ::multiply_time_units($delta_time);
+
+ return bless {
+ 'queue' => [],
+ 'delta_time' => $delta_time,
+ 'pct' => $pct,
+ 'remedian_idx' => 0,
+ 'remedian_arr' => [],
+ 'remedian' => undef,
+ }, ref($class) || $class;
+}
+
+sub delta_time($) {
+ my $self = shift;
+ return $self->{'delta_time'};
+}
+
+sub set_delta_time($$) {
+ my $self = shift;
+ $self->{'delta_time'} = shift;
+}
+
+sub remedian($) {
+ my $self = shift;
+ return $self->{'remedian'};
+}
+
+sub set_remedian($$) {
+ # Set median of the last 999^3 (=997002999) values using Remedian
+ #
+ # Rousseeuw, Peter J., and Gilbert W. Bassett Jr. "The remedian: A
+ # robust averaging method for large data sets." Journal of the
+ # American Statistical Association 85.409 (1990): 97-104.
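+    #
+    # Sketch (illustrative): $rref->[0] holds the last 999 raw
+    # values; every call also stores the median of level 0 into
+    # $rref->[1] and the median of level 1 into $rref->[2];
+    # 'remedian' is the median of level 2. This approximates the
+    # median of the last 999^3 values using only 3 x 999 slots.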
+ my $self = shift;
+ my $val = shift;
+ my $i = $self->{'remedian_idx'}++;
+ my $rref = $self->{'remedian_arr'};
+ $rref->[0][$i%999] = $val;
+ $rref->[1][$i/999%999] = (sort @{$rref->[0]})[$#{$rref->[0]}/2];
+ $rref->[2][$i/999/999%999] = (sort @{$rref->[1]})[$#{$rref->[1]}/2];
+ $self->{'remedian'} = (sort @{$rref->[2]})[$#{$rref->[2]}/2];
+}
+
+sub update_median_runtime($) {
+ # Update delta_time based on runtime of finished job if timeout is
+ # a percentage
+ my $self = shift;
+ my $runtime = shift;
+ if($self->{'pct'}) {
+ $self->set_remedian($runtime);
+ $self->{'delta_time'} = $self->{'pct'} * $self->remedian();
+ ::debug("run", "Timeout: $self->{'delta_time'}s ");
+ }
+}
+
+sub process_timeouts($) {
+ # Check if there was a timeout
+ my $self = shift;
+ # $self->{'queue'} is sorted by start time
+ while (@{$self->{'queue'}}) {
+ my $job = $self->{'queue'}[0];
+ if($job->endtime()) {
+ # Job already finished. No need to timeout the job
+ # This could be because of --keep-order
+ shift @{$self->{'queue'}};
+ } elsif($job->is_timedout($self->{'delta_time'})) {
+ # Need to shift off queue before kill
+ # because kill calls usleep that calls process_timeouts
+ shift @{$self->{'queue'}};
+ ::warning("This job was killed because it timed out:",
+ $job->replaced());
+ $job->kill();
+ } else {
+ # Because they are sorted by start time the rest are later
+ last;
+ }
+ }
+}
+
+sub insert($) {
+ my $self = shift;
+ my $in = shift;
+ push @{$self->{'queue'}}, $in;
+}
+
+
+package SQL;
+
+sub new($) {
+ my $class = shift;
+ my $dburl = shift;
+ $Global::use{"DBI"} ||= eval "use DBI; 1;";
+ # +DBURL = append to this DBURL
+ my $append = $dburl=~s/^\+//;
+ my %options = parse_dburl(get_alias($dburl));
+ my %driveralias = ("sqlite" => "SQLite",
+ "sqlite3" => "SQLite",
+ "pg" => "Pg",
+ "postgres" => "Pg",
+ "postgresql" => "Pg",
+ "csv" => "CSV",
+ "oracle" => "Oracle",
+ "ora" => "Oracle");
+ my $driver = $driveralias{$options{'databasedriver'}} ||
+ $options{'databasedriver'};
+ my $database = $options{'database'};
+ my $host = $options{'host'} ? ";host=".$options{'host'} : "";
+ my $port = $options{'port'} ? ";port=".$options{'port'} : "";
+ my $dsn = "DBI:$driver:dbname=$database$host$port";
+ my $userid = $options{'user'};
+    my $password = $options{'password'};
+ if(not grep /$driver/, DBI->available_drivers) {
+ ::error(255,"$driver not supported. ".
+ "Are you missing a perl DBD::$driver module?");
+ }
+ my $dbh;
+ if($driver eq "CSV") {
+ # CSV does not use normal dsn
+ if(-d $database) {
+ $dbh = DBI->connect("dbi:CSV:", "", "", { f_dir => "$database", })
+ or die $DBI::errstr;
+ } else {
+ ::error(255,"$database is not a directory.");
+ }
+ } else {
+ $dbh = DBI->connect($dsn, $userid, $password,
+ { RaiseError => 1, AutoInactiveDestroy => 1 })
+ or die $DBI::errstr;
+ }
+ $dbh->{'PrintWarn'} = $Global::debug || 0;
+ $dbh->{'PrintError'} = $Global::debug || 0;
+ $dbh->{'RaiseError'} = 1;
+ $dbh->{'ShowErrorStatement'} = 1;
+ $dbh->{'HandleError'} = sub {};
+ if(not defined $options{'table'}) {
+ ::error(255,"The DBURL ($dburl) must contain a table.");
+ }
+
+ return bless {
+ 'dbh' => $dbh,
+ 'driver' => $driver,
+ 'max_number_of_args' => undef,
+ 'table' => $options{'table'},
+ 'append' => $append,
+ }, ref($class) || $class;
+}
+
+# Prototype forwarding
+sub get_alias($);
+sub get_alias($) {
+ my $alias = shift;
+ $alias =~ s/^(sql:)*//; # Accept aliases prepended with sql:
+ if ($alias !~ /^:/) {
+ return $alias;
+ }
+
+ # Find the alias
+ my $path;
+ if (-l $0) {
+ ($path) = readlink($0) =~ m|^(.*)/|;
+ } else {
+ ($path) = $0 =~ m|^(.*)/|;
+ }
+
+ my @deprecated = ("$ENV{HOME}/.dburl.aliases",
+ "$path/dburl.aliases", "$path/dburl.aliases.dist");
+ for (@deprecated) {
+ if(-r $_) {
+ ::warning("$_ is deprecated. ".
+ "Use .sql/aliases instead (read man sql).");
+ }
+ }
+ my @urlalias=();
+ check_permissions("$ENV{HOME}/.sql/aliases");
+ check_permissions("$ENV{HOME}/.dburl.aliases");
+ my @search = ("$ENV{HOME}/.sql/aliases",
+ "$ENV{HOME}/.dburl.aliases", "/etc/sql/aliases",
+ "$path/dburl.aliases", "$path/dburl.aliases.dist");
+ for my $alias_file (@search) {
+ # local $/ needed if -0 set
+ local $/ = "\n";
+ if(-r $alias_file) {
+ my $in = ::open_or_exit("<",$alias_file);
+ push @urlalias, <$in>;
+ close $in;
+ }
+ }
+ my ($alias_part,$rest) = $alias=~/(:\w*)(.*)/;
+ # If we saw this before: we have an alias loop
+ if(grep {$_ eq $alias_part } @Private::seen_aliases) {
+ ::error("$alias_part is a cyclic alias.");
+ exit -1;
+ } else {
+ push @Private::seen_aliases, $alias_part;
+ }
+
+ my $dburl;
+ for (@urlalias) {
+ /^$alias_part\s+(\S+.*)/ and do { $dburl = $1; last; }
+ }
+
+ if($dburl) {
+ return get_alias($dburl.$rest);
+ } else {
+ ::error("$alias is not defined in @search");
+ exit(-1);
+ }
+}
+
+sub check_permissions($) {
+ my $file = shift;
+
+ if(-e $file) {
+ if(not -o $file) {
+ my $username = (getpwuid($<))[0];
+ ::warning("$file should be owned by $username: ".
+ "chown $username $file");
+ }
+ my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,
+ $atime,$mtime,$ctime,$blksize,$blocks) = stat($file);
+ if($mode & 077) {
+ my $username = (getpwuid($<))[0];
+	    ::warning("$file should only be readable by $username: ".
+ "chmod 600 $file");
+ }
+ }
+}
+
+sub parse_dburl($) {
+ my $url = shift;
+ my %options = ();
+ # sql:mysql://[[user][:password]@][host][:port]/[database[/table][?query]]
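+    #
+    # Example (illustrative):
+    #   parse_dburl("mysql://user:pass@db.example.com:3306/mydb/mytbl")
+    #   => (databasedriver => "mysql", user => "user",
+    #       password => "pass", host => "db.example.com",
+    #       port => "3306", database => "mydb", table => "mytbl")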
+
+ if($url=~m!^(?:sql:)? # You can prefix with 'sql:'
+ ((?:oracle|ora|mysql|pg|postgres|postgresql)(?:s|ssl|)|
+ (?:sqlite|sqlite2|sqlite3|csv)):// # Databasedriver ($1)
+ (?:
+ ([^:@/][^:@]*|) # Username ($2)
+ (?:
+ :([^@]*) # Password ($3)
+ )?
+ @)?
+ ([^:/]*)? # Hostname ($4)
+ (?:
+ :
+ ([^/]*)? # Port ($5)
+ )?
+ (?:
+ /
+ ([^/?]*)? # Database ($6)
+ )?
+ (?:
+ /
+ ([^?]*)? # Table ($7)
+ )?
+ (?:
+ \?
+ (.*)? # Query ($8)
+ )?
+ $!ix) {
+ $options{databasedriver} = ::undef_if_empty(lc(uri_unescape($1)));
+ $options{user} = ::undef_if_empty(uri_unescape($2));
+ $options{password} = ::undef_if_empty(uri_unescape($3));
+ $options{host} = ::undef_if_empty(uri_unescape($4));
+ $options{port} = ::undef_if_empty(uri_unescape($5));
+ $options{database} = ::undef_if_empty(uri_unescape($6));
+ $options{table} = ::undef_if_empty(uri_unescape($7));
+ $options{query} = ::undef_if_empty(uri_unescape($8));
+ ::debug("sql", "dburl $url\n");
+ ::debug("sql", "databasedriver ", $options{databasedriver},
+ " user ", $options{user},
+ " password ", $options{password}, " host ", $options{host},
+ " port ", $options{port}, " database ", $options{database},
+ " table ", $options{table}, " query ", $options{query}, "\n");
+ } else {
+ ::error("$url is not a valid DBURL");
+ exit 255;
+ }
+ return %options;
+}
+
+sub uri_unescape($) {
+ # Copied from http://cpansearch.perl.org/src/GAAS/URI-1.55/URI/Escape.pm
+ # to avoid depending on URI::Escape
+ # This section is (C) Gisle Aas.
+ # Note from RFC1630: "Sequences which start with a percent sign
+ # but are not followed by two hexadecimal characters are reserved
+ # for future extension"
+ my $str = shift;
+ if (@_ && wantarray) {
+ # not executed for the common case of a single argument
+ my @str = ($str, @_); # need to copy
+ foreach (@str) {
+ s/%([0-9A-Fa-f]{2})/chr(hex($1))/eg;
+ }
+ return @str;
+ }
+ $str =~ s/%([0-9A-Fa-f]{2})/chr(hex($1))/eg if defined $str;
+ $str;
+}
+
+sub run($) {
+ my $self = shift;
+ my $stmt = shift;
+ if($self->{'driver'} eq "CSV") {
+ $stmt=~ s/;$//;
+ if($stmt eq "BEGIN" or
+ $stmt eq "COMMIT") {
+ return undef;
+ }
+ }
+ my @retval;
+ my $dbh = $self->{'dbh'};
+ ::debug("sql","$opt::sqlmaster$opt::sqlworker run $stmt\n");
+ # Execute with the rest of the args - if any
+ my $rv;
+ my $sth;
+ my $lockretry = 0;
+ while($lockretry < 10) {
+ $sth = $dbh->prepare($stmt);
+ if($sth
+ and
+ eval { $rv = $sth->execute(@_) }) {
+ last;
+ } else {
+ if($@ =~ /no such table|Table .* doesn.t exist|relation ".*" does not exist/
+ or
+ $DBI::errstr =~ /no such table|Table .* doesn.t exist|relation ".*" does not exist/) {
+ # This is fine:
+ # It is just a worker that reported back too late -
+ # another worker had finished the job first
+ # and the table was then dropped
+ $rv = $sth = 0;
+ last;
+ }
+ if($DBI::errstr =~ /locked/) {
+ ::debug("sql", "Lock retry: $lockretry");
+ $lockretry++;
+ ::usleep(rand()*300);
+ } elsif(not $sth) {
+ # Try again
+ $lockretry++;
+ } else {
+ ::error(255,$DBI::errstr);
+ }
+ }
+ }
+ if($lockretry >= 10) {
+ ::die_bug("retry > 10: $DBI::errstr");
+ }
+ if($rv < 0 and $DBI::errstr){
+ ::error(255,$DBI::errstr);
+ }
+ return $sth;
+}
+
+sub get($) {
+ my $self = shift;
+ my $sth = $self->run(@_);
+ my @retval;
+ # If $sth = 0 it means the table was dropped by another process
+ while($sth) {
+ my @row = $sth->fetchrow_array();
+ @row or last;
+ push @retval, \@row;
+ }
+ return \@retval;
+}
+
+sub table($) {
+ my $self = shift;
+ return $self->{'table'};
+}
+
+sub append($) {
+ my $self = shift;
+ return $self->{'append'};
+}
+
+sub update($) {
+ my $self = shift;
+ my $stmt = shift;
+ my $table = $self->table();
+ $self->run("UPDATE $table $stmt",@_);
+}
+
+sub output($) {
+ my $self = shift;
+ my $commandline = shift;
+
+ $self->update("SET Stdout = ?, Stderr = ? WHERE Seq = ".
+ $commandline->seq(),
+ join("",@{$commandline->{'output'}{1}}),
+ join("",@{$commandline->{'output'}{2}}));
+}
+
+sub max_number_of_args($) {
+ # Maximal number of args for this table
+ my $self = shift;
+ if(not $self->{'max_number_of_args'}) {
+ # Read the number of args from the SQL table
+ my $table = $self->table();
+ my $v = $self->get("SELECT * FROM $table LIMIT 1;");
+ my @reserved_columns = qw(Seq Host Starttime JobRuntime Send
+ Receive Exitval _Signal Command Stdout Stderr);
+ if(not $v) {
+ ::error("$table contains no records");
+ }
+ # Count the number of Vx columns
+ $self->{'max_number_of_args'} = $#{$v->[0]} - $#reserved_columns;
+ }
+ return $self->{'max_number_of_args'};
+}
+
+sub set_max_number_of_args($$) {
+ my $self = shift;
+ $self->{'max_number_of_args'} = shift;
+}
+
+sub create_table($) {
+ my $self = shift;
+ if($self->append()) { return; }
+ my $max_number_of_args = shift;
+ $self->set_max_number_of_args($max_number_of_args);
+ my $table = $self->table();
+ $self->run(qq(DROP TABLE IF EXISTS $table;));
+ # BIGINT and TEXT are not supported in these databases or are too small
+ my %vartype = (
+ "Oracle" => { "BIGINT" => "NUMBER(19,0)",
+ "TEXT" => "CLOB", },
+ "mysql" => { "TEXT" => "BLOB", },
+ "CSV" => { "BIGINT" => "INT",
+ "FLOAT" => "REAL", },
+ );
+ my $BIGINT = $vartype{$self->{'driver'}}{"BIGINT"} || "BIGINT";
+ my $TEXT = $vartype{$self->{'driver'}}{"TEXT"} || "TEXT";
+ my $FLOAT = $vartype{$self->{'driver'}}{"FLOAT"} || "FLOAT(44)";
+ my $v_def = join "", map { "V$_ $TEXT," } (1..$self->max_number_of_args());
+ $self->run(qq{CREATE TABLE $table
+ (Seq $BIGINT,
+ Host $TEXT,
+ Starttime $FLOAT,
+ JobRuntime $FLOAT,
+ Send $BIGINT,
+ Receive $BIGINT,
+ Exitval $BIGINT,
+ _Signal $BIGINT,
+ Command $TEXT,}.
+ $v_def.
+ qq{Stdout $TEXT,
+ Stderr $TEXT);});
+}
+
+sub insert_records($) {
+ my $self = shift;
+ my $seq = shift;
+ my $command_ref = shift;
+ my $record_ref = shift;
+ my $table = $self->table();
+ # For SQL encode the command with \257 space as split points
+ my $command = join("\257 ",@$command_ref);
+ my @v_cols = map { ", V$_" } (1..$self->max_number_of_args());
+    # Four extra values due to Seq, Command, Exitval, Send
+ my $v_vals = join ",", map { "?" } (1..$self->max_number_of_args()+4);
+ $self->run("INSERT INTO $table (Seq,Command,Exitval,Send @v_cols) ".
+ "VALUES ($v_vals);", $seq, $command, -1000,
+ 0, @$record_ref[1..$#$record_ref]);
+}
+
+
+sub get_record($) {
+ my $self = shift;
+ my @retval;
+ my $table = $self->table();
+ my @v_cols = map { ", V$_" } (1..$self->max_number_of_args());
+ my $rand = "Reserved-".$$.rand();
+ my $v;
+ my $more_pending;
+
+ do {
+ if($self->{'driver'} eq "CSV") {
+ # Sub SELECT is not supported in CSV
+ # So to minimize the race condition below select a job at random
+ my $r = $self->get("SELECT Seq, Command @v_cols FROM $table ".
+ "WHERE Exitval = -1000 LIMIT 100;");
+ $v = [ sort { rand() > 0.5 } @$r ];
+ } else {
+ # Avoid race condition where multiple workers get the same job
+ # by setting Stdout to a unique string
+	    # (SELECT * FROM (...) AS dummy) is needed due to silliness in MySQL
+ $self->update("SET Stdout = ?,Exitval = ? ".
+ "WHERE Seq = (".
+ " SELECT * FROM (".
+ " SELECT min(Seq) FROM $table WHERE Exitval = -1000".
+ " ) AS dummy".
+ ") AND Exitval = -1000;", $rand, -1210);
+ # If a parallel worker overwrote the unique string this will get nothing
+ $v = $self->get("SELECT Seq, Command @v_cols FROM $table ".
+ "WHERE Stdout = ?;", $rand);
+ }
+ if($v->[0]) {
+ my $val_ref = $v->[0];
+ # Mark record as taken
+ my $seq = shift @$val_ref;
+ # Save the sequence number to use when running the job
+ $SQL::next_seq = $seq;
+ $self->update("SET Exitval = ? WHERE Seq = ".$seq, -1220);
+ # Command is encoded with '\257 space' as splitting char
+ my @command = split /\257 /, shift @$val_ref;
+ $SQL::command_ref = \@command;
+ for (@$val_ref) {
+ push @retval, Arg->new($_);
+ }
+ } else {
+ # If the record was updated by another job in parallel,
+ # then we may not be done, so see if there are more jobs pending
+ $more_pending =
+ $self->get("SELECT Seq FROM $table WHERE Exitval = ?;", -1210);
+ }
+ } while (not $v->[0] and $more_pending->[0]);
+
+ if(@retval) {
+ return \@retval;
+ } else {
+ return undef;
+ }
+}
+
+sub total_jobs($) {
+ my $self = shift;
+ my $table = $self->table();
+ my $v = $self->get("SELECT count(*) FROM $table;");
+ if($v->[0]) {
+ return $v->[0]->[0];
+ } else {
+ ::die_bug("SQL::total_jobs");
+ }
+}
+
+sub max_seq($) {
+ my $self = shift;
+ my $table = $self->table();
+ my $v = $self->get("SELECT max(Seq) FROM $table;");
+ if($v->[0]) {
+ return $v->[0]->[0];
+ } else {
+ ::die_bug("SQL::max_seq");
+ }
+}
+
+sub finished($) {
+ # Check if there are any jobs left in the SQL table that do not
+ # have a "real" exitval
+ my $self = shift;
+ if($opt::wait or $Global::start_sqlworker) {
+ my $table = $self->table();
+ my $rv = $self->get("select Seq,Exitval from $table ".
+ "where Exitval <= -1000 limit 1");
+ return not $rv->[0];
+ } else {
+ return 1;
+ }
+}
+
+package Semaphore;
+
+# This package provides a counting semaphore
+#
+# If a process dies without releasing the semaphore the next process
+# that needs that entry will clean up dead semaphores
+#
+# The semaphores are stored in $PARALLEL_HOME/semaphores/id-<name>. Each
+# file in $PARALLEL_HOME/semaphores/id-<name>/ is the process ID of the
+# process holding the entry. If the process dies, the entry can be
+# taken by another process.
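+#
+# Illustrative example: 'sem --id myid -j2 <command>' allows 2
+# concurrent holders. Each holder hard links the id file to a pid
+# file in .../semaphores/id-myid/; a third caller loops in acquire()
+# until a pid file of a dead process can be cleaned up and a slot
+# becomes free.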
+
+sub new($) {
+ my $class = shift;
+ my $id = shift;
+ my $count = shift;
+ $id =~ s/([^-_a-z0-9])/unpack("H*",$1)/ige; # Convert non-word chars to hex
+ $id = "id-".$id; # To distinguish it from a process id
+ my $parallel_locks = $Global::cache_dir . "/semaphores";
+ -d $parallel_locks or ::mkdir_or_die($parallel_locks);
+ my $lockdir = "$parallel_locks/$id";
+ my $lockfile = $lockdir.".lock";
+ if(-d $parallel_locks and -w $parallel_locks
+ and -r $parallel_locks and -x $parallel_locks) {
+ # skip
+ } else {
+ ::error(255,"Semaphoredir must be writable: '$parallel_locks'");
+ }
+
+ if($count < 1) { ::die_bug("semaphore-count: $count"); }
+ return bless {
+ 'lockfile' => $lockfile,
+ 'lockfh' => Symbol::gensym(),
+ 'lockdir' => $lockdir,
+ 'id' => $id,
+ 'idfile' => $lockdir."/".$id,
+ 'pid' => $$,
+ 'pidfile' => $lockdir."/".$$.'@'.::hostname(),
+ 'count' => $count + 1 # nlinks returns a link for the 'id-' as well
+ }, ref($class) || $class;
+}
+
+sub remove_dead_locks($) {
+ my $self = shift;
+ my $lockdir = $self->{'lockdir'};
+
+ for my $d (glob "$lockdir/*") {
+ $d =~ m:$lockdir/([0-9]+)\@([-\._a-z0-9]+)$:o or next;
+ my ($pid, $host) = ($1, $2);
+ if($host eq ::hostname()) {
+ if(kill 0, $pid) {
+ ::debug("sem", "Alive: $pid $d\n");
+ } else {
+ ::debug("sem", "Dead: $d\n");
+ ::rm($d);
+ }
+ }
+ }
+}
+
+sub acquire($) {
+ my $self = shift;
+ my $sleep = 1; # 1 ms
+ my $start_time = time;
+ while(1) {
+ # Can we get a lock?
+ $self->atomic_link_if_count_less_than() and last;
+ $self->remove_dead_locks();
+ # Retry slower and slower up to 1 second
+ $sleep = ($sleep < 1000) ? ($sleep * 1.1) : ($sleep);
+ # Random to avoid every sleeping job waking up at the same time
+ ::usleep(rand()*$sleep);
+ if($opt::semaphoretimeout) {
+ if($opt::semaphoretimeout > 0
+ and
+ time - $start_time > $opt::semaphoretimeout) {
+ # Timeout: Take the semaphore anyway
+ ::warning("Semaphore timed out. Stealing the semaphore.");
+ if(not -e $self->{'idfile'}) {
+ open (my $fh, ">", $self->{'idfile'}) or
+ ::die_bug("timeout_write_idfile: $self->{'idfile'}");
+ close $fh;
+ }
+ link $self->{'idfile'}, $self->{'pidfile'};
+ last;
+ }
+ if($opt::semaphoretimeout < 0
+ and
+ time - $start_time > -$opt::semaphoretimeout) {
+ # Timeout: Exit
+ ::warning("Semaphore timed out. Exiting.");
+ exit(1);
+ last;
+ }
+ }
+ }
+ ::debug("sem", "acquired $self->{'pid'}\n");
+}
+
+sub release($) {
+ my $self = shift;
+ ::rm($self->{'pidfile'});
+ if($self->nlinks() == 1) {
+ # This is the last link, so atomic cleanup
+ $self->lock();
+ if($self->nlinks() == 1) {
+ ::rm($self->{'idfile'});
+ rmdir $self->{'lockdir'};
+ }
+ $self->unlock();
+ }
+ ::debug("run", "released $self->{'pid'}\n");
+}
+
+sub pid_change($) {
+ # This should do what release()+acquire() would do without having
+ # to re-acquire the semaphore
+ my $self = shift;
+
+ my $old_pidfile = $self->{'pidfile'};
+ $self->{'pid'} = $$;
+ $self->{'pidfile'} = $self->{'lockdir'}."/".$$.'@'.::hostname();
+ my $retval = link $self->{'idfile'}, $self->{'pidfile'};
+ ::debug("sem","link($self->{'idfile'},$self->{'pidfile'})=$retval\n");
+ ::rm($old_pidfile);
+}
+
+sub atomic_link_if_count_less_than($) {
+ # Link $file1 to $file2 if nlinks to $file1 < $count
+ my $self = shift;
+ my $retval = 0;
+ $self->lock();
+ my $nlinks = $self->nlinks();
+ ::debug("sem","$nlinks<$self->{'count'} ");
+ if($nlinks < $self->{'count'}) {
+ -d $self->{'lockdir'} or ::mkdir_or_die($self->{'lockdir'});
+ if(not -e $self->{'idfile'}) {
+ open (my $fh, ">", $self->{'idfile'}) or
+ ::die_bug("write_idfile: $self->{'idfile'}");
+ close $fh;
+ }
+ $retval = link $self->{'idfile'}, $self->{'pidfile'};
+ ::debug("sem","link($self->{'idfile'},$self->{'pidfile'})=$retval\n");
+ }
+ $self->unlock();
+ ::debug("sem", "atomic $retval");
+ return $retval;
+}
+
+sub nlinks($) {
+ my $self = shift;
+ if(-e $self->{'idfile'}) {
+ return (stat(_))[3];
+ } else {
+ return 0;
+ }
+}
+
+sub lock($) {
+ my $self = shift;
+ my $sleep = 100; # 100 ms
+ my $total_sleep = 0;
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+ my $locked = 0;
+ while(not $locked) {
+ if(tell($self->{'lockfh'}) == -1) {
+ # File not open
+ open($self->{'lockfh'}, ">", $self->{'lockfile'})
+ or ::debug("run", "Cannot open $self->{'lockfile'}");
+ }
+ if($self->{'lockfh'}) {
+ # File is open
+ chmod 0666, $self->{'lockfile'}; # assuming you want it a+rw
+ if(flock($self->{'lockfh'}, LOCK_EX()|LOCK_NB())) {
+ # The file is locked: No need to retry
+ $locked = 1;
+ last;
+ } else {
+ if ($! =~ m/Function not implemented/) {
+ ::warning("flock: $!",
+ "Will wait for a random while.");
+ ::usleep(rand(5000));
+ # File cannot be locked: No need to retry
+ $locked = 2;
+ last;
+ }
+ }
+ }
+ # Locking failed in first round
+ # Sleep and try again
+ $sleep = ($sleep < 1000) ? ($sleep * 1.1) : ($sleep);
+ # Random to avoid every sleeping job waking up at the same time
+ ::usleep(rand()*$sleep);
+ $total_sleep += $sleep;
+ if($opt::semaphoretimeout) {
+ if($opt::semaphoretimeout > 0
+ and
+ $total_sleep/1000 > $opt::semaphoretimeout) {
+ # Timeout: Take the semaphore anyway
+ ::warning("Semaphore timed out. Taking the semaphore.");
+ $locked = 3;
+ last;
+ }
+ if($opt::semaphoretimeout < 0
+ and
+ $total_sleep/1000 > -$opt::semaphoretimeout) {
+ # Timeout: Exit
+ ::warning("Semaphore timed out. Exiting.");
+ $locked = 4;
+ last;
+ }
+ } else {
+ if($total_sleep/1000 > 30) {
+ ::warning("Semaphore stuck for 30 seconds. ".
+ "Consider using --semaphoretimeout.");
+ }
+ }
+ }
+ ::debug("run", "locked $self->{'lockfile'}");
+}
+
+sub unlock($) {
+ my $self = shift;
+ ::rm($self->{'lockfile'});
+ close $self->{'lockfh'};
+ ::debug("run", "unlocked\n");
+}
+
+# Keep perl -w happy
+
+$opt::x = $Semaphore::timeout = $Semaphore::wait =
+$Job::file_descriptor_warning_printed = $Global::envdef = @Arg::arg =
+$Global::max_slot_number = $opt::session;
+
+package main;
+
+
+sub main() {
+ unpack_combined_executable();
+ save_stdin_stdout_stderr();
+ save_original_signal_handler();
+ parse_options();
+ ::debug("init", "Open file descriptors: ", join(" ",keys %Global::fh), "\n");
+ my $number_of_args;
+ if($Global::max_number_of_args) {
+ $number_of_args = $Global::max_number_of_args;
+ } elsif ($opt::X or $opt::m or $opt::xargs) {
+ $number_of_args = undef;
+ } else {
+ $number_of_args = 1;
+ }
+
+ my @command = @ARGV;
+ my @input_source_fh;
+ if($opt::pipepart) {
+ if($opt::tee) {
+ @input_source_fh = map { open_or_exit("<",$_) } @opt::a;
+ # Remove the first: It will be the file piped.
+ shift @input_source_fh;
+ if(not @input_source_fh and not $opt::pipe) {
+ @input_source_fh = (*STDIN);
+ }
+ } else {
+ # -a is used for data - not for command line args
+ @input_source_fh = map { open_or_exit("<",$_) } "/dev/null";
+ }
+ } else {
+ @input_source_fh = map { open_or_exit("<",$_) } @opt::a;
+ if(not @input_source_fh and not $opt::pipe) {
+ @input_source_fh = (*STDIN);
+ }
+ }
+
+ if($opt::skip_first_line) {
+ # Skip the first line for the first file handle
+ my $fh = $input_source_fh[0];
+ <$fh>;
+ }
+
+ set_input_source_header(\@command,\@input_source_fh);
+ if($opt::filter_hosts and (@opt::sshlogin or @opt::sshloginfile)) {
+ # Parallel check all hosts are up. Remove hosts that are down
+ filter_hosts();
+ }
+ if($opt::sqlmaster and $opt::sqlworker) {
+ # Start a real --sqlworker in the background later
+ $Global::start_sqlworker = 1;
+ $opt::sqlworker = undef;
+ }
+
+ $Global::start_time = ::now();
+ if($opt::nonall or $opt::onall) {
+ onall(\@input_source_fh,@command);
+ wait_and_exit(min(undef_as_zero($Global::exitstatus),254));
+ }
+
+ $Global::JobQueue = JobQueue->new(
+ \@command, \@input_source_fh, $Global::ContextReplace,
+ $number_of_args, \@Global::transfer_files, \@Global::ret_files,
+ \@Global::template_names, \@Global::template_contents
+ );
+
+ if($opt::sqlmaster) {
+ # Create SQL table to hold joblog + output
+ # Figure out how many arguments are in a job
+ # (It is affected by --colsep, -N, $number_source_fh)
+ my $record_queue = $Global::JobQueue->{'commandlinequeue'}{'arg_queue'};
+ my $record = $record_queue->get();
+ my $no_of_values = $number_of_args * (1+$#{$record});
+ $record_queue->unget($record);
+ $Global::sql->create_table($no_of_values);
+ if($opt::sqlworker) {
+ # Start a real --sqlworker in the background later
+ $Global::start_sqlworker = 1;
+ $opt::sqlworker = undef;
+ }
+ }
+
+ if($opt::pipepart) {
+ pipepart_setup();
+ } elsif($opt::pipe) {
+ if($opt::tee) {
+ pipe_tee_setup();
+ } elsif($opt::shard or $opt::bin) {
+ pipe_shard_setup();
+ } elsif($opt::groupby) {
+ pipe_group_by_setup();
+ }
+ }
+
+ if($opt::eta or $opt::bar or $opt::shuf or $Global::halt_pct) {
+ # Count the number of jobs or shuffle all jobs
+ # before starting any.
+ # Must be done after ungetting any --pipepart jobs.
+ $Global::JobQueue->total_jobs();
+ }
+ # Compute $Global::max_jobs_running
+ # Must be done after ungetting any --pipepart jobs.
+ max_jobs_running();
+ init_run_jobs();
+ my $sem;
+ if($Global::semaphore) {
+ $sem = acquire_semaphore();
+ }
+ $SIG{TERM} = $Global::original_sig{TERM};
+ $SIG{HUP} = \&start_no_new_jobs;
+
+ if($opt::progress) {
+ ::status_no_nl(init_progress());
+ }
+ if($opt::tee or $opt::shard or $opt::bin) {
+ # All jobs must be running in parallel for --tee/--shard/--bin
+ while(start_more_jobs()) {}
+ $Global::start_no_new_jobs = 1;
+ if(not $Global::JobQueue->empty()) {
+ if($opt::tee) {
+ ::error(255,"--tee requires --jobs to be higher. ".
+ "Try --jobs 0.");
+ } elsif($opt::bin) {
+ ::error(255,"--bin requires --jobs to be higher than ".
+ "the number of", "arguments. Increase --jobs.");
+ } elsif($opt::shard) {
+ ::error(255,"--shard requires --jobs to be higher ".
+ "than the number of",
+ "arguments. Increase --jobs.");
+ } else {
+ ::die_bug("--bin/--shard/--tee should not get here");
+ }
+ }
+ } elsif($opt::pipe and not $opt::pipepart and not $opt::semaphore) {
+ # Fill all jobslots
+ while(start_more_jobs()) {}
+ spreadstdin();
+ } else {
+ # Reap the finished jobs and start more
+ while(reapers() + start_more_jobs()) {}
+ }
+ ::debug("init", "Start draining\n");
+ drain_job_queue(@command);
+ ::debug("init", "Done draining\n");
+ reapers();
+ ::debug("init", "Done reaping\n");
+ if($Global::semaphore) { $sem->release(); }
+ cleanup();
+ ::debug("init", "Halt\n");
+ halt();
+}
+
+main();
diff --git a/src/parallel.pod b/src/parallel.pod
new file mode 100644
index 0000000..8bcb7cf
--- /dev/null
+++ b/src/parallel.pod
@@ -0,0 +1,4618 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=encoding utf8
+
+=head1 NAME
+
+parallel - build and execute shell command lines from standard input
+in parallel
+
+
+=head1 SYNOPSIS
+
+B<parallel> [options] [I<command> [arguments]] < list_of_arguments
+
+B<parallel> [options] [I<command> [arguments]] ( B<:::> arguments |
+B<:::+> arguments | B<::::> argfile(s) | B<::::+> argfile(s) ) ...
+
+B<parallel> --semaphore [options] I<command>
+
+B<#!/usr/bin/parallel> --shebang [options] [I<command> [arguments]]
+
+B<#!/usr/bin/parallel> --shebang-wrap [options] [I<command>
+[arguments]]
+
+
+=head1 DESCRIPTION
+
+STOP!
+
+Read the B<Reader's guide> below if you are new to GNU B<parallel>.
+
+GNU B<parallel> is a shell tool for executing jobs in parallel using
+one or more computers. A job can be a single command or a small script
+that has to be run for each of the lines in the input. The typical
+input is a list of files, a list of hosts, a list of users, a list of
+URLs, or a list of tables. A job can also be a command that reads from
+a pipe. GNU B<parallel> can then split the input into blocks and pipe
+a block into each command in parallel.
+
+If you use B<xargs> and B<tee> today, you will find GNU B<parallel>
+very easy to use, as GNU B<parallel> is written to have the same
+options as B<xargs>. If you write loops in shell, you will find GNU
+B<parallel> may be able to replace most of the loops and make them
+run faster by running several jobs in parallel.
+
+GNU B<parallel> makes sure output from the commands is the same output
+as you would get had you run the commands sequentially. This makes it
+possible to use output from GNU B<parallel> as input for other
+programs.
+
+For each line of input GNU B<parallel> will execute I<command> with
+the line as arguments. If no I<command> is given, the line of input is
+executed. Several lines will be run in parallel. GNU B<parallel> can
+often be used as a substitute for B<xargs> or B<cat | bash>.
+
+
+=head2 Reader's guide
+
+GNU B<parallel> includes 4 types of documentation: tutorial,
+how-to, reference, and explanation/design.
+
+
+=head3 Tutorial
+
+If you prefer reading a book buy B<GNU Parallel 2018> at
+https://www.lulu.com/shop/ole-tange/gnu-parallel-2018/paperback/product-23558902.html
+or download it at: https://doi.org/10.5281/zenodo.1146014 Read at
+least chapter 1+2. It should take you less than 20 minutes.
+
+Otherwise start by watching the intro videos for a quick introduction:
+https://youtube.com/playlist?list=PL284C9FF2488BC6D1
+
+If you want to dive deeper: spend a couple of hours walking through
+the tutorial (B<man parallel_tutorial>). Your command line will love
+you for it.
+
+
+=head3 How-to
+
+You can find a lot of examples of use in B<man
+parallel_examples>. They will give you an idea of what GNU B<parallel>
+is capable of, and you may find a solution you can simply adapt to
+your situation.
+
+If the examples do not cover your exact needs, the options map
+(https://www.gnu.org/software/parallel/parallel_options_map.pdf) can
+help you identify options that are related, so you can look these up
+in the man page.
+
+
+=head3 Reference
+
+If you need a one page printable cheat sheet you can find it on:
+https://www.gnu.org/software/parallel/parallel_cheat.pdf
+
+The man page is the reference for all options, and reading the man
+page from cover to cover is probably not what you need.
+
+
+=head3 Design discussion
+
+If you want to know the design decisions behind GNU B<parallel>, try:
+B<man parallel_design>. This is also a good intro if you intend to
+change GNU B<parallel>.
+
+
+
+=head1 OPTIONS
+
+=over 4
+
+=item I<command>
+
+Command to execute.
+
+If I<command> or the following arguments contain
+replacement strings (such as B<{}>) every instance will be substituted
+with the input.
+
+If I<command> is given, GNU B<parallel> solves the same tasks as
+B<xargs>. If I<command> is not given GNU B<parallel> will behave
+similarly to B<cat | sh>.
+
+The I<command> must be an executable, a script, a composed command, an
+alias, or a function.
+
+B<Bash functions>: B<export -f> the function first or use B<env_parallel>.
+
+B<Bash, Csh, or Tcsh aliases>: Use B<env_parallel>.
+
+B<Zsh, Fish, Ksh, and Pdksh functions and aliases>: Use B<env_parallel>.
+
+=item B<{}>
+
+Input line.
+
+This replacement string will be replaced by a full line read from the
+input source. The input source is normally stdin (standard input), but
+can also be given with B<--arg-file>, B<:::>, or B<::::>.
+
+The replacement string B<{}> can be changed with B<-I>.
+
+If the command line contains no replacement strings then B<{}> will be
+appended to the command line.
+
+Replacement strings are normally quoted, so special characters are not
+parsed by the shell. The exception is if the command starts with a
+replacement string; then the string is not quoted.
+
+See also: B<--plus> B<{.}> B<{/}> B<{//}> B<{/.}> B<{#}> B<{%}>
+B<{>I<n>B<}> B<{=>I<perl expression>B<=}>
+
+
+=item B<{.}>
+
+Input line without extension.
+
+This replacement string will be replaced by the input with the
+extension removed. If the input line contains B<.> after the last
+B</>, the last B<.> until the end of the string will be removed and
+B<{.}> will be replaced with the remaining. E.g. I<foo.jpg> becomes
+I<foo>, I<subdir/foo.jpg> becomes I<subdir/foo>,
+I<sub.dir/foo.jpg> becomes I<sub.dir/foo>, I<sub.dir/bar> remains
+I<sub.dir/bar>. If the input line does not contain B<.> it will remain
+unchanged.
+
+The replacement string B<{.}> can be changed with B<--extensionreplace>
+
+See also: B<{}> B<--extensionreplace>
+
+
+=item B<{/}>
+
+Basename of input line.
+
+This replacement string will be replaced by the input with the
+directory part removed.
+
+See also: B<{}> B<--basenamereplace>
+
+
+=item B<{//}>
+
+Dirname of input line.
+
+This replacement string will be replaced by the dir of the input
+line. See B<dirname>(1).
+
+See also: B<{}> B<--dirnamereplace>
+
+
+=item B<{/.}>
+
+Basename of input line without extension.
+
+This replacement string will be replaced by the input with the
+directory and extension part removed. B<{/.}> is a combination of
+B<{/}> and B<{.}>.
+
+See also: B<{}> B<--basenameextensionreplace>
+
+
+=item B<{#}>
+
+Sequence number of the job to run.
+
+This replacement string will be replaced by the sequence number of the
+job being run. It contains the same number as $PARALLEL_SEQ.
+
+See also: B<{}> B<--seqreplace>
+
+
+=item B<{%}>
+
+Job slot number.
+
+This replacement string will be replaced by the job's slot number
+between 1 and number of jobs to run in parallel. There will never be 2
+jobs running at the same time with the same job slot number.
+
+If the job needs to be retried (e.g using B<--retries> or
+B<--retry-failed>) the job slot is not automatically updated. You
+should then instead use B<$PARALLEL_JOBSLOT>:
+
+ $ do_test() {
+ id="$3 {%}=$1 PARALLEL_JOBSLOT=$2"
+ echo run "$id";
+ sleep 1
+ # fail if {%} is odd
+ return `echo $1%2 | bc`
+ }
+ $ export -f do_test
+ $ parallel -j3 --jl mylog do_test {%} \$PARALLEL_JOBSLOT {} ::: A B C D
+ run A {%}=1 PARALLEL_JOBSLOT=1
+ run B {%}=2 PARALLEL_JOBSLOT=2
+ run C {%}=3 PARALLEL_JOBSLOT=3
+ run D {%}=1 PARALLEL_JOBSLOT=1
+ $ parallel --retry-failed -j3 --jl mylog do_test {%} \$PARALLEL_JOBSLOT {} ::: A B C D
+ run A {%}=1 PARALLEL_JOBSLOT=1
+ run C {%}=3 PARALLEL_JOBSLOT=2
+ run D {%}=1 PARALLEL_JOBSLOT=3
+
+Notice how {%} and $PARALLEL_JOBSLOT differ in the retry run of C and D.
+
+See also: B<{}> B<--jobs> B<--slotreplace>
+
+
+=item B<{>I<n>B<}>
+
+Argument from input source I<n> or the I<n>'th argument.
+
+This positional replacement string will be replaced by the input from
+input source I<n> (when used with B<--arg-file> or B<::::>) or with the
+I<n>'th argument (when used with B<-N>). If I<n> is negative it refers
+to the I<n>'th last argument.
+
+See also: B<{}> B<{>I<n>.B<}> B<{>I<n>/B<}> B<{>I<n>//B<}>
+B<{>I<n>/.B<}>
+
+
+=item B<{>I<n>.B<}>
+
+Argument from input source I<n> or the I<n>'th argument without
+extension.
+
+B<{>I<n>.B<}> is a combination of B<{>I<n>B<}> and B<{.}>.
+
+This positional replacement string will be replaced by the input from
+input source I<n> (when used with B<--arg-file> or B<::::>) or with the
+I<n>'th argument (when used with B<-N>). The input will have the
+extension removed.
+
+See also: B<{>I<n>B<}> B<{.}>
+
+
+=item B<{>I<n>/B<}>
+
+Basename of argument from input source I<n> or the I<n>'th argument.
+
+B<{>I<n>/B<}> is a combination of B<{>I<n>B<}> and B<{/}>.
+
+This positional replacement string will be replaced by the input from
+input source I<n> (when used with B<--arg-file> or B<::::>) or with the
+I<n>'th argument (when used with B<-N>). The input will have the
+directory (if any) removed.
+
+See also: B<{>I<n>B<}> B<{/}>
+
+
+=item B<{>I<n>//B<}>
+
+Dirname of argument from input source I<n> or the I<n>'th argument.
+
+B<{>I<n>//B<}> is a combination of B<{>I<n>B<}> and B<{//}>.
+
+This positional replacement string will be replaced by the dir of the
+input from input source I<n> (when used with B<--arg-file> or B<::::>) or with
+the I<n>'th argument (when used with B<-N>). See B<dirname>(1).
+
+See also: B<{>I<n>B<}> B<{//}>
+
+
+=item B<{>I<n>/.B<}>
+
+Basename of argument from input source I<n> or the I<n>'th argument
+without extension.
+
+B<{>I<n>/.B<}> is a combination of B<{>I<n>B<}>, B<{/}>, and
+B<{.}>.
+
+This positional replacement string will be replaced by the input from
+input source I<n> (when used with B<--arg-file> or B<::::>) or with the
+I<n>'th argument (when used with B<-N>). The input will have the
+directory (if any) and extension removed.
+
+See also: B<{>I<n>B<}> B<{/.}>
+
+
+=item B<{=>I<perl expression>B<=}>
+
+Replace with calculated I<perl expression>.
+
+B<$_> will contain the same as B<{}>. After evaluating I<perl
+expression> B<$_> will be used as the value. It is recommended to only
+change $_ but you have full access to all of GNU B<parallel>'s
+internal functions and data structures.
+
+The expression must give the same result if evaluated twice -
+otherwise the behaviour is undefined. E.g. in some versions of GNU
+B<parallel> this will not work as expected:
+
+ parallel echo '{= $_= ++$wrong_counter =}' ::: a b c
+
+A few convenience functions and data structures have been made:
+
+=over 2
+
+=item Z<> B<Q(>I<string>B<)>
+
+Shell quote a string. Example:
+
+ parallel echo {} is quoted as '{= $_=Q($_) =}' ::: \$PWD
+
+=item Z<> B<pQ(>I<string>B<)>
+
+Perl quote a string. Example:
+
+ parallel echo {} is quoted as '{= $_=pQ($_) =}' ::: \$PWD
+
+=item Z<> B<uq()> (or B<uq>)
+
+Do not quote current replacement string. Example:
+
+ parallel echo {} has the value '{= uq =}' ::: \$PWD
+
+=item Z<> B<hash(val)>
+
+Compute B::hash(val). Example:
+
+ parallel echo Hash of {} is '{= $_=hash($_) =}' ::: a b c
+
+=item Z<> B<total_jobs()>
+
+Number of jobs in total. Example:
+
+ parallel echo Number of jobs: '{= $_=total_jobs() =}' ::: a b c
+
+=item Z<> B<slot()>
+
+Slot number of job. Example:
+
+ parallel echo Job slot of {} is '{= $_=slot() =}' ::: a b c
+
+=item Z<> B<seq()>
+
+Sequence number of job. Example:
+
+ parallel echo Seq number of {} is '{= $_=seq() =}' ::: a b c
+
+=item Z<> B<@arg>
+
+The arguments counting from 1 ($arg[1] = {1} = first argument). Example:
+
+ parallel echo {1}+{2}='{=1 $_=$arg[1]+$arg[2] =}' \
+ ::: 1 2 3 ::: 2 3 4
+
+('{=1' forces this to be a positional replacement string, and
+therefore will not repeat the value for each arg.)
+
+=item Z<> B<skip()>
+
+Skip this job (see also B<--filter>). Example:
+
+ parallel echo '{= $arg[1] >= $arg[2] and skip =}' \
+ ::: 1 2 3 ::: 2 3 4
+
+=item Z<> B<yyyy_mm_dd_hh_mm_ss(sec)>
+
+=item Z<> B<yyyy_mm_dd_hh_mm(sec)>
+
+=item Z<> B<yyyy_mm_dd(sec)>
+
+=item Z<> B<hh_mm_ss(sec)>
+
+=item Z<> B<hh_mm(sec)>
+
+=item Z<> B<yyyymmddhhmmss(sec)>
+
+=item Z<> B<yyyymmddhhmm(sec)>
+
+=item Z<> B<yyyymmdd(sec)>
+
+=item Z<> B<hhmmss(sec)>
+
+=item Z<> B<hhmm(sec)>
+
+Time functions. I<sec> is number of seconds since epoch. If left out
+it will use current local time. Example:
+
+ parallel echo 'Now: {= $_=yyyy_mm_dd_hh_mm_ss() =}' ::: Dummy
+ parallel echo 'The end: {= $_=yyyy_mm_dd_hh_mm_ss($_) =}' \
+ ::: 2147483648
+
+=back
+
+Example:
+
+ seq 10 | parallel echo {} + 1 is {= '$_++' =}
+ parallel csh -c {= '$_="mkdir ".Q($_)' =} ::: '12" dir'
+ seq 50 | parallel echo job {#} of {= '$_=total_jobs()' =}
+
+See also: B<--rpl> B<--parens> B<{}> B<{=>I<n> I<perl expression>B<=}>
+B<--filter>
+
+
+=item B<{=>I<n> I<perl expression>B<=}>
+
+Positional equivalent to B<{=>I<perl expression>B<=}>.
+
+To understand positional replacement strings see B<{>I<n>B<}>.
+
+See also: B<{=>I<perl expression>B<=}> B<{>I<n>B<}>
+
+
+=item B<:::> I<arguments>
+
+Use arguments on the command line as input source.
+
+Unlike other options for GNU B<parallel> B<:::> is placed after the
+I<command> and before the arguments.
+
+The following are equivalent:
+
+ (echo file1; echo file2) | parallel gzip
+ parallel gzip ::: file1 file2
+ parallel gzip {} ::: file1 file2
+ parallel --arg-sep ,, gzip {} ,, file1 file2
+ parallel --arg-sep ,, gzip ,, file1 file2
+ parallel ::: "gzip file1" "gzip file2"
+
+To avoid treating B<:::> as special use B<--arg-sep> to set the
+argument separator to something else.
+
+If multiple B<:::> are given, each group will be treated as an input
+source, and all combinations of input sources will be
+generated. E.g. ::: 1 2 ::: a b c will result in the combinations
+(1,a) (1,b) (1,c) (2,a) (2,b) (2,c). This is useful for replacing
+nested for-loops.
+
+B<:::>, B<::::>, and B<--arg-file> can be mixed. So these are equivalent:
+
+ parallel echo {1} {2} {3} ::: 6 7 ::: 4 5 ::: 1 2 3
+ parallel echo {1} {2} {3} :::: <(seq 6 7) <(seq 4 5) \
+ :::: <(seq 1 3)
+ parallel -a <(seq 6 7) echo {1} {2} {3} :::: <(seq 4 5) \
+ :::: <(seq 1 3)
+ parallel -a <(seq 6 7) -a <(seq 4 5) echo {1} {2} {3} \
+ ::: 1 2 3
+ seq 6 7 | parallel -a - -a <(seq 4 5) echo {1} {2} {3} \
+ ::: 1 2 3
+ seq 4 5 | parallel echo {1} {2} {3} :::: <(seq 6 7) - \
+ ::: 1 2 3
+
+See also: B<--arg-sep> B<--arg-file> B<::::> B<:::+> B<::::+> B<--link>
+
+
+=item B<:::+> I<arguments>
+
+Like B<:::> but linked like B<--link> to the previous input source.
+
+Contrary to B<--link>, values do not wrap: The shortest input source
+determines the length.
+
+Example:
+
+ parallel echo ::: a b c :::+ 1 2 3 ::: X Y :::+ 11 22
+
+See also: B<::::+> B<--link>
+
+
+=item B<::::> I<argfiles>
+
+Another way to write B<--arg-file> I<argfile1> B<--arg-file> I<argfile2> ...
+
+B<:::> and B<::::> can be mixed.
+
+See also: B<--arg-file> B<:::> B<::::+> B<--link>
+
+
+=item B<::::+> I<argfiles>
+
+Like B<::::> but linked like B<--link> to the previous input source.
+
+Contrary to B<--link>, values do not wrap: The shortest input source
+determines the length.
+
+See also: B<--arg-file> B<:::+> B<--link>
+
+
+=item B<--null>
+
+=item B<-0>
+
+Use NUL as delimiter.
+
+Normally input lines will end in \n (newline). If they end in \0
+(NUL), then use this option. It is useful for processing arguments
+that may contain \n (newline).
+
+Shorthand for B<--delimiter '\0'>.
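+
+Example: B<find -print0> outputs NUL-separated file names, so names
+containing newlines are passed on unharmed:
+
+  find . -type f -print0 | parallel -0 gzip -9 {}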
+
+See also: B<--delimiter>
+
+
+=item B<--arg-file> I<input-file>
+
+=item B<-a> I<input-file>
+
+Use I<input-file> as input source.
+
+If multiple B<--arg-file> are given, each I<input-file> will be treated as an
+input source, and all combinations of input sources will be
+generated. E.g. The file B<foo> contains B<1 2>, the file
+B<bar> contains B<a b c>. B<-a foo> B<-a bar> will result in the combinations
+(1,a) (1,b) (1,c) (2,a) (2,b) (2,c). This is useful for replacing
+nested for-loops.
+
+If I<input-file> starts with B<+> the file will be linked to the
+previous B<--arg-file> E.g. The file B<foo> contains B<1 2>, the file
+B<bar> contains B<a b>. B<-a foo> B<-a +bar> will result in the
+combinations (1,a) (2,b) like B<--link> instead of generating all
+combinations.
+
+See also: B<--link> B<{>I<n>B<}> B<::::> B<::::+> B<:::>
+
+
+=item B<--arg-file-sep> I<sep-str>
+
+Use I<sep-str> instead of B<::::> as separator string between command
+and argument files.
+
+Useful if B<::::> is used for something else by the command.
+
+See also: B<::::>
+
+
+=item B<--arg-sep> I<sep-str>
+
+Use I<sep-str> instead of B<:::> as separator string.
+
+Useful if B<:::> is used for something else by the command.
+
+Also useful if your command uses B<:::> but you still want to read
+arguments from stdin (standard input): Simply change B<--arg-sep> to a
+string that is not in the command line.
+
+See also: B<:::>
+
+
+=item B<--bar>
+
+Show progress as a progress bar.
+
+The bar shows: % of jobs completed, estimated seconds left, and
+number of jobs started.
+
+It is compatible with B<zenity>:
+
+ seq 1000 | parallel -j30 --bar '(echo {};sleep 0.1)' \
+ 2> >(perl -pe 'BEGIN{$/="\r";$|=1};s/\r/\n/g' |
+ zenity --progress --auto-kill) | wc
+
+See also: B<--eta> B<--progress> B<--total-jobs>
+
+
+=item B<--basefile> I<file>
+
+=item B<--bf> I<file>
+
+I<file> will be transferred to each sshlogin before first job is
+started.
+
+It will be removed if B<--cleanup> is active. The file may be a script
+to run or some common base data needed for the job. Multiple
+B<--bf> can be specified to transfer more basefiles. The I<file> will be
+transferred the same way as B<--transferfile>.
+
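+Example: transfer a lookup file once and use it in every job
+(I<server.example.com> and I<lookup.txt> are stand-ins):
+
+  parallel --bf lookup.txt -S server.example.com \
+    'grep -c {} lookup.txt' ::: foo bar baz
+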
+See also: B<--sshlogin> B<--transfer> B<--return> B<--cleanup>
+B<--workdir>
+
+=item B<--basenamereplace> I<replace-str>
+
+=item B<--bnr> I<replace-str>
+
+Use the replacement string I<replace-str> instead of B<{/}> for
+basename of input line.
+
+See also: B<{/}>
+
+
+=item B<--basenameextensionreplace> I<replace-str>
+
+=item B<--bner> I<replace-str>
+
+Use the replacement string I<replace-str> instead of B<{/.}> for basename of input line without extension.
+
+See also: B<{/.}>
+
+
+=item B<--bin> I<binexpr>
+
+Use I<binexpr> as binning key and bin input to the jobs.
+
+I<binexpr> is [column number|column name] [perlexpression] e.g.:
+
+ 3
+ Address
+ 3 $_%=100
+ Address s/\D//g
+
+Each input line is split using B<--colsep>. The value of the column
+is put into $_, the perl expression is executed, and the resulting
+value is the job slot that will be given the line. If the value is
+bigger than the number of jobslots, it is taken modulo the number of
+jobslots.
+
+This is similar to B<--shard> but the hashing algorithm is a simple
+modulo, which makes it predictable which jobslot will receive which
+value.
+
+Performance is on the order of 100K rows per second. It is faster if
+the binning column is small (<10), slower if it is big (>100).
+
+B<--bin> requires B<--pipe> and a fixed numeric value for B<--jobs>.
+
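+A minimal sketch: bin lines into 4 jobslots by the value of column 1
+modulo 4:
+
+  seq 10 | parallel --pipe --colsep ' ' --bin 1 -j4 wc -l
+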
+See also: SPREADING BLOCKS OF DATA B<--group-by> B<--round-robin>
+B<--shard>
+
+
+=item B<--bg>
+
+Run command in background.
+
+GNU B<parallel> will normally wait for the completion of a job. With
+B<--bg> GNU B<parallel> will not wait for completion of the command
+before exiting.
+
+This is the default if B<--semaphore> is set.
+
+Implies B<--semaphore>.
+
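+A minimal sketch: start a job in the background and wait for it
+later:
+
+  parallel --bg sleep 10
+  # ... do other work ...
+  parallel --semaphore --wait
+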
+See also: B<--fg> B<man sem>
+
+
+=cut
+
+# You accept to be added to a public hall of shame by
+# removing this section.
+=item B<--bibtex>
+
+=item B<--citation>
+
+Print the citation notice and BibTeX entry for GNU B<parallel>,
+silence citation notice for all future runs, and exit. It will not run
+any commands.
+
+If it is impossible for you to run B<--citation> you can instead use
+B<--will-cite>, which will run commands, but which will only silence
+the citation notice for this single run.
+
+If you use B<--will-cite> in scripts to be run by others you are
+making it harder for others to see the citation notice. The
+development of GNU B<parallel> is indirectly financed through
+citations, so if your users do not know they should cite then you are
+making it harder to finance development. However, if you pay 10000
+EUR, you have done your part to finance future development and should
+feel free to use B<--will-cite> in scripts.
+
+If you do not want to help financing future development by letting
+other users see the citation notice or by paying, then please consider
+using another tool instead of GNU B<parallel>. You can find some of
+the alternatives in B<man parallel_alternatives>.
+
+
+=item B<--block> I<size>
+
+=item B<--block-size> I<size>
+
+Size of block in bytes to read at a time.
+
+The I<size> can be postfixed with K, M, G, T, P, k, m, g, t, or p.
+
+GNU B<parallel> tries to meet the block size but can be off by the
+length of one record. For performance reasons I<size> should be bigger
+than two records. GNU B<parallel> will warn you and automatically
+increase the size if you choose a I<size> that is too small.
+
+If you use B<-N>, B<--block> should be bigger than N+1 records.
+
+I<size> defaults to 1M.
+
+When using B<--pipe-part> a negative block size is not interpreted as a
+blocksize but as the number of blocks each jobslot should have. So
+this will run 10*5 = 50 jobs in total:
+
+ parallel --pipe-part -a myfile --block -10 -j5 wc
+
+This is an efficient alternative to B<--round-robin> because data is
+never read by GNU B<parallel>, but you can still have very few
+jobslots process large amounts of data.
+
+See also: UNIT PREFIX B<-N> B<--pipe> B<--pipe-part> B<--round-robin>
+B<--block-timeout>
+
+=item B<--block-timeout> I<duration>
+
+=item B<--bt> I<duration>
+
+Timeout for reading block when using B<--pipe>.
+
+If it takes longer than I<duration> to read a full block, use the
+partial block read so far.
+
+I<duration> is in seconds, but can be postfixed with s, m, h, or d.
+
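+A minimal sketch: B<a> arrives immediately, B<b> only after 15
+seconds; with a 10 second timeout B<a> is passed on as a partial
+block:
+
+  (echo a; sleep 15; echo b) | parallel --pipe --bt 10s wc
+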
+See also: TIME POSTFIXES B<--pipe> B<--block>
+
+
+=item B<--cat>
+
+Create a temporary file with content.
+
+Normally B<--pipe>/B<--pipe-part> will give data to the program on
+stdin (standard input). With B<--cat> GNU B<parallel> will create a
+temporary file with the name in B<{}>, so you can do: B<parallel
+--pipe --cat wc {}>.
+
+Implies B<--pipe> unless B<--pipe-part> is used.
+
+See also: B<--pipe> B<--pipe-part> B<--fifo>
+
+
+=item B<--cleanup>
+
+Remove transferred files.
+
+B<--cleanup> will remove the transferred files on the remote computer
+after processing is done.
+
+ find log -name '*gz' | parallel \
+ --sshlogin server.example.com --transferfile {} \
+ --return {.}.bz2 --cleanup "zcat {} | bzip -9 >{.}.bz2"
+
+With B<--transferfile {}> the file transferred to the remote computer
+will be removed on the remote computer. Directories on the remote
+computer containing the file will be removed if they are empty.
+
+With B<--return> the file transferred from the remote computer will be
+removed on the remote computer. Directories on the remote
+computer containing the file will be removed if they are empty.
+
+B<--cleanup> is ignored when not used with B<--basefile>,
+B<--transfer>, B<--transferfile> or B<--return>.
+
+See also: B<--basefile> B<--transfer> B<--transferfile> B<--sshlogin>
+B<--return>
+
+
+=item B<--color>
+
+Colour output.
+
+Colour the output. Each job gets its own colour combination
+(background+foreground).
+
+B<--color> is ignored when using B<-u>.
+
+See also: B<--color-failed>
+
+
+=item B<--color-failed>
+
+=item B<--cf>
+
+Colour the output from failing jobs white on red.
+
+Useful if you have a lot of jobs and want to focus on the failing
+jobs.
+
+B<--color-failed> is ignored when using B<-u> or B<--line-buffer>,
+and is unreliable when using B<--latest-line>.
+
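+Example: the jobs exiting with 1 or 2 are colored white on red:
+
+  parallel --color-failed 'echo {}; exit {}' ::: 0 1 0 2
+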
+See also: B<--color>
+
+
+=item B<--colsep> I<regexp>
+
+=item B<-C> I<regexp>
+
+Column separator.
+
+The input will be treated as a table with I<regexp> separating the
+columns. The n'th column can be accessed using B<{>I<n>B<}> or
+B<{>I<n>.B<}>. E.g. B<{3}> is the 3rd column.
+
+If there are more input sources, each input source will be separated,
+but the columns from each input source will be linked.
+
+ parallel --colsep '-' echo {4} {3} {2} {1} \
+ ::: A-B C-D ::: e-f g-h
+
+B<--colsep> implies B<--trim rl>, which can be overridden with
+B<--trim n>.
+
+I<regexp> is a Perl Regular Expression:
+https://perldoc.perl.org/perlre.html
+
+See also: B<--csv> B<{>I<n>B<}> B<--trim> B<--link>
+
+
+=item B<--combineexec> I<name> (beta testing)
+
+=item B<--combine-executable> I<name> (beta testing)
+
+Combine GNU B<parallel> with another program into a single executable.
+
+Let us say you have developed I<myprg> which takes a single
+argument. You do not want to parallelize it yourself.
+
+You could write a wrapper that uses GNU B<parallel> called B<myparprg>:
+
+ #!/bin/sh
+
+ parallel myprg ::: "$@"
+
+But for others to use this, they need to install: GNU B<parallel>,
+B<myprg>, and B<myparprg>.
+
+It would be easier to install if all could be packed into a single
+executable.
+
+If B<myprg> is written in shell, you can use B<--embed>.
+
+If B<myprg> is a binary you can use B<--combineexec>.
+
+Here we use B<gzip> as example:
+
+ parallel --combineexec pargzip gzip -9 :::
+
+You can now do:
+
+ ./pargzip foo bar baz
+
+If you want to pass options to B<gzip> you can do:
+
+ parallel --combineexec pargzip gzip
+
+Followed by:
+
+ ./pargzip -1 ::: foo bar baz
+
+See also: B<--embed> B<--shebang> B<--shebang-wrap>
+
+
+=item B<--compress>
+
+Compress temporary files.
+
+If the output is big and very compressible this will take up less disk
+space in $TMPDIR and possibly be faster due to less disk I/O.
+
+GNU B<parallel> will try B<pzstd>, B<lbzip2>, B<pbzip2>, B<zstd>,
+B<pigz>, B<lz4>, B<lzop>, B<plzip>, B<lzip>, B<lrz>, B<gzip>, B<pxz>,
+B<lzma>, B<bzip2>, B<xz>, B<clzip>, in that order, and use the first
+available.
+
+GNU B<parallel> will use up to 8 processes per job waiting to be
+printed. See B<man parallel_design> for details.
+
+See also: B<--compress-program>
+
+
+=item B<--compress-program> I<prg>
+
+=item B<--decompress-program> I<prg>
+
+Use I<prg> for (de)compressing temporary files.
+
+It is assumed that I<prg -dc> will decompress stdin (standard input)
+to stdout (standard output) unless B<--decompress-program> is given.
+
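+Example (assuming B<pigz> is installed):
+
+  seq 10000000 | parallel --pipe --compress \
+    --compress-program pigz wc
+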
+See also: B<--compress>
+
+
+=item B<--csv>
+
+Treat input as CSV-format.
+
+B<--colsep> sets the field delimiter. It works very much like
+B<--colsep> except it deals correctly with quoting. Compare:
+
+ echo '"1 big, 2 small","2""x4"" plank",12.34' |
+ parallel --csv echo {1} of {2} at {3}
+
+ echo '"1 big, 2 small","2""x4"" plank",12.34' |
+ parallel --colsep ',' echo {1} of {2} at {3}
+
+Even quoted newlines are parsed correctly:
+
+ (echo '"Start of field 1 with newline'
+ echo 'Line 2 in field 1";value 2') |
+ parallel --csv --colsep ';' echo Field 1: {1} Field 2: {2}
+
+When used with B<--pipe> only pass full CSV-records.
+
+See also: B<--pipe> B<--link> B<{>I<n>B<}> B<--colsep> B<--header>
+
+
+=item B<--ctag> (obsolete: use B<--color> B<--tag>)
+
+Color tag.
+
+If the values look very similar, it can be hard to tell from the
+output when a new value is used. B<--ctag> gives each value a random
+color.
+
+See also: B<--color> B<--tag>
+
+
+=item B<--ctagstring> I<str> (obsolete: use B<--color> B<--tagstring>)
+
+Color tagstring.
+
+See also: B<--color> B<--ctag> B<--tagstring>
+
+
+=item B<--delay> I<duration>
+
+Delay starting next job by I<duration>.
+
+GNU B<parallel> will not start another job for the next I<duration>.
+
+I<duration> is in seconds, but can be postfixed with s, m, h, or d.
+
+If you append 'auto' to I<duration> (e.g. 13m3sauto) GNU B<parallel>
+will automatically try to find the optimal value: If a job fails,
+I<duration> is increased by 30%. If a job succeeds, I<duration> is
+decreased by 10%.
+
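+Example: wait 2.5 seconds between starting each job:
+
+  parallel --delay 2.5 echo Starting {} ::: 1 2 3
+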
+See also: TIME POSTFIXES B<--retries> B<--ssh-delay>
+
+
+=item B<--delimiter> I<delim>
+
+=item B<-d> I<delim>
+
+Input items are terminated by I<delim>.
+
+The specified delimiter may be characters, C-style character escapes
+such as \n, or octal or hexadecimal escape codes. Octal and
+hexadecimal escape codes are understood as for the printf command.
+
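+Example: read colon separated input items:
+
+  printf 'a:b:c' | parallel -d : echo
+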
+See also: B<--colsep>
+
+
+=item B<--dirnamereplace> I<replace-str>
+
+=item B<--dnr> I<replace-str>
+
+Use the replacement string I<replace-str> instead of B<{//}> for
+dirname of input line.
+
+See also: B<{//}>
+
+
+=item B<--dry-run>
+
+Print the job to run on stdout (standard output), but do not run the
+job.
+
+Use B<-v -v> to include the wrapping that GNU B<parallel> generates
+(for remote jobs, B<--tmux>, B<--nice>, B<--pipe>, B<--pipe-part>,
+B<--fifo> and B<--cat>). Do not count on this literally, though, as
+the job may be scheduled on another computer or the local computer if
+: is in the list.
+
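+Example: print the commands without running them:
+
+  parallel --dry-run gzip -9 {} ::: foo.txt bar.txt
+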
+See also: B<-v>
+
+
+=item B<-E> I<eof-str>
+
+Set the end of file string to I<eof-str>.
+
+If the end of file string occurs as a line of input, the rest of the
+input is not read. If neither B<-E> nor B<-e> is used, no end of file
+string is used.
+
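+Example: stop reading input when the line B<stop> is met:
+
+  (echo a; echo stop; echo b) | parallel -E stop echo
+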
+
+=item B<--eof>[=I<eof-str>]
+
+=item B<-e>[I<eof-str>]
+
+This option is a synonym for the B<-E> option.
+
+Use B<-E> instead, because it is POSIX compliant for B<xargs> while
+this option is not. If I<eof-str> is omitted, there is no end of file
+string. If neither B<-E> nor B<-e> is used, no end of file string is
+used.
+
+
+=item B<--embed>
+
+Embed GNU B<parallel> in a shell script.
+
+If you need to distribute your script to someone who does not want to
+install GNU B<parallel> you can embed GNU B<parallel> in your own
+shell script:
+
+ parallel --embed > new_script
+
+After which you add your code at the end of B<new_script>. This is tested
+on B<ash>, B<bash>, B<dash>, B<ksh>, B<sh>, and B<zsh>.
+
+
+=item B<--env> I<var>
+
+Copy exported environment variable I<var>.
+
+This will copy I<var> to the environment that the command is run
+in. This is especially useful for remote execution.
+
+In Bash I<var> can also be a Bash function - just remember to B<export
+-f> the function.
+
+The variable '_' is special. It will copy all exported environment
+variables except for the ones mentioned in ~/.parallel/ignored_vars.
+
+To copy the full environment (both exported and not exported
+variables, arrays, and functions) use B<env_parallel>.
+
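+Example (I<server.example.com> is a stand-in):
+
+  export MYVAR="set locally"
+  parallel --env MYVAR -S server.example.com \
+    echo '$MYVAR' ::: works
+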
+See also: B<--record-env> B<--session> B<--sshlogin> I<command>
+B<env_parallel>
+
+
+=item B<--eta>
+
+Show the estimated number of seconds before finishing.
+
+This forces GNU B<parallel> to read all jobs before starting to find
+the number of jobs (unless you use B<--total-jobs>). GNU B<parallel>
+normally only reads the next job to run.
+
+The estimate is based on the runtime of finished jobs, so the first
+estimate will only be shown when the first job has finished.
+
+Implies B<--progress>.
+
+See also: B<--bar> B<--progress> B<--total-jobs>
+
+
+=item B<--fg>
+
+Run command in foreground.
+
+With B<--tmux> and B<--tmuxpane> GNU B<parallel> will start B<tmux> in
+the foreground.
+
+With B<--semaphore> GNU B<parallel> will run the command in the
+foreground (opposite B<--bg>), and wait for completion of the command
+before exiting. Exit code will be that of the command.
+
+See also: B<--bg> B<man sem>
+
+
+=item B<--fifo>
+
+Create a temporary fifo with content.
+
+Normally B<--pipe> and B<--pipe-part> will give data to the program on
+stdin (standard input). With B<--fifo> GNU B<parallel> will create a
+temporary fifo with the name in B<{}>, so you can do:
+
+ parallel --pipe --fifo wc {}
+
+Beware: If the fifo is never opened for reading, the job will block forever:
+
+ seq 1000000 | parallel --fifo echo This will block forever
+ seq 1000000 | parallel --fifo 'echo This will not block < {}'
+
+By using B<--fifo> instead of B<--cat> you may save I/O as B<--cat>
+will write to a temporary file, whereas B<--fifo> will not.
+
+Implies B<--pipe> unless B<--pipe-part> is used.
+
+See also: B<--cat> B<--pipe> B<--pipe-part>
+
+
+=item B<--filter> I<filter>
+
+Only run jobs where I<filter> is true.
+
+I<filter> can contain replacement strings and Perl code. Example:
+
+ parallel --filter '{1}+{2}+{3} < 10' echo {1},{2},{3} \
+ ::: {1..10} ::: {3..8} ::: {3..10}
+
+Outputs: 1,3,3 1,3,4 1,3,5 1,4,3 1,4,4 1,5,3 2,3,3 2,3,4 2,4,3 3,3,3
+
+ parallel --filter '{1} < {2}*{2}' echo {1},{2} \
+ ::: {1..10} ::: {1..3}
+
+Outputs: 1,2 1,3 2,2 2,3 3,2 3,3 4,3 5,3 6,3 7,3 8,3
+
+ parallel --filter '{choose_k}' --plus echo {1},{2},{3} \
+ ::: {1..5} ::: {1..5} ::: {1..5}
+
+Outputs: 1,2,3 1,2,4 1,2,5 1,3,4 1,3,5 1,4,5 2,3,4 2,3,5 2,4,5 3,4,5
+
+See also: B<skip()> B<--no-run-if-empty> B<{choose_k}>
+
+
+=item B<--filter-hosts>
+
+Remove down hosts.
+
+For each remote host: check that login through ssh works. If not: do
+not use this host.
+
+For performance reasons, this check is performed only at the start and
+every time B<--sshloginfile> is changed. If a host goes down after
+the first check, it will go undetected until B<--sshloginfile> is
+changed; B<--retries> can be used to mitigate this.
+
+Currently you can I<not> put B<--filter-hosts> in a profile,
+$PARALLEL, /etc/parallel/config or similar. This is because GNU
+B<parallel> uses GNU B<parallel> to compute this, so you will get an
+infinite loop. This will likely be fixed in a later release.
+
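+Example (I<up.example.com> and I<down.example.com> are stand-ins):
+
+  parallel --filter-hosts --nonall \
+    -S up.example.com,down.example.com uptime
+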
+See also: B<--sshloginfile> B<--sshlogin> B<--retries>
+
+
+=item B<--gnu>
+
+Behave like GNU B<parallel>.
+
+This option historically took precedence over B<--tollef>. The
+B<--tollef> option is now retired, and therefore may not be
+used. B<--gnu> is kept for compatibility, but does nothing.
+
+
+=item B<--group>
+
+Group output.
+
+Output from each job is grouped together and is only printed when the
+command is finished. Stdout (standard output) first followed by stderr
+(standard error).
+
+This takes in the order of 0.5ms CPU time per job and depends on the
+speed of your disk for larger output.
+
+B<--group> is the default.
+
+See also: B<--line-buffer> B<--ungroup> B<--tag>
+
+
+=item B<--group-by> I<val>
+
+Group input by value.
+
+Combined with B<--pipe>/B<--pipe-part> B<--group-by> groups lines with
+the same value into a record.
+
+The value can be computed from the full line or from a single column.
+
+I<val> can be:
+
+=over 15
+
+=item Z<> column number
+
+Use the value in the column numbered.
+
+=item Z<> column name
+
+Treat the first line as a header and use the value in the column
+named.
+
+(Not supported with B<--pipe-part>).
+
+=item Z<> perl expression
+
+Run the perl expression and use $_ as the value.
+
+=item Z<> column number perl expression
+
+Put the value of the column in $_, run the perl expression, and use $_ as the value.
+
+=item Z<> column name perl expression
+
+Put the value of the column in $_, run the perl expression, and use $_ as the value.
+
+(Not supported with B<--pipe-part>).
+
+=back
+
+Example:
+
+ UserID, Consumption
+ 123, 1
+ 123, 2
+ 12-3, 1
+ 221, 3
+ 221, 1
+ 2/21, 5
+
+If you want to group 123, 12-3, 221, and 2/21 into 4 records and pass
+one record at a time to B<wc>:
+
+ tail -n +2 table.csv | \
+ parallel --pipe --colsep , --group-by 1 -kN1 wc
+
+Make GNU B<parallel> treat the first line as a header:
+
+ cat table.csv | \
+ parallel --pipe --colsep , --header : --group-by 1 -kN1 wc
+
+Address column by column name:
+
+ cat table.csv | \
+ parallel --pipe --colsep , --header : --group-by UserID -kN1 wc
+
+If 12-3 and 123 are really the same UserID, remove non-digits in
+UserID when grouping:
+
+ cat table.csv | parallel --pipe --colsep , --header : \
+ --group-by 'UserID s/\D//g' -kN1 wc
+
+See also: SPREADING BLOCKS OF DATA B<--pipe> B<--pipe-part> B<--bin>
+B<--shard> B<--round-robin>
+
+
+=item B<--help>
+
+=item B<-h>
+
+Print a summary of the options to GNU B<parallel> and exit.
+
+
+=item B<--halt-on-error> I<val>
+
+=item B<--halt> I<val>
+
+When should GNU B<parallel> terminate?
+
+In some situations it makes no sense to run all jobs. GNU
+B<parallel> should simply stop as soon as a condition is met.
+
+I<val> defaults to B<never>, which runs all jobs no matter what.
+
+I<val> can also take on the form of I<when>,I<why>.
+
+I<when> can be 'now' which means kill all running jobs and halt
+immediately, or it can be 'soon' which means wait for all running jobs
+to complete, but start no new jobs.
+
+I<why> can be 'fail=X', 'fail=Y%', 'success=X', 'success=Y%',
+'done=X', or 'done=Y%' where X is the number of jobs that have to
+fail, succeed, or be done before halting, and Y is the percentage of
+jobs that have to fail, succeed, or be done before halting.
+
+Example:
+
+=over 23
+
+=item Z<> --halt now,fail=1
+
+exit when a job has failed. Kill running jobs.
+
+=item Z<> --halt soon,fail=3
+
+exit when 3 jobs have failed, but wait for running jobs to complete.
+
+=item Z<> --halt soon,fail=3%
+
+exit when 3% of the jobs have failed, but wait for running jobs to complete.
+
+=item Z<> --halt now,success=1
+
+exit when a job has succeeded. Kill running jobs.
+
+=item Z<> --halt soon,success=3
+
+exit when 3 jobs have succeeded, but wait for running jobs to complete.
+
+=item Z<> --halt now,success=3%
+
+exit when 3% of the jobs have succeeded. Kill running jobs.
+
+=item Z<> --halt now,done=1
+
+exit when a job has finished. Kill running jobs.
+
+=item Z<> --halt soon,done=3
+
+exit when 3 jobs have finished, but wait for running jobs to complete.
+
+=item Z<> --halt now,done=3%
+
+exit when 3% of the jobs have finished. Kill running jobs.
+
+=back
+
+For backwards compatibility these also work:
+
+=over 12
+
+=item Z<>0
+
+never
+
+=item Z<>1
+
+soon,fail=1
+
+=item Z<>2
+
+now,fail=1
+
+=item Z<>-1
+
+soon,success=1
+
+=item Z<>-2
+
+now,success=1
+
+=item Z<>1-99%
+
+soon,fail=1-99%
+
+=back
+
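+A runnable example: the job with argument 1 fails after 1 second,
+killing the two long-running jobs:
+
+  parallel -j3 --halt now,fail=1 'sleep {}; exit {}' ::: 10 10 1
+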
+
+=item B<--header> I<regexp>
+
+Use regexp as header.
+
+For normal usage the matched header (typically the first line:
+B<--header '.*\n'>) will be split using B<--colsep> (which will
+default to '\t') and column names can be used as replacement
+variables: B<{column name}>, B<{column name/}>, B<{column name//}>,
+B<{column name/.}>, B<{column name.}>, B<{=column name perl expression
+=}>, ..
+
+For B<--pipe> the matched header will be prepended to each output.
+
+B<--header :> is an alias for B<--header '.*\n'>.
+
+If I<regexp> is a number, it is a fixed number of lines.
+
+B<--header 0> is special: It will make replacement strings for files
+given with B<--arg-file> or B<::::>. It will make B<{foo/bar}> for the
+file B<foo/bar>.
+
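+Example: use the names from the header line as replacement strings:
+
+  printf 'name\tage\nAlice\t10\nBob\t12\n' |
+    parallel --header : echo {name} is {age} years old
+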
+See also: B<--colsep> B<--pipe> B<--pipe-part> B<--arg-file>
+
+
+=item B<--hostgroups>
+
+=item B<--hgrp>
+
+Enable hostgroups on arguments.
+
+If an argument contains '@' the string after '@' will be removed and
+treated as a list of hostgroups on which this job is allowed to
+run. If there is no B<--sshlogin> with a corresponding group, the job
+will run on any hostgroup.
+
+Example:
+
+ parallel --hostgroups \
+ --sshlogin @grp1/myserver1 -S @grp1+grp2/myserver2 \
+ --sshlogin @grp3/myserver3 \
+ echo ::: my_grp1_arg@grp1 arg_for_grp2@grp2 third@grp1+grp3
+
+B<my_grp1_arg> may be run on either B<myserver1> or B<myserver2>,
+B<third> may be run on either B<myserver1> or B<myserver3>,
+but B<arg_for_grp2> will only be run on B<myserver2>.
+
+See also: B<--sshlogin> B<$PARALLEL_HOSTGROUPS> B<$PARALLEL_ARGHOSTGROUPS>
+
+
+=item B<-I> I<replace-str>
+
+Use the replacement string I<replace-str> instead of B<{}>.
+
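+Example: use B<,,> as replacement string so a literal B<{}> can be
+passed through:
+
+  parallel -I ,, echo 'literal {} and argument ,,' ::: A B
+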
+See also: B<{}>
+
+
+=item B<--replace> [I<replace-str>]
+
+=item B<-i> [I<replace-str>]
+
+This option is deprecated; use B<-I> instead.
+
+This option is a synonym for B<-I>I<replace-str> if I<replace-str> is
+specified, and for B<-I {}> otherwise.
+
+See also: B<{}>
+
+
+=item B<--joblog> I<logfile>
+
+=item B<--jl> I<logfile>
+
+Logfile for executed jobs.
+
+Save a list of the executed jobs to I<logfile> in the following TAB
+separated format: sequence number, sshlogin, start time as seconds
+since epoch, run time in seconds, bytes in files transferred, bytes in
+files returned, exit status, signal, and command run.
+
+For B<--pipe> the bytes transferred and bytes returned are the number
+of bytes of input and output.
+
+If B<logfile> is prepended with '+' log lines will be appended to the
+logfile.
+
+To convert the times into ISO-8601 strict do:
+
+ cat logfile | perl -a -F"\t" -ne \
+ 'chomp($F[2]=`date -d \@$F[2] +%FT%T`); print join("\t",@F)'
+
+If the host is long, you can use B<column -t> to pretty print it:
+
+ cat joblog | column -t
+
+See also: B<--resume> B<--resume-failed>
+
+
+=item B<--jobs> I<num>
+
+=item B<-j> I<num>
+
+=item B<--max-procs> I<num>
+
+=item B<-P> I<num>
+
+Number of jobslots on each machine.
+
+Run up to I<num> jobs in parallel. Default is 100%.
+
+=over 7
+
+=item I<num>
+
+Run up to I<num> jobs in parallel.
+
+=item Z<>0
+
+Run as many as possible (this can take a while to determine).
+
+Due to a bug B<-j 0> will also evaluate replacement strings twice up
+to the number of jobslots:
+
+ # This will not count from 1 but from number-of-jobslots
+ seq 10000 | parallel -j0 echo '{= $_ = $foo++; =}' | head
+ # This will count from 1
+ seq 10000 | parallel -j100 echo '{= $_ = $foo++; =}' | head
+
+=item I<num>%
+
+Multiply the number of CPU threads by I<num> percent. E.g. 100% means
+one job per CPU thread on each machine.
+
+=item +I<num>
+
+Add I<num> to the number of CPU threads.
+
+=item -I<num>
+
+Subtract I<num> from the number of CPU threads.
+
+=item I<expr>
+
+Evaluate I<expr>. E.g. '12/2' to get 6, '+25%' gives the same as
+'125%', or complex expressions like '+3*log(55)%' which means:
+multiply 3 by log(55), multiply that by the number of CPU threads and
+divide by 100, add this to the number of CPU threads.
+
+An expression that evaluates to less than 1 is replaced with 1.
+
+=item I<procfile>
+
+Read parameter from file.
+
+Use the content of I<procfile> as parameter for
+I<-j>. E.g. I<procfile> could contain the string 100% or +2 or 10.
+
+If I<procfile> is changed when a job completes, I<procfile> is read
+again and the new number of jobs is computed. If the number is lower
+than before, running jobs will be allowed to finish but new jobs will
+not be started until the wanted number of jobs has been reached. This
+makes it possible to change the number of simultaneous running jobs
+while GNU B<parallel> is running (see the example below).
+
+=back
+
+If the evaluated number is less than 1 then 1 will be used.
+
+If B<--semaphore> is set, the default is 1 thus making a mutex.
+
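+Example of using I<procfile> to change the limit while GNU
+B<parallel> runs (the file name I<procs> is a stand-in):
+
+  echo 8 > procs
+  seq 100 | parallel -j procs 'sleep 1; echo {}' &
+  sleep 3
+  # From now on at most 2 jobs run simultaneously
+  echo 2 > procs
+  wait
+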
+See also: B<--use-cores-instead-of-threads>
+B<--use-sockets-instead-of-threads>
+
+
+=item B<--keep-order>
+
+=item B<-k>
+
+Keep sequence of output same as the order of input.
+
+Normally the output of a job will be printed as soon as the job
+completes. Try this to see the difference:
+
+ parallel -j4 sleep {}\; echo {} ::: 2 1 4 3
+ parallel -j4 -k sleep {}\; echo {} ::: 2 1 4 3
+
+If used with B<--onall> or B<--nonall> the output will be grouped by
+sshlogin in sorted order.
+
+B<--keep-order> cannot keep the output order when used with B<--pipe
+--round-robin>. Here it instead means that the jobslots will get the
+same blocks as input in the same order in every run if the input is
+kept the same. Run each of these twice and compare:
+
+ seq 10000000 | parallel --pipe --round-robin 'sleep 0.$RANDOM; wc'
+ seq 10000000 | parallel --pipe -k --round-robin 'sleep 0.$RANDOM; wc'
+
+B<-k> only affects the order in which the output is printed - not the
+order in which jobs are run.
+
+See also: B<--group> B<--line-buffer>
+
+
+=item B<-L> I<recsize>
+
+When used with B<--pipe>: Read records of I<recsize>.
+
+When used otherwise: Use at most I<recsize> nonblank input lines per
+command line. Trailing blanks cause an input line to be logically
+continued on the next input line.
+
+B<-L 0> means read one line, but insert 0 arguments on the command
+line.
+
+I<recsize> can be postfixed with K, M, G, T, P, k, m, g, t, or p.
+
+Implies B<-X> unless B<-m>, B<--xargs>, or B<--pipe> is set.
+
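+Example: use 2 input lines per command line:
+
+  printf 'a\nb\nc\nd\n' | parallel -L2 echo
+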
+See also: UNIT PREFIX B<-N> B<--max-lines> B<--block> B<-X> B<-m>
+B<--xargs> B<--pipe>
+
+
+=item B<--max-lines> [I<recsize>]
+
+=item B<-l>[I<recsize>]
+
+When used with B<--pipe>: Read records of I<recsize> lines.
+
+When used otherwise: Synonym for the B<-L> option. Unlike B<-L>, the
+I<recsize> argument is optional. If I<recsize> is not specified,
+it defaults to one. The B<-l> option is deprecated since the POSIX
+standard specifies B<-L> instead.
+
+B<-l 0> is an alias for B<-l 1>.
+
+Implies B<-X> unless B<-m>, B<--xargs>, or B<--pipe> is set.
+
+See also: UNIT PREFIX B<-N> B<--block> B<-X> B<-m>
+B<--xargs> B<--pipe>
+
+
+=item B<--limit> "I<command> I<args>"
+
+Dynamic job limit.
+
+Before starting a new job run I<command> with I<args>. The exit value
+of I<command> determines what GNU B<parallel> will do:
+
+=over 4
+
+=item Z<>0
+
+Below limit. Start another job.
+
+=item Z<>1
+
+Over limit. Start no jobs.
+
+=item Z<>2
+
+Way over limit. Kill the youngest job.
+
+=back
+
+You can use any shell command. There are 3 predefined commands:
+
+=over 10
+
+=item "io I<n>"
+
+Limit for I/O. The amount of disk I/O will be computed as a value
+0-100, where 0 is no I/O and 100 means at least one disk is 100%
+saturated.
+
+=item "load I<n>"
+
+Similar to B<--load>.
+
+=item "mem I<n>"
+
+Similar to B<--memfree>.
+
+=back
+
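+Example: only start a new job if the load is below 4:
+
+  parallel --limit "load 4" 'sleep 1; echo {}' ::: {1..20}
+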
+See also: B<--memfree> B<--load>
+
+
+=item B<--latest-line>
+
+=item B<--ll>
+
+Print the latest line. Each job gets a single line that is updated
+with the latest output from the job.
+
+Example:
+
+ slow_seq() {
+ seq "$@" |
+ perl -ne '$|=1; for(split//){ print; select($a,$a,$a,0.03);}'
+ }
+ export -f slow_seq
+ parallel --shuf -j99 --ll --tag --bar --color slow_seq {} ::: {1..300}
+
+See also: B<--line-buffer>
+
+
+=item B<--line-buffer>
+
+=item B<--lb>
+
+Buffer output on line basis.
+
+B<--group> will keep the output together for a whole job. B<--ungroup>
+allows output to mixup with half a line coming from one job and half a
+line coming from another job. B<--line-buffer> fits between these two:
+GNU B<parallel> will print a full line, but will allow for mixing
+lines of different jobs.
+
+B<--line-buffer> takes more CPU power than both B<--group> and
+B<--ungroup>, but can be much faster than B<--group> if the CPU is not
+the limiting factor.
+
+Normally B<--line-buffer> does not buffer on disk, and can thus
+process an infinite amount of data, but it will buffer on disk when
+combined with: B<--keep-order>, B<--results>, B<--compress>, and
+B<--files>. This will make it as slow as B<--group> and will limit
+output to the available disk space.
+
+With B<--keep-order> B<--line-buffer> will output lines from the first
+job continuously while it is running, then lines from the second job
+while that is running. It will buffer full lines, but jobs will not
+mix. Compare:
+
+ parallel -j0 'echo [{};sleep {};echo {}]' ::: 1 3 2 4
+ parallel -j0 --lb 'echo [{};sleep {};echo {}]' ::: 1 3 2 4
+ parallel -j0 -k --lb 'echo [{};sleep {};echo {}]' ::: 1 3 2 4
+
+See also: B<--group> B<--ungroup> B<--keep-order> B<--tag>
+
+
+=item B<--link>
+
+=item B<--xapply>
+
+Link input sources.
+
+Read multiple input sources like the command B<xapply>. If multiple
+input sources are given, one argument will be read from each of the
+input sources. The arguments can be accessed in the command as B<{1}>
+.. B<{>I<n>B<}>, so B<{1}> will be a line from the first input source,
+and B<{6}> will refer to the line with the same line number from the
+6th input source.
+
+Compare these two:
+
+ parallel echo {1} {2} ::: 1 2 3 ::: a b c
+ parallel --link echo {1} {2} ::: 1 2 3 ::: a b c
+
+Arguments will be recycled if one input source has more arguments than the others:
+
+ parallel --link echo {1} {2} {3} \
+ ::: 1 2 ::: I II III ::: a b c d e f g
+
+See also: B<--header> B<:::+> B<::::+>
+
+
+=item B<--load> I<max-load>
+
+Only start jobs if load is less than max-load.
+
+Do not start new jobs on a given computer unless the number of running
+processes on the computer is less than I<max-load>. I<max-load> uses
+the same syntax as B<--jobs>, so I<100%> for one per CPU is a valid
+setting. The only difference is that 0 is interpreted as 0.01.
+
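+Example: do not start new jobs while 8 or more processes are running:
+
+  parallel --load 8 'sleep 1; echo {}' ::: {1..20}
+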
+See also: B<--limit> B<--jobs>
+
+
+=item B<--controlmaster>
+
+=item B<-M>
+
+Use ssh's ControlMaster to make ssh connections faster.
+
+Useful if jobs run remote and are very fast to run. This is disabled
+for sshlogins that specify their own ssh command.
+
+See also: B<--ssh> B<--sshlogin>
+
+
+=item B<-m>
+
+Multiple arguments.
+
+Insert as many arguments as the command line length permits. If
+multiple jobs are being run in parallel: distribute the arguments
+evenly among the jobs. Use B<-j1> or B<--xargs> to avoid this.
+
+If B<{}> is not used the arguments will be appended to the
+line. If B<{}> is used multiple times each B<{}> will be replaced
+with all the arguments.
+
+Support for B<-m> with B<--sshlogin> is limited and may fail.
+
+If in doubt use B<-X> as that will most likely do what is needed.
+
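+Example: with 2 jobslots the 5 arguments are spread evenly over 2
+command lines:
+
+  parallel -j2 -m echo ::: a b c d e
+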
+See also: B<-X> B<--xargs>
+
+
+=item B<--memfree> I<size>
+
+Minimum memory free when starting another job.
+
+The I<size> can be postfixed with K, M, G, T, P, k, m, g, t, or p.
+
+If the jobs take up very different amounts of RAM, GNU B<parallel> will
+only start as many as there is memory for. If less than I<size> bytes
+are free, no more jobs will be started. If less than 50% I<size> bytes
+are free, the youngest job will be killed (as per B<--term-seq>), and
+put back on the queue to be run later.
+
+B<--retries> must be set to determine how many times GNU B<parallel>
+should retry a given job.
+
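+A minimal sketch (B<my_memory_hungry_prg> is a stand-in):
+
+  parallel --retries 5 --memfree 1G my_memory_hungry_prg {} ::: *.dat
+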
+See also: UNIT PREFIX B<--term-seq> B<--retries> B<--memsuspend>
+
+
+=item B<--memsuspend> I<size>
+
+Suspend jobs when there is less memory available.
+
+If the available memory falls below 2 * I<size>, GNU B<parallel> will
+suspend some of the running jobs. If the available memory falls below
+I<size>, only one job will be running.
+
+If a single job fits in the given size, all jobs will complete without
+running out of memory. If you have swap available, you can usually
+lower I<size> to around half the size of a single job - with the slight
+risk of swapping a little.
+
+Jobs will be resumed when more RAM is available - typically when the
+oldest job completes.
+
+B<--memsuspend> only works on local jobs because there is no obvious
+way to suspend remote jobs.
+
+I<size> can be postfixed with K, M, G, T, P, k, m, g, t, or p.
+
+See also: UNIT PREFIX B<--memfree>
+
+
+=item B<--minversion> I<version>
+
+Print the version of GNU B<parallel> and exit.
+
+If the current version of GNU B<parallel> is less than I<version> the
+exit code is 255. Otherwise it is 0.
+
+This is useful for scripts that depend on features only available from
+a certain version of GNU B<parallel>:
+
+ parallel --minversion 20170422 &&
+ echo halt done=50% supported from version 20170422 &&
+ parallel --halt now,done=50% echo ::: {1..100}
+
+See also: B<--version>
+
+
+=item B<--max-args> I<max-args>
+
+=item B<-n> I<max-args>
+
+Use at most I<max-args> arguments per command line.
+
+Fewer than I<max-args> arguments will be used if the size (see the
+B<-s> option) is exceeded, unless the B<-x> option is given, in which
+case GNU B<parallel> will exit.
+
+B<-n 0> means read one argument, but insert 0 arguments on the command
+line.
+
+I<max-args> can be postfixed with K, M, G, T, P, k, m, g, t, or p (see
+UNIT PREFIX).
+
+Implies B<-X> unless B<-m> is set.
+
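+Example: use at most 2 arguments per command line:
+
+  parallel -n2 echo ::: a b c d e
+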
+See also: B<-X> B<-m> B<--xargs> B<--max-replace-args>
+
+
+=item B<--max-replace-args> I<max-args>
+
+=item B<-N> I<max-args>
+
+Use at most I<max-args> arguments per command line.
+
+Like B<-n> but also makes replacement strings B<{1}>
+.. B<{>I<max-args>B<}> that represent argument 1 .. I<max-args>. If
+there are too few args, the B<{>I<n>B<}> will be empty.
+
+B<-N 0> means read one argument, but insert 0 arguments on the command
+line.
+
+This will set the owner of the homedir to the user:
+
+ tr ':' '\n' < /etc/passwd | parallel -N7 chown {1} {6}
+
+Implies B<-X> unless B<-m> or B<--pipe> is set.
+
+I<max-args> can be postfixed with K, M, G, T, P, k, m, g, t, or p.
+
+When used with B<--pipe> B<-N> is the number of records to read. This
+is somewhat slower than B<--block>.
+
+See also: UNIT PREFIX B<--pipe> B<--block> B<-m> B<-X> B<--max-args>
+
+
+=item B<--nonall>
+
+B<--onall> with no arguments.
+
+Run the command on all computers given with B<--sshlogin> but take no
+arguments. GNU B<parallel> will log into B<--jobs> number of computers
+in parallel and run the job on each computer. B<-j> adjusts how many
+computers to log into in parallel.
+
+This is useful for running the same command (e.g. uptime) on a list of
+servers.
+
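+Example (I<host1> and I<host2> are stand-ins):
+
+  parallel --nonall -S host1,host2 uptime
+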
+See also: B<--onall> B<--sshlogin>
+
+
+=item B<--onall>
+
+Run all the jobs on all computers given with B<--sshlogin>.
+
+GNU B<parallel> will log into B<--jobs> number of computers in
+parallel and run one job at a time on the computer. The order of the
+jobs will not be changed, but some computers may finish before others.
+
+When using B<--group> the output will be grouped by each server, so
+all the output from one server will be grouped together.
+
+B<--joblog> will contain an entry for each job on each server, so
+there will be several job sequence 1.
+
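+Example (I<host1> and I<host2> are stand-ins):
+
+  parallel --onall -S host1,host2 echo ::: a b c
+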
+See also: B<--nonall> B<--sshlogin>
+
+
+=item B<--open-tty>
+
+=item B<-o>
+
+Open terminal tty.
+
+Similar to B<--tty> but does not set B<--jobs> or B<--ungroup>.
+
+See also: B<--tty>
+
+
+=item B<--output-as-files>
+
+=item B<--outputasfiles>
+
+=item B<--files>
+
+=item B<--files0>
+
+Save output to files.
+
+Instead of printing the output to stdout (standard output) the output
+of each job is saved in a file and the filename is then printed.
+
+B<--files0> uses NUL (\0) instead of newline (\n) as separator.
+
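+Example: print the name of the file holding each job's output:
+
+  parallel --files 'seq {}' ::: 1 2 3
+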
+See also: B<--results>
+
+
+=item B<--pipe>
+
+=item B<--spreadstdin>
+
+Spread input to jobs on stdin (standard input).
+
+Read a block of data from stdin (standard input) and give one block of
+data as input to one job.
+
+The block size is determined by B<--block> (default: 1M).
+
+Except for the first and last record GNU B<parallel> only passes full
+records to the job. The strings B<--recstart> and B<--recend>
+determine where a record starts and ends: The border between two
+records is defined as B<--recend> immediately followed by
+B<--recstart>. GNU B<parallel> splits exactly after B<--recend> and
+before B<--recstart>. The block will have the last partial record
+removed before the block is passed on to the job. The partial record
+will be prepended to the next block.
+
+You can limit the number of records to be passed with B<-N>, and set
+the record size with B<-L>.
+
+B<--pipe> maxes out at around 1 GB/s input, and 100 MB/s output. If
+performance is important use B<--pipe-part>.
+
+B<--fifo> and B<--cat> will give stdin (standard input) on a fifo or a
+temporary file.
+
+If data is arriving slowly, you can use B<--block-timeout> to finish
+reading a block early.
+
+The data can be spread between the jobs in specific ways using
+B<--round-robin>, B<--bin>, B<--shard>, B<--group-by>. See the
+section: SPREADING BLOCKS OF DATA
+
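+Example: chop standard input into 1 MB blocks and count the lines in
+each block (the counts sum to 1000000):
+
+  seq 1000000 | parallel --pipe --block 1M wc -l
+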
+See also: B<--block> B<--block-timeout> B<--recstart> B<--recend>
+B<--fifo> B<--cat> B<--pipe-part> B<-N> B<-L> B<--round-robin>
+
+
+=item B<--pipe-part>
+
+Pipe parts of a physical file.
+
+B<--pipe-part> works similarly to B<--pipe>, but is much faster. 5 GB/s
+can easily be delivered.
+
+B<--pipe-part> has a few limitations:
+
+=over 3
+
+=item *
+
+The file must be a normal file or a block device (technically it must
+be seekable) and must be given using B<--arg-file> or B<::::>. The file cannot
+be a pipe, a fifo, or a stream as they are not seekable.
+
+If using a block device with a lot of NUL bytes, remember to set
+B<--recend ''>.
+
+=item *
+
+Record counting (B<-N>) and line counting (B<-L>/B<-l>) do not
+work. Instead use B<--recstart> and B<--recend> to determine
+where records end.
+
+=back
+
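+Example (I<bigfile> is a stand-in for a seekable file):
+
+  parallel --pipe-part -a bigfile --block 100M wc -l
+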
+See also: B<--pipe> B<--recstart> B<--recend> B<--arg-file> B<::::>
+
+
+=item B<--plain>
+
+Ignore B<--profile>, $PARALLEL, and ~/.parallel/config.
+
+Ignore any B<--profile>, $PARALLEL, and ~/.parallel/config to get full
+control on the command line (used by GNU B<parallel> internally when
+called with B<--sshlogin>).
+
+See also: B<--profile>
+
+
+=item B<--plus>
+
+Add more replacement strings.
+
+Activate additional replacement strings: {+/} {+.} {+..} {+...} {..}
+{...} {/..} {/...} {##}. The idea being that '{+foo}' matches the
+opposite of '{foo}' so that:
+
+{} = {+/}/{/} = {.}.{+.} = {+/}/{/.}.{+.} = {..}.{+..} =
+{+/}/{/..}.{+..} = {...}.{+...} = {+/}/{/...}.{+...}
+
+B<{##}> is the total number of jobs to be run. It is incompatible with
+B<-X>/B<-m>/B<--xargs>.
+
+B<{0%}> zero-padded jobslot.
+
+B<{0#}> zero-padded sequence number.
+
+B<{slot-1}> jobslot - 1 (i.e. counting from 0). -1 can be any perl
+expression: {slot**2-1} = slot*slot-1.
+
+B<{seq-1}> sequence number - 1 (i.e. counting from 0). -1 can be any
+perl expression.
+
+B<{choose_k}> is inspired by n choose k: Given a list of n elements,
+choose k. k is the number of input sources and n is the number of
+arguments in an input source. The content of the input sources must
+be the same and the arguments must be unique.
+
+B<{uniq}> skips jobs where values from two input sources are the same.
+
+Shorthands for variables:
+
+ {slot} $PARALLEL_JOBSLOT (see {%})
+ {sshlogin} $PARALLEL_SSHLOGIN
+ {host} $PARALLEL_SSHHOST
+ {agrp} $PARALLEL_ARGHOSTGROUPS
+ {hgrp} $PARALLEL_HOSTGROUPS
+
+The following dynamic replacement strings are also activated. They are
+inspired by bash's parameter expansion:
+
+ {:-str} str if the value is empty
+ {:num} remove the first num characters
+ {:pos:len} substring from position pos length len
+ {#regexp} remove prefix regexp (non-greedy)
+ {##regexp} remove prefix regexp (greedy)
+ {%regexp} remove postfix regexp (non-greedy)
+ {%%regexp} remove postfix regexp (greedy)
+ {/regexp/str} replace one regexp with str
+ {//regexp/str} replace every regexp with str
+ {^str} uppercase str if found at the start
+ {^^str} uppercase str
+ {,str} lowercase str if found at the start
+ {,,str} lowercase str
+
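+Example: B<{..}> removes the two last extensions, B<{+..}> is the
+opposite, and B<{/regexp/str}> substitutes:
+
+  parallel --plus echo {} = {..}.{+..} ::: foo.tar.gz
+  parallel --plus echo {/apple/pear} ::: 'one apple two'
+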
+See also: B<--rpl> B<{}>
+
+
+=item B<--process-slot-var> I<varname>
+
+Set the environment variable I<varname> to the jobslot number minus 1.
+
+ seq 10 | parallel --process-slot-var=name echo '$name' {}
+
+
+=item B<--progress>
+
+Show progress of computations.
+
+List the computers involved in the task with number of CPUs detected
+and the max number of jobs to run. After that show progress for each
+computer: number of running jobs, number of completed jobs, and
+percentage of all jobs done by this computer. The percentage will only
+be available after all jobs have been scheduled as GNU B<parallel>
+only reads the next job when ready to schedule it - this is to avoid
+wasting time and memory by reading everything at startup.
+
+By sending GNU B<parallel> SIGUSR2 you can toggle B<--progress>
+on/off for a running GNU B<parallel> process.
+
+See also: B<--eta> B<--bar>
+
+
+=item B<--max-line-length-allowed>
+
+Print maximal command line length.
+
+Print the maximal number of characters allowed on the command line and
+exit (used by GNU B<parallel> itself to determine the line length
+on remote computers).
+
+See also: B<--show-limits>
+
+
+=item B<--number-of-cpus> (obsolete)
+
+Print the number of physical CPU cores and exit.
+
+
+=item B<--number-of-cores>
+
+Print the number of physical CPU cores and exit (used by GNU B<parallel> itself
+to determine the number of physical CPU cores on remote computers).
+
+See also: B<--number-of-sockets> B<--number-of-threads>
+B<--use-cores-instead-of-threads> B<--jobs>
+
+
+=item B<--number-of-sockets>
+
+Print the number of filled CPU sockets and exit (used by GNU
+B<parallel> itself to determine the number of filled CPU sockets on
+remote computers).
+
+See also: B<--number-of-cores> B<--number-of-threads>
+B<--use-sockets-instead-of-threads> B<--jobs>
+
+
+=item B<--number-of-threads>
+
+Print the number of hyperthreaded CPU cores and exit (used by GNU
+B<parallel> itself to determine the number of hyperthreaded CPU cores
+on remote computers).
+
+See also: B<--number-of-cores> B<--number-of-sockets> B<--jobs>
+
+
+=item B<--no-keep-order>
+
+Overrides an earlier B<--keep-order> (e.g. if set in
+B<~/.parallel/config>).
+
+
+=item B<--nice> I<niceness>
+
+Run the command at this niceness.
+
+By default GNU B<parallel> will run jobs at the same nice level as GNU
+B<parallel> is started - both on the local machine and remote servers,
+so you are unlikely to ever use this option.
+
+Setting B<--nice> will override this nice level. If the nice level is
+smaller than the current nice level, it will only affect remote jobs
+(e.g. if current level is 10 then B<--nice 5> will cause local jobs to
+be run at level 10, but remote jobs run at nice level 5).
+
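+For example, run CPU-heavy compressions at the lowest priority:
+
+  parallel --nice 19 bzip2 -9 ::: *.log
+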
+
+=item B<--interactive>
+
+=item B<-p>
+
+Ask user before running a job.
+
+Prompt the user about whether to run each command line and read a line
+from the terminal. Only run the command line if the response starts
+with 'y' or 'Y'. Implies B<-t>.
+
+
+=item B<--_parset> I<type>,I<varname>
+
+Used internally by B<parset>.
+
+Generate shell code to be eval'ed which will set the variable(s)
+I<varname>. I<type> can be 'assoc' for associative array or 'var' for
+normal variables.
+
+The only supported use is as part of B<parset>.
+
+
+=item B<--parens> I<parensstring>
+
+Use I<parensstring> instead of B<{==}>.
+
+Define start and end parentheses for B<{=perl expression=}>. The
+left and the right parenthesis can be multiple characters and are
+assumed to be the same length. The default is B<{==}> giving B<{=> as
+the start parenthesis and B<=}> as the end parenthesis.
+
+Another useful setting is B<,,,,> which would make both parentheses
+B<,,>:
+
+ parallel --parens ,,,, echo foo is ,,s/I/O/g,, ::: FII
+
+See also: B<--rpl> B<{=>I<perl expression>B<=}>
+
+
+=item B<--profile> I<profilename>
+
+=item B<-J> I<profilename>
+
+Use profile I<profilename> for options.
+
+This is useful if you want to have multiple profiles. You could have
+one profile for running jobs in parallel on the local computer and a
+different profile for running jobs on remote computers.
+
+I<profilename> corresponds to the file ~/.parallel/I<profilename>.
+
+You can give multiple profiles by repeating B<--profile>. If parts of
+the profiles conflict, the later ones will be used.
+
+Default: ~/.parallel/config
+
+See also: PROFILE FILES
+
+
+=item B<--quote>
+
+=item B<-q>
+
+Quote I<command>.
+
+If your command contains special characters that should not be
+interpreted by the shell (e.g. ; \ | *), use B<--quote> to escape
+these. The command must be a simple command (see B<man bash>) without
+redirections and without variable assignments.
+
+Most people will not need this. Quoting is disabled by default.
+
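+For example (output shown as comments):
+
+  parallel echo 'a;b' ::: 1     # runs: echo a; b 1
+  parallel -q echo 'a;b' ::: 1  # prints: a;b 1
+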
+See also: QUOTING I<command> B<--shell-quote> B<uq()> B<Q()>
+
+
+=item B<--no-run-if-empty>
+
+=item B<-r>
+
+Do not run empty input.
+
+If the stdin (standard input) only contains whitespace, do not run the
+command.
+
+If used with B<--pipe> this is slow.
+
+See also: I<command> B<--pipe> B<--interactive>
+
+
+=item B<--noswap>
+
+Do not start jobs if the computer is swapping.
+
+Do not start new jobs on a given computer if there is both swap-in and
+swap-out activity.
+
+The swap activity is only sampled every 10 seconds as the sampling
+takes 1 second to do.
+
+Swap activity is computed as (swap-in)*(swap-out) which in practice is
+a good value: swapping out is not a problem, swapping in is not a
+problem, but both swapping in and out usually indicates a problem.
+
+B<--memfree> and B<--memsuspend> may give better results, so try using
+those first.
+
+See also: B<--memfree> B<--memsuspend>
+
+
+=item B<--record-env>
+
+Record exported environment.
+
+Record current exported environment variables in
+B<~/.parallel/ignored_vars>. Variables recorded here will be ignored
+when using B<--env _>. So you should set the variables/functions you
+want to use I<after> running B<--record-env>.
+
+See also: B<--env> B<--session> B<env_parallel>
+
+
+=item B<--recstart> I<startstring>
+
+=item B<--recend> I<endstring>
+
+Split record between I<endstring> and I<startstring>.
+
+If B<--recstart> is given I<startstring> will be used to split at record start.
+
+If B<--recend> is given I<endstring> will be used to split at record end.
+
+If both B<--recstart> and B<--recend> are given the combined string
+I<endstring>I<startstring> will have to match to find a split
+position. This is useful if either I<startstring> or I<endstring>
+match in the middle of a record.
+
+If neither B<--recstart> nor B<--recend> are given, then B<--recend>
+defaults to '\n'. To have no record separator (e.g. for binary files)
+use B<--recend "">.
+
+B<--recstart> and B<--recend> are used with B<--pipe>.
+
+Use B<--regexp> to interpret B<--recstart> and B<--recend> as regular
+expressions. This is slow, however.
+
+Use B<--remove-rec-sep> to remove B<--recstart> and B<--recend> before
+passing the block to the job.
+
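+For example, a minimal sketch that treats blank-line separated
+paragraphs as records:
+
+  cat file.txt | parallel --pipe --recend '\n\n' -N1 wc -w
+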
+See also: B<--pipe> B<--regexp> B<--remove-rec-sep>
+
+
+=item B<--regexp>
+
+Interpret B<--recstart> and B<--recend> as regular expressions. This
+is slow, however.
+
+See also: B<--pipe> B<--remove-rec-sep> B<--recstart>
+B<--recend>
+
+
+=item B<--remove-rec-sep>
+
+=item B<--removerecsep>
+
+=item B<--rrs>
+
+Remove record separator.
+
+Remove the text matched by B<--recstart> and B<--recend> before piping
+it to the command.
+
+Only used with B<--pipe>/B<--pipe-part>.
+
+See also: B<--pipe> B<--regexp> B<--pipe-part> B<--recstart>
+B<--recend>
+
+
+=item B<--results> I<name>
+
+=item B<--res> I<name>
+
+Save the output into files.
+
+B<Simple string output dir>
+
+If I<name> does not contain replacement strings and does not end in
+B<.csv>/B<.tsv>/B<.json>, the output will be stored in a directory
+tree rooted at I<name>. Within this directory tree, each command will
+result in three files: I<name>/<ARGS>/seq, I<name>/<ARGS>/stdout, and
+I<name>/<ARGS>/stderr, where <ARGS> is a sequence of directories
+representing the header of the input source (if using B<--header :>)
+or the number of the input source and corresponding values.
+
+E.g.:
+
+ parallel --header : --results foo echo {a} {b} \
+ ::: a I II ::: b III IIII
+
+will generate the files:
+
+ foo/a/II/b/III/seq
+ foo/a/II/b/III/stderr
+ foo/a/II/b/III/stdout
+ foo/a/II/b/IIII/seq
+ foo/a/II/b/IIII/stderr
+ foo/a/II/b/IIII/stdout
+ foo/a/I/b/III/seq
+ foo/a/I/b/III/stderr
+ foo/a/I/b/III/stdout
+ foo/a/I/b/IIII/seq
+ foo/a/I/b/IIII/stderr
+ foo/a/I/b/IIII/stdout
+
+and
+
+ parallel --results foo echo {1} {2} ::: I II ::: III IIII
+
+will generate the files:
+
+ foo/1/II/2/III/seq
+ foo/1/II/2/III/stderr
+ foo/1/II/2/III/stdout
+ foo/1/II/2/IIII/seq
+ foo/1/II/2/IIII/stderr
+ foo/1/II/2/IIII/stdout
+ foo/1/I/2/III/seq
+ foo/1/I/2/III/stderr
+ foo/1/I/2/III/stdout
+ foo/1/I/2/IIII/seq
+ foo/1/I/2/IIII/stderr
+ foo/1/I/2/IIII/stdout
+
+
+B<CSV file output>
+
+If I<name> ends in B<.csv>/B<.tsv> the output will be a CSV-file
+named I<name>.
+
+B<.csv> gives a comma separated value file. B<.tsv> gives a TAB
+separated value file.
+
+B<-.csv>/B<-.tsv> are special: the file will be written to stdout
+(standard output).
+
+
+B<JSON file output>
+
+If I<name> ends in B<.json> the output will be a JSON-file
+named I<name>.
+
+B<-.json> is special: the file will be written to stdout (standard
+output).
+
+
+B<Replacement string output file>
+
+If I<name> contains a replacement string and the replaced result does
+not end in /, then the standard output will be stored in a file named
+by this result. Standard error will be stored in the same file name
+with '.err' added, and the sequence number will be stored in the same
+file name with '.seq' added.
+
+E.g.
+
+ parallel --results my_{} echo ::: foo bar baz
+
+will generate the files:
+
+ my_bar
+ my_bar.err
+ my_bar.seq
+ my_baz
+ my_baz.err
+ my_baz.seq
+ my_foo
+ my_foo.err
+ my_foo.seq
+
+
+B<Replacement string output dir>
+
+If I<name> contains a replacement string and the replaced result ends
+in /, then output files will be stored in the resulting dir.
+
+E.g.
+
+ parallel --results my_{}/ echo ::: foo bar baz
+
+will generate the files:
+
+ my_bar/seq
+ my_bar/stderr
+ my_bar/stdout
+ my_baz/seq
+ my_baz/stderr
+ my_baz/stdout
+ my_foo/seq
+ my_foo/stderr
+ my_foo/stdout
+
+See also: B<--output-as-files> B<--tag> B<--header> B<--joblog>
+
+
+=item B<--resume>
+
+Resumes from the last unfinished job.
+
+By reading B<--joblog> or the
+B<--results> dir GNU B<parallel> will figure out the last unfinished
+job and continue from there. As GNU B<parallel> only looks at the
+sequence numbers in B<--joblog> then the input, the command, and
+B<--joblog> all have to remain unchanged; otherwise GNU B<parallel>
+may run wrong commands.
+
+See also: B<--joblog> B<--results> B<--resume-failed> B<--retries>
+
+
+=item B<--resume-failed>
+
+Retry all failed and resume from the last unfinished job.
+
+By reading
+B<--joblog> GNU B<parallel> will figure out the failed jobs and run
+those again. After that it will resume from the last unfinished job and
+continue from there. As GNU B<parallel> only looks at the sequence
+numbers in B<--joblog> then the input, the command, and B<--joblog>
+all have to remain unchanged; otherwise GNU B<parallel> may run wrong
+commands.
+
+See also: B<--joblog> B<--resume> B<--retry-failed> B<--retries>
+
+
+=item B<--retry-failed>
+
+Retry all failed jobs in joblog.
+
+By reading B<--joblog> GNU
+B<parallel> will figure out the failed jobs and run those again.
+
+B<--retry-failed> ignores the command and arguments on the command
+line: It only looks at the joblog.
+
+B<Differences between --resume, --resume-failed, --retry-failed>
+
+In this example B<exit {= $_%=2 =}> will cause every other job to fail.
+
+ timeout -k 1 4 parallel --joblog log -j10 \
+ 'sleep {}; exit {= $_%=2 =}' ::: {10..1}
+
+4 jobs completed. 2 failed:
+
+ Seq [...] Exitval Signal Command
+ 10 [...] 1 0 sleep 1; exit 1
+ 9 [...] 0 0 sleep 2; exit 0
+ 8 [...] 1 0 sleep 3; exit 1
+ 7 [...] 0 0 sleep 4; exit 0
+
+B<--resume> does not care about the Exitval, but only looks at Seq. If
+the Seq is run, it will not be run again. So if needed, you can change
+the command for the seqs not run yet:
+
+ parallel --resume --joblog log -j10 \
+ 'sleep .{}; exit {= $_%=2 =}' ::: {10..1}
+
+ Seq [...] Exitval Signal Command
+ [... as above ...]
+ 1 [...] 0 0 sleep .10; exit 0
+ 6 [...] 1 0 sleep .5; exit 1
+ 5 [...] 0 0 sleep .6; exit 0
+ 4 [...] 1 0 sleep .7; exit 1
+ 3 [...] 0 0 sleep .8; exit 0
+ 2 [...] 1 0 sleep .9; exit 1
+
+B<--resume-failed> cares about the Exitval, but also only looks at Seq
+to figure out which commands to run. Again this means you can change
+the command, but not the arguments. It will run the failed seqs and
+the seqs not yet run:
+
+ parallel --resume-failed --joblog log -j10 \
+ 'echo {};sleep .{}; exit {= $_%=3 =}' ::: {10..1}
+
+ Seq [...] Exitval Signal Command
+ [... as above ...]
+ 10 [...] 1 0 echo 1;sleep .1; exit 1
+ 8 [...] 0 0 echo 3;sleep .3; exit 0
+ 6 [...] 2 0 echo 5;sleep .5; exit 2
+ 4 [...] 1 0 echo 7;sleep .7; exit 1
+ 2 [...] 0 0 echo 9;sleep .9; exit 0
+
+B<--retry-failed> cares about the Exitval, but takes the command from
+the joblog. It ignores any arguments or commands given on the command
+line:
+
+ parallel --retry-failed --joblog log -j10 this part is ignored
+
+ Seq [...] Exitval Signal Command
+ [... as above ...]
+ 10 [...] 1 0 echo 1;sleep .1; exit 1
+ 6 [...] 2 0 echo 5;sleep .5; exit 2
+ 4 [...] 1 0 echo 7;sleep .7; exit 1
+
+See also: B<--joblog> B<--resume> B<--resume-failed> B<--retries>
+
+
+=item B<--retries> I<n>
+
+Try failing jobs I<n> times.
+
+If a job fails, retry it on another computer on which it has not
+failed. Do this I<n> times. If there are fewer than I<n> computers in
+B<--sshlogin> GNU B<parallel> will re-use all the computers. This is
+useful if some jobs fail for no apparent reason (such as network
+failure).
+
+I<n>=0 means infinite.
+
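+A sketch (I<server1> and I<server2> are placeholders):
+
+  parallel --retries 3 -S server1,server2 \
+    wget -q {} ::: https://example.com/a https://example.com/b
+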
+See also: B<--term-seq> B<--sshlogin>
+
+
+=item B<--return> I<filename>
+
+Transfer files from remote computers.
+
+B<--return> is used with
+B<--sshlogin> when the arguments are files on the remote computers. When
+processing is done the file I<filename> will be transferred
+from the remote computer using B<rsync> and will be put relative to
+the default login dir. E.g.
+
+ echo foo/bar.txt | parallel --return {.}.out \
+ --sshlogin server.example.com touch {.}.out
+
+This will transfer the file I<$HOME/foo/bar.out> from the computer
+I<server.example.com> to the file I<foo/bar.out> after running
+B<touch foo/bar.out> on I<server.example.com>.
+
+ parallel -S server --trc out/./{}.out touch {}.out ::: in/file
+
+This will transfer the file I<in/file.out> from the computer
+I<server> to the file I<out/in/file.out> after running
+B<touch in/file.out> on I<server>.
+
+ echo /tmp/foo/bar.txt | parallel --return {.}.out \
+ --sshlogin server.example.com touch {.}.out
+
+This will transfer the file I</tmp/foo/bar.out> from the computer
+I<server.example.com> to the file I</tmp/foo/bar.out> after running
+B<touch /tmp/foo/bar.out> on I<server.example.com>.
+
+Multiple files can be transferred by repeating the option multiple
+times:
+
+ echo /tmp/foo/bar.txt | parallel \
+ --sshlogin server.example.com \
+ --return {.}.out --return {.}.out2 touch {.}.out {.}.out2
+
+B<--return> is ignored when used with B<--sshlogin :> or when not used
+with B<--sshlogin>.
+
+For details on transferring see B<--transferfile>.
+
+See also: B<--transfer> B<--transferfile> B<--sshlogin> B<--cleanup>
+B<--workdir>
+
+
+=item B<--round-robin>
+
+=item B<--round>
+
+Distribute chunks of standard input in a round robin fashion.
+
+Normally B<--pipe> will give a single block to each instance of the
+command. With B<--round-robin> all blocks will be written at random
+to commands already running. This is useful if the command takes a
+long time to initialize.
+
+With B<--keep-order> and B<--round-robin> the jobslots will get the
+same blocks as input in the same order in every run if the input is
+kept the same. See details under B<--keep-order>.
+
+B<--round-robin> implies B<--pipe>, except if B<--pipe-part> is given.
+
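+For example, a sketch that compresses one big stream on 4 jobslots,
+giving each block to whichever B<bzip2> is ready:
+
+  cat bigfile | parallel --pipe --round-robin -j4 bzip2 > big.bz2
+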
+See the section: SPREADING BLOCKS OF DATA.
+
+See also: B<--bin> B<--group-by> B<--shard>
+
+
+=item B<--rpl> 'I<tag> I<perl expression>'
+
+Define replacement string.
+
+Use I<tag> as a replacement string for I<perl expression>. This makes
+it possible to define your own replacement strings. GNU B<parallel>'s
+7 replacement strings are implemented as:
+
+ --rpl '{} '
+ --rpl '{#} 1 $_=$job->seq()'
+ --rpl '{%} 1 $_=$job->slot()'
+ --rpl '{/} s:.*/::'
+ --rpl '{//} $Global::use{"File::Basename"} ||=
+ eval "use File::Basename; 1;"; $_ = dirname($_);'
+ --rpl '{/.} s:.*/::; s:\.[^/.]+$::;'
+ --rpl '{.} s:\.[^/.]+$::'
+
+The B<--plus> replacement strings are implemented as:
+
+ --rpl '{+/} s:/[^/]*$:: || s:.*$::'
+ --rpl '{+.} s:.*\.:: || s:.*$::'
+ --rpl '{+..} s:.*\.([^/.]+\.[^/.]+)$:$1: || s:.*$::'
+ --rpl '{+...} s:.*\.([^/.]+\.[^/.]+\.[^/.]+)$:$1: || s:.*$::'
+ --rpl '{..} s:\.[^/.]+\.[^/.]+$::'
+ --rpl '{...} s:\.[^/.]+\.[^/.]+\.[^/.]+$::'
+ --rpl '{/..} s:.*/::; s:\.[^/.]+\.[^/.]+$::'
+ --rpl '{/...} s:.*/::; s:\.[^/.]+\.[^/.]+\.[^/.]+$::'
+ --rpl '{choose_k}
+ for $t (2..$#arg){ if($arg[$t-1] ge $arg[$t]) { skip() } }'
+ --rpl '{##} 1 $_=total_jobs()'
+ --rpl '{0%} 1 $f=1+int((log($Global::max_jobs_running||1)/
+ log(10))); $_=sprintf("%0${f}d",slot())'
+ --rpl '{0#} 1 $f=1+int((log(total_jobs())/log(10)));
+ $_=sprintf("%0${f}d",seq())'
+ --rpl '{seq(.*?)} $_=eval q{$job->seq()}.qq{$$1}'
+ --rpl '{slot(.*?)} $_=eval q{$job->slot()}.qq{$$1}'
+
+ --rpl '{:-([^}]+?)} $_ ||= $$1'
+ --rpl '{:(\d+?)} substr($_,0,$$1) = ""'
+ --rpl '{:(\d+?):(\d+?)} $_ = substr($_,$$1,$$2);'
+ --rpl '{#([^#}][^}]*?)} $nongreedy=::make_regexp_ungreedy($$1);
+ s/^$nongreedy(.*)/$1/;'
+ --rpl '{##([^#}][^}]*?)} s/^$$1//;'
+ --rpl '{%([^}]+?)} $nongreedy=::make_regexp_ungreedy($$1);
+ s/(.*)$nongreedy$/$1/;'
+ --rpl '{%%([^}]+?)} s/$$1$//;'
+ --rpl '{/([^}]+?)/([^}]*?)} s/$$1/$$2/;'
+ --rpl '{^([^}]+?)} s/^($$1)/uc($1)/e;'
+ --rpl '{^^([^}]+?)} s/($$1)/uc($1)/eg;'
+ --rpl '{,([^}]+?)} s/^($$1)/lc($1)/e;'
+ --rpl '{,,([^}]+?)} s/($$1)/lc($1)/eg;'
+
+ --rpl '{slot} 1 $_="\${PARALLEL_JOBSLOT}";uq()'
+ --rpl '{host} 1 $_="\${PARALLEL_SSHHOST}";uq()'
+ --rpl '{sshlogin} 1 $_="\${PARALLEL_SSHLOGIN}";uq()'
+ --rpl '{hgrp} 1 $_="\${PARALLEL_HOSTGROUPS}";uq()'
+ --rpl '{agrp} 1 $_="\${PARALLEL_ARGHOSTGROUPS}";uq()'
+
+If the user-defined replacement string starts with '{' it can also be
+used as a positional replacement string (like B<{2.}>).
+
+It is recommended to only change $_ but you have full access to all
+of GNU B<parallel>'s internal functions and data structures.
+
+Here are a few examples:
+
+ Is the job sequence even or odd?
+ --rpl '{odd} $_ = seq() % 2 ? "odd" : "even"'
+ Pad job sequence with leading zeros to get equal width
+ --rpl '{0#} $f=1+int("".(log(total_jobs())/log(10)));
+ $_=sprintf("%0${f}d",seq())'
+ Job sequence counting from 0
+ --rpl '{#0} $_ = seq() - 1'
+ Job slot counting from 2
+ --rpl '{%1} $_ = slot() + 1'
+ Remove all extensions
+ --rpl '{:} s:(\.[^/]+)*$::'
+
+You can have dynamic replacement strings by including parenthesis in
+the replacement string and adding a regular expression between the
+parenthesis. The matching string will be inserted as $$1:
+
+ parallel --rpl '{%(.*?)} s/$$1//' echo {%.tar.gz} ::: my.tar.gz
+ parallel --rpl '{:%(.+?)} s:$$1(\.[^/]+)*$::' \
+ echo {:%_file} ::: my_file.tar.gz
+ parallel -n3 --rpl '{/:%(.*?)} s:.*/(.*)$$1(\.[^/]+)*$:$1:' \
+ echo job {#}: {2} {2.} {3/:%_1} ::: a/b.c c/d.e f/g_1.h.i
+
+You can even use multiple matches:
+
+ parallel --rpl '{/(.+?)/(.*?)} s/$$1/$$2/;'
+ echo {/replacethis/withthis} {/b/C} ::: a_replacethis_b
+
+ parallel --rpl '{(.*?)/(.*?)} $_="$$2$_$$1"' \
+ echo {swap/these} ::: -middle-
+
+See also: B<{=>I<perl expression>B<=}> B<--parens>
+
+
+=item B<--rsync-opts> I<options>
+
+Options to pass on to B<rsync>.
+
+Setting B<--rsync-opts> takes precedence over setting the environment
+variable $PARALLEL_RSYNC_OPTS.
+
+
+=item B<--max-chars> I<max-chars>
+
+=item B<-s> I<max-chars>
+
+Limit length of command.
+
+Use at most I<max-chars> characters per command line, including the
+command and initial-arguments and the terminating nulls at the ends of
+the argument strings. The largest allowed value is system-dependent,
+and is calculated as the argument length limit for exec, less the size
+of your environment. The default value is the maximum.
+
+I<max-chars> can be postfixed with K, M, G, T, P, k, m, g, t, or p
+(see UNIT PREFIX).
+
+Implies B<-X> unless B<-m> or B<--xargs> is set.
+
+See also: B<-X> B<-m> B<--xargs> B<--max-line-length-allowed>
+B<--show-limits>
+
+
+=item B<--show-limits>
+
+Display limits given by the operating system.
+
+Display the limits on the command-line length which are imposed by the
+operating system and the B<-s> option. Pipe the input from /dev/null
+(and perhaps specify --no-run-if-empty) if you don't want GNU B<parallel>
+to do anything.
+
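+For example:
+
+  parallel --show-limits --no-run-if-empty < /dev/null
+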
+See also: B<--max-chars> B<--max-line-length-allowed> B<--version>
+
+
+=item B<--semaphore>
+
+Work as a counting semaphore.
+
+B<--semaphore> will cause GNU B<parallel> to start I<command> in the
+background. When the number of jobs given by B<--jobs> is reached, GNU
+B<parallel> will wait for one of these to complete before starting
+another command.
+
+B<--semaphore> implies B<--bg> unless B<--fg> is specified.
+
+The command B<sem> is an alias for B<parallel --semaphore>.
+
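+A minimal sketch: allow at most 2 of these jobs at a time, then wait
+for all of them to finish:
+
+  for i in 1 2 3 4; do
+    parallel --semaphore -j2 "sleep 2; echo $i"
+  done
+  parallel --semaphore --wait
+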
+See also: B<man sem> B<--bg> B<--fg> B<--semaphore-name>
+B<--semaphore-timeout> B<--wait>
+
+
+=item B<--semaphore-name> I<name>
+
+=item B<--id> I<name>
+
+Use B<name> as the name of the semaphore.
+
+The default is the name of the controlling tty (output from B<tty>).
+
+The default normally works as expected when used interactively, but
+when used in a script I<name> should be set. I<$$> or I<my_task_name>
+are often good values.
+
+The semaphore is stored in ~/.parallel/semaphores/
+
+Implies B<--semaphore>.
+
+See also: B<man sem> B<--semaphore>
+
+
+=item B<--semaphore-timeout> I<secs>
+
+=item B<--st> I<secs>
+
+If I<secs> > 0: If the semaphore is not released within I<secs>
+seconds, take it anyway.
+
+If I<secs> < 0: If the semaphore is not released within -I<secs>
+seconds (i.e. the absolute value of I<secs>), exit.
+
+I<secs> is in seconds, but can be postfixed with s, m, h, or d (see
+the section TIME POSTFIXES).
+
+Implies B<--semaphore>.
+
+See also: B<man sem>
+
+
+=item B<--seqreplace> I<replace-str>
+
+Use the replacement string I<replace-str> instead of B<{#}> for
+job sequence number.
+
+See also: B<{#}>
+
+
+=item B<--session>
+
+Record names in current environment in B<$PARALLEL_IGNORED_NAMES> and
+exit.
+
+Only used with B<env_parallel>. Aliases, functions, and variables with
+names in B<$PARALLEL_IGNORED_NAMES> will not be copied. So you should
+set the variables/functions you want copied I<after> running B<--session>.
+
+It is similar to B<--record-env>, but only for this session.
+
+Only supported in B<Ash, Bash, Dash, Ksh, Sh, and Zsh>.
+
+See also: B<--env> B<--record-env> B<env_parallel>
+
+
+=item B<--shard> I<shardexpr>
+
+Use I<shardexpr> as shard key and shard input to the jobs.
+
+I<shardexpr> is [column number|column name] [perlexpression] e.g.:
+
+ 3
+ Address
+ 3 $_%=100
+ Address s/\d//g
+
+Each input line is split using B<--colsep>. The string of the column
+is put into $_, the perl expression is executed, and the resulting
+string is hashed so that all lines with a given value are given to
+the same jobslot.
+
+This is similar to sharding in databases.
+
+The performance is in the order of 100K rows per second. Faster if the
+I<shardcol> is small (<10), slower if it is big (>100).
+
+B<--shard> requires B<--pipe> and a fixed numeric value for B<--jobs>.
+
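+For example, a sketch that sends all lines with the same value in
+column 1 to the same jobslot:
+
+  cat data.tsv | parallel --pipe --colsep '\t' --shard 1 -j4 wc -l
+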
+See the section: SPREADING BLOCKS OF DATA.
+
+See also: B<--bin> B<--group-by> B<--round-robin>
+
+
+=item B<--shebang>
+
+=item B<--hashbang>
+
+GNU B<parallel> can be called as a shebang (#!) command as the first
+line of a script. The content of the file will be treated as the
+input source.
+
+Like this:
+
+ #!/usr/bin/parallel --shebang -r wget
+
+ https://ftpmirror.gnu.org/parallel/parallel-20120822.tar.bz2
+ https://ftpmirror.gnu.org/parallel/parallel-20130822.tar.bz2
+ https://ftpmirror.gnu.org/parallel/parallel-20140822.tar.bz2
+
+B<--shebang> must be set as the first option.
+
+On FreeBSD B<env> is needed:
+
+ #!/usr/bin/env -S parallel --shebang -r wget
+
+ https://ftpmirror.gnu.org/parallel/parallel-20120822.tar.bz2
+ https://ftpmirror.gnu.org/parallel/parallel-20130822.tar.bz2
+ https://ftpmirror.gnu.org/parallel/parallel-20140822.tar.bz2
+
+There are many limitations of shebang (#!) depending on your operating
+system. See details on https://www.in-ulm.de/~mascheck/various/shebang/
+
+See also: B<--shebang-wrap>
+
+
+=item B<--shebang-wrap>
+
+GNU B<parallel> can parallelize scripts by wrapping the shebang
+line. If the program can be run like this:
+
+ cat arguments | parallel the_program
+
+then the script can be changed to:
+
+ #!/usr/bin/parallel --shebang-wrap /original/parser --options
+
+E.g.
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/python
+
+If the program can be run like this:
+
+ cat data | parallel --pipe the_program
+
+then the script can be changed to:
+
+ #!/usr/bin/parallel --shebang-wrap --pipe /orig/parser --opts
+
+E.g.
+
+ #!/usr/bin/parallel --shebang-wrap --pipe /usr/bin/perl -w
+
+B<--shebang-wrap> must be set as the first option.
+
+See also: B<--shebang>
+
+
+=item B<--shell-completion> I<shell>
+
+Generate shell completion code for interactive shells.
+
+Supported shells: bash zsh.
+
+Use I<auto> as I<shell> to automatically detect running shell.
+
+Activate the completion code with:
+
+ zsh% eval "$(parallel --shell-completion auto)"
+ bash$ eval "$(parallel --shell-completion auto)"
+
+Or put this in `/usr/share/zsh/site-functions/_parallel`, then run
+`compinit` to regenerate `~/.zcompdump`:
+
+ #compdef parallel
+
+ (( $+functions[_comp_parallel] )) ||
+ eval "$(parallel --shell-completion auto)" &&
+ _comp_parallel
+
+
+=item B<--shell-quote>
+
+Does not run the command but quotes it. Useful for making quoted
+composed commands for GNU B<parallel>.
+
+Multiple B<--shell-quote> will quote the string multiple times, so
+B<parallel --shell-quote | parallel --shell-quote> can be written as
+B<parallel --shell-quote --shell-quote>.
+
+See also: B<--quote>
+
+
+=item B<--shuf>
+
+Shuffle jobs.
+
+When having multiple input sources it is hard to randomize
+jobs. B<--shuf> will generate all jobs, and shuffle them before
+running them. This is useful to get a quick preview of the results
+before running the full batch.
+
+Combined with B<--halt soon,done=1%> you can run a random 1% sample of
+all jobs:
+
+ parallel --shuf --halt soon,done=1% echo ::: {1..100} ::: {1..100}
+
+See also: B<--halt>
+
+
+=item B<--skip-first-line>
+
+Do not use the first line of input (used by GNU B<parallel> itself
+when called with B<--shebang>).
+
+
+=item B<--sql> I<DBURL> (obsolete)
+
+Use B<--sql-master> instead.
+
+
+=item B<--sql-master> I<DBURL>
+
+Submit jobs via SQL server. I<DBURL> must point to a table, which will
+contain the same information as B<--joblog>, the values from the input
+sources (stored in columns V1 .. Vn), and the output (stored in
+columns Stdout and Stderr).
+
+If I<DBURL> is prepended with '+' GNU B<parallel> assumes the table is
+already made with the correct columns and appends the jobs to it.
+
+If I<DBURL> is not prepended with '+' the table will be dropped and
+created with the correct number of V-columns.
+
+B<--sqlmaster> does not run any jobs, but it creates the values for
+the jobs to be run. One or more B<--sqlworker> must be run to actually
+execute the jobs.
+
+If B<--wait> is set, GNU B<parallel> will wait for the jobs to
+complete.
+
+The format of a DBURL is:
+
+ [sql:]vendor://[[user][:pwd]@][host][:port]/[db]/table
+
+E.g.
+
+ sql:mysql://hr:hr@localhost:3306/hrdb/jobs
+ mysql://scott:tiger@my.example.com/pardb/paralleljobs
+ sql:oracle://scott:tiger@ora.example.com/xe/parjob
+ postgresql://scott:tiger@pg.example.com/pgdb/parjob
+ pg:///parjob
+ sqlite3:///%2Ftmp%2Fpardb.sqlite/parjob
+ csv:///%2Ftmp%2Fpardb/parjob
+
+Notice how / in the path of sqlite and CSV must be encoded as %2F,
+except the last / in CSV, which must be a /.
+
+It can also be an alias from ~/.sql/aliases:
+
+ :myalias mysql:///mydb/paralleljobs
+
+See also: B<--sql-and-worker> B<--sql-worker> B<--joblog>
+
+
+=item B<--sql-and-worker> I<DBURL>
+
+Shorthand for: B<--sql-master> I<DBURL> B<--sql-worker> I<DBURL>.
+
+See also: B<--sql-master> B<--sql-worker>
+
+
+=item B<--sql-worker> I<DBURL>
+
+Execute jobs via SQL server. Read the input sources variables from the
+table pointed to by I<DBURL>. The I<command> on the command line
+should be the same as given by B<--sqlmaster>.
+
+If you have more than one B<--sqlworker> jobs may be run more than
+once.
+
+If B<--sqlworker> runs on the local machine, the hostname in the SQL
+table will not be ':' but instead the hostname of the machine.
+
+See also: B<--sql-master> B<--sql-and-worker>
+
+
+=item B<--ssh> I<sshcommand>
+
+GNU B<parallel> defaults to using B<ssh> for remote access. This can
+be overridden with B<--ssh>. It can also be set on a per server
+basis with B<--sshlogin>.
+
+See also: B<--sshlogin>
+
+
+=item B<--ssh-delay> I<duration>
+
+Delay starting next ssh by I<duration>.
+
+GNU B<parallel> will not start another ssh for the next I<duration>.
+
+I<duration> is in seconds, but can be postfixed with s, m, h, or d.
+
+See also: TIME POSTFIXES B<--sshlogin> B<--delay>
+
+
+=item B<--sshlogin> I<[@hostgroups/][ncpus/]sshlogin[,[@hostgroups/][ncpus/]sshlogin[,...]]>
+
+=item B<--sshlogin> I<@hostgroup>
+
+=item B<-S> I<[@hostgroups/][ncpus/]sshlogin[,[@hostgroups/][ncpus/]sshlogin[,...]]>
+
+=item B<-S> I<@hostgroup>
+
+Distribute jobs to remote computers.
+
+The jobs will be run on a list of remote computers.
+
+If I<hostgroups> is given, the I<sshlogin> will be added to that
+hostgroup. Multiple hostgroups are separated by '+'. The I<sshlogin>
+will always be added to a hostgroup named the same as I<sshlogin>.
+
+If only the I<@hostgroup> is given, only the sshlogins in that
+hostgroup will be used. Multiple I<@hostgroup> can be given.
+
+GNU B<parallel> will determine the number of CPUs on the remote
+computers and run the number of jobs as specified by B<-j>. If the
+number I<ncpus> is given GNU B<parallel> will use this number for
+number of CPUs on the host. Normally I<ncpus> will not be
+needed.
+
+An I<sshlogin> is of the form:
+
+ [sshcommand [options]] [username[:password]@]hostname
+
+If I<password> is given, B<sshpass> will be used. Otherwise the
+sshlogin must not require a password (B<ssh-agent> and B<ssh-copy-id>
+may help with that).
+
+If the hostname is an IPv6 address, the port can be given separated
+with p or #. If the address is enclosed in [] you can also use :.
+E.g. ::1p2222 ::1#2222 [::1]:2222
+
+Ranges of hostnames can be given in [] like this: server[1,3,8-10]
+(for server1, server3, server8, server9, server10) or
+server[001,003,008-010] (for server001, server003, server008,
+server009, server010). With Bash's brace expansion you can do:
+-S{dev,prod}[001-100] to get -Sdev[001-100] -Sprod[001-100]
+More [] are allowed: server[1-10].cluster[1-5].example.net
+
+The sshlogin ':' is special, it means 'no ssh' and will therefore run
+on the local computer.
+
+The sshlogin '..' is special: it reads sshlogins from
+~/.parallel/sshloginfile or $XDG_CONFIG_HOME/parallel/sshloginfile.
+
+The sshlogin '-' is special, too: it reads sshlogins from stdin
+(standard input).
+
+To specify more sshlogins separate the sshlogins by comma, newline (in
+the same string), or repeat the options multiple times.
+
+GNU B<parallel> splits on , (comma) so if your sshlogin contains ,
+(comma) you need to replace it with \, or ,,
+
+For examples: see B<--sshloginfile>.
+
+The remote host must have GNU B<parallel> installed.
+
+B<--sshlogin> is known to cause problems with B<-m> and B<-X>.
+
+See also: B<--basefile> B<--transferfile> B<--return> B<--cleanup>
+B<--trc> B<--sshloginfile> B<--workdir> B<--filter-hosts>
+B<--ssh>
+
+
+=item B<--sshloginfile> I<filename>
+
+=item B<--slf> I<filename>
+
+File with sshlogins. The file consists of sshlogins on separate
+lines. Empty lines and lines starting with '#' are ignored. Example:
+
+ server.example.com
+ username@server2.example.com
+ 8/my-8-cpu-server.example.com
+ 2/my_other_username@my-dualcore.example.net
+ # This server has SSH running on port 2222
+ ssh -p 2222 server.example.net
+ 4/ssh -p 2222 quadserver.example.net
+ # Use a different ssh program
+ myssh -p 2222 -l myusername hexacpu.example.net
+ # Use a different ssh program with default number of CPUs
+ //usr/local/bin/myssh -p 2222 -l myusername hexacpu
+ # Use a different ssh program with 6 CPUs
+ 6//usr/local/bin/myssh -p 2222 -l myusername hexacpu
+ # Assume 16 CPUs on the local computer
+ 16/:
+ # Put server1 in hostgroup1
+ @hostgroup1/server1
+ # Put myusername@server2 in hostgroup1+hostgroup2
+ @hostgroup1+hostgroup2/myusername@server2
+ # Force 4 CPUs and put 'ssh -p 2222 server3' in hostgroup1
+ @hostgroup1/4/ssh -p 2222 server3
+
+When using a different ssh program the last argument must be the hostname.
+
+Multiple B<--sshloginfile> are allowed.
+
+GNU B<parallel> will first look for the file in the current dir; if
+that fails, it looks for the file in ~/.parallel.
+
+The sshloginfile '..' is special: it reads sshlogins from
+~/.parallel/sshloginfile.
+
+The sshloginfile '.' is special: it reads sshlogins from
+/etc/parallel/sshloginfile.
+
+The sshloginfile '-' is special, too: it reads sshlogins from stdin
+(standard input).
+
+If the sshloginfile is changed it will be re-read when a job finishes
+though at most once per second. This makes it possible to add and
+remove hosts while running.
+
+This can be used to have a daemon that updates the sshloginfile to
+only contain servers that are up:
+
+  cp original.slf tmp2.slf
+  while true ; do
+    nice parallel --nonall -j0 -k --slf original.slf \
+      --tag echo | perl -pe 's/\t$//' > tmp.slf
+    # Update the copy only when the list of live servers changed
+    if ! diff tmp.slf tmp2.slf >/dev/null; then
+      mv tmp.slf tmp2.slf
+    fi
+    sleep 10
+  done &
+  parallel --slf tmp2.slf ...
+
+See also: B<--filter-hosts>
+
+
+=item B<--slotreplace> I<replace-str>
+
+Use the replacement string I<replace-str> instead of B<{%}> for
+job slot number.
+
+See also: B<{%}>
+
+
+=item B<--silent>
+
+Silent.
+
+The job to be run will not be printed. This is the default. Can be
+reversed with B<-v>.
+
+See also: B<-v>
+
+
+=item B<--template> I<file>=I<repl>
+
+=item B<--tmpl> I<file>=I<repl>
+
+Replace replacement strings in I<file> and save it in I<repl>.
+
+All replacement strings in the contents of I<file> will be
+replaced. All replacement strings in the name I<repl> will be
+replaced.
+
+With B<--cleanup> the new file will be removed when the job is done.
+
+If I<my.tmpl> contains this:
+
+ Xval: {x}
+ Yval: {y}
+ FixedValue: 9
+ # x with 2 decimals
+ DecimalX: {=x $_=sprintf("%.2f",$_) =}
+ TenX: {=x $_=$_*10 =}
+ RandomVal: {=1 $_=rand() =}
+
+it can be used like this:
+
+ myprog() { echo Using "$@"; cat "$@"; }
+ export -f myprog
+ parallel --cleanup --header : --tmpl my.tmpl={#}.t myprog {#}.t \
+ ::: x 1.234 2.345 3.45678 ::: y 1 2 3
+
+See also: B<{}> B<--cleanup>
+
+
+=item B<--tty>
+
+Open terminal tty.
+
+If GNU B<parallel> is used for starting a program that accesses the
+tty (such as an interactive program) then this option may be
+needed. It will default to starting only one job at a time
+(i.e. B<-j1>), not buffer the output (i.e. B<-u>), and it will open a
+tty for the job.
+
+You can of course override B<-j1> and B<-u>.
+
+Using B<--tty> unfortunately means that GNU B<parallel> cannot kill
+the jobs (with B<--timeout>, B<--memfree>, or B<--halt>). This is due
+to GNU B<parallel> giving each child its own process group, which is
+then killed. Process groups are dependent on the tty.
+
+See also: B<--ungroup> B<--open-tty>
+
+
+=item B<--tag>
+
+Tag lines with arguments.
+
+Each output line will be prepended with the arguments and TAB
+(\t). When combined with B<--onall> or B<--nonall> the lines will be
+prepended with the sshlogin instead.
+
+B<--tag> is ignored when using B<-u>.
+
+See also: B<--tagstring> B<--ctag>
+
+
+=item B<--tagstring> I<str>
+
+Tag lines with a string.
+
+Each output line will be prepended with I<str> and TAB (\t). I<str>
+can contain replacement strings such as B<{}>.
+
+B<--tagstring> is ignored when using B<-u>, B<--onall>, and B<--nonall>.
+
+See also: B<--tag> B<--ctagstring>
+
+
+=item B<--tee>
+
+Pipe all data to all jobs.
+
+Used with B<--pipe>/B<--pipe-part> and B<:::>.
+
+ seq 1000 | parallel --pipe --tee -v wc {} ::: -w -l -c
+
+How many numbers in 1..1000 contain 0..9, and how many bytes do they
+fill:
+
+ seq 1000 | parallel --pipe --tee --tag \
+ 'grep {1} | wc {2}' ::: {0..9} ::: -l -c
+
+How many words contain a..z and how many bytes do they fill?
+
+ parallel -a /usr/share/dict/words --pipe-part --tee --tag \
+ 'grep {1} | wc {2}' ::: {a..z} ::: -l -c
+
+See also: B<:::> B<--pipe> B<--pipe-part>
+
+
+=item B<--term-seq> I<sequence>
+
+Termination sequence.
+
+When a job is killed due to B<--timeout>, B<--memfree>, B<--halt>, or
+abnormal termination of GNU B<parallel>, I<sequence> determines how
+the job is killed. The default is:
+
+ TERM,200,TERM,100,TERM,50,KILL,25
+
+which sends a TERM signal, waits 200 ms, sends another TERM signal,
+waits 100 ms, sends another TERM signal, waits 50 ms, sends a KILL
+signal, waits 25 ms, and exits. GNU B<parallel> detects if a process
+dies before the waiting time is up.
+
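+For example, to try a gentler INT first (I<myprog> and I<args> are
+placeholders):
+
+  parallel --timeout 10 --term-seq INT,500,TERM,500,KILL,25 \
+    myprog ::: args
+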
+See also: B<--halt> B<--timeout> B<--memfree>
+
+
+=item B<--total-jobs> I<jobs>
+
+=item B<--total> I<jobs>
+
+Provide the total number of jobs for computing ETA; this is also used
+by B<--bar>.
+
+Without B<--total-jobs> GNU Parallel will read all jobs before
+starting a job. B<--total-jobs> is useful if the input is generated
+slowly.
+
+See also: B<--bar> B<--eta>
+
+
+=item B<--tmpdir> I<dirname>
+
+Directory for temporary files.
+
+GNU B<parallel> normally buffers output into temporary files in
+/tmp. By setting B<--tmpdir> you can use a different dir for the
+files. Setting B<--tmpdir> is equivalent to setting $TMPDIR.
+
+See also: B<--compress> B<$TMPDIR> B<$PARALLEL_REMOTE_TMPDIR>
+
+
+=item B<--tmux> (Long beta testing)
+
+Use B<tmux> for output. Start a B<tmux> session and run each job in a
+window in that session. No other output will be produced.
+
+See also: B<--tmuxpane>
+
+
+=item B<--tmuxpane> (Long beta testing)
+
+Use B<tmux> for output but put output into panes in the first window.
+Useful if you want to monitor the progress of less than 100 concurrent
+jobs.
+
+See also: B<--tmux>
+
+
+=item B<--timeout> I<duration>
+
+Time out for command. If the command runs for longer than I<duration>
+seconds it will get killed as per B<--term-seq>.
+
+If I<duration> is followed by a % then the timeout will be dynamically
+computed as a percentage of the median runtime of successful
+jobs. Only values > 100% make sense.
+
+I<duration> is in seconds, but can be postfixed with s, m, h, or d.
+
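+For example:
+
+  # Kill jobs running longer than 10 seconds:
+  parallel --timeout 10 sleep ::: 2 4 8 16
+  # Kill jobs running longer than 200% of the median runtime
+  # of the successful jobs:
+  parallel --timeout 200% sleep ::: 2 2 3 3 100
+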
+See also: TIME POSTFIXES B<--term-seq> B<--retries>
+
+
+=item B<--verbose>
+
+=item B<-t>
+
+Print the job to be run on stderr (standard error).
+
+See also: B<-v> B<--interactive>
+
+
+=item B<--transfer>
+
+Transfer files to remote computers.
+
+Shorthand for: B<--transferfile {}>.
+
+See also: B<--transferfile>.
+
+
+=item B<--transferfile> I<filename>
+
+=item B<--tf> I<filename>
+
+Transfer I<filename> to remote computers.
+
+B<--transferfile> is used with B<--sshlogin> to transfer files to the
+remote computers. The files will be transferred using B<rsync> and
+will be put relative to the work dir.
+
+The I<filename> will normally contain a replacement string.
+
+If the path contains /./ the remaining path will be relative to the
+work dir (for details: see B<rsync>). If the work dir is
+B</home/user>, the transferring will be as follows:
+
+ /tmp/foo/bar => /tmp/foo/bar
+ tmp/foo/bar => /home/user/tmp/foo/bar
+ /tmp/./foo/bar => /home/user/foo/bar
+ tmp/./foo/bar => /home/user/foo/bar
+
+I<Examples>
+
+This will transfer the file I<foo/bar.txt> to the computer
+I<server.example.com> to the file I<$HOME/foo/bar.txt> before running
+B<wc foo/bar.txt> on I<server.example.com>:
+
+ echo foo/bar.txt | parallel --transferfile {} \
+ --sshlogin server.example.com wc
+
+This will transfer the file I</tmp/foo/bar.txt> to the computer
+I<server.example.com> to the file I</tmp/foo/bar.txt> before running
+B<wc /tmp/foo/bar.txt> on I<server.example.com>:
+
+ echo /tmp/foo/bar.txt | parallel --transferfile {} \
+ --sshlogin server.example.com wc
+
+This will transfer the file I</tmp/foo/bar.txt> to the computer
+I<server.example.com> to the file I<foo/bar.txt> before running
+B<wc ./foo/bar.txt> on I<server.example.com>:
+
+ echo /tmp/./foo/bar.txt | parallel --transferfile {} \
+ --sshlogin server.example.com wc {= s:.*/\./:./: =}
+
+B<--transferfile> is often used with B<--return> and B<--cleanup>. A
+shorthand for B<--transferfile {}> is B<--transfer>.
+
+B<--transferfile> is ignored when used with B<--sshlogin :> or when
+not used with B<--sshlogin>.
+
+See also: B<--workdir> B<--sshlogin> B<--basefile> B<--return>
+B<--cleanup>
+
+
+=item B<--trc> I<filename>
+
+Transfer, Return, Cleanup. Shorthand for: B<--transfer> B<--return>
+I<filename> B<--cleanup>
+
+See also: B<--transfer> B<--return> B<--cleanup>
+
+
+=item B<--trim> <n|l|r|lr|rl>
+
+Trim white space in input.
+
+=over 4
+
+=item n
+
+No trim. Input is not modified. This is the default.
+
+=item l
+
+Left trim. Remove white space from start of input. E.g. " a bc " -> "a bc ".
+
+=item r
+
+Right trim. Remove white space from end of input. E.g. " a bc " -> " a bc".
+
+=item lr
+
+=item rl
+
+Both trim. Remove white space from both start and end of input. E.g. "
+a bc " -> "a bc". This is the default if B<--colsep> is used.
+
+=back
+
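+For example (output shown as a comment):
+
+  echo ' a bc ' | parallel --trim lr echo '[{}]'
+  # [a bc]
+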
+See also: B<--no-run-if-empty> B<{}> B<--colsep>
+
+
+=item B<--ungroup>
+
+=item B<-u>
+
+Ungroup output.
+
+Output is printed as soon as possible and bypasses GNU B<parallel>
+internal processing. This may cause output from different commands to
+be mixed thus should only be used if you do not care about the
+output. Compare these:
+
+ seq 4 | parallel -j0 \
+ 'sleep {};echo -n start{};sleep {};echo {}end'
+ seq 4 | parallel -u -j0 \
+ 'sleep {};echo -n start{};sleep {};echo {}end'
+
+It also disables B<--tag>. GNU B<parallel> outputs faster with
+B<-u>. Compare the speeds of these:
+
+ parallel seq ::: 300000000 >/dev/null
+ parallel -u seq ::: 300000000 >/dev/null
+ parallel --line-buffer seq ::: 300000000 >/dev/null
+
+Can be reversed with B<--group>.
+
+See also: B<--line-buffer> B<--group>
+
+
+=item B<--extensionreplace> I<replace-str>
+
+=item B<--er> I<replace-str>
+
+Use the replacement string I<replace-str> instead of B<{.}> for input
+line without extension.
+
+See also: B<{.}>
+
+
+=item B<--use-sockets-instead-of-threads>
+
+See also: B<--use-cores-instead-of-threads>
+
+
+=item B<--use-cores-instead-of-threads>
+
+=item B<--use-cpus-instead-of-cores> (obsolete)
+
+Determine how GNU B<parallel> counts the number of CPUs.
+
+GNU B<parallel> uses this number when the number of jobslots
+(B<--jobs>) is computed relative to the number of CPUs (e.g. 100% or
++1).
+
+CPUs can be counted in three different ways:
+
+=over 8
+
+=item sockets
+
+The number of filled CPU sockets (i.e. the number of physical chips).
+
+=item cores
+
+The number of physical cores (i.e. the number of physical compute
+cores).
+
+=item threads
+
+The number of hyperthreaded cores (i.e. the number of virtual
+cores - with some of them possibly being hyperthreaded)
+
+=back
+
+Normally the number of CPUs is computed as the number of CPU
+threads. With B<--use-sockets-instead-of-threads> or
+B<--use-cores-instead-of-threads> you can force it to be computed as
+the number of filled sockets or number of cores instead.
+
+Most users will not need these options.
+
+B<--use-cpus-instead-of-cores> is a (misleading) alias for
+B<--use-sockets-instead-of-threads> and is kept for backwards
+compatibility.
+
+See also: B<--number-of-threads> B<--number-of-cores>
+B<--number-of-sockets>
+
+
+=item B<-v>
+
+Verbose.
+
+Print the job to be run on stdout (standard output). Can be reversed
+with B<--silent>.
+
+Use B<-v> B<-v> to print the wrapping ssh command when running remotely.
+
+See also: B<-t>
+
+
+=item B<--version>
+
+=item B<-V>
+
+Print the version of GNU B<parallel> and exit.
+
+
+=item B<--workdir> I<mydir>
+
+=item B<--wd> I<mydir>
+
+Jobs will be run in the dir I<mydir>. The default is the current dir
+for the local machine, and the login dir for remote computers.
+
+Files transferred using B<--transferfile> and B<--return> will be
+relative to I<mydir> on remote computers.
+
+The special I<mydir> value B<...> will create working dirs under
+B<~/.parallel/tmp/>. If B<--cleanup> is given these dirs will be
+removed.
+
+The special I<mydir> value B<.> uses the current working dir. If the
+current working dir is beneath your home dir, the value B<.> is
+treated as the relative path to your home dir. This means that if your
+home dir is different on remote computers (e.g. if your login is
+different) the relative path will still be relative to your home dir.
+
+To see the difference try:
+
+ parallel -S server pwd ::: ""
+ parallel --wd . -S server pwd ::: ""
+ parallel --wd ... -S server pwd ::: ""
+
+I<mydir> can contain GNU B<parallel>'s replacement strings.
+
+
+=item B<--wait>
+
+Wait for all commands to complete.
+
+Used with B<--semaphore> or B<--sqlmaster>.
+
+See also: B<man sem>
+
+
+=item B<-X>
+
+Multiple arguments with context replace. Insert as many arguments as
+the command line length permits. If multiple jobs are being run in
+parallel: distribute the arguments evenly among the jobs. Use B<-j1>
+to avoid this.
+
+If B<{}> is not used the arguments will be appended to the line. If
+B<{}> is used as part of a word (like I<pic{}.jpg>) then the whole
+word will be repeated. If B<{}> is used multiple times each B<{}> will
+be replaced with the arguments.
+
+Normally B<-X> will do the right thing, whereas B<-m> can give
+unexpected results if B<{}> is used as part of a word.
+
+Support for B<-X> with B<--sshlogin> is limited and may fail.
+
+See also: B<-m>
+
+
+=item B<--exit>
+
+=item B<-x>
+
+Exit if the size (see the B<-s> option) is exceeded.
+
+
+=item B<--xargs>
+
+Multiple arguments. Insert as many arguments as the command line
+length permits.
+
+If B<{}> is not used the arguments will be appended to the
+line. If B<{}> is used multiple times each B<{}> will be replaced
+with all the arguments.
+
+Support for B<--xargs> with B<--sshlogin> is limited and may fail.
+
+See also: B<-X>
+
+
+=back
+
+
+=head1 EXAMPLES
+
+See: B<man parallel_examples>
+
+
+=head1 SPREADING BLOCKS OF DATA
+
+B<--round-robin>, B<--pipe-part>, B<--shard>, B<--bin> and
+B<--group-by> are all specialized versions of B<--pipe>.
+
+In the following I<n> is the number of jobslots given by B<--jobs>. A
+record starts with B<--recstart> and ends with B<--recend>. It is
+typically a full line. A chunk is a number of full records that is
+approximately the size of a block. A block can contain half records, a
+chunk cannot.
+
+B<--pipe> starts one job per chunk. It reads blocks from stdin
+(standard input). It finds a record end near a block border and passes
+a chunk to the program.
+
+B<--pipe-part> starts one job per chunk - just like normal
+B<--pipe>. It first finds record endings near all block borders in the
+file and then starts the jobs. By using B<--block -1> it will set the
+block size to size-of-file/I<n>. Used this way it will start I<n>
+jobs in total.
+
+B<--round-robin> starts I<n> jobs in total. It reads a block and
+passes a chunk to whichever job is ready to read. It does not parse
+the content except for identifying where a record ends to make sure it
+only passes full records.
+
+B<--shard> starts I<n> jobs in total. It parses each line to read the
+string in the given column. Based on this string the line is passed to
+one of the I<n> jobs. All lines having this string will be given to the
+same jobslot.
+
+B<--bin> works like B<--shard> but the value of the column must be
+numeric and is the jobslot number it will be passed to. If the value
+is bigger than I<n>, then I<n> will be subtracted from the value until
+the value is smaller than or equal to I<n>.
+
+B<--group-by> starts one job per chunk. Record borders are not given
+by B<--recend>/B<--recstart>. Instead a record is defined by a group
+of lines having the same string in a given column. So the string of a
+given column changes at a chunk border. With B<--pipe> every line is
+parsed, with B<--pipe-part> only a few lines are parsed to find the
+chunk border.
+
+B<--group-by> can be combined with B<--round-robin> or B<--pipe-part>.
+
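+For example, a sketch where B<--pipe-part> with B<--block -1> gives
+each of 4 jobslots exactly one chunk of the file:
+
+  parallel -a bigfile --pipe-part --block -1 -j4 wc -l
+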
+
+=head1 TIME POSTFIXES
+
+Arguments that give a duration are given in seconds, but can be
+expressed as floats postfixed with B<s>, B<m>, B<h>, or B<d> which
+would multiply the float by 1, 60, 60*60, or 60*60*24. Thus these are
+equivalent: 100000 and 1d3.5h16.6m4s.
+
+
+=head1 UNIT PREFIX
+
+Many numerical arguments in GNU B<parallel> can be postfixed with K,
+M, G, T, P, k, m, g, t, or p which would multiply the number with
+1024, 1048576, 1073741824, 1099511627776, 1125899906842624, 1000,
+1000000, 1000000000, 1000000000000, or 1000000000000000, respectively.
+
+You can even give it as a math expression. E.g. 1000000 can be written
+as 1M-12*2.024*2k.
+
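+For example, these all set a 10 MB (10*1048576 bytes) block size:
+
+  cat bigfile | parallel --pipe --block 10M wc
+  cat bigfile | parallel --pipe --block 10240K wc
+  cat bigfile | parallel --pipe --block 5M*2 wc
+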
+
+=head1 QUOTING
+
+GNU B<parallel> is very liberal in quoting. You only need to quote
+characters that have special meaning in shell:
+
+ ( ) $ ` ' " < > ; | \
+
+and depending on context these need to be quoted, too:
+
+ ~ & # ! ? space * {
+
+Therefore most people will never need more quoting than putting '\'
+in front of the special characters.
+
+Often you can simply put \' around every ':
+
+ perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"' file
+
+can be quoted:
+
+ parallel perl -ne \''/^\S+\s+\S+$/ and print $ARGV,"\n"'\' ::: file
+
+However, when you want to use a shell variable you need to quote the
+$-sign. Here is an example using $PARALLEL_SEQ. This variable is set
+by GNU B<parallel> itself, so the evaluation of the $ must be done by
+the sub shell started by GNU B<parallel>:
+
+ seq 10 | parallel -N2 echo seq:\$PARALLEL_SEQ arg1:{1} arg2:{2}
+
+If the variable is set before GNU B<parallel> starts you can do this:
+
+ VAR=this_is_set_before_starting
+ echo test | parallel echo {} $VAR
+
+Prints: B<test this_is_set_before_starting>
+
+It is a little more tricky if the variable contains more than one space in a row:
+
+ VAR="two spaces between each word"
+ echo test | parallel echo {} \'"$VAR"\'
+
+Prints: B<test two spaces between each word>
+
+If the variable should not be evaluated by the shell starting GNU
+B<parallel> but be evaluated by the sub shell started by GNU
+B<parallel>, then you need to quote it:
+
+ echo test | parallel VAR=this_is_set_after_starting \; echo {} \$VAR
+
+Prints: B<test this_is_set_after_starting>
+
+It is a little more tricky if the variable contains spaces:
+
+ echo test |\
+ parallel VAR='"two spaces between each word"' echo {} \'"$VAR"\'
+
+Prints: B<test two spaces between each word>
+
+$$ is the shell variable containing the process id of the shell. This
+will print the process id of the shell running GNU B<parallel>:
+
+ seq 10 | parallel echo $$
+
+And this will print the process ids of the sub shells started by GNU
+B<parallel>.
+
+ seq 10 | parallel echo \$\$
+
+If the special characters should not be evaluated by the sub shell
+then you need to protect them against evaluation from both the shell
+starting GNU B<parallel> and the sub shell:
+
+ echo test | parallel echo {} \\\$VAR
+
+Prints: B<test $VAR>
+
+GNU B<parallel> can protect against evaluation by the sub shell by
+using -q:
+
+ echo test | parallel -q echo {} \$VAR
+
+Prints: B<test $VAR>
+
+This is particularly useful if you have lots of quoting. If you want
+to run a perl script like this:
+
+ perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"' file
+
+It needs to be quoted like one of these:
+
+ ls | parallel perl -ne '/^\\S+\\s+\\S+\$/\ and\ print\ \$ARGV,\"\\n\"'
+ ls | parallel perl -ne \''/^\S+\s+\S+$/ and print $ARGV,"\n"'\'
+
+Notice how spaces, \'s, "'s, and $'s need to be quoted. GNU
+B<parallel> can do the quoting by using option -q:
+
+ ls | parallel -q perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"'
+
+However, this means you cannot make the sub shell interpret special
+characters. For example, because of B<-q> this WILL NOT WORK:
+
+ ls *.gz | parallel -q "zcat {} >{.}"
+ ls *.gz | parallel -q "zcat {} | bzip2 >{.}.bz2"
+
+because > and | need to be interpreted by the sub shell.
+
+If you get errors like:
+
+ sh: -c: line 0: syntax error near unexpected token
+ sh: Syntax error: Unterminated quoted string
+ sh: -c: line 0: unexpected EOF while looking for matching `''
+ sh: -c: line 1: syntax error: unexpected end of file
+ zsh:1: no matches found:
+
+then you might try using B<-q>.
+
+If you are using B<bash> process substitution like B<<(cat foo)> then
+you may try B<-q> and prepending I<command> with B<bash -c>:
+
+ ls | parallel -q bash -c 'wc -c <(echo {})'
+
+Or for substituting output:
+
+ ls | parallel -q bash -c \
+ 'tar c {} | tee >(gzip >{}.tar.gz) | bzip2 >{}.tar.bz2'
+
+B<Conclusion>: If this is confusing consider avoiding having to deal
+with quoting by writing a small script or a function (remember to
+B<export -f> the function) and have GNU B<parallel> call that.
+
+
+=head1 LIST RUNNING JOBS
+
+If you want a list of the jobs currently running you can run:
+
+ killall -USR1 parallel
+
+GNU B<parallel> will then print the currently running jobs on stderr
+(standard error).
+
+
+=head1 COMPLETE RUNNING JOBS BUT DO NOT START NEW JOBS
+
+If you regret starting a lot of jobs you can simply break GNU B<parallel>,
+but if you want to make sure you do not have half-completed jobs you
+should send the signal B<SIGHUP> to GNU B<parallel>:
+
+ killall -HUP parallel
+
+This will tell GNU B<parallel> to not start any new jobs, but wait until
+the currently running jobs are finished before exiting.
+
+
+=head1 ENVIRONMENT VARIABLES
+
+=over 9
+
+=item $PARALLEL_HOME
+
+Dir where GNU B<parallel> stores config files, semaphores, and caches
+information between invocations. If set to a non-existent dir, the dir
+will be created.
+
+Default: $HOME/.parallel.
+
+
+=item $PARALLEL_ARGHOSTGROUPS
+
+When using B<--hostgroups> GNU B<parallel> sets this to the hostgroups
+of the job.
+
+Remember to quote the $, so it gets evaluated by the correct shell. Or
+use B<--plus> and {agrp}.
+
+
+=item $PARALLEL_HOSTGROUPS
+
+When using B<--hostgroups> GNU B<parallel> sets this to the hostgroups
+of the sshlogin that the job is run on.
+
+Remember to quote the $, so it gets evaluated by the correct shell. Or
+use B<--plus> and {hgrp}.
+
+
+=item $PARALLEL_JOBSLOT
+
+Set by GNU B<parallel> and can be used in jobs run by GNU B<parallel>.
+Remember to quote the $, so it gets evaluated by the correct shell. Or
+use B<--plus> and {slot}.
+
+$PARALLEL_JOBSLOT is the jobslot of the job. It is equal to {%} unless
+the job is being retried. See {%} for details.
+
+
+=item $PARALLEL_PID
+
+Set by GNU B<parallel> and can be used in jobs run by GNU B<parallel>.
+Remember to quote the $, so it gets evaluated by the correct shell.
+
+This makes it possible for the jobs to communicate directly to GNU
+B<parallel>.
+
+B<Example:> If each of the jobs tests a solution and one of jobs finds
+the solution the job can tell GNU B<parallel> not to start more jobs
+by: B<kill -HUP $PARALLEL_PID>. This only works on the local
+computer.
+
+
+=item $PARALLEL_RSYNC_OPTS
+
+Options to pass on to B<rsync>. Defaults to: -rlDzR.
+
+
+=item $PARALLEL_SHELL
+
+Use this shell for the commands run by GNU B<parallel>:
+
+=over 2
+
+=item *
+
+$PARALLEL_SHELL. If undefined use:
+
+=item *
+
+The shell that started GNU B<parallel>. If that cannot be determined:
+
+=item *
+
+$SHELL. If undefined use:
+
+=item *
+
+/bin/sh
+
+=back
+
+
+=item $PARALLEL_SSH
+
+GNU B<parallel> defaults to using the B<ssh> command for remote
+access. This can be overridden with $PARALLEL_SSH, which again can be
+overridden with B<--ssh>. It can also be set on a per server basis
+(see B<--sshlogin>).
+
+
+=item $PARALLEL_SSHHOST
+
+Set by GNU B<parallel> and can be used in jobs run by GNU B<parallel>.
+Remember to quote the $, so it gets evaluated by the correct shell. Or
+use B<--plus> and {host}.
+
+
+$PARALLEL_SSHHOST is the host part of an sshlogin line. E.g.
+
+ 4//usr/bin/specialssh user@host
+
+becomes:
+
+ host
+
+
+=item $PARALLEL_SSHLOGIN
+
+Set by GNU B<parallel> and can be used in jobs run by GNU B<parallel>.
+Remember to quote the $, so it gets evaluated by the correct shell. Or
+use B<--plus> and {sshlogin}.
+
+
+The value is the sshlogin line with number of threads removed. E.g.
+
+ 4//usr/bin/specialssh user@host
+
+becomes:
+
+ /usr/bin/specialssh user@host
+
+
+=item $PARALLEL_SEQ
+
+Set by GNU B<parallel> and can be used in jobs run by GNU B<parallel>.
+Remember to quote the $, so it gets evaluated by the correct shell.
+
+$PARALLEL_SEQ is the sequence number of the job running.
+
+B<Example:>
+
+ seq 10 | parallel -N2 \
+ echo seq:'$'PARALLEL_SEQ arg1:{1} arg2:{2}
+
+{#} is a shorthand for $PARALLEL_SEQ.
+
+
+=item $PARALLEL_TMUX
+
+Path to B<tmux>. If unset the B<tmux> in $PATH is used.
+
+
+=item $TMPDIR
+
+Directory for temporary files.
+
+See also: B<--tmpdir>
+
+
+=item $PARALLEL_REMOTE_TMPDIR
+
+Directory for temporary files on remote servers.
+
+See also: B<--tmpdir>
+
+
+=item $PARALLEL
+
+The environment variable $PARALLEL will be used as default options for
+GNU B<parallel>. If the variable contains special shell characters
+(e.g. $, *, or space) then these need to be escaped with \.
+
+B<Example:>
+
+ cat list | parallel -j1 -k -v ls
+ cat list | parallel -j1 -k -v -S"myssh user@server" ls
+
+can be written as:
+
+ cat list | PARALLEL="-kvj1" parallel ls
+ cat list | PARALLEL='-kvj1 -S myssh\ user@server' \
+ parallel echo
+
+Notice the \ after 'myssh' is needed because 'myssh' and 'user@server'
+must be one argument.
+
+See also: B<--profile>
+
+=back
+
+
+=head1 DEFAULT PROFILE (CONFIG FILE)
+
+The global configuration file /etc/parallel/config, followed by user
+configuration file ~/.parallel/config (formerly known as .parallelrc)
+will be read in turn if they exist. Lines starting with '#' will be
+ignored. The format can follow that of the environment variable
+$PARALLEL, but it is often easier to simply put each option on its own
+line.
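+
+A sketch of a user configuration file using one option per line:
+
+  # ~/.parallel/config
+  # Keep output in input order and prefix lines with the argument
+  --keep-order
+  --tag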
+
+Options on the command line take precedence, followed by the
+environment variable $PARALLEL, user configuration file
+~/.parallel/config, and finally the global configuration file
+/etc/parallel/config.
+
+Note that neither the files read for options nor the environment
+variable $PARALLEL may contain retired options such as B<--tollef>.
+
+=head1 PROFILE FILES
+
+If B<--profile> is set, GNU B<parallel> will read the profile from
+that file rather than the global or user configuration files. You can
+give B<--profile> multiple times.
+
+Profiles are searched for in B<~/.parallel>. If the name starts with
+B</> it is seen as an absolute path. If the name starts with B<./> it
+is seen as a relative path from current dir.
+
+Example: Profile for running a command on every sshlogin in
+~/.ssh/sshlogins and prepend the output with the sshlogin:
+
+ echo --tag -S .. --nonall > ~/.parallel/nonall_profile
+ parallel -J nonall_profile uptime
+
+Example: Profile for running every command with B<-j-1> and B<nice>
+
+ echo -j-1 nice > ~/.parallel/nice_profile
+ parallel -J nice_profile bzip2 -9 ::: *
+
+Example: Profile for running a perl script before every command:
+
+ echo "perl -e '\$a=\$\$; print \$a,\" \",'\$PARALLEL_SEQ',\" \";';" \
+ > ~/.parallel/pre_perl
+ parallel -J pre_perl echo ::: *
+
+Note how the $ and " need to be quoted using \.
+
+Example: Profile for running distributed jobs with B<nice> on the
+remote computers:
+
+ echo -S .. nice > ~/.parallel/dist
+ parallel -J dist --trc {.}.bz2 bzip2 -9 ::: *
+
+
+=head1 EXIT STATUS
+
+Exit status depends on B<--halt-on-error> if one of these is used:
+success=X, success=Y%, fail=Y%.
+
+=over 6
+
+=item Z<>0
+
+All jobs ran without error. If success=X is used: X jobs ran without
+error. If success=Y% is used: Y% of the jobs ran without error.
+
+=item Z<>1-100
+
+Some of the jobs failed. The exit status gives the number of failed
+jobs. If Y% is used the exit status is the percentage of jobs that
+failed.
+
+=item Z<>101
+
+More than 100 jobs failed.
+
+=item Z<>255
+
+Other error.
+
+=item Z<>-1 (In joblog and SQL table)
+
+Killed by Ctrl-C, timeout, not enough memory or similar.
+
+=item Z<>-2 (In joblog and SQL table)
+
+skip() was called in B<{= =}>.
+
+=item Z<>-1000 (In SQL table)
+
+Job is ready to run (set by --sqlmaster).
+
+=item Z<>-1220 (In SQL table)
+
+Job is taken by worker (set by --sqlworker).
+
+=back
+
+If fail=1 is used, the exit status will be the exit status of the
+failing job.
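+
+A small demonstration: 2 of these 4 jobs fail, so GNU B<parallel>
+should exit with 2:
+
+  parallel exit ::: 0 1 0 2; echo Exit status: $?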
+
+
+=head1 DIFFERENCES BETWEEN GNU Parallel AND ALTERNATIVES
+
+See: B<man parallel_alternatives>
+
+
+=head1 BUGS
+
+=head2 Quoting of newline
+
+Because of the way newline is quoted this will not work:
+
+ echo 1,2,3 | parallel -vkd, "echo 'a{}b'"
+
+However, these will all work:
+
+ echo 1,2,3 | parallel -vkd, echo a{}b
+ echo 1,2,3 | parallel -vkd, "echo 'a'{}'b'"
+ echo 1,2,3 | parallel -vkd, "echo 'a'"{}"'b'"
+
+
+=head2 Speed
+
+=head3 Startup
+
+GNU B<parallel> is slow at starting up - around 250 ms the first time
+and 150 ms after that.
+
+=head3 Job startup
+
+Starting a job on the local machine takes around 3-10 ms. This can be
+a big overhead if the job takes very few ms to run. Often you can
+group small jobs together using B<-X> which will make the overhead
+less significant. Or you can run multiple GNU B<parallel>s as
+described in B<EXAMPLE: Speeding up fast jobs>.
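+
+A sketch of the difference; B<-X> fills each command line with as
+many arguments as fit, so far fewer processes are started:
+
+  seq 100000 | parallel echo      # one echo per argument
+  seq 100000 | parallel -X echo   # as many arguments per echo as fit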
+
+=head3 SSH
+
+When using multiple computers GNU B<parallel> opens B<ssh> connections
+to them to figure out how many connections can be used reliably
+simultaneously (namely SSHD's MaxStartups). This test is done for each
+host in serial, so if your B<--sshloginfile> contains many hosts it may
+be slow.
+
+If your jobs are short you may see that there are fewer jobs running
+on the remote systems than expected. This is due to time spent logging
+in and out. B<-M> may help here.
+
+=head3 Disk access
+
+A single disk can normally read data faster if it reads one file at a
+time instead of reading a lot of files in parallel, as this will avoid
+disk seeks. However, newer disk systems with multiple drives can read
+faster if reading from multiple files in parallel.
+
+If the jobs are of the form read-all-compute-all-write-all, so
+everything is read before anything is written, it may be faster to
+force only one disk access at a time:
+
+ sem --id diskio cat file | compute | sem --id diskio cat > file
+
+If the jobs are of the form read-compute-write, so writing starts
+before all reading is done, it may be faster to force only one reader
+and writer at a time:
+
+ sem --id read cat file | compute | sem --id write cat > file
+
+If the jobs are of the form read-compute-read-compute, it may be
+faster to run more jobs in parallel than the system has CPUs, as some
+of the jobs will be stuck waiting for disk access.
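+
+A sketch of oversubscribing the CPUs for such jobs; B<compute> is a
+placeholder for the real command:
+
+  parallel -j200% compute ::: file*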
+
+=head2 --nice limits command length
+
+The current implementation of B<--nice> is too pessimistic in the max
+allowed command length. It only uses a little more than half of what
+it could. This affects B<-X> and B<-m>. If this becomes a real problem for
+you, file a bug report.
+
+=head2 Aliases and functions do not work
+
+If you get:
+
+ Can't exec "command": No such file or directory
+
+or:
+
+ open3: exec of by command failed
+
+or:
+
+ /bin/bash: command: command not found
+
+it may be because I<command> is not known, but it could also be
+because I<command> is an alias or a function. If it is a function you
+need to B<export -f> the function first or use B<env_parallel>. An
+alias will only work if you use B<env_parallel>.
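+
+A minimal sketch of both workarounds:
+
+  myfunc() { echo Got: "$1"; }
+  export -f myfunc
+  parallel myfunc ::: a b
+
+  # Activate env_parallel first (see man env_parallel)
+  . "$(which env_parallel.bash)"
+  alias myalias='echo Got:'
+  env_parallel myalias ::: a b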
+
+=head2 Database with MySQL fails randomly
+
+The B<--sql*> options may fail randomly with MySQL. This problem does
+not exist with PostgreSQL.
+
+
+=head1 REPORTING BUGS
+
+Report bugs to <parallel@gnu.org> or
+https://savannah.gnu.org/bugs/?func=additem&group=parallel
+
+When you write your report, please keep in mind that you must give
+the reader enough information to be able to run exactly what you
+run. So you need to include all data and programs that you use to
+show the problem.
+
+See a perfect bug report on
+https://lists.gnu.org/archive/html/bug-parallel/2015-01/msg00000.html
+
+Your bug report should always include:
+
+=over 2
+
+=item *
+
+The error message you get (if any). If the error message is not from
+GNU B<parallel> you need to show why you think GNU B<parallel> caused
+this.
+
+=item *
+
+The complete output of B<parallel --version>. If you are not running
+the latest released version (see https://ftp.gnu.org/gnu/parallel/) you
+should specify why you believe the problem is not fixed in that
+version.
+
+=item *
+
+A minimal, complete, and verifiable example (See description on
+https://stackoverflow.com/help/mcve).
+
+It should be a complete example that others can run which shows the
+problem including all files needed to run the example. This should
+preferably be small and simple, so try to remove as many options as
+possible.
+
+A combination of B<yes>, B<seq>, B<cat>, B<echo>, B<wc>, and B<sleep>
+can reproduce most errors.
+
+If your example requires large files, see if you can make them with
+something like B<seq 100000000> > B<bigfile> or B<yes | head -n
+1000000000> > B<file>. If you need multiple columns: B<paste <(seq
+1000) <(seq 1000 1999)>
+
+If your example requires remote execution, see if you can use
+B<localhost> - maybe using another login.
+
+If you have access to a different system (maybe a VirtualBox on your
+own machine), test if your MCVE shows the problem on that system. If
+it does not, read below.
+
+=item *
+
+The output of your example. If your problem is not easily reproduced
+by others, the output might help them figure out the problem.
+
+=item *
+
+Whether you have watched the intro videos
+(https://www.youtube.com/playlist?list=PL284C9FF2488BC6D1), walked
+through the tutorial (man parallel_tutorial), and read the examples
+(man parallel_examples).
+
+=back
+
+=head2 Bug dependent on environment
+
+If you suspect the error is dependent on your environment or
+distribution, please see if you can reproduce the error on one of
+these VirtualBox images:
+https://sourceforge.net/projects/virtualboximage/files/
+https://www.osboxes.org/virtualbox-images/
+
+Specifying the name of your distribution is not enough as you may have
+installed software that is not in the VirtualBox images.
+
+If you cannot reproduce the error on any of the VirtualBox images
+above, see if you can build a VirtualBox image on which you can
+reproduce the error. If not, you should assume the debugging will be
+done through you. That will put a lot more burden on you, and it is
+extra important that you give all information that may help. In general the
+problem will be fixed faster and with much less work for you if you
+can reproduce the error on a VirtualBox - even if you have to build a
+VirtualBox image.
+
+=head2 In summary
+
+Your report must include:
+
+=over 2
+
+=item *
+
+B<parallel --version>
+
+=item *
+
+output + error message
+
+=item *
+
+full example including all files
+
+=item *
+
+VirtualBox image, if you cannot reproduce it on other systems
+
+=back
+
+
+
+=head1 AUTHOR
+
+When using GNU B<parallel> for a publication please cite:
+
+O. Tange (2011): GNU Parallel - The Command-Line Power Tool, ;login:
+The USENIX Magazine, February 2011:42-47.
+
+This helps funding further development; and it won't cost you a cent.
+If you pay 10000 EUR you should feel free to use GNU Parallel without citing.
+
+Copyright (C) 2007-10-18 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2008-2010 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+Parts of the manual concerning B<xargs> compatibility are inspired by
+the manual of B<xargs> from GNU findutils 4.4.2.
+
+
+=head1 LICENSE
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this
+documentation under the terms of the GNU Free Documentation License,
+Version 1.3 or any later version published by the Free Software
+Foundation; with no Invariant Sections, with no Front-Cover Texts, and
+with no Back-Cover Texts. A copy of the license is included in the
+file LICENSES/GFDL-1.3-or-later.txt.
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 2
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=back
+
+=over 9
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file
+LICENSES/CC-BY-SA-4.0.txt.
+
+
+=head1 DEPENDENCIES
+
+GNU B<parallel> uses Perl, and the Perl modules Getopt::Long,
+IPC::Open3, Symbol, IO::File, POSIX, and File::Temp.
+
+For B<--csv> it uses the Perl module Text::CSV.
+
+For remote usage it uses B<rsync> with B<ssh>.
+
+
+=head1 SEE ALSO
+
+B<parallel_tutorial>(1), B<env_parallel>(1), B<parset>(1),
+B<parsort>(1), B<parallel_alternatives>(1), B<parallel_design>(7),
+B<niceload>(1), B<sql>(1), B<ssh>(1), B<ssh-agent>(1), B<sshpass>(1),
+B<ssh-copy-id>(1), B<rsync>(1)
+
+=cut
diff --git a/src/parallel_alternatives.pod b/src/parallel_alternatives.pod
new file mode 100644
index 0000000..f878e08
--- /dev/null
+++ b/src/parallel_alternatives.pod
@@ -0,0 +1,4765 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=encoding utf8
+
+=head1 NAME
+
+parallel_alternatives - Alternatives to GNU B<parallel>
+
+
+=head1 DIFFERENCES BETWEEN GNU Parallel AND ALTERNATIVES
+
+There are a lot of programs that share functionality with GNU
+B<parallel>. Some of these are specialized tools, and while GNU
+B<parallel> can emulate many of them, a specialized tool can be better
+at a given task. GNU B<parallel> strives to include the best of the
+general functionality without sacrificing ease of use.
+
+B<parallel> has existed since 2002-01-06 and as GNU B<parallel> since
+2010. A lot of the alternatives have not had the vitality to survive
+that long, but have come and gone during that time.
+
+GNU B<parallel> is actively maintained with a new release every month
+since 2010. Most other alternatives are fleeting interests of their
+developers, with irregular releases, and are only maintained for a few
+years.
+
+
+=head2 SUMMARY LEGEND
+
+The following features are in some of the comparable tools:
+
+=head3 Inputs
+
+=over
+
+=item I1. Arguments can be read from stdin
+
+=item I2. Arguments can be read from a file
+
+=item I3. Arguments can be read from multiple files
+
+=item I4. Arguments can be read from command line
+
+=item I5. Arguments can be read from a table
+
+=item I6. Arguments can be read from the same file using #! (shebang)
+
+=item I7. Line oriented input as default (Quoting of special chars not needed)
+
+=back
+
+
+=head3 Manipulation of input
+
+=over
+
+=item M1. Composed command
+
+=item M2. Multiple arguments can fill up an execution line
+
+=item M3. Arguments can be put anywhere in the execution line
+
+=item M4. Multiple arguments can be put anywhere in the execution line
+
+=item M5. Arguments can be replaced with context
+
+=item M6. Input can be treated as the complete command line
+
+=back
+
+
+=head3 Outputs
+
+=over
+
+=item O1. Grouping output so output from different jobs do not mix
+
+=item O2. Send stderr (standard error) to stderr (standard error)
+
+=item O3. Send stdout (standard output) to stdout (standard output)
+
+=item O4. Order of output can be same as order of input
+
+=item O5. Stdout only contains stdout (standard output) from the command
+
+=item O6. Stderr only contains stderr (standard error) from the command
+
+=item O7. Buffering on disk
+
+=item O8. No temporary files left if killed
+
+=item O9. Test if disk runs full during run
+
+=item O10. Output of a line bigger than 4 GB
+
+=back
+
+
+=head3 Execution
+
+=over
+
+=item E1. Run jobs in parallel
+
+=item E2. List running jobs
+
+=item E3. Finish running jobs, but do not start new jobs
+
+=item E4. Number of running jobs can depend on number of cpus
+
+=item E5. Finish running jobs, but do not start new jobs after first failure
+
+=item E6. Number of running jobs can be adjusted while running
+
+=item E7. Only spawn new jobs if load is less than a limit
+
+=back
+
+
+=head3 Remote execution
+
+=over
+
+=item R1. Jobs can be run on remote computers
+
+=item R2. Basefiles can be transferred
+
+=item R3. Argument files can be transferred
+
+=item R4. Result files can be transferred
+
+=item R5. Cleanup of transferred files
+
+=item R6. No config files needed
+
+=item R7. Do not run more than SSHD's MaxStartups can handle
+
+=item R8. Configurable SSH command
+
+=item R9. Retry if connection breaks occasionally
+
+=back
+
+
+=head3 Semaphore
+
+=over
+
+=item S1. Possibility to work as a mutex
+
+=item S2. Possibility to work as a counting semaphore
+
+=back
+
+
+=head3 Legend
+
+=over
+
+=item - = no
+
+=item x = not applicable
+
+=item ID = yes
+
+=back
+
+As not every new version of the programs is tested, the table may be
+outdated. Please file a bug report if you find errors (see REPORTING
+BUGS).
+
+parallel:
+
+=over
+
+=item I1 I2 I3 I4 I5 I6 I7
+
+=item M1 M2 M3 M4 M5 M6
+
+=item O1 O2 O3 O4 O5 O6 O7 O8 O9 O10
+
+=item E1 E2 E3 E4 E5 E6 E7
+
+=item R1 R2 R3 R4 R5 R6 R7 R8 R9
+
+=item S1 S2
+
+=back
+
+
+=head2 DIFFERENCES BETWEEN xargs AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 - - - - -
+
+=item - M2 M3 - - -
+
+=item - O2 O3 - O5 O6
+
+=item E1 - - - - - -
+
+=item - - - - - x - - -
+
+=item - -
+
+=back
+
+B<xargs> offers some of the same possibilities as GNU B<parallel>.
+
+B<xargs> deals badly with special characters (such as space, \, ' and
+"). To see the problem try this:
+
+ touch important_file
+ touch 'not important_file'
+ ls not* | xargs rm
+ mkdir -p "My brother's 12\" records"
+ ls | xargs rmdir
+ touch 'c:\windows\system32\clfs.sys'
+ echo 'c:\windows\system32\clfs.sys' | xargs ls -l
+
+You can specify B<-0>, but many input generators are not optimized for
+using B<NUL> as separator but are optimized for B<newline> as
+separator. E.g. B<awk>, B<ls>, B<echo>, B<tar -v>, B<head> (requires
+using B<-z>), B<tail> (requires using B<-z>), B<sed> (requires using
+B<-z>), B<perl> (B<-0> and \0 instead of \n), B<locate> (requires
+using B<-0>), B<find> (requires using B<-print0>), B<grep> (requires
+using B<-z> or B<-Z>), B<sort> (requires using B<-z>).
+
+GNU B<parallel>'s newline separation can be emulated with:
+
+ cat | xargs -d "\n" -n1 command
+
+B<xargs> can run a given number of jobs in parallel, but has no
+support for running number-of-cpu-cores jobs in parallel.
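+
+A sketch of the workaround: B<nproc> (from GNU coreutils) can supply
+the core count that B<xargs> lacks a default for:
+
+  seq 8 | xargs -P"$(nproc)" -n1 echo
+  seq 8 | parallel echo   # defaults to one job per CPU core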
+
+B<xargs> has no support for grouping the output; therefore output may
+run together, e.g. the first half of a line is from one process and
+the last half of the line is from another process. The example
+B<Parallel grep> cannot be done reliably with B<xargs> because of
+this. To see this in action try:
+
+ parallel perl -e "'"'$a="1"."{}"x10000000;print $a,"\n"'"'" \
+ '>' {} ::: a b c d e f g h
+ # Serial = no mixing = the wanted result
+ # 'tr -s a-z' squeezes repeating letters into a single letter
+ echo a b c d e f g h | xargs -P1 -n1 grep 1 | tr -s a-z
+ # Compare to 8 jobs in parallel
+ parallel -kP8 -n1 grep 1 ::: a b c d e f g h | tr -s a-z
+ echo a b c d e f g h | xargs -P8 -n1 grep 1 | tr -s a-z
+ echo a b c d e f g h | xargs -P8 -n1 grep --line-buffered 1 | \
+ tr -s a-z
+
+Or try this:
+
+ slow_seq() {
+ echo Count to "$@"
+ seq "$@" |
+ perl -ne '$|=1; for(split//){ print; select($a,$a,$a,0.100);}'
+ }
+ export -f slow_seq
+ # Serial = no mixing = the wanted result
+ seq 8 | xargs -n1 -P1 -I {} bash -c 'slow_seq {}'
+ # Compare to 8 jobs in parallel
+ seq 8 | parallel -P8 slow_seq {}
+ seq 8 | xargs -n1 -P8 -I {} bash -c 'slow_seq {}'
+
+B<xargs> has no support for keeping the order of the output; therefore
+if running jobs in parallel using B<xargs> the output of the second
+job cannot be postponed till the first job is done.
+
+B<xargs> has no support for running jobs on remote computers.
+
+B<xargs> has no support for context replace, so you will have to create the
+arguments.
+
+If you use a replace string in B<xargs> (B<-I>) you cannot force
+B<xargs> to use more than one argument.
+
+Quoting in B<xargs> works like B<-q> in GNU B<parallel>. This means
+composed commands and redirection require using B<bash -c>.
+
+ ls | parallel "wc {} >{}.wc"
+ ls | parallel "echo {}; ls {}|wc"
+
+becomes (assuming you have 8 cores and that none of the filenames
+contain space, " or ').
+
+ ls | xargs -d "\n" -P8 -I {} bash -c "wc {} >{}.wc"
+ ls | xargs -d "\n" -P8 -I {} bash -c "echo {}; ls {}|wc"
+
+A more extreme example can be found on:
+https://unix.stackexchange.com/q/405552/
+
+https://www.gnu.org/software/findutils/
+
+
+=head2 DIFFERENCES BETWEEN find -exec AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - x - x -
+
+=item - M2 M3 - - - -
+
+=item - O2 O3 O4 O5 O6
+
+=item - - - - - - -
+
+=item - - - - - - - - -
+
+=item x x
+
+=back
+
+B<find -exec> offers some of the same possibilities as GNU B<parallel>.
+
+B<find -exec> only works on files. Processing other input (such as
+hosts or URLs) will require creating these inputs as files. B<find
+-exec> has no support for running commands in parallel.
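+
+A sketch of moving a serial B<find -exec> to parallel execution:
+
+  find . -name '*.log' -exec gzip {} \;
+  find . -name '*.log' -print0 | parallel -0 gzip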
+
+https://www.gnu.org/software/findutils/
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN make -j AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - - - - -
+
+=item - - - - - -
+
+=item O1 O2 O3 - x O6
+
+=item E1 - - - E5 -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<make -j> can run jobs in parallel, but requires a crafted Makefile
+to do this. That results in extra quoting to get filenames containing
+newlines to work correctly.
+
+B<make -j> computes a dependency graph before running jobs. Jobs run
+by GNU B<parallel> do not depend on each other.
+
+(Very early versions of GNU B<parallel> were coincidentally implemented
+using B<make -j>).
+
+https://www.gnu.org/software/make/
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN ppss AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 - - - - I7
+
+=item M1 - M3 - - M6
+
+=item O1 - - x - -
+
+=item E1 E2 ?E3 E4 - - -
+
+=item R1 R2 R3 R4 - - ?R7 ? ?
+
+=item - -
+
+=back
+
+B<ppss> is also a tool for running jobs in parallel.
+
+The output of B<ppss> is status information and thus not useful for
+using as input for another command. The output from the jobs are put
+into files.
+
+The argument replace string ($ITEM) cannot be changed. Arguments must
+be quoted - thus arguments containing special characters (space '"&!*)
+may cause problems. More than one argument is not supported. Filenames
+containing newlines are not processed correctly. When reading input
+from a file, null cannot be used as a terminator. B<ppss> needs to read
+the whole input file before starting any jobs.
+
+Output and status information is stored in ppss_dir and thus requires
+cleanup when completed. If the dir is not removed before running
+B<ppss> again it may cause nothing to happen as B<ppss> thinks the
+task is already done. GNU B<parallel> will normally not need cleaning
+up if running locally and will only need cleaning up if stopped
+abnormally and running remotely (B<--cleanup> may not complete if
+stopped abnormally). The example B<Parallel grep> would require extra
+postprocessing if written using B<ppss>.
+
+For remote systems PPSS requires 3 steps: config, deploy, and
+start. GNU B<parallel> only requires one step.
+
+=head3 EXAMPLES FROM ppss MANUAL
+
+Here are the examples from B<ppss>'s manual page with the equivalent
+using GNU B<parallel>:
+
+ 1$ ./ppss.sh standalone -d /path/to/files -c 'gzip '
+
+ 1$ find /path/to/files -type f | parallel gzip
+
+ 2$ ./ppss.sh standalone -d /path/to/files \
+ -c 'cp "$ITEM" /destination/dir '
+
+ 2$ find /path/to/files -type f | parallel cp {} /destination/dir
+
+ 3$ ./ppss.sh standalone -f list-of-urls.txt -c 'wget -q '
+
+ 3$ parallel -a list-of-urls.txt wget -q
+
+ 4$ ./ppss.sh standalone -f list-of-urls.txt -c 'wget -q "$ITEM"'
+
+ 4$ parallel -a list-of-urls.txt wget -q {}
+
+ 5$ ./ppss config -C config.cfg -c 'encode.sh ' -d /source/dir \
+ -m 192.168.1.100 -u ppss -k ppss-key.key -S ./encode.sh \
+ -n nodes.txt -o /some/output/dir --upload --download;
+ ./ppss deploy -C config.cfg
+ ./ppss start -C config
+
+ 5$ # parallel does not use configs. If you want
+ # a different username put it in nodes.txt: user@hostname
+ find source/dir -type f |
+ parallel --sshloginfile nodes.txt --trc {.}.mp3 \
+ lame -a {} -o {.}.mp3 --preset standard --quiet
+
+ 6$ ./ppss stop -C config.cfg
+
+ 6$ killall -TERM parallel
+
+ 7$ ./ppss pause -C config.cfg
+
+ 7$ Press: CTRL-Z or killall -SIGTSTP parallel
+
+ 8$ ./ppss continue -C config.cfg
+
+ 8$ Enter: fg or killall -SIGCONT parallel
+
+ 9$ ./ppss.sh status -C config.cfg
+
+ 9$ killall -SIGUSR2 parallel
+
+https://github.com/louwrentius/PPSS
+(Last checked: 2010-12)
+
+
+=head2 DIFFERENCES BETWEEN pexec AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 - I4 I5 - -
+
+=item M1 - M3 - - M6
+
+=item O1 O2 O3 - O5 O6
+
+=item E1 - - E4 - E6 -
+
+=item R1 - - - - R6 - - -
+
+=item S1 -
+
+=back
+
+B<pexec> is also a tool for running jobs in parallel.
+
+=head3 EXAMPLES FROM pexec MANUAL
+
+Here are the examples from B<pexec>'s info page with the equivalent
+using GNU B<parallel>:
+
+ 1$ pexec -o sqrt-%s.dat -p "$(seq 10)" -e NUM -n 4 -c -- \
+ 'echo "scale=10000;sqrt($NUM)" | bc'
+
+ 1$ seq 10 | parallel -j4 'echo "scale=10000;sqrt({})" | \
+ bc > sqrt-{}.dat'
+
+ 2$ pexec -p "$(ls myfiles*.ext)" -i %s -o %s.sort -- sort
+
+ 2$ ls myfiles*.ext | parallel sort {} ">{}.sort"
+
+ 3$ pexec -f image.list -n auto -e B -u star.log -c -- \
+ 'fistar $B.fits -f 100 -F id,x,y,flux -o $B.star'
+
+ 3$ parallel -a image.list \
+ 'fistar {}.fits -f 100 -F id,x,y,flux -o {}.star' 2>star.log
+
+ 4$ pexec -r *.png -e IMG -c -o - -- \
+ 'convert $IMG ${IMG%.png}.jpeg ; "echo $IMG: done"'
+
+ 4$ ls *.png | parallel 'convert {} {.}.jpeg; echo {}: done'
+
+ 5$ pexec -r *.png -i %s -o %s.jpg -c 'pngtopnm | pnmtojpeg'
+
+ 5$ ls *.png | parallel 'pngtopnm < {} | pnmtojpeg > {}.jpg'
+
+ 6$ for p in *.png ; do echo ${p%.png} ; done | \
+ pexec -f - -i %s.png -o %s.jpg -c 'pngtopnm | pnmtojpeg'
+
+ 6$ ls *.png | parallel 'pngtopnm < {} | pnmtojpeg > {.}.jpg'
+
+ 7$ LIST=$(for p in *.png ; do echo ${p%.png} ; done)
+ pexec -r $LIST -i %s.png -o %s.jpg -c 'pngtopnm | pnmtojpeg'
+
+ 7$ ls *.png | parallel 'pngtopnm < {} | pnmtojpeg > {.}.jpg'
+
+ 8$ pexec -n 8 -r *.jpg -y unix -e IMG -c \
+ 'pexec -j -m blockread -d $IMG | \
+ jpegtopnm | pnmscale 0.5 | pnmtojpeg | \
+ pexec -j -m blockwrite -s th_$IMG'
+
+ 8$ # Combining GNU B<parallel> and GNU B<sem>.
+ ls *jpg | parallel -j8 'sem --id blockread cat {} | jpegtopnm |' \
+ 'pnmscale 0.5 | pnmtojpeg | sem --id blockwrite cat > th_{}'
+
+ # If reading and writing is done to the same disk, this may be
+ # faster as only one process will be either reading or writing:
+ ls *jpg | parallel -j8 'sem --id diskio cat {} | jpegtopnm |' \
+ 'pnmscale 0.5 | pnmtojpeg | sem --id diskio cat > th_{}'
+
+https://www.gnu.org/software/pexec/
+(Last checked: 2010-12)
+
+
+=head2 DIFFERENCES BETWEEN xjobs AND GNU Parallel
+
+B<xjobs> is also a tool for running jobs in parallel. It only supports
+running jobs on your local computer.
+
+B<xjobs> deals badly with special characters just like B<xargs>. See
+the section B<DIFFERENCES BETWEEN xargs AND GNU Parallel>.
+
+=head3 EXAMPLES FROM xjobs MANUAL
+
+Here are the examples from B<xjobs>'s man page with the equivalent
+using GNU B<parallel>:
+
+ 1$ ls -1 *.zip | xjobs unzip
+
+ 1$ ls *.zip | parallel unzip
+
+ 2$ ls -1 *.zip | xjobs -n unzip
+
+ 2$ ls *.zip | parallel unzip >/dev/null
+
+ 3$ find . -name '*.bak' | xjobs gzip
+
+ 3$ find . -name '*.bak' | parallel gzip
+
+ 4$ ls -1 *.jar | sed 's/\(.*\)/\1 > \1.idx/' | xjobs jar tf
+
+ 4$ ls *.jar | parallel jar tf {} '>' {}.idx
+
+ 5$ xjobs -s script
+
+ 5$ cat script | parallel
+
+ 6$ mkfifo /var/run/my_named_pipe;
+ xjobs -s /var/run/my_named_pipe &
+ echo unzip 1.zip >> /var/run/my_named_pipe;
+ echo tar cf /backup/myhome.tar /home/me >> /var/run/my_named_pipe
+
+ 6$ mkfifo /var/run/my_named_pipe;
+ cat /var/run/my_named_pipe | parallel &
+ echo unzip 1.zip >> /var/run/my_named_pipe;
+ echo tar cf /backup/myhome.tar /home/me >> /var/run/my_named_pipe
+
+https://www.maier-komor.de/xjobs.html
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN prll AND GNU Parallel
+
+B<prll> is also a tool for running jobs in parallel. It does not
+support running jobs on remote computers.
+
+B<prll> encourages using BASH aliases and BASH functions instead of
+scripts. GNU B<parallel> supports scripts directly, functions if they
+are exported using B<export -f>, and aliases if using B<env_parallel>.
+
+B<prll> generates a lot of status information on stderr (standard
+error) which makes it harder to use the stderr (standard error) output
+of the job directly as input for another program.
+
+=head3 EXAMPLES FROM prll's MANUAL
+
+Here is the example from B<prll>'s man page with the equivalent
+using GNU B<parallel>:
+
+ 1$ prll -s 'mogrify -flip $1' *.jpg
+
+ 1$ parallel mogrify -flip ::: *.jpg
+
+https://github.com/exzombie/prll
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN dxargs AND GNU Parallel
+
+B<dxargs> is also a tool for running jobs in parallel.
+
+B<dxargs> does not deal well with more simultaneous jobs than SSHD's
+MaxStartups. B<dxargs> is only built for running jobs remotely, and
+does not support transferring files.
+
+https://web.archive.org/web/20120518070250/http://www.
+semicomplete.com/blog/geekery/distributed-xargs.html
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN mdm/middleman AND GNU Parallel
+
+B<middleman> (B<mdm>) is also a tool for running jobs in parallel.
+
+=head3 EXAMPLES FROM middleman's WEBSITE
+
+Here are the shellscripts of
+https://web.archive.org/web/20110728064735/http://mdm.
+berlios.de/usage.html ported to GNU B<parallel>:
+
+ 1$ seq 19 | parallel buffon -o - | sort -n > result
+ cat files | parallel cmd
+ find dir -execdir sem cmd {} \;
+
+https://github.com/cklin/mdm
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN xapply AND GNU Parallel
+
+B<xapply> can run jobs in parallel on the local computer.
+
+=head3 EXAMPLES FROM xapply's MANUAL
+
+Here are the examples from B<xapply>'s man page with the equivalent
+using GNU B<parallel>:
+
+ 1$ xapply '(cd %1 && make all)' */
+
+ 1$ parallel 'cd {} && make all' ::: */
+
+ 2$ xapply -f 'diff %1 ../version5/%1' manifest | more
+
+ 2$ parallel diff {} ../version5/{} < manifest | more
+
+ 3$ xapply -p/dev/null -f 'diff %1 %2' manifest1 checklist1
+
+ 3$ parallel --link diff {1} {2} :::: manifest1 checklist1
+
+ 4$ xapply 'indent' *.c
+
+ 4$ parallel indent ::: *.c
+
+ 5$ find ~ksb/bin -type f ! -perm -111 -print | \
+ xapply -f -v 'chmod a+x' -
+
+ 5$ find ~ksb/bin -type f ! -perm -111 -print | \
+ parallel -v chmod a+x
+
+ 6$ find */ -... | fmt 960 1024 | xapply -f -i /dev/tty 'vi' -
+
+ 6$ sh <(find */ -... | parallel -s 1024 echo vi)
+
+ 6$ find */ -... | parallel -s 1024 -Xuj1 vi
+
+ 7$ find ... | xapply -f -5 -i /dev/tty 'vi' - - - - -
+
+ 7$ sh <(find ... | parallel -n5 echo vi)
+
+ 7$ find ... | parallel -n5 -uj1 vi
+
+ 8$ xapply -fn "" /etc/passwd
+
+ 8$ parallel -k echo < /etc/passwd
+
+ 9$ tr ':' '\012' < /etc/passwd | \
+ xapply -7 -nf 'chown %1 %6' - - - - - - -
+
+ 9$ tr ':' '\012' < /etc/passwd | parallel -N7 chown {1} {6}
+
+ 10$ xapply '[ -d %1/RCS ] || echo %1' */
+
+ 10$ parallel '[ -d {}/RCS ] || echo {}' ::: */
+
+ 11$ xapply -f '[ -f %1 ] && echo %1' List | ...
+
+ 11$ parallel '[ -f {} ] && echo {}' < List | ...
+
+https://www.databits.net/~ksb/msrc/local/bin/xapply/xapply.html (Last
+checked: 2010-12)
+
+
+=head2 DIFFERENCES BETWEEN AIX apply AND GNU Parallel
+
+B<apply> can build command lines based on a template and arguments -
+very much like GNU B<parallel>. B<apply> does not run jobs in
+parallel. B<apply> does not use an argument separator (like B<:::>);
+instead the template must be the first argument.
+
+=head3 EXAMPLES FROM IBM's KNOWLEDGE CENTER
+
+Here are the examples from IBM's Knowledge Center and the
+corresponding command using GNU B<parallel>:
+
+=head4 To obtain results similar to those of the B<ls> command, enter:
+
+ 1$ apply echo *
+ 1$ parallel echo ::: *
+
+=head4 To compare the file named a1 to the file named b1, and
+the file named a2 to the file named b2, enter:
+
+ 2$ apply -2 cmp a1 b1 a2 b2
+ 2$ parallel -N2 cmp ::: a1 b1 a2 b2
+
+=head4 To run the B<who> command five times, enter:
+
+ 3$ apply -0 who 1 2 3 4 5
+ 3$ parallel -N0 who ::: 1 2 3 4 5
+
+=head4 To link all files in the current directory to the directory
+/usr/joe, enter:
+
+ 4$ apply 'ln %1 /usr/joe' *
+ 4$ parallel ln {} /usr/joe ::: *
+
+https://www-01.ibm.com/support/knowledgecenter/
+ssw_aix_71/com.ibm.aix.cmds1/apply.htm
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN paexec AND GNU Parallel
+
+B<paexec> can run jobs in parallel on both the local and remote computers.
+
+B<paexec> requires commands to print a blank line as the last
+output. This means you will have to write a wrapper for most programs.
+
+B<paexec> has a job dependency facility so a job can depend on another
+job to be executed successfully. Sort of a poor-man's B<make>.
+
+=head3 EXAMPLES FROM paexec's EXAMPLE CATALOG
+
+Here are the examples from B<paexec>'s example catalog with the equivalent
+using GNU B<parallel>:
+
+=head4 1_div_X_run
+
+ 1$ ../../paexec -s -l -c "`pwd`/1_div_X_cmd" -n +1 <<EOF [...]
+
+ 1$ parallel echo {} '|' `pwd`/1_div_X_cmd <<EOF [...]
+
+=head4 all_substr_run
+
+ 2$ ../../paexec -lp -c "`pwd`/all_substr_cmd" -n +3 <<EOF [...]
+
+ 2$ parallel echo {} '|' `pwd`/all_substr_cmd <<EOF [...]
+
+=head4 cc_wrapper_run
+
+ 3$ ../../paexec -c "env CC=gcc CFLAGS=-O2 `pwd`/cc_wrapper_cmd" \
+ -n 'host1 host2' \
+ -t '/usr/bin/ssh -x' <<EOF [...]
+
+ 3$ parallel echo {} '|' "env CC=gcc CFLAGS=-O2 `pwd`/cc_wrapper_cmd" \
+ -S host1,host2 <<EOF [...]
+
+ # This is not exactly the same, but avoids the wrapper
+ parallel gcc -O2 -c -o {.}.o {} \
+ -S host1,host2 <<EOF [...]
+
+=head4 toupper_run
+
+ 4$ ../../paexec -lp -c "`pwd`/toupper_cmd" -n +10 <<EOF [...]
+
+ 4$ parallel echo {} '|' ./toupper_cmd <<EOF [...]
+
+ # Without the wrapper:
+ parallel echo {} '| awk {print\ toupper\(\$0\)}' <<EOF [...]
+
+https://github.com/cheusov/paexec
+(Last checked: 2010-12)
+
+
+=head2 DIFFERENCES BETWEEN map(sitaramc) AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 - - I4 - - (I7)
+
+=item M1 (M2) M3 (M4) M5 M6
+
+=item - O2 O3 - O5 - - x x O10
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+(I7): Only under special circumstances. See below.
+
+(M2+M4): Only if there is a single replacement string.
+
+B<map> rejects input with special characters:
+
+ echo "The Cure" > My\ brother\'s\ 12\"\ records
+
+ ls | map 'echo %; wc %'
+
+It works with GNU B<parallel>:
+
+ ls | parallel 'echo {}; wc {}'
+
+Under some circumstances it also works with B<map>:
+
+ ls | map 'echo % works %'
+
+But tiny changes make it reject the input with special characters:
+
+ ls | map 'echo % does not work "%"'
+
+This means that many UTF-8 characters will be rejected. This is by
+design. From the web page: "As such, programs that I<quietly handle
+them, with no warnings at all,> are doing their users a disservice."
+
+B<map> delays each job by 0.01 s. This can be emulated by using
+B<parallel --delay 0.01>.
+
+B<map> prints '+' on stderr when a job starts, and '-' when a job
+finishes. This cannot be disabled. B<parallel> has B<--bar> if you
+need to see progress.
+
+B<map>'s replacement strings (% %D %B %E) can be simulated in GNU
+B<parallel> by putting this in B<~/.parallel/config>:
+
+ --rpl '%'
+ --rpl '%D $_=Q(::dirname($_));'
+ --rpl '%B s:.*/::;s:\.[^/.]+$::;'
+ --rpl '%E s:.*\.::'
+
+B<map> does not have an argument separator on the command line, but
+uses the first argument as command. This makes quoting harder, which
+in turn may affect readability. Compare:
+
+ map -p 2 'perl -ne '"'"'/^\S+\s+\S+$/ and print $ARGV,"\n"'"'" *
+
+ parallel -q perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"' ::: *
+
+B<map> can do multiple arguments with context replace, but not without
+context replace:
+
+ parallel --xargs echo 'BEGIN{'{}'}END' ::: 1 2 3
+
+ map "echo 'BEGIN{'%'}END'" 1 2 3
+
+B<map> has no support for grouping. So this gives the wrong results:
+
+ parallel perl -e '\$a=\"1{}\"x10000000\;print\ \$a,\"\\n\"' '>' {} \
+ ::: a b c d e f
+ ls -l a b c d e f
+ parallel -kP4 -n1 grep 1 ::: a b c d e f > out.par
+ map -n1 -p 4 'grep 1' a b c d e f > out.map-unbuf
+ map -n1 -p 4 'grep --line-buffered 1' a b c d e f > out.map-linebuf
+ map -n1 -p 1 'grep --line-buffered 1' a b c d e f > out.map-serial
+ ls -l out*
+ md5sum out*
+
+=head3 EXAMPLES FROM map's WEBSITE
+
+Here are the examples from B<map>'s web page with the equivalent using
+GNU B<parallel>:
+
+ 1$ ls *.gif | map convert % %B.png # default max-args: 1
+
+ 1$ ls *.gif | parallel convert {} {.}.png
+
+ 2$ map "mkdir %B; tar -C %B -xf %" *.tgz # default max-args: 1
+
+ 2$ parallel 'mkdir {.}; tar -C {.} -xf {}' ::: *.tgz
+
+ 3$ ls *.gif | map cp % /tmp # default max-args: 100
+
+ 3$ ls *.gif | parallel -X cp {} /tmp
+
+ 4$ ls *.tar | map -n 1 tar -xf %
+
+ 4$ ls *.tar | parallel tar -xf
+
+ 5$ map "cp % /tmp" *.tgz
+
+ 5$ parallel cp {} /tmp ::: *.tgz
+
+ 6$ map "du -sm /home/%/mail" alice bob carol
+
+ 6$ parallel "du -sm /home/{}/mail" ::: alice bob carol
+ or if you prefer running a single job with multiple args:
+ 6$ parallel -Xj1 "du -sm /home/{}/mail" ::: alice bob carol
+
+ 7$ cat /etc/passwd | map -d: 'echo user %1 has shell %7'
+
+ 7$ cat /etc/passwd | parallel --colsep : 'echo user {1} has shell {7}'
+
+ 8$ export MAP_MAX_PROCS=$(( `nproc` / 2 ))
+
+ 8$ export PARALLEL=-j50%
+
+https://github.com/sitaramc/map
+(Last checked: 2020-05)
+
+
+=head2 DIFFERENCES BETWEEN ladon AND GNU Parallel
+
+B<ladon> can run multiple jobs on files in parallel.
+
+B<ladon> only works on files and the only way to specify files is
+using a quoted glob string (such as \*.jpg). It is not possible to
+list the files manually.
+
+As replacement strings it uses FULLPATH DIRNAME BASENAME EXT RELDIR
+RELPATH
+
+These can be simulated using GNU B<parallel> by putting this in
+B<~/.parallel/config>:
+
+ --rpl 'FULLPATH $_=Q($_);chomp($_=qx{readlink -f $_});'
+ --rpl 'DIRNAME $_=Q(::dirname($_));chomp($_=qx{readlink -f $_});'
+ --rpl 'BASENAME s:.*/::;s:\.[^/.]+$::;'
+ --rpl 'EXT s:.*\.::'
+ --rpl 'RELDIR $_=Q($_);chomp(($_,$c)=qx{readlink -f $_;pwd});
+ s:\Q$c/\E::;$_=::dirname($_);'
+ --rpl 'RELPATH $_=Q($_);chomp(($_,$c)=qx{readlink -f $_;pwd});
+ s:\Q$c/\E::;'
+
+B<ladon> deals badly with filenames containing " and newline, and it
+fails for output larger than 200k:
+
+ ladon '*' -- seq 36000 | wc
+
+=head3 EXAMPLES FROM ladon MANUAL
+
+It is assumed that the '--rpl's above are put in B<~/.parallel/config>
+and that it is run under a shell that supports '**' globbing (such as B<zsh>):
+
+ 1$ ladon "**/*.txt" -- echo RELPATH
+
+ 1$ parallel echo RELPATH ::: **/*.txt
+
+ 2$ ladon "~/Documents/**/*.pdf" -- shasum FULLPATH >hashes.txt
+
+ 2$ parallel shasum FULLPATH ::: ~/Documents/**/*.pdf >hashes.txt
+
+ 3$ ladon -m thumbs/RELDIR "**/*.jpg" -- convert FULLPATH \
+ -thumbnail 100x100^ -gravity center -extent 100x100 \
+ thumbs/RELPATH
+
+ 3$ parallel mkdir -p thumbs/RELDIR\; convert FULLPATH
+ -thumbnail 100x100^ -gravity center -extent 100x100 \
+ thumbs/RELPATH ::: **/*.jpg
+
+ 4$ ladon "~/Music/*.wav" -- lame -V 2 FULLPATH DIRNAME/BASENAME.mp3
+
+ 4$ parallel lame -V 2 FULLPATH DIRNAME/BASENAME.mp3 ::: ~/Music/*.wav
+
+https://github.com/danielgtaylor/ladon
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN jobflow AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 - - - - - I7
+
+=item - - M3 - - (M6)
+
+=item O1 O2 O3 - O5 O6 (O7) - - O10
+
+=item E1 - - - - E6 -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+
+B<jobflow> can run multiple jobs in parallel.
+
+Just like with B<xargs>, output from B<jobflow> jobs running in
+parallel mixes together by default. B<jobflow> can buffer into files
+with B<-buffered> (placed in /run/shm), but these are not cleaned up if
+B<jobflow> dies unexpectedly (e.g. by Ctrl-C). If the total output is
+big (in the order of RAM+swap) it can cause the system to slow to a
+crawl and eventually run out of memory.
+
+Just like with B<xargs>, redirection and composed commands require
+wrapping with B<bash -c>.
+
+Input lines can be at most 4096 bytes.
+
+B<jobflow> is faster than GNU B<parallel> but around 6 times slower
+than B<parallel-bash>.
+
+B<jobflow> has no equivalent for B<--pipe>, or B<--sshlogin>.
+
+B<jobflow> makes it possible to set resource limits on the running
+jobs. This can be emulated by GNU B<parallel> using B<bash>'s B<ulimit>:
+
+ jobflow -limits=mem=100M,cpu=3,fsize=20M,nofiles=300 myjob
+
+ parallel 'ulimit -v 102400 -t 3 -f 204800 -n 300 myjob'
+
+
+=head3 EXAMPLES FROM jobflow README
+
+ 1$ cat things.list | jobflow -threads=8 -exec ./mytask {}
+
+ 1$ cat things.list | parallel -j8 ./mytask {}
+
+ 2$ seq 100 | jobflow -threads=100 -exec echo {}
+
+ 2$ seq 100 | parallel -j100 echo {}
+
+ 3$ cat urls.txt | jobflow -threads=32 -exec wget {}
+
+ 3$ cat urls.txt | parallel -j32 wget {}
+
+ 4$ find . -name '*.bmp' | \
+ jobflow -threads=8 -exec bmp2jpeg {.}.bmp {.}.jpg
+
+ 4$ find . -name '*.bmp' | \
+ parallel -j8 bmp2jpeg {.}.bmp {.}.jpg
+
+ 5$ seq 100 | jobflow -skip 10 -count 10
+
+ 5$ seq 100 | parallel --filter '{1} > 10 and {1} <= 20' echo
+
+ 5$ seq 100 | parallel echo '{= $_>10 and $_<=20 or skip() =}'
+
+https://github.com/rofl0r/jobflow
+(Last checked: 2022-05)
+
+
+=head2 DIFFERENCES BETWEEN gargs AND GNU Parallel
+
+B<gargs> can run multiple jobs in parallel.
+
+Older versions cache output in memory. This causes it to be extremely
+slow when the output is larger than the physical RAM, and can cause
+the system to run out of memory.
+
+See more details on this in B<man parallel_design>.
+
+Newer versions cache output in files, but leave files in $TMPDIR if it
+is killed.
+
+Output to stderr (standard error) is changed if the command fails.
+
+=head3 EXAMPLES FROM gargs WEBSITE
+
+ 1$ seq 12 -1 1 | gargs -p 4 -n 3 "sleep {0}; echo {1} {2}"
+
+ 1$ seq 12 -1 1 | parallel -P 4 -n 3 "sleep {1}; echo {2} {3}"
+
+ 2$ cat t.txt | gargs --sep "\s+" \
+ -p 2 "echo '{0}:{1}-{2}' full-line: \'{}\'"
+
+ 2$ cat t.txt | parallel --colsep "\\s+" \
+ -P 2 "echo '{1}:{2}-{3}' full-line: \'{}\'"
+
+https://github.com/brentp/gargs
+(Last checked: 2016-08)
+
+
+=head2 DIFFERENCES BETWEEN orgalorg AND GNU Parallel
+
+B<orgalorg> can run the same job on multiple machines. This is related
+to B<--onall> and B<--nonall>.
+
+B<orgalorg> supports entering the SSH password - provided it is the
+same for all servers. GNU B<parallel> advocates using B<ssh-agent>
+instead, but it is possible to emulate B<orgalorg>'s behavior by
+setting SSHPASS and by using B<--ssh "sshpass ssh">.
+
+To make the emulation easier, make a simple alias:
+
+ alias par_emul="parallel -j0 --ssh 'sshpass ssh' --nonall --tag --lb"
+
+If you want to supply a password run:
+
+ SSHPASS=`ssh-askpass`
+
+or set the password directly:
+
+ SSHPASS=P4$$w0rd!
+
+If the above is set up you can then do:
+
+ orgalorg -o frontend1 -o frontend2 -p -C uptime
+ par_emul -S frontend1 -S frontend2 uptime
+
+ orgalorg -o frontend1 -o frontend2 -p -C top -bid 1
+ par_emul -S frontend1 -S frontend2 top -bid 1
+
+ orgalorg -o frontend1 -o frontend2 -p -er /tmp -n \
+ 'md5sum /tmp/bigfile' -S bigfile
+ par_emul -S frontend1 -S frontend2 --basefile bigfile \
+ --workdir /tmp md5sum /tmp/bigfile
+
+B<orgalorg> has a progress indicator for the transferring of a
+file. GNU B<parallel> does not.
+
+https://github.com/reconquest/orgalorg
+(Last checked: 2016-08)
+
+
+=head2 DIFFERENCES BETWEEN Rust parallel(mmstick) AND GNU Parallel
+
+Rust parallel focuses on speed. It is almost as fast as B<xargs>, but
+not as fast as B<parallel-bash>. It implements a few features from GNU
+B<parallel>, but lacks many functions. All these fail:
+
+ # Read arguments from file
+ parallel -a file echo
+ # Changing the delimiter
+ parallel -d _ echo ::: a_b_c_
+
+These do something different from GNU B<parallel>
+
+ # -q to protect quoted $ and space
+ parallel -q perl -e '$a=shift; print "$a"x10000000' ::: a b c
+ # Generation of combination of inputs
+ parallel echo {1} {2} ::: red green blue ::: S M L XL XXL
+ # {= perl expression =} replacement string
+ parallel echo '{= s/new/old/ =}' ::: my.new your.new
+ # --pipe
+ seq 100000 | parallel --pipe wc
+ # linked arguments
+ parallel echo ::: S M L :::+ sml med lrg ::: R G B :::+ red grn blu
+ # Run different shell dialects
+ zsh -c 'parallel echo \={} ::: zsh && true'
+ csh -c 'parallel echo \$\{\} ::: shell && true'
+ bash -c 'parallel echo \$\({}\) ::: pwd && true'
+ # Rust parallel does not start before the last argument is read
+ (seq 10; sleep 5; echo 2) | time parallel -j2 'sleep 2; echo'
+ tail -f /var/log/syslog | parallel echo
+
+Most of the examples from the book GNU Parallel 2018 do not work, thus
+Rust parallel is not close to being a compatible replacement.
+
+Rust parallel has no remote facilities.
+
+It uses /tmp/parallel for tmp files and does not clean up if
+terminated abruptly. If another user on the system uses Rust parallel,
+then /tmp/parallel will have the wrong permissions and Rust parallel
+will fail. A malicious user can set up the right permissions and
+symlink the output file to one of the user's files; the next time the
+user runs Rust parallel it will overwrite this file.
+
+ attacker$ mkdir /tmp/parallel
+ attacker$ chmod a+rwX /tmp/parallel
+ # Symlink to the file the attacker wants to zero out
+ attacker$ ln -s ~victim/.important-file /tmp/parallel/stderr_1
+ victim$ seq 1000 | parallel echo
+ # This file is now overwritten with stderr from 'echo'
+ victim$ cat ~victim/.important-file
+
+If /tmp/parallel runs full during the run, Rust parallel does not
+report this, but finishes with success - thereby risking data loss.
+
+https://github.com/mmstick/parallel
+(Last checked: 2016-08)
+
+
+=head2 DIFFERENCES BETWEEN Rush AND GNU Parallel
+
+B<rush> (https://github.com/shenwei356/rush) is written in Go and
+based on B<gargs>.
+
+Just like GNU B<parallel>, B<rush> buffers in temporary files. But
+unlike GNU B<parallel>, B<rush> does not clean up if the process dies
+abnormally.
+
+B<rush> has some string manipulations that can be emulated by putting
+this into ~/.parallel/config (/ is used instead of %, and % is used
+instead of ^ as that is closer to bash's ${var%postfix}):
+
+ --rpl '{:} s:(\.[^/]+)*$::'
+ --rpl '{:%([^}]+?)} s:$$1(\.[^/]+)*$::'
+ --rpl '{/:%([^}]*?)} s:.*/(.*)$$1(\.[^/]+)*$:$1:'
+ --rpl '{/:} s:(.*/)?([^/.]+)(\.[^/]+)*$:$2:'
+ --rpl '{@(.*?)} /$$1/ and $_=$1;'
+
+=head3 EXAMPLES FROM rush's WEBSITE
+
+Here are the examples from B<rush>'s website with the equivalent
+command in GNU B<parallel>.
+
+B<1. Simple run, quoting is not necessary>
+
+ 1$ seq 1 3 | rush echo {}
+
+ 1$ seq 1 3 | parallel echo {}
+
+B<2. Read data from file (`-i`)>
+
+ 2$ rush echo {} -i data1.txt -i data2.txt
+
+ 2$ cat data1.txt data2.txt | parallel echo {}
+
+B<3. Keep output order (`-k`)>
+
+ 3$ seq 1 3 | rush 'echo {}' -k
+
+ 3$ seq 1 3 | parallel -k echo {}
+
+
+B<4. Timeout (`-t`)>
+
+ 4$ time seq 1 | rush 'sleep 2; echo {}' -t 1
+
+ 4$ time seq 1 | parallel --timeout 1 'sleep 2; echo {}'
+
+B<5. Retry (`-r`)>
+
+ 5$ seq 1 | rush 'python unexisted_script.py' -r 1
+
+ 5$ seq 1 | parallel --retries 2 'python unexisted_script.py'
+
+Use B<-u> to see it is really run twice:
+
+ 5$ seq 1 | parallel -u --retries 2 'python unexisted_script.py'
+
+B<6. Dirname (`{/}`) and basename (`{%}`) and remove custom
+suffix (`{^suffix}`)>
+
+ 6$ echo dir/file_1.txt.gz | rush 'echo {/} {%} {^_1.txt.gz}'
+
+ 6$ echo dir/file_1.txt.gz |
+ parallel --plus echo {//} {/} {%_1.txt.gz}
+
+B<7. Get basename, and remove last (`{.}`) or any (`{:}`) extension>
+
+ 7$ echo dir.d/file.txt.gz | rush 'echo {.} {:} {%.} {%:}'
+
+ 7$ echo dir.d/file.txt.gz | parallel 'echo {.} {:} {/.} {/:}'
+
+B<8. Job ID, combine fields index and other replacement strings>
+
+ 8$ echo 12 file.txt dir/s_1.fq.gz |
+ rush 'echo job {#}: {2} {2.} {3%:^_1}'
+
+ 8$ echo 12 file.txt dir/s_1.fq.gz |
+ parallel --colsep ' ' 'echo job {#}: {2} {2.} {3/:%_1}'
+
+B<9. Capture submatch using regular expression (`{@regexp}`)>
+
+ 9$ echo read_1.fq.gz | rush 'echo {@(.+)_\d}'
+
+ 9$ echo read_1.fq.gz | parallel 'echo {@(.+)_\d}'
+
+B<10. Custom field delimiter (`-d`)>
+
+ 10$ echo a=b=c | rush 'echo {1} {2} {3}' -d =
+
+ 10$ echo a=b=c | parallel -d = echo {1} {2} {3}
+
+B<11. Send multi-lines to every command (`-n`)>
+
+ 11$ seq 5 | rush -n 2 -k 'echo "{}"; echo'
+
+ 11$ seq 5 |
+ parallel -n 2 -k \
+ 'echo {=-1 $_=join"\n",@arg[1..$#arg] =}; echo'
+
+ 11$ seq 5 | rush -n 2 -k 'echo "{}"; echo' -J ' '
+
+ 11$ seq 5 | parallel -n 2 -k 'echo {}; echo'
+
+
+B<12. Custom record delimiter (`-D`), note that empty records are not used.>
+
+ 12$ echo a b c d | rush -D " " -k 'echo {}'
+
+ 12$ echo a b c d | parallel -d " " -k 'echo {}'
+
+ 12$ echo abcd | rush -D "" -k 'echo {}'
+
+ Cannot be done by GNU Parallel
+
+ 12$ cat fasta.fa
+ >seq1
+ tag
+ >seq2
+ cat
+ gat
+ >seq3
+ attac
+ a
+ cat
+
+ 12$ cat fasta.fa | rush -D ">" \
+ 'echo FASTA record {#}: name: {1} sequence: {2}' -k -d "\n"
+ # rush fails to join the multiline sequences
+
+ 12$ cat fasta.fa | (read -n1 ignore_first_char;
+ parallel -d '>' --colsep '\n' echo FASTA record {#}: \
+ name: {1} sequence: '{=2 $_=join"",@arg[2..$#arg]=}'
+ )
+
+B<13. Assign value to variable, like `awk -v` (`-v`)>
+
+ 13$ seq 1 |
+ rush 'echo Hello, {fname} {lname}!' -v fname=Wei -v lname=Shen
+
+ 13$ seq 1 |
+ parallel -N0 \
+ 'fname=Wei; lname=Shen; echo Hello, ${fname} ${lname}!'
+
+ 13$ for var in a b; do \
+       seq 1 3 | rush -k -v var=$var 'echo var: {var}, data: {}'; \
+     done
+
+In GNU B<parallel> you would typically do:
+
+ 13$ seq 1 3 | parallel -k echo var: {1}, data: {2} ::: a b :::: -
+
+If you I<really> want the var:
+
+ 13$ seq 1 3 |
+ parallel -k var={1} ';echo var: $var, data: {}' ::: a b :::: -
+
+If you I<really> want the B<for>-loop:
+
+ 13$ for var in a b; do
+ export var;
+ seq 1 3 | parallel -k 'echo var: $var, data: {}';
+ done
+
+Contrary to B<rush> this also works if the value is complex like:
+
+ My brother's 12" records
+
+
+B<14. Preset variable (`-v`), avoid repeatedly writing verbose replacement strings>
+
+ 14$ # naive way
+ echo read_1.fq.gz | rush 'echo {:^_1} {:^_1}_2.fq.gz'
+
+ 14$ echo read_1.fq.gz | parallel 'echo {:%_1} {:%_1}_2.fq.gz'
+
+ 14$ # macro + removing suffix
+ echo read_1.fq.gz |
+ rush -v p='{:^_1}' 'echo {p} {p}_2.fq.gz'
+
+ 14$ echo read_1.fq.gz |
+ parallel 'p={:%_1}; echo $p ${p}_2.fq.gz'
+
+ 14$ # macro + regular expression
+ echo read_1.fq.gz | rush -v p='{@(.+?)_\d}' 'echo {p} {p}_2.fq.gz'
+
+ 14$ echo read_1.fq.gz | parallel 'p={@(.+?)_\d}; echo $p ${p}_2.fq.gz'
+
+Contrary to B<rush> GNU B<parallel> works with complex values:
+
+ 14$ echo "My brother's 12\"read_1.fq.gz" |
+ parallel 'p={@(.+?)_\d}; echo $p ${p}_2.fq.gz'
+
+B<15. Interrupt jobs by `Ctrl-C`, rush will stop unfinished commands and exit.>
+
+ 15$ seq 1 20 | rush 'sleep 1; echo {}'
+ ^C
+
+ 15$ seq 1 20 | parallel 'sleep 1; echo {}'
+ ^C
+
+B<16. Continue/resume jobs (`-c`). When some jobs failed (by
+execution failure, timeout, or canceling by user with `Ctrl + C`),
+please switch flag `-c/--continue` on and run again, so that `rush`
+can save successful commands and ignore them in I<NEXT> run.>
+
+ 16$ seq 1 3 | rush 'sleep {}; echo {}' -t 3 -c
+ cat successful_cmds.rush
+ seq 1 3 | rush 'sleep {}; echo {}' -t 3 -c
+
+ 16$ seq 1 3 | parallel --joblog mylog --timeout 2 \
+ 'sleep {}; echo {}'
+ cat mylog
+ seq 1 3 | parallel --joblog mylog --retry-failed \
+ 'sleep {}; echo {}'
+
+Multi-line jobs:
+
+ 16$ seq 1 3 | rush 'sleep {}; echo {}; \
+ echo finish {}' -t 3 -c -C finished.rush
+ cat finished.rush
+ seq 1 3 | rush 'sleep {}; echo {}; \
+ echo finish {}' -t 3 -c -C finished.rush
+
+ 16$ seq 1 3 |
+ parallel --joblog mylog --timeout 2 'sleep {}; echo {}; \
+ echo finish {}'
+ cat mylog
+ seq 1 3 |
+ parallel --joblog mylog --retry-failed 'sleep {}; echo {}; \
+ echo finish {}'
+
+B<17. A comprehensive example: downloading 1K+ pages given by
+three URL list files using `phantomjs save_page.js` (some page
+contents are dynamically generated by Javascript, so `wget` does not
+work). Here I set max jobs number (`-j`) as `20`, each job has a max
+running time (`-t`) of `60` seconds and `3` retry changes
+(`-r`). Continue flag `-c` is also switched on, so we can continue
+unfinished jobs. Luckily, it's accomplished in one run :)>
+
+ 17$ for f in $(seq 2014 2016); do \
+ /bin/rm -rf $f; mkdir -p $f; \
+ cat $f.html.txt | rush -v d=$f -d = \
+ 'phantomjs save_page.js "{}" > {d}/{3}.html' \
+ -j 20 -t 60 -r 3 -c; \
+ done
+
+GNU B<parallel> can append to an existing joblog with '+':
+
+ 17$ rm mylog
+ for f in $(seq 2014 2016); do
+ /bin/rm -rf $f; mkdir -p $f;
+ cat $f.html.txt |
+ parallel -j20 --timeout 60 --retries 4 --joblog +mylog \
+ --colsep = \
+ phantomjs save_page.js {1}={2}={3} '>' $f/{3}.html
+ done
+
+B<18. A bioinformatics example: mapping with `bwa`, and
+processing result with `samtools`:>
+
+ 18$ ref=ref/xxx.fa
+ threads=25
+ ls -d raw.cluster.clean.mapping/* \
+ | rush -v ref=$ref -v j=$threads -v p='{}/{%}' \
+ 'bwa mem -t {j} -M -a {ref} {p}_1.fq.gz {p}_2.fq.gz >{p}.sam;\
+ samtools view -bS {p}.sam > {p}.bam; \
+ samtools sort -T {p}.tmp -@ {j} {p}.bam -o {p}.sorted.bam; \
+ samtools index {p}.sorted.bam; \
+ samtools flagstat {p}.sorted.bam > {p}.sorted.bam.flagstat; \
+ /bin/rm {p}.bam {p}.sam;' \
+ -j 2 --verbose -c -C mapping.rush
+
+GNU B<parallel> would use a function:
+
+ 18$ ref=ref/xxx.fa
+ export ref
+ thr=25
+ export thr
+ bwa_sam() {
+ p="$1"
+ bam="$p".bam
+ sam="$p".sam
+ sortbam="$p".sorted.bam
+ bwa mem -t $thr -M -a $ref ${p}_1.fq.gz ${p}_2.fq.gz > "$sam"
+ samtools view -bS "$sam" > "$bam"
+ samtools sort -T ${p}.tmp -@ $thr "$bam" -o "$sortbam"
+ samtools index "$sortbam"
+ samtools flagstat "$sortbam" > "$sortbam".flagstat
+ /bin/rm "$bam" "$sam"
+ }
+ export -f bwa_sam
+ ls -d raw.cluster.clean.mapping/* |
+ parallel -j 2 --verbose --joblog mylog bwa_sam
+
+=head3 Other B<rush> features
+
+B<rush> has:
+
+=over 4
+
+=item * B<awk -v> like custom defined variables (B<-v>)
+
+With GNU B<parallel> you would simply set a shell variable:
+
+ parallel 'v={}; echo "$v"' ::: foo
+ echo foo | rush -v v={} 'echo {v}'
+
+Also B<rush> does not like special chars. So these B<do not work>:
+
+ echo does not work | rush -v v=\" 'echo {v}'
+ echo "My brother's 12\" records" | rush -v v={} 'echo {v}'
+
+Whereas the corresponding GNU B<parallel> version works:
+
+ parallel 'v=\"; echo "$v"' ::: works
+ parallel 'v={}; echo "$v"' ::: "My brother's 12\" records"
+
+=item * Exit on first error(s) (-e)
+
+This is called B<--halt now,fail=1> (or shorter: B<--halt 2>) when
+used with GNU B<parallel>.
+
+=item * Settable records sending to every command (B<-n>, default 1)
+
+This is also called B<-n> in GNU B<parallel>.
+
+=item * Practical replacement strings
+
+=over 4
+
+=item {:} remove any extension
+
+With GNU B<parallel> this can be emulated by:
+
+ parallel --plus echo '{/\..*/}' ::: foo.ext.bar.gz
+
+=item {^suffix}, remove suffix
+
+With GNU B<parallel> this can be emulated by:
+
+ parallel --plus echo '{%.bar.gz}' ::: foo.ext.bar.gz
+
+=item {@regexp}, capture submatch using regular expression
+
+With GNU B<parallel> this can be emulated by:
+
+ parallel --rpl '{@(.*?)} /$$1/ and $_=$1;' \
+ echo '{@\d_(.*).gz}' ::: 1_foo.gz
+
+=item {%.}, {%:}, basename without extension
+
+With GNU B<parallel> this can be emulated by:
+
+ parallel echo '{= s:.*/::;s/\..*// =}' ::: dir/foo.bar.gz
+
+And if you need it often, you define a B<--rpl> in
+B<$HOME/.parallel/config>:
+
+ --rpl '{%.} s:.*/::;s/\..*//'
+ --rpl '{%:} s:.*/::;s/\..*//'
+
+Then you can use them as:
+
+ parallel echo {%.} {%:} ::: dir/foo.bar.gz
+
+=back
+
+=item * Preset variable (macro)
+
+E.g.
+
+ echo foosuffix | rush -v p={^suffix} 'echo {p}_new_suffix'
+
+With GNU B<parallel> this can be emulated by:
+
+ echo foosuffix |
+ parallel --plus 'p={%suffix}; echo ${p}_new_suffix'
+
+Unlike B<rush>, GNU B<parallel> works fine if the input contains
+double spaces, ' and ":
+
+ echo "1'6\" foosuffix" |
+ parallel --plus 'p={%suffix}; echo "${p}"_new_suffix'
+
+
+=item * Multi-line commands
+
+While you I<can> use multi-line commands in GNU B<parallel>, GNU
+B<parallel> discourages them to improve readability. In most cases a
+multi-line command can be written as a function:
+
+ seq 1 3 |
+ parallel --timeout 2 --joblog my.log 'sleep {}; echo {}; \
+ echo finish {}'
+
+Could be written as:
+
+ doit() {
+ sleep "$1"
+ echo "$1"
+ echo finish "$1"
+ }
+ export -f doit
+ seq 1 3 | parallel --timeout 2 --joblog my.log doit
+
+The failed commands can be resumed with:
+
+ seq 1 3 |
+ parallel --resume-failed --joblog my.log 'sleep {}; echo {};\
+ echo finish {}'
+
+=back
+
+https://github.com/shenwei356/rush
+(Last checked: 2017-05)
+
+
+=head2 DIFFERENCES BETWEEN ClusterSSH AND GNU Parallel
+
+ClusterSSH solves a different problem than GNU B<parallel>.
+
+ClusterSSH opens a terminal window for each computer and using a
+master window you can run the same command on all the computers. This
+is typically used for administrating several computers that are almost
+identical.
+
+GNU B<parallel> runs the same (or different) commands with different
+arguments in parallel possibly using remote computers to help
+computing. If more than one computer is listed in B<-S> GNU B<parallel> may
+only use one of these (e.g. if there are 8 jobs to be run and one
+computer has 8 cores).
+
+GNU B<parallel> can be used as a poor-man's version of ClusterSSH:
+
+B<parallel --nonall -S server-a,server-b do_stuff foo bar>
+
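+To see which server produced which output, B<--tag> can be added:
+
+  parallel --nonall --tag -S server-a,server-b uptime
+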
+https://github.com/duncs/clusterssh
+(Last checked: 2010-12)
+
+
+=head2 DIFFERENCES BETWEEN coshell AND GNU Parallel
+
+B<coshell> only accepts full commands on standard input. Any quoting
+needs to be done by the user.
+
+Commands are run in B<sh> so any B<bash>/B<tcsh>/B<zsh> specific
+syntax will not work.
+
+Output can be buffered by using B<-d>. Output is buffered in memory,
+so big output can cause swapping and therefore be terribly slow, or
+even make the system run out of memory.
+
+https://github.com/gdm85/coshell
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN spread AND GNU Parallel
+
+B<spread> runs commands on all directories.
+
+It can be emulated with GNU B<parallel> using this Bash function:
+
+ spread() {
+ _cmds() {
+ perl -e '$"=" && ";print "@ARGV"' "cd {}" "$@"
+ }
+ parallel $(_cmds "$@")'|| echo exit status $?' ::: */
+ }
+
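+For example, to run B<git pull> in every subdirectory (each argument
+must be a full command, as the function joins them with ' && '):
+
+  spread 'git pull'
+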
+This works except for the B<--exclude> option.
+
+(Last checked: 2017-11)
+
+
+=head2 DIFFERENCES BETWEEN pyargs AND GNU Parallel
+
+B<pyargs> deals badly with input containing spaces. It buffers stdout,
+but not stderr. It buffers in RAM. {} does not work as replacement
+string. It does not support running functions.
+
+B<pyargs> does not support composed commands if run with B<--lines>,
+and fails on B<pyargs traceroute gnu.org fsf.org>.
+
+=head3 Examples
+
+ seq 5 | pyargs -P50 -L seq
+ seq 5 | parallel -P50 --lb seq
+
+ seq 5 | pyargs -P50 --mark -L seq
+ seq 5 | parallel -P50 --lb \
+ --tagstring OUTPUT'[{= $_=$job->replaced() =}]' seq
+ # Similar, but not precisely the same
+ seq 5 | parallel -P50 --lb --tag seq
+
+ seq 5 | pyargs -P50 --mark command
+ # Somewhat longer with GNU Parallel due to the special
+ # --mark formatting
+ cmd="$(echo "command" | parallel --shellquote)"
+ wrap_cmd() {
+ echo "MARK $cmd $@================================" >&3
+ echo "OUTPUT START[$cmd $@]:"
+ eval $cmd "$@"
+ echo "OUTPUT END[$cmd $@]"
+ }
+ (seq 5 | env_parallel -P2 wrap_cmd) 3>&1
+ # Similar, but not exactly the same
+ seq 5 | parallel -t --tag command
+
+ (echo '1 2 3';echo 4 5 6) | pyargs --stream seq
+ (echo '1 2 3';echo 4 5 6) | perl -pe 's/\n/ /' |
+ parallel -r -d' ' seq
+ # Similar, but not exactly the same
+ parallel seq ::: 1 2 3 4 5 6
+
+https://github.com/robertblackwell/pyargs
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN concurrently AND GNU Parallel
+
+B<concurrently> runs jobs in parallel.
+
+The output is prepended with the job number, and may be incomplete:
+
+ $ concurrently 'seq 100000' | (sleep 3;wc -l)
+ 7165
+
+When pretty printing it caches output in memory. Whether or not
+output is cached, output from different jobs mixes.
+
+There seems to be no way of making a template command and have
+B<concurrently> fill that with different args. The full commands must
+be given on the command line.
+
+There is also no way of controlling how many jobs should be run in
+parallel at a time - i.e. "number of jobslots". Instead all jobs are
+simply started in parallel.
+
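+With GNU B<parallel> you get both a command template and a limit on
+the number of jobslots:
+
+  parallel -j4 'echo {}' ::: a b c d e f
+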
+https://github.com/kimmobrunfeldt/concurrently
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN map(soveran) AND GNU Parallel
+
+B<map> does not run jobs in parallel by default. The README suggests using:
+
+ ... | map t 'sleep $t && say done &'
+
+But this fails if more jobs are run in parallel than the number of
+available processes. Since there is no support for parallelization in
+B<map> itself, the output also mixes:
+
+ seq 10 | map i 'echo start-$i && sleep 0.$i && echo end-$i &'
+
+The major difference is that GNU B<parallel> is built for parallelization
+and B<map> is not. So GNU B<parallel> has lots of ways of dealing with the
+issues that parallelization raises:
+
+=over 4
+
+=item *
+
+Keep the number of processes manageable
+
+=item *
+
+Make sure output does not mix
+
+=item *
+
+Make Ctrl-C kill all running processes
+
+=back
+
+=head3 EXAMPLES FROM map's WEBSITE
+
+Here are the 5 examples converted to GNU Parallel:
+
+ 1$ ls *.c | map f 'foo $f'
+ 1$ ls *.c | parallel foo
+
+ 2$ ls *.c | map f 'foo $f; bar $f'
+ 2$ ls *.c | parallel 'foo {}; bar {}'
+
+ 3$ cat urls | map u 'curl -O $u'
+ 3$ cat urls | parallel curl -O
+
+ 4$ printf "1\n1\n1\n" | map t 'sleep $t && say done'
+ 4$ printf "1\n1\n1\n" | parallel 'sleep {} && say done'
+ 4$ parallel 'sleep {} && say done' ::: 1 1 1
+
+ 5$ printf "1\n1\n1\n" | map t 'sleep $t && say done &'
+ 5$ printf "1\n1\n1\n" | parallel -j0 'sleep {} && say done'
+ 5$ parallel -j0 'sleep {} && say done' ::: 1 1 1
+
+https://github.com/soveran/map
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN loop AND GNU Parallel
+
+B<loop> mixes stdout and stderr:
+
+ loop 'ls /no-such-file' >/dev/null
+
+B<loop>'s replacement string B<$ITEM> does not quote strings:
+
+ echo 'two spaces' | loop 'echo $ITEM'
+
+B<loop> cannot run functions:
+
+ myfunc() { echo joe; }
+ export -f myfunc
+ loop 'myfunc this fails'
+
+=head3 EXAMPLES FROM loop's WEBSITE
+
+Some of the examples from https://github.com/Miserlou/Loop/ can be
+emulated with GNU B<parallel>:
+
+ # A couple of functions will make the code easier to read
+ $ loopy() {
+ yes | parallel -uN0 -j1 "$@"
+ }
+ $ export -f loopy
+ $ time_out() {
+ parallel -uN0 -q --timeout "$@" ::: 1
+ }
+ $ match() {
+ perl -0777 -ne 'grep /'"$1"'/,$_ and print or exit 1'
+ }
+ $ export -f match
+
+ $ loop 'ls' --every 10s
+ $ loopy --delay 10s ls
+
+ $ loop 'touch $COUNT.txt' --count-by 5
+ $ loopy touch '{= $_=seq()*5 =}'.txt
+
+ $ loop --until-contains 200 -- \
+      ./get_response_code.sh --site mysite.biz
+ $ loopy --halt now,success=1 \
+ './get_response_code.sh --site mysite.biz | match 200'
+
+ $ loop './poke_server' --for-duration 8h
+ $ time_out 8h loopy ./poke_server
+
+ $ loop './poke_server' --until-success
+ $ loopy --halt now,success=1 ./poke_server
+
+ $ cat files_to_create.txt | loop 'touch $ITEM'
+ $ cat files_to_create.txt | parallel touch {}
+
+ $ loop 'ls' --for-duration 10min --summary
+ # --joblog is somewhat more verbose than --summary
+ $ time_out 10m loopy --joblog my.log ./poke_server; cat my.log
+
+ $ loop 'echo hello'
+ $ loopy echo hello
+
+ $ loop 'echo $COUNT'
+ # GNU Parallel counts from 1
+ $ loopy echo {#}
+ # Counting from 0 can be forced
+ $ loopy echo '{= $_=seq()-1 =}'
+
+ $ loop 'echo $COUNT' --count-by 2
+ $ loopy echo '{= $_=2*(seq()-1) =}'
+
+ $ loop 'echo $COUNT' --count-by 2 --offset 10
+ $ loopy echo '{= $_=10+2*(seq()-1) =}'
+
+ $ loop 'echo $COUNT' --count-by 1.1
+ # GNU Parallel rounds 3.3000000000000003 to 3.3
+ $ loopy echo '{= $_=1.1*(seq()-1) =}'
+
+ $ loop 'echo $COUNT $ACTUALCOUNT' --count-by 2
+ $ loopy echo '{= $_=2*(seq()-1) =} {#}'
+
+ $ loop 'echo $COUNT' --num 3 --summary
+ # --joblog is somewhat more verbose than --summary
+ $ seq 3 | parallel --joblog my.log echo; cat my.log
+
+ $ loop 'ls -foobarbatz' --num 3 --summary
+ # --joblog is somewhat more verbose than --summary
+ $ seq 3 | parallel --joblog my.log -N0 ls -foobarbatz; cat my.log
+
+ $ loop 'echo $COUNT' --count-by 2 --num 50 --only-last
+ # Can be emulated by running 2 jobs
+ $ seq 49 | parallel echo '{= $_=2*(seq()-1) =}' >/dev/null
+ $ echo 50| parallel echo '{= $_=2*(seq()-1) =}'
+
+ $ loop 'date' --every 5s
+ $ loopy --delay 5s date
+
+ $ loop 'date' --for-duration 8s --every 2s
+ $ time_out 8s loopy --delay 2s date
+
+ $ loop 'date -u' --until-time '2018-05-25 20:50:00' --every 5s
+ $ seconds=$((`date -d 2018-05-25T20:50:00 +%s` - `date +%s`))s
+ $ time_out $seconds loopy --delay 5s date -u
+
+ $ loop 'echo $RANDOM' --until-contains "666"
+ $ loopy --halt now,success=1 'echo $RANDOM | match 666'
+
+ $ loop 'if (( RANDOM % 2 )); then
+ (echo "TRUE"; true);
+ else
+ (echo "FALSE"; false);
+ fi' --until-success
+ $ loopy --halt now,success=1 'if (( $RANDOM % 2 )); then
+ (echo "TRUE"; true);
+ else
+ (echo "FALSE"; false);
+ fi'
+
+ $ loop 'if (( RANDOM % 2 )); then
+ (echo "TRUE"; true);
+ else
+ (echo "FALSE"; false);
+ fi' --until-error
+ $ loopy --halt now,fail=1 'if (( $RANDOM % 2 )); then
+ (echo "TRUE"; true);
+ else
+ (echo "FALSE"; false);
+ fi'
+
+ $ loop 'date' --until-match "(\d{4})"
+ $ loopy --halt now,success=1 'date | match [0-9][0-9][0-9][0-9]'
+
+ $ loop 'echo $ITEM' --for red,green,blue
+ $ parallel echo ::: red green blue
+
+ $ cat /tmp/my-list-of-files-to-create.txt | loop 'touch $ITEM'
+ $ cat /tmp/my-list-of-files-to-create.txt | parallel touch
+
+ $ ls | loop 'cp $ITEM $ITEM.bak'; ls
+ $ ls | parallel cp {} {}.bak; ls
+
+ $ loop 'echo $ITEM | tr a-z A-Z' -i
+ $ parallel 'echo {} | tr a-z A-Z'
+ # Or more efficiently:
+ $ parallel --pipe tr a-z A-Z
+
+ $ loop 'echo $ITEM' --for "`ls`"
+ $ parallel echo {} ::: "`ls`"
+
+ $ ls | loop './my_program $ITEM' --until-success;
+ $ ls | parallel --halt now,success=1 ./my_program {}
+
+ $ ls | loop './my_program $ITEM' --until-fail;
+ $ ls | parallel --halt now,fail=1 ./my_program {}
+
+ $ ./deploy.sh;
+ loop 'curl -sw "%{http_code}" http://coolwebsite.biz' \
+ --every 5s --until-contains 200;
+ ./announce_to_slack.sh
+ $ ./deploy.sh;
+ loopy --delay 5s --halt now,success=1 \
+ 'curl -sw "%{http_code}" http://coolwebsite.biz | match 200';
+ ./announce_to_slack.sh
+
+ $ loop "ping -c 1 mysite.com" --until-success; ./do_next_thing
+ $ loopy --halt now,success=1 ping -c 1 mysite.com; ./do_next_thing
+
+ $ ./create_big_file -o my_big_file.bin;
+ loop 'ls' --until-contains 'my_big_file.bin';
+ ./upload_big_file my_big_file.bin
+ # inotifywait is a better tool to detect file system changes.
+ # It can even make sure the file is complete
+ # so you are not uploading an incomplete file
+ $ inotifywait -qmre MOVED_TO -e CLOSE_WRITE --format %w%f . |
+ grep my_big_file.bin
+
+ $ ls | loop 'cp $ITEM $ITEM.bak'
+ $ ls | parallel cp {} {}.bak
+
+ $ loop './do_thing.sh' --every 15s --until-success --num 5
+ $ parallel --retries 5 --delay 15s ::: ./do_thing.sh
+
+https://github.com/Miserlou/Loop/
+(Last checked: 2018-10)
+
+
+=head2 DIFFERENCES BETWEEN lorikeet AND GNU Parallel
+
+B<lorikeet> can run jobs in parallel. It does this based on a
+dependency graph described in a file, so this is similar to B<make>.
+
+https://github.com/cetra3/lorikeet
+(Last checked: 2018-10)
+
+
+=head2 DIFFERENCES BETWEEN spp AND GNU Parallel
+
+B<spp> can run jobs in parallel. B<spp> does not use a command
+template to generate the jobs, but requires jobs to be in a
+file. Output from the jobs mixes.
+
+https://github.com/john01dav/spp
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN paral AND GNU Parallel
+
+B<paral> prints a lot of status information and stores the output from
+the commands run into files. This means it cannot be used in the
+middle of a pipe like this:
+
+ paral "echo this" "echo does not" "echo work" | wc
+
+Instead it puts the output into files named like
+B<out_#_I<command>.out.log>. To get a very similar behaviour with GNU
+B<parallel> use B<--results
+'out_{#}_{=s/[^\sa-z_0-9]//g;s/\s+/_/g=}.log' --eta>
+
+B<paral> only takes arguments on the command line and each argument
+should be a full command. Thus it does not use command templates.
+
+This limits how many jobs it can run in total, because they all need
+to fit on a single command line.
+
+B<paral> has no support for running jobs remotely.
+
+=head3 EXAMPLES FROM README.markdown
+
+The examples from B<README.markdown> and the corresponding command run
+with GNU B<parallel> (B<--results
+'out_{#}_{=s/[^\sa-z_0-9]//g;s/\s+/_/g=}.log' --eta> is omitted from
+the GNU B<parallel> command):
+
+ 1$ paral "command 1" "command 2 --flag" "command arg1 arg2"
+ 1$ parallel ::: "command 1" "command 2 --flag" "command arg1 arg2"
+
+ 2$ paral "sleep 1 && echo c1" "sleep 2 && echo c2" \
+ "sleep 3 && echo c3" "sleep 4 && echo c4" "sleep 5 && echo c5"
+ 2$ parallel ::: "sleep 1 && echo c1" "sleep 2 && echo c2" \
+ "sleep 3 && echo c3" "sleep 4 && echo c4" "sleep 5 && echo c5"
+ # Or shorter:
+ parallel "sleep {} && echo c{}" ::: {1..5}
+
+ 3$ paral -n=0 "sleep 5 && echo c5" "sleep 4 && echo c4" \
+ "sleep 3 && echo c3" "sleep 2 && echo c2" "sleep 1 && echo c1"
+ 3$ parallel ::: "sleep 5 && echo c5" "sleep 4 && echo c4" \
+ "sleep 3 && echo c3" "sleep 2 && echo c2" "sleep 1 && echo c1"
+ # Or shorter:
+ parallel -j0 "sleep {} && echo c{}" ::: 5 4 3 2 1
+
+ 4$ paral -n=1 "sleep 5 && echo c5" "sleep 4 && echo c4" \
+ "sleep 3 && echo c3" "sleep 2 && echo c2" "sleep 1 && echo c1"
+ 4$ parallel -j1 "sleep {} && echo c{}" ::: 5 4 3 2 1
+
+ 5$ paral -n=2 "sleep 5 && echo c5" "sleep 4 && echo c4" \
+ "sleep 3 && echo c3" "sleep 2 && echo c2" "sleep 1 && echo c1"
+ 5$ parallel -j2 "sleep {} && echo c{}" ::: 5 4 3 2 1
+
+ 6$ paral -n=5 "sleep 5 && echo c5" "sleep 4 && echo c4" \
+ "sleep 3 && echo c3" "sleep 2 && echo c2" "sleep 1 && echo c1"
+ 6$ parallel -j5 "sleep {} && echo c{}" ::: 5 4 3 2 1
+
+ 7$ paral -n=1 "echo a && sleep 0.5 && echo b && sleep 0.5 && \
+ echo c && sleep 0.5 && echo d && sleep 0.5 && \
+ echo e && sleep 0.5 && echo f && sleep 0.5 && \
+ echo g && sleep 0.5 && echo h"
+ 7$ parallel ::: "echo a && sleep 0.5 && echo b && sleep 0.5 && \
+ echo c && sleep 0.5 && echo d && sleep 0.5 && \
+ echo e && sleep 0.5 && echo f && sleep 0.5 && \
+ echo g && sleep 0.5 && echo h"
+
+https://github.com/amattn/paral
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN concurr AND GNU Parallel
+
+B<concurr> is built to run jobs in parallel using a client/server
+model.
+
+=head3 EXAMPLES FROM README.md
+
+The examples from B<README.md>:
+
+ 1$ concurr 'echo job {#} on slot {%}: {}' : arg1 arg2 arg3 arg4
+ 1$ parallel 'echo job {#} on slot {%}: {}' ::: arg1 arg2 arg3 arg4
+
+ 2$ concurr 'echo job {#} on slot {%}: {}' :: file1 file2 file3
+ 2$ parallel 'echo job {#} on slot {%}: {}' :::: file1 file2 file3
+
+ 3$ concurr 'echo {}' < input_file
+ 3$ parallel 'echo {}' < input_file
+
+ 4$ cat file | concurr 'echo {}'
+ 4$ cat file | parallel 'echo {}'
+
+B<concurr> deals badly with empty input files and with output larger
+than 64 KB.
+
+https://github.com/mmstick/concurr
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN lesser-parallel AND GNU Parallel
+
+B<lesser-parallel> is the inspiration for B<parallel --embed>. Both
+B<lesser-parallel> and B<parallel --embed> define bash functions that
+can be included as part of a bash script to run jobs in parallel.
+
+B<lesser-parallel> implements a few of the replacement strings, but
+hardly any options, whereas B<parallel --embed> gives you the full
+GNU B<parallel> experience.
+
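+To include GNU B<parallel> as part of your own script (here
+B<myscript.sh> is an arbitrary name):
+
+  parallel --embed > myscript.sh
+  # Append your own commands at the end of myscript.sh
+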
+https://github.com/kou1okada/lesser-parallel
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN npm-parallel AND GNU Parallel
+
+B<npm-parallel> can run npm tasks in parallel.
+
+There are no examples and very little documentation, so it is hard to
+compare to GNU B<parallel>.
+
+https://github.com/spion/npm-parallel
+(Last checked: 2019-01)
+
+
+=head2 DIFFERENCES BETWEEN machma AND GNU Parallel
+
+B<machma> runs tasks in parallel. It gives time stamped
+output. It buffers in RAM.
+
+=head3 EXAMPLES FROM README.md
+
+The examples from README.md:
+
+ 1$ # Put shorthand for timestamp in config for the examples
+ echo '--rpl '\
+ \''{time} $_=::strftime("%Y-%m-%d %H:%M:%S",localtime())'\' \
+ > ~/.parallel/machma
+ echo '--line-buffer --tagstring "{#} {time} {}"' \
+ >> ~/.parallel/machma
+
+ 2$ find . -iname '*.jpg' |
+ machma -- mogrify -resize 1200x1200 -filter Lanczos {}
+ find . -iname '*.jpg' |
+ parallel --bar -Jmachma mogrify -resize 1200x1200 \
+ -filter Lanczos {}
+
+ 3$ cat /tmp/ips | machma -p 2 -- ping -c 2 -q {}
+ 3$ cat /tmp/ips | parallel -j2 -Jmachma ping -c 2 -q {}
+
+ 4$ cat /tmp/ips |
+ machma -- sh -c 'ping -c 2 -q $0 > /dev/null && echo alive' {}
+ 4$ cat /tmp/ips |
+ parallel -Jmachma 'ping -c 2 -q {} > /dev/null && echo alive'
+
+ 5$ find . -iname '*.jpg' |
+ machma --timeout 5s -- mogrify -resize 1200x1200 \
+ -filter Lanczos {}
+ 5$ find . -iname '*.jpg' |
+ parallel --timeout 5s --bar mogrify -resize 1200x1200 \
+ -filter Lanczos {}
+
+ 6$ find . -iname '*.jpg' -print0 |
+ machma --null -- mogrify -resize 1200x1200 -filter Lanczos {}
+ 6$ find . -iname '*.jpg' -print0 |
+ parallel --null --bar mogrify -resize 1200x1200 \
+ -filter Lanczos {}
+
+https://github.com/fd0/machma
+(Last checked: 2019-06)
+
+
+=head2 DIFFERENCES BETWEEN interlace AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - I2 I3 I4 - - -
+
+=item M1 - M3 - - M6
+
+=item - O2 O3 - - - - x x
+
+=item E1 E2 - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<interlace> is built for network analysis to run network tools in parallel.
+
+B<interlace> does not buffer output, so output from different jobs mixes.
+
+The overhead for each target is O(n*n), so with 1000 targets it
+becomes very slow with an overhead in the order of 500ms/target.
+
+=head3 EXAMPLES FROM interlace's WEBSITE
+
+Using B<prips> most of the examples from
+https://github.com/codingo/Interlace can be run with GNU B<parallel>:
+
+Blocker
+
+ commands.txt:
+ mkdir -p _output_/_target_/scans/
+ _blocker_
+ nmap _target_ -oA _output_/_target_/scans/_target_-nmap
+ interlace -tL ./targets.txt -cL commands.txt -o $output
+
+ parallel -a targets.txt \
+ mkdir -p $output/{}/scans/\; nmap {} -oA $output/{}/scans/{}-nmap
+
+Blocks
+
+ commands.txt:
+ _block:nmap_
+ mkdir -p _target_/output/scans/
+ nmap _target_ -oN _target_/output/scans/_target_-nmap
+ _block:nmap_
+ nikto --host _target_
+ interlace -tL ./targets.txt -cL commands.txt
+
+ _nmap() {
+ mkdir -p $1/output/scans/
+ nmap $1 -oN $1/output/scans/$1-nmap
+ }
+ export -f _nmap
+ parallel ::: _nmap "nikto --host" :::: targets.txt
+
+Run Nikto Over Multiple Sites
+
+ interlace -tL ./targets.txt -threads 5 \
+ -c "nikto --host _target_ > ./_target_-nikto.txt" -v
+
+  parallel -a targets.txt -P5 nikto --host {} \> ./{}-nikto.txt
+
+Run Nikto Over Multiple Sites and Ports
+
+ interlace -tL ./targets.txt -threads 5 -c \
+ "nikto --host _target_:_port_ > ./_target_-_port_-nikto.txt" \
+ -p 80,443 -v
+
+ parallel -P5 nikto --host {1}:{2} \> ./{1}-{2}-nikto.txt \
+ :::: targets.txt ::: 80 443
+
+Run a List of Commands against Target Hosts
+
+ commands.txt:
+ nikto --host _target_:_port_ > _output_/_target_-nikto.txt
+ sslscan _target_:_port_ > _output_/_target_-sslscan.txt
+ testssl.sh _target_:_port_ > _output_/_target_-testssl.txt
+ interlace -t example.com -o ~/Engagements/example/ \
+ -cL ./commands.txt -p 80,443
+
+ parallel --results ~/Engagements/example/{2}:{3}{1} {1} {2}:{3} \
+ ::: "nikto --host" sslscan testssl.sh ::: example.com ::: 80 443
+
+CIDR notation with an application that doesn't support it
+
+ interlace -t 192.168.12.0/24 -c "vhostscan _target_ \
+ -oN _output_/_target_-vhosts.txt" -o ~/scans/ -threads 50
+
+ prips 192.168.12.0/24 |
+ parallel -P50 vhostscan {} -oN ~/scans/{}-vhosts.txt
+
+Glob notation with an application that doesn't support it
+
+ interlace -t 192.168.12.* -c "vhostscan _target_ \
+ -oN _output_/_target_-vhosts.txt" -o ~/scans/ -threads 50
+
+ # Glob is not supported in prips
+ prips 192.168.12.0/24 |
+ parallel -P50 vhostscan {} -oN ~/scans/{}-vhosts.txt
+
+Dash (-) notation with an application that doesn't support it
+
+ interlace -t 192.168.12.1-15 -c \
+ "vhostscan _target_ -oN _output_/_target_-vhosts.txt" \
+ -o ~/scans/ -threads 50
+
+ # Dash notation is not supported in prips
+ prips 192.168.12.1 192.168.12.15 |
+ parallel -P50 vhostscan {} -oN ~/scans/{}-vhosts.txt
+
+Threading Support for an application that doesn't support it
+
+ interlace -tL ./target-list.txt -c \
+ "vhostscan -t _target_ -oN _output_/_target_-vhosts.txt" \
+ -o ~/scans/ -threads 50
+
+ cat ./target-list.txt |
+ parallel -P50 vhostscan -t {} -oN ~/scans/{}-vhosts.txt
+
+alternatively
+
+ ./vhosts-commands.txt:
+ vhostscan -t $target -oN _output_/_target_-vhosts.txt
+ interlace -cL ./vhosts-commands.txt -tL ./target-list.txt \
+ -threads 50 -o ~/scans
+
+ ./vhosts-commands.txt:
+ vhostscan -t "$1" -oN "$2"
+ parallel -P50 ./vhosts-commands.txt {} ~/scans/{}-vhosts.txt \
+ :::: ./target-list.txt
+
+Exclusions
+
+ interlace -t 192.168.12.0/24 -e 192.168.12.0/26 -c \
+ "vhostscan _target_ -oN _output_/_target_-vhosts.txt" \
+ -o ~/scans/ -threads 50
+
+ prips 192.168.12.0/24 | grep -xv -Ff <(prips 192.168.12.0/26) |
+ parallel -P50 vhostscan {} -oN ~/scans/{}-vhosts.txt
+
+Run Nikto Using Multiple Proxies
+
+ interlace -tL ./targets.txt -pL ./proxies.txt -threads 5 -c \
+ "nikto --host _target_:_port_ -useproxy _proxy_ > \
+ ./_target_-_port_-nikto.txt" -p 80,443 -v
+
+ parallel -j5 \
+ "nikto --host {1}:{2} -useproxy {3} > ./{1}-{2}-nikto.txt" \
+ :::: ./targets.txt ::: 80 443 :::: ./proxies.txt
+
+https://github.com/codingo/Interlace
+(Last checked: 2019-09)
+
+
+=head2 DIFFERENCES BETWEEN otonvm Parallel AND GNU Parallel
+
+I have been unable to get the code to run at all. It seems unfinished.
+
+https://github.com/otonvm/Parallel
+(Last checked: 2019-02)
+
+
+=head2 DIFFERENCES BETWEEN k-bx par AND GNU Parallel
+
+B<par> requires Haskell to work. This limits the number of platforms
+this can work on.
+
+B<par> does line buffering in memory. The memory usage is 3x the
+longest line (compared to 1x for B<parallel --lb>). Commands must be
+given as arguments. There is no template.
+
+These are the examples from https://github.com/k-bx/par with the
+corresponding GNU B<parallel> command.
+
+ par "echo foo; sleep 1; echo foo; sleep 1; echo foo" \
+ "echo bar; sleep 1; echo bar; sleep 1; echo bar" && echo "success"
+ parallel --lb ::: "echo foo; sleep 1; echo foo; sleep 1; echo foo" \
+ "echo bar; sleep 1; echo bar; sleep 1; echo bar" && echo "success"
+
+ par "echo foo; sleep 1; foofoo" \
+ "echo bar; sleep 1; echo bar; sleep 1; echo bar" && echo "success"
+ parallel --lb --halt 1 ::: "echo foo; sleep 1; foofoo" \
+ "echo bar; sleep 1; echo bar; sleep 1; echo bar" && echo "success"
+
+ par "PARPREFIX=[fooechoer] echo foo" "PARPREFIX=[bar] echo bar"
+ parallel --lb --colsep , --tagstring {1} {2} \
+ ::: "[fooechoer],echo foo" "[bar],echo bar"
+
+ par --succeed "foo" "bar" && echo 'wow'
+ parallel "foo" "bar"; true && echo 'wow'
+
+https://github.com/k-bx/par
+(Last checked: 2019-02)
+
+=head2 DIFFERENCES BETWEEN parallelshell AND GNU Parallel
+
+B<parallelshell> does not allow for composed commands:
+
+ # This does not work
+ parallelshell 'echo foo;echo bar' 'echo baz;echo quuz'
+
+Instead you have to wrap that in a shell:
+
+ parallelshell 'sh -c "echo foo;echo bar"' 'sh -c "echo baz;echo quuz"'
+
+It buffers output in RAM. All commands must be given on the command
+line and all commands are started in parallel at the same time. This
+will cause the system to freeze if there are so many jobs that there
+is not enough memory to run them all at the same time.
+
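+With GNU B<parallel> the composed commands work as-is:
+
+  parallel ::: 'echo foo;echo bar' 'echo baz;echo quuz'
+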
+https://github.com/keithamus/parallelshell
+(Last checked: 2019-02)
+
+https://github.com/darkguy2008/parallelshell
+(Last checked: 2019-03)
+
+
+=head2 DIFFERENCES BETWEEN shell-executor AND GNU Parallel
+
+B<shell-executor> does not allow for composed commands:
+
+ # This does not work
+ sx 'echo foo;echo bar' 'echo baz;echo quuz'
+
+Instead you have to wrap that in a shell:
+
+ sx 'sh -c "echo foo;echo bar"' 'sh -c "echo baz;echo quuz"'
+
+It buffers output in RAM. All commands must be given on the command
+line and all commands are started in parallel at the same time. This
+will cause the system to freeze if there are so many jobs that there
+is not enough memory to run them all at the same time.
+
+https://github.com/royriojas/shell-executor
+(Last checked: 2019-02)
+
+
+=head2 DIFFERENCES BETWEEN non-GNU par AND GNU Parallel
+
+B<par> buffers in memory to avoid mixing of jobs. It takes 1s per 1
+million output lines.
+
+B<par> needs to have all commands before starting the first job. The
+jobs are read from stdin (standard input) so any quoting will have to
+be done by the user.
+
+Stdout (standard output) is prepended with o:. Stderr (standard error)
+is sent to stdout (standard output) and prepended with e:.
+
+For short jobs with little output B<par> is 20% faster than GNU
+B<parallel> and 60% slower than B<xargs>.
+
+https://github.com/UnixJunkie/PAR
+
+https://savannah.nongnu.org/projects/par
+(Last checked: 2019-02)
+
+
+=head2 DIFFERENCES BETWEEN fd AND GNU Parallel
+
+B<fd> does not support composed commands, so commands must be wrapped
+in B<sh -c>.
+
+It buffers output in RAM.
+
+It only takes file names from the filesystem as input (similar to B<find>).
+
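+A rough equivalence (a sketch assuming B<fd>'s B<-x>/B<--exec>
+option, which runs the given command once per search result):
+
+  fd -e txt -x gzip --best
+  find . -name '*.txt' | parallel gzip --best
+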
+https://github.com/sharkdp/fd
+(Last checked: 2019-02)
+
+
+=head2 DIFFERENCES BETWEEN lateral AND GNU Parallel
+
+B<lateral> is very similar to B<sem>: It takes a single command and
+runs it in the background. The design means that output from parallel
+running jobs may mix. If it dies unexpectedly it leaves a socket in
+~/.lateral/socket.PID.
+
+B<lateral> deals badly with too long command lines. This makes the
+B<lateral> server crash:
+
+ lateral run echo `seq 100000| head -c 1000k`
+
+Any options will be read by B<lateral> so this does not work
+(B<lateral> interprets the B<-l>):
+
+ lateral run ls -l
+
+Composed commands do not work:
+
+ lateral run pwd ';' ls
+
+Functions do not work:
+
+ myfunc() { echo a; }
+ export -f myfunc
+ lateral run myfunc
+
+Running B<emacs> in the terminal causes the parent shell to die:
+
+ echo '#!/bin/bash' > mycmd
+ echo emacs -nw >> mycmd
+ chmod +x mycmd
+ lateral start
+ lateral run ./mycmd
+
+Here are the examples from https://github.com/akramer/lateral with the
+corresponding GNU B<sem> and GNU B<parallel> commands:
+
+ 1$ lateral start
+ for i in $(cat /tmp/names); do
+ lateral run -- some_command $i
+ done
+ lateral wait
+
+ 1$ for i in $(cat /tmp/names); do
+ sem some_command $i
+ done
+ sem --wait
+
+ 1$ parallel some_command :::: /tmp/names
+
+ 2$ lateral start
+ for i in $(seq 1 100); do
+ lateral run -- my_slow_command < workfile$i > /tmp/logfile$i
+ done
+ lateral wait
+
+ 2$ for i in $(seq 1 100); do
+ sem my_slow_command < workfile$i > /tmp/logfile$i
+ done
+ sem --wait
+
+ 2$ parallel 'my_slow_command < workfile{} > /tmp/logfile{}' \
+ ::: {1..100}
+
+ 3$ lateral start -p 0 # yup, it will just queue tasks
+ for i in $(seq 1 100); do
+ lateral run -- command_still_outputs_but_wont_spam inputfile$i
+ done
+ # command output spam can commence
+ lateral config -p 10; lateral wait
+
+ 3$ for i in $(seq 1 100); do
+ echo "command inputfile$i" >> joblist
+ done
+ parallel -j 10 :::: joblist
+
+ 3$ echo 1 > /tmp/njobs
+ parallel -j /tmp/njobs command inputfile{} \
+ ::: {1..100} &
+ echo 10 >/tmp/njobs
+ wait
+
+https://github.com/akramer/lateral
+(Last checked: 2019-03)
+
+
+=head2 DIFFERENCES BETWEEN with-this AND GNU Parallel
+
+The examples from https://github.com/amritb/with-this.git and the
+corresponding GNU B<parallel> command:
+
+ with -v "$(cat myurls.txt)" "curl -L this"
+  parallel curl -L :::: myurls.txt
+
+ with -v "$(cat myregions.txt)" \
+ "aws --region=this ec2 describe-instance-status"
+ parallel aws --region={} ec2 describe-instance-status \
+ :::: myregions.txt
+
+ with -v "$(ls)" "kubectl --kubeconfig=this get pods"
+ ls | parallel kubectl --kubeconfig={} get pods
+
+ with -v "$(ls | grep config)" "kubectl --kubeconfig=this get pods"
+ ls | grep config | parallel kubectl --kubeconfig={} get pods
+
+ with -v "$(echo {1..10})" "echo 123"
+ parallel -N0 echo 123 ::: {1..10}
+
+Stderr is merged with stdout. B<with-this> buffers in RAM. It uses 3x
+the output size, so you cannot have output larger than 1/3rd the
+amount of RAM. The input values cannot contain spaces. Composed
+commands do not work.
+
+B<with-this> gives some additional information, so the output has to
+be cleaned before piping it to the next command.
+
+https://github.com/amritb/with-this.git
+(Last checked: 2019-03)
+
+
+=head2 DIFFERENCES BETWEEN Tollef's parallel (moreutils) AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - I4 - - I7
+
+=item - - M3 - - M6
+
+=item - O2 O3 - O5 O6 - x x
+
+=item E1 - - - - - E7
+
+=item - x x x x x x x x
+
+=item - -
+
+=back
+
+=head3 EXAMPLES FROM Tollef's parallel MANUAL
+
+B<Tollef> parallel sh -c "echo hi; sleep 2; echo bye" -- 1 2 3
+
+B<GNU> parallel "echo hi; sleep 2; echo bye" ::: 1 2 3
+
+B<Tollef> parallel -j 3 ufraw -o processed -- *.NEF
+
+B<GNU> parallel -j 3 ufraw -o processed ::: *.NEF
+
+B<Tollef> parallel -j 3 -- ls df "echo hi"
+
+B<GNU> parallel -j 3 ::: ls df "echo hi"
+
+(Last checked: 2019-08)
+
+=head2 DIFFERENCES BETWEEN rargs AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 - - - - - I7
+
+=item - - M3 M4 - -
+
+=item - O2 O3 - O5 O6 - O8 -
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<rargs> has elegant ways of doing named regexp capture and field ranges.
+
+With GNU B<parallel> you can use B<--rpl> to get functionality
+similar to regexp capture, and use B<join> and B<@arg> to get the
+field ranges. But the syntax is longer. This:
+
+ --rpl '{r(\d+)\.\.(\d+)} $_=join"$opt::colsep",@arg[$$1..$$2]'
+
+would make it possible to use:
+
+ {1r3..6}
+
+for field 3..6.
+
+For full support of {n..m:s} including negative numbers use a dynamic
+replacement string like this:
+
+
+ PARALLEL=--rpl\ \''{r((-?\d+)?)\.\.((-?\d+)?)((:([^}]*))?)}
+ $a = defined $$2 ? $$2 < 0 ? 1+$#arg+$$2 : $$2 : 1;
+ $b = defined $$4 ? $$4 < 0 ? 1+$#arg+$$4 : $$4 : $#arg+1;
+ $s = defined $$6 ? $$7 : " ";
+ $_ = join $s,@arg[$a..$b]'\'
+ export PARALLEL
+
+You can then do:
+
+ head /etc/passwd | parallel --colsep : echo ..={1r..} ..3={1r..3} \
+ 4..={1r4..} 2..4={1r2..4} 3..3={1r3..3} ..3:-={1r..3:-} \
+ ..3:/={1r..3:/} -1={-1} -5={-5} -6={-6} -3..={1r-3..}
+
+=head3 EXAMPLES FROM rargs MANUAL
+
+ 1$ ls *.bak | rargs -p '(.*)\.bak' mv {0} {1}
+
+ 1$ ls *.bak | parallel mv {} {.}
+
+ 2$ cat download-list.csv |
+ rargs -p '(?P<url>.*),(?P<filename>.*)' wget {url} -O {filename}
+
+ 2$ cat download-list.csv |
+ parallel --csv wget {1} -O {2}
+ # or use regexps:
+ 2$ cat download-list.csv |
+ parallel --rpl '{url} s/,.*//' --rpl '{filename} s/.*?,//' \
+ wget {url} -O {filename}
+
+ 3$ cat /etc/passwd |
+ rargs -d: echo -e 'id: "{1}"\t name: "{5}"\t rest: "{6..::}"'
+
+ 3$ cat /etc/passwd |
+ parallel -q --colsep : \
+ echo -e 'id: "{1}"\t name: "{5}"\t rest: "{=6 $_=join":",@arg[6..$#arg]=}"'
+
+https://github.com/lotabout/rargs
+(Last checked: 2020-01)
+
+
+=head2 DIFFERENCES BETWEEN threader AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 - - - - - -
+
+=item M1 - M3 - - M6
+
+=item O1 - O3 - O5 - - x x
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+Newline separates arguments, but a newline at the end of the file is
+treated as an empty argument. So this runs 2 jobs:
+
+ echo two_jobs | threader -run 'echo "$THREADID"'
+
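+GNU B<parallel> ignores the trailing newline, so this runs 1 job:
+
+  echo two_jobs | parallel echo
+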
+B<threader> ignores stderr, so any output to stderr is
+lost. B<threader> buffers in RAM, so output bigger than the machine's
+virtual memory will cause the machine to crash.
+
+https://github.com/voodooEntity/threader
+(Last checked: 2020-04)
+
+
+=head2 DIFFERENCES BETWEEN runp AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 - - - - -
+
+=item M1 - (M3) - - M6
+
+=item O1 O2 O3 - O5 O6 - x x -
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+(M3): You can add a prefix and a postfix to the input, so you can
+only insert the argument on the command line once.
+
+B<runp> runs 10 jobs in parallel by default. B<runp> blocks if output
+of a command is > 64 Kbytes. Quoting of input is needed. It adds
+output to stderr (this can be prevented with B<-q>).
+
+=head3 Examples as GNU Parallel
+
+ base='https://images-api.nasa.gov/search'
+ query='jupiter'
+ desc='planet'
+ type='image'
+ url="$base?q=$query&description=$desc&media_type=$type"
+
+ # Download the images in parallel using runp
+ curl -s $url | jq -r .collection.items[].href | \
+ runp -p 'curl -s' | jq -r .[] | grep large | \
+ runp -p 'curl -s -L -O'
+
+ time curl -s $url | jq -r .collection.items[].href | \
+ runp -g 1 -q -p 'curl -s' | jq -r .[] | grep large | \
+ runp -g 1 -q -p 'curl -s -L -O'
+
+ # Download the images in parallel
+ curl -s $url | jq -r .collection.items[].href | \
+ parallel curl -s | jq -r .[] | grep large | \
+ parallel curl -s -L -O
+
+ time curl -s $url | jq -r .collection.items[].href | \
+ parallel -j 1 curl -s | jq -r .[] | grep large | \
+ parallel -j 1 curl -s -L -O
+
+
+=head4 Run some test commands (read from file)
+
+ # Create a file containing commands to run in parallel.
+ cat << EOF > /tmp/test-commands.txt
+ sleep 5
+ sleep 3
+ blah # this will fail
+ ls $PWD # PWD shell variable is used here
+ EOF
+
+ # Run commands from the file.
+ runp /tmp/test-commands.txt > /dev/null
+
+ parallel -a /tmp/test-commands.txt > /dev/null
+
+=head4 Ping several hosts and see packet loss (read from stdin)
+
+ # First copy this line and press Enter
+ runp -p 'ping -c 5 -W 2' -s '| grep loss'
+ localhost
+ 1.1.1.1
+ 8.8.8.8
+ # Press Enter and Ctrl-D when done entering the hosts
+
+ # First copy this line and press Enter
+ parallel ping -c 5 -W 2 {} '| grep loss'
+ localhost
+ 1.1.1.1
+ 8.8.8.8
+ # Press Enter and Ctrl-D when done entering the hosts
+
+=head4 Get directories' sizes (read from stdin)
+
+ echo -e "$HOME\n/etc\n/tmp" | runp -q -p 'sudo du -sh'
+
+ echo -e "$HOME\n/etc\n/tmp" | parallel sudo du -sh
+ # or:
+ parallel sudo du -sh ::: "$HOME" /etc /tmp
+
+=head4 Compress files
+
+ find . -iname '*.txt' | runp -p 'gzip --best'
+
+ find . -iname '*.txt' | parallel gzip --best
+
+=head4 Measure HTTP request + response time
+
+ export CURL="curl -w 'time_total: %{time_total}\n'"
+ CURL="$CURL -o /dev/null -s https://golang.org/"
+ perl -wE 'for (1..10) { say $ENV{CURL} }' |
+ runp -q # Make 10 requests
+
+ perl -wE 'for (1..10) { say $ENV{CURL} }' | parallel
+ # or:
+ parallel -N0 "$CURL" ::: {1..10}
+
+=head4 Find open TCP ports
+
+ cat << EOF > /tmp/host-port.txt
+ localhost 22
+ localhost 80
+ localhost 81
+ 127.0.0.1 443
+ 127.0.0.1 444
+ scanme.nmap.org 22
+ scanme.nmap.org 23
+ scanme.nmap.org 443
+ EOF
+
+ 1$ cat /tmp/host-port.txt |
+ runp -q -p 'netcat -v -w2 -z' 2>&1 | egrep '(succeeded!|open)$'
+
+ # --colsep is needed to split the line
+ 1$ cat /tmp/host-port.txt |
+ parallel --colsep ' ' netcat -v -w2 -z 2>&1 |
+ egrep '(succeeded!|open)$'
+ # or use uq for unquoted:
+ 1$ cat /tmp/host-port.txt |
+ parallel netcat -v -w2 -z {=uq=} 2>&1 |
+ egrep '(succeeded!|open)$'
+
+https://github.com/jreisinger/runp
+(Last checked: 2020-04)
+
+
+=head2 DIFFERENCES BETWEEN papply AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - I4 - - -
+
+=item M1 - M3 - - M6
+
+=item - - O3 - O5 - - x x O10
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<papply> does not print the output if the command fails:
+
+ $ papply 'echo %F; false' foo
+ "echo foo; false" did not succeed
+
+B<papply>'s replacement strings (%F %d %f %n %e %z) can be simulated in GNU
+B<parallel> by putting this in B<~/.parallel/config>:
+
+ --rpl '%F'
+ --rpl '%d $_=Q(::dirname($_));'
+ --rpl '%f s:.*/::;'
+ --rpl '%n s:.*/::;s:\.[^/.]+$::;'
+ --rpl '%e s:.*\.:.:'
+ --rpl '%z $_=""'
+
+B<papply> buffers in RAM, using twice the size of the output. So
+output of 5 GB takes 10 GB RAM.
+
+The buffering is very CPU intensive: Buffering a line of 5 GB takes 40
+seconds (compared to 10 seconds with GNU B<parallel>).
+
+
+=head3 Examples as GNU Parallel
+
+ 1$ papply gzip *.txt
+
+ 1$ parallel gzip ::: *.txt
+
+ 2$ papply "convert %F %n.jpg" *.png
+
+ 2$ parallel convert {} {.}.jpg ::: *.png
+
+
+https://pypi.org/project/papply/
+(Last checked: 2020-04)
+
+
+=head2 DIFFERENCES BETWEEN async AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - I4 - - I7
+
+=item - - - - - M6
+
+=item - O2 O3 - O5 O6 - x x O10
+
+=item E1 - - E4 - E6 -
+
+=item - - - - - - - - -
+
+=item S1 S2
+
+=back
+
+B<async> is very similar to GNU B<parallel>'s B<--semaphore> mode
+(aka B<sem>). B<async> requires the user to start a server process.
+
+The input is quoted like B<-q> so you need B<bash -c "...;..."> to run
+composed commands.
+
+=head3 Examples as GNU Parallel
+
+ 1$ S="/tmp/example_socket"
+
+ 1$ ID=myid
+
+ 2$ async -s="$S" server --start
+
+ 2$ # GNU Parallel does not need a server to run
+
+ 3$ for i in {1..20}; do
+ # prints command output to stdout
+ async -s="$S" cmd -- bash -c "sleep 1 && echo test $i"
+ done
+
+ 3$ for i in {1..20}; do
+ # prints command output to stdout
+ sem --id "$ID" -j100% "sleep 1 && echo test $i"
+ # GNU Parallel will only print job when it is done
+ # If you need output from different jobs to mix
+ # use -u or --line-buffer
+ sem --id "$ID" -j100% --line-buffer "sleep 1 && echo test $i"
+ done
+
+ 4$ # wait until all commands are finished
+ async -s="$S" wait
+
+ 4$ sem --id "$ID" --wait
+
+ 5$ # configure the server to run four commands in parallel
+ async -s="$S" server -j4
+
+ 5$ export PARALLEL=-j4
+
+ 6$ mkdir "/tmp/ex_dir"
+ for i in {21..40}; do
+ # redirects command output to /tmp/ex_dir/file*
+ async -s="$S" cmd -o "/tmp/ex_dir/file$i" -- \
+ bash -c "sleep 1 && echo test $i"
+ done
+
+ 6$ mkdir "/tmp/ex_dir"
+ for i in {21..40}; do
+ # redirects command output to /tmp/ex_dir/file*
+      sem --id "$ID" --results '/tmp/ex_dir/file{=$_=""=}'"$i" \
+ "sleep 1 && echo test $i"
+ done
+
+ 7$ sem --id "$ID" --wait
+
+ 7$ async -s="$S" wait
+
+ 8$ # stops server
+ async -s="$S" server --stop
+
+ 8$ # GNU Parallel does not need to stop a server
+
+
+https://github.com/ctbur/async/
+(Last checked: 2023-01)
+
+
+=head2 DIFFERENCES BETWEEN pardi AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 - - - - I7
+
+=item M1 - - - - M6
+
+=item O1 O2 O3 O4 O5 - O7 - - O10
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<pardi> is very similar to B<parallel --pipe --cat>: It reads blocks
+of data and not arguments. So it cannot insert an argument in the
+command line. It puts the block into a temporary file, and this file
+name (%IN) can be put in the command line. You can only use %IN once.
+
+It can also run full command lines in parallel (like: B<cat file |
+parallel>).
+
+=head3 EXAMPLES FROM pardi test.sh
+
+ 1$ time pardi -v -c 100 -i data/decoys.smi -ie .smi -oe .smi \
+ -o data/decoys_std_pardi.smi \
+ -w '(standardiser -i %IN -o %OUT 2>&1) > /dev/null'
+
+ 1$ cat data/decoys.smi |
+ time parallel -N 100 --pipe --cat \
+ '(standardiser -i {} -o {#} 2>&1) > /dev/null; cat {#}; rm {#}' \
+ > data/decoys_std_pardi.smi
+
+ 2$ pardi -n 1 -i data/test_in.types -o data/test_out.types \
+ -d 'r:^#atoms:' -w 'cat %IN > %OUT'
+
+ 2$ cat data/test_in.types |
+ parallel -n 1 -k --pipe --cat --regexp --recstart '^#atoms' \
+ 'cat {}' > data/test_out.types
+
+ 3$ pardi -c 6 -i data/test_in.types -o data/test_out.types \
+ -d 'r:^#atoms:' -w 'cat %IN > %OUT'
+
+ 3$ cat data/test_in.types |
+ parallel -n 6 -k --pipe --cat --regexp --recstart '^#atoms' \
+ 'cat {}' > data/test_out.types
+
+ 4$ pardi -i data/decoys.mol2 -o data/still_decoys.mol2 \
+ -d 's:@<TRIPOS>MOLECULE' -w 'cp %IN %OUT'
+
+ 4$ cat data/decoys.mol2 |
+ parallel -n 1 --pipe --cat --recstart '@<TRIPOS>MOLECULE' \
+ 'cp {} {#}; cat {#}; rm {#}' > data/still_decoys.mol2
+
+ 5$ pardi -i data/decoys.mol2 -o data/decoys2.mol2 \
+ -d b:10000 -w 'cp %IN %OUT' --preserve
+
+ 5$ cat data/decoys.mol2 |
+ parallel -k --pipe --block 10k --recend '' --cat \
+ 'cat {} > {#}; cat {#}; rm {#}' > data/decoys2.mol2
+
+https://github.com/UnixJunkie/pardi
+(Last checked: 2021-01)
+
+
+=head2 DIFFERENCES BETWEEN bthread AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - I4 - - -
+
+=item - - - - - M6
+
+=item O1 - O3 - - - O7 O8 - -
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<bthread> takes around 1 sec per MB of output. The maximal output
+line length is 1073741759.
+
+You cannot quote space in the command, so you cannot run composed
+commands like B<sh -c "echo a; echo b">.
+
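+With GNU B<parallel> no extra quoting is needed for this:
+
+  parallel ::: 'echo a; echo b'
+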
+https://gitlab.com/netikras/bthread
+(Last checked: 2021-01)
+
+
+=head2 DIFFERENCES BETWEEN simple_gpu_scheduler AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 - - - - - I7
+
+=item M1 - - - - M6
+
+=item - O2 O3 - - O6 - x x O10
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+=head3 EXAMPLES FROM simple_gpu_scheduler MANUAL
+
+ 1$ simple_gpu_scheduler --gpus 0 1 2 < gpu_commands.txt
+
+ 1$ parallel -j3 --shuf \
+ CUDA_VISIBLE_DEVICES='{=1 $_=slot()-1 =} {=uq;=}' \
+ < gpu_commands.txt
+
+ 2$ simple_hypersearch \
+ "python3 train_dnn.py --lr {lr} --batch_size {bs}" \
+ -p lr 0.001 0.0005 0.0001 -p bs 32 64 128 |
+ simple_gpu_scheduler --gpus 0,1,2
+
+ 2$ parallel --header : --shuf -j3 -v \
+ CUDA_VISIBLE_DEVICES='{=1 $_=slot()-1 =}' \
+ python3 train_dnn.py --lr {lr} --batch_size {bs} \
+ ::: lr 0.001 0.0005 0.0001 ::: bs 32 64 128
+
+ 3$ simple_hypersearch \
+ "python3 train_dnn.py --lr {lr} --batch_size {bs}" \
+ --n-samples 5 -p lr 0.001 0.0005 0.0001 -p bs 32 64 128 |
+ simple_gpu_scheduler --gpus 0,1,2
+
+ 3$ parallel --header : --shuf \
+ CUDA_VISIBLE_DEVICES='{=1 $_=slot()-1; seq()>5 and skip() =}' \
+ python3 train_dnn.py --lr {lr} --batch_size {bs} \
+ ::: lr 0.001 0.0005 0.0001 ::: bs 32 64 128
+
+ 4$ touch gpu.queue
+ tail -f -n 0 gpu.queue | simple_gpu_scheduler --gpus 0,1,2 &
+ echo "my_command_with | and stuff > logfile" >> gpu.queue
+
+ 4$ touch gpu.queue
+ tail -f -n 0 gpu.queue |
+ parallel -j3 CUDA_VISIBLE_DEVICES='{=1 $_=slot()-1 =} {=uq;=}' &
+ # Needed to fill job slots once
+ seq 3 | parallel echo true >> gpu.queue
+ # Add jobs
+ echo "my_command_with | and stuff > logfile" >> gpu.queue
+ # Needed to flush output from completed jobs
+ seq 3 | parallel echo true >> gpu.queue
+
+https://github.com/ExpectationMax/simple_gpu_scheduler
+(Last checked: 2021-01)
+
+
+=head2 DIFFERENCES BETWEEN parasweep AND GNU Parallel
+
+B<parasweep> is a Python module for facilitating parallel parameter
+sweeps.
+
+A B<parasweep> job will normally take a text file as input. The text
+file contains arguments for the job. Some of these arguments will be
+fixed and some of them will be changed by B<parasweep>.
+
+It does this by having a template file such as template.txt:
+
+ Xval: {x}
+ Yval: {y}
+ FixedValue: 9
+ # x with 2 decimals
+ DecimalX: {x:.2f}
+ TenX: ${x*10}
+ RandomVal: {r}
+
+and from this template it generates the file to be used by the job by
+replacing the replacement strings.
+
+Being a Python module B<parasweep> integrates tighter with Python than
+GNU B<parallel>. You get the parameters directly in a Python data
+structure. With GNU B<parallel> you can use the JSON or CSV output
+format to get something similar, but you would have to read the
+output.
+
+B<parasweep> has a filtering method to ignore parameter combinations
+you do not need.
+
+Instead of calling the jobs directly, B<parasweep> can use Python's
+Distributed Resource Management Application API to make jobs run with
+different cluster software.
+
+
+GNU B<parallel> B<--tmpl> supports templates with replacement
+strings. Such as:
+
+ Xval: {x}
+ Yval: {y}
+ FixedValue: 9
+ # x with 2 decimals
+ DecimalX: {=x $_=sprintf("%.2f",$_) =}
+ TenX: {=x $_=$_*10 =}
+ RandomVal: {=1 $_=rand() =}
+
+that can be used like:
+
+ parallel --header : --tmpl my.tmpl={#}.t myprog {#}.t \
+ ::: x 1 2 3 ::: y 1 2 3
+
+Filtering is supported as:
+
+ parallel --filter '{1} > {2}' echo ::: 1 2 3 ::: 1 2 3
+
+https://github.com/eviatarbach/parasweep
+(Last checked: 2021-01)
+
+
+=head2 DIFFERENCES BETWEEN parallel-bash AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 - - - - -
+
+=item - - M3 - - M6
+
+=item - O2 O3 - O5 O6 - O8 x O10
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<parallel-bash> is written in pure bash. It is really fast (overhead
+of ~0.05 ms/job compared to GNU B<parallel>'s 3-10 ms/job). So if your
+jobs are extremely short lived, and you can live with the quite
+limited functionality, this may be useful.
+
+It works by making a queue for each process. Then the jobs are
+distributed to the queues in a round robin fashion. Finally the queues
+are started in parallel. This works fine, if you are lucky, but if
+not, all the long jobs may end up in the same queue, so you may see:
+
+ $ printf "%b\n" 1 1 1 4 1 1 1 4 1 1 1 4 |
+ time parallel -P4 sleep {}
+ (7 seconds)
+ $ printf "%b\n" 1 1 1 4 1 1 1 4 1 1 1 4 |
+ time ./parallel-bash.bash -p 4 -c sleep {}
+ (12 seconds)
+
+Because it uses bash lists, the total number of jobs is limited to
+167000..265000 depending on your environment. You get a segmentation
+fault when you reach the limit.
+
+Ctrl-C does not stop spawning new jobs. Ctrl-Z does not suspend
+running jobs.
+
+
+=head3 EXAMPLES FROM parallel-bash
+
+ 1$ some_input | parallel-bash -p 5 -c echo
+
+ 1$ some_input | parallel -j 5 echo
+
+ 2$ parallel-bash -p 5 -c echo < some_file
+
+ 2$ parallel -j 5 echo < some_file
+
+ 3$ parallel-bash -p 5 -c echo <<< 'some string'
+
+  3$ parallel -j 5 echo <<< 'some string'
+
+ 4$ something | parallel-bash -p 5 -c echo {} {}
+
+ 4$ something | parallel -j 5 echo {} {}
+
+https://reposhub.com/python/command-line-tools/Akianonymus-parallel-bash.html
+(Last checked: 2021-06)
+
+
+=head2 DIFFERENCES BETWEEN bash-concurrent AND GNU Parallel
+
+B<bash-concurrent> is more an alternative to B<make> than to GNU
+B<parallel>. Its input is very similar to a Makefile, where jobs
+depend on other jobs.
+
+It has a nice progress indicator where you can see which jobs
+completed successfully, which jobs are currently running, which jobs
+failed, and which jobs were skipped because a job they depend on failed.
+The indicator does not deal well with resizing the window.
+
+Output is cached in tempfiles on disk, but is only shown if there is
+an error, so it is not meant to be part of a UNIX pipeline. If
+B<bash-concurrent> crashes these tempfiles are not removed.
+
+It uses an O(n*n) algorithm, so if you have 1000 independent jobs it
+takes 22 seconds to start it.
+
+https://github.com/themattrix/bash-concurrent
+(Last checked: 2021-02)
+
+
+=head2 DIFFERENCES BETWEEN spawntool AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 - - - - - -
+
+=item M1 - - - - M6
+
+=item - O2 O3 - O5 O6 - x x O10
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<spawn> reads full command lines from stdin and executes them in
+parallel.
+
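+GNU B<parallel> does the same when given no command template:
+
+  (echo 'sleep 1; echo a'; echo 'sleep 1; echo b') | parallel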
+
+http://code.google.com/p/spawntool/
+(Last checked: 2021-07)
+
+
+=head2 DIFFERENCES BETWEEN go-pssh AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - - - - -
+
+=item M1 - - - - -
+
+=item O1 - - - - - - x x O10
+
+=item E1 - - - - - -
+
+=item R1 R2 - - - R6 - - -
+
+=item - -
+
+=back
+
+B<go-pssh> does B<ssh> in parallel to multiple machines. It runs the
+same command on multiple machines similar to B<--nonall>.
+
+The hostnames must be given as IP-addresses (not as hostnames).
+
+Output is sent to stdout (standard output) if command is successful,
+and to stderr (standard error) if the command fails.
+
+=head3 EXAMPLES FROM go-pssh
+
+ 1$ go-pssh -l <ip>,<ip> -u <user> -p <port> -P <passwd> -c "<command>"
+
+ 1$ parallel -S 'sshpass -p <passwd> ssh -p <port> <user>@<ip>' \
+ --nonall "<command>"
+
+ 2$ go-pssh scp -f host.txt -u <user> -p <port> -P <password> \
+ -s /local/file_or_directory -d /remote/directory
+
+ 2$ parallel --nonall --slf host.txt \
+       --basefile /local/file_or_directory/./ --wd /remote/directory \
+ --ssh 'sshpass -p <password> ssh -p <port> -l <user>' true
+
+ 3$ go-pssh scp -l <ip>,<ip> -u <user> -p <port> -P <password> \
+ -s /local/file_or_directory -d /remote/directory
+
+ 3$ parallel --nonall -S <ip>,<ip> \
+       --basefile /local/file_or_directory/./ --wd /remote/directory \
+ --ssh 'sshpass -p <password> ssh -p <port> -l <user>' true
+
+https://github.com/xuchenCN/go-pssh
+(Last checked: 2021-07)
+
+
+=head2 DIFFERENCES BETWEEN go-parallel AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 - - - - I7
+
+=item - - M3 - - M6
+
+=item - O2 O3 - O5 - - x x - O10
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<go-parallel> uses Go templates for replacement strings. Quite
+similar to the I<{= perl expr =}> replacement string.
+
+=head3 EXAMPLES FROM go-parallel
+
+ 1$ go-parallel -a ./files.txt -t 'cp {{.Input}} {{.Input | dirname | dirname}}'
+
+ 1$ parallel -a ./files.txt cp {} '{= $_=::dirname(::dirname($_)) =}'
+
+ 2$ go-parallel -a ./files.txt -t 'mkdir -p {{.Input}} {{noExt .Input}}'
+
+ 2$ parallel -a ./files.txt echo mkdir -p {} {.}
+
+ 3$ go-parallel -a ./files.txt -t 'mkdir -p {{.Input}} {{.Input | basename | noExt}}'
+
+ 3$ parallel -a ./files.txt echo mkdir -p {} {/.}
+
+https://github.com/mylanconnolly/parallel
+(Last checked: 2021-07)
+
+
+=head2 DIFFERENCES BETWEEN p AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - I4 - - x
+
+=item - - - - - M6
+
+=item - O2 O3 - O5 O6 - x x - O10
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<p> is a tiny shell script. It can color output with some predefined
+colors, but is otherwise quite limited.
+
+It maxes out at around 116000 jobs (probably due to limitations in Bash).
+
+=head3 EXAMPLES FROM p
+
+Some of the examples from B<p> cannot be implemented 100% by GNU
+B<parallel>: the coloring done by GNU B<parallel> is not exactly the
+same as B<p>'s, and GNU B<parallel> cannot have B<--tag> for some
+inputs and not for others.
+
+ 1$ p -bc blue "ping 127.0.0.1" -uc red "ping 192.168.0.1" \
+ -rc yellow "ping 192.168.1.1" -t example "ping example.com"
+
+ 1$ parallel --lb -j0 --color --tag ping \
+ ::: 127.0.0.1 192.168.0.1 192.168.1.1 example.com
+
+ 2$ p "tail -f /var/log/httpd/access_log" \
+ -bc red "tail -f /var/log/httpd/error_log"
+
+ 2$ cd /var/log/httpd;
+ parallel --lb --color --tag tail -f ::: access_log error_log
+
+ 3$ p tail -f "some file" \& p tail -f "other file with space.txt"
+
+ 3$ parallel --lb tail -f ::: 'some file' "other file with space.txt"
+
+ 4$ p -t project1 "hg pull project1" -t project2 \
+ "hg pull project2" -t project3 "hg pull project3"
+
+ 4$ parallel --lb hg pull ::: project{1..3}
+
+https://github.com/rudymatela/evenmoreutils/blob/master/man/p.1.adoc
+(Last checked: 2022-04)
+
+
+=head2 DIFFERENCES BETWEEN seneschal AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 - - - - - -
+
+=item M1 - M3 - - M6
+
+=item O1 - O3 O4 - - - x x -
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<seneschal> only starts the first job after reading the last job, and
+output from the first job is only printed after the last job finishes.
+
+1 byte of output requires 3.5 bytes of RAM.
+
+This makes it impossible to have a total output bigger than the
+virtual memory.
+
+Even though output is kept in RAM, outputting is quite slow: 30 MB/s.
+
+Output larger than 4 GB causes random problems - it looks like a race
+condition.
+
+This:
+
+ echo 1 | seneschal --prefix='yes `seq 1000`|head -c 1G' >/dev/null
+
+takes 4100(!) CPU seconds to run on a 64C64T server, but only 140 CPU
+seconds on a 4C8T laptop. So it looks like B<seneschal> wastes a lot
+of CPU time coordinating the CPUs.
+
+Compare this to:
+
+ echo 1 | time -v parallel -N0 'yes `seq 1000`|head -c 1G' >/dev/null
+
+which takes 3-8 CPU seconds.
+
+=head3 EXAMPLES FROM seneschal README.md
+
+ 1$ echo $REPOS | seneschal --prefix="cd {} && git pull"
+
+ # If $REPOS is newline separated
+ 1$ echo "$REPOS" | parallel -k "cd {} && git pull"
+ # If $REPOS is space separated
+ 1$ echo -n "$REPOS" | parallel -d' ' -k "cd {} && git pull"
+
+ COMMANDS="pwd
+ sleep 5 && echo boom
+ echo Howdy
+ whoami"
+
+ 2$ echo "$COMMANDS" | seneschal --debug
+
+ 2$ echo "$COMMANDS" | parallel -k -v
+
+ 3$ ls -1 | seneschal --prefix="pushd {}; git pull; popd;"
+
+ 3$ ls -1 | parallel -k "pushd {}; git pull; popd;"
+ # Or if current dir also contains files:
+ 3$ parallel -k "pushd {}; git pull; popd;" ::: */
+
+https://github.com/TheWizardTower/seneschal
+(Last checked: 2022-06)
+
+
+=head2 DIFFERENCES BETWEEN async AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item x x x x x x x
+
+=item - x x x x x
+
+=item x O2 O3 O4 O5 O6 - x x O10
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item S1 S2
+
+=back
+
+B<async> works like B<sem>.
+
+
+=head3 EXAMPLES FROM async
+
+ 1$ S="/tmp/example_socket"
+
+ async -s="$S" server --start
+
+ for i in {1..20}; do
+ # prints command output to stdout
+ async -s="$S" cmd -- bash -c "sleep 1 && echo test $i"
+ done
+
+ # wait until all commands are finished
+ async -s="$S" wait
+
+ 1$ S="example_id"
+
+ # server not needed
+
+ for i in {1..20}; do
+ # prints command output to stdout
+ sem --bg --id "$S" -j100% "sleep 1 && echo test $i"
+ done
+
+ # wait until all commands are finished
+ sem --fg --id "$S" --wait
+
+ 2$ # configure the server to run four commands in parallel
+ async -s="$S" server -j4
+
+ mkdir "/tmp/ex_dir"
+ for i in {21..40}; do
+ # redirects command output to /tmp/ex_dir/file*
+ async -s="$S" cmd -o "/tmp/ex_dir/file$i" -- \
+ bash -c "sleep 1 && echo test $i"
+ done
+
+ async -s="$S" wait
+
+ # stops server
+ async -s="$S" server --stop
+
+ 2$ # starting server not needed
+
+ mkdir "/tmp/ex_dir"
+ for i in {21..40}; do
+ # redirects command output to /tmp/ex_dir/file*
+ sem --bg --id "$S" --results "/tmp/ex_dir/file$i{}" \
+ "sleep 1 && echo test $i"
+ done
+
+ sem --fg --id "$S" --wait
+
+ # there is no server to stop
+
+https://github.com/ctbur/async
+(Last checked: 2023-01)
+
+
+=head2 DIFFERENCES BETWEEN tandem AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - I4 - - x
+
+=item M1 - - - - M6
+
+=item - - O3 - - - - x - -
+
+=item E1 - E3 - E5 - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<tandem> runs full commands in parallel. It is made for starting a
+"server", running a job against the server, and when the job is done,
+the server is killed.
+
+More generally: it kills all jobs when the first job completes -
+similar to '--halt now,done=1'.
+
+B<tandem> silently discards some output. It is unclear exactly when
+this happens. It looks like a race condition, because it varies for
+each run.
+
+ $ tandem "seq 10000" | wc -l
+ 6731 <- This should always be 10002
+
+
+=head3 EXAMPLES FROM Demo
+
+ tandem \
+ 'php -S localhost:8000' \
+ 'esbuild src/*.ts --bundle --outdir=dist --watch' \
+ 'tailwind -i src/index.css -o dist/index.css --watch'
+
+ # Emulate tandem's behaviour
+ PARALLEL='--color --lb --halt now,done=1 --tagstring '
+ PARALLEL="$PARALLEL'"'{=s/ .*//; $_.=".".$app{$_}++;=}'"'"
+ export PARALLEL
+
+ parallel ::: \
+ 'php -S localhost:8000' \
+ 'esbuild src/*.ts --bundle --outdir=dist --watch' \
+ 'tailwind -i src/index.css -o dist/index.css --watch'
+
+
+=head3 EXAMPLES FROM tandem -h
+
+ # Emulate tandem's behaviour
+ PARALLEL='--color --lb --halt now,done=1 --tagstring '
+ PARALLEL="$PARALLEL'"'{=s/ .*//; $_.=".".$app{$_}++;=}'"'"
+ export PARALLEL
+
+ 1$ tandem 'sleep 5 && echo "hello"' 'sleep 2 && echo "world"'
+
+ 1$ parallel ::: 'sleep 5 && echo "hello"' 'sleep 2 && echo "world"'
+
+  # '-t 0' fails, but '--timeout 0' works
+ 2$ tandem --timeout 0 'sleep 5 && echo "hello"' \
+ 'sleep 2 && echo "world"'
+
+ 2$ parallel --timeout 0 ::: 'sleep 5 && echo "hello"' \
+ 'sleep 2 && echo "world"'
+
+=head3 EXAMPLES FROM tandem's readme.md
+
+ # Emulate tandem's behaviour
+ PARALLEL='--color --lb --halt now,done=1 --tagstring '
+ PARALLEL="$PARALLEL'"'{=s/ .*//; $_.=".".$app{$_}++;=}'"'"
+ export PARALLEL
+
+ 1$ tandem 'next dev' 'nodemon --quiet ./server.js'
+
+ 1$ parallel ::: 'next dev' 'nodemon --quiet ./server.js'
+
+ 2$ cat package.json
+ {
+ "scripts": {
+ "dev:php": "...",
+ "dev:js": "...",
+ "dev:css": "..."
+ }
+ }
+
+ tandem 'npm:dev:php' 'npm:dev:js' 'npm:dev:css'
+
+ # GNU Parallel uses bash functions instead
+ 2$ cat package.sh
+ dev:php() { ... ; }
+ dev:js() { ... ; }
+ dev:css() { ... ; }
+ export -f dev:php dev:js dev:css
+
+ . package.sh
+ parallel ::: dev:php dev:js dev:css
+
+ 3$ tandem 'npm:dev:*'
+
+ 3$ compgen -A function | grep ^dev: | parallel
+
+For usage in Makefiles, include a copy of GNU Parallel with your
+source using B<parallel --embed>. This has the added benefit of also
+working if access to the internet is down or restricted.
+
+https://github.com/rosszurowski/tandem
+(Last checked: 2023-01)
+
+
+=head2 DIFFERENCES BETWEEN rust-parallel(aaronriekenberg) AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 I3 - - - -
+
+=item - - - - - M6
+
+=item O1 O2 O3 - O5 O6 - x - O10
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<rust-parallel> has a goal of only using Rust. It seems impossible
+to call bash functions from the command line; you would need to put
+these in a script.
+
+Calling a script that lacks a shebang line (#! as the first line)
+fails.
+
+=head3 EXAMPLES FROM rust-parallel's README.md
+
+ $ cat >./test <<EOL
+ echo hi
+ echo there
+ echo how
+ echo are
+ echo you
+ EOL
+
+ 1$ cat test | rust-parallel -j5
+
+ 1$ cat test | parallel -j5
+
+ 2$ cat test | rust-parallel -j1
+
+ 2$ cat test | parallel -j1
+
+ 3$ head -100 /usr/share/dict/words | rust-parallel md5 -s
+
+ 3$ head -100 /usr/share/dict/words | parallel md5 -s
+
+ 4$ find . -type f -print0 | rust-parallel -0 gzip -f -k
+
+ 4$ find . -type f -print0 | parallel -0 gzip -f -k
+
+ 5$ head -100 /usr/share/dict/words |
+ awk '{printf "md5 -s %s\n", $1}' | rust-parallel
+
+ 5$ head -100 /usr/share/dict/words |
+ awk '{printf "md5 -s %s\n", $1}' | parallel
+
+ 6$ head -100 /usr/share/dict/words | rust-parallel md5 -s |
+ grep -i abba
+
+ 6$ head -100 /usr/share/dict/words | parallel md5 -s |
+ grep -i abba
+
+https://github.com/aaronriekenberg/rust-parallel
+(Last checked: 2023-01)
+
+
+=head2 DIFFERENCES BETWEEN parallelium AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - I2 - - - - -
+
+=item M1 - - - - M6
+
+=item O1 - O3 - - - - x - -
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<parallelium> merges standard output (stdout) and standard error
+(stderr). The maximum output of a command is 8192 bytes; bigger
+output makes B<parallelium> go into an infinite loop.
+
+In the input file for B<parallelium> you can define tags, so that you
+can select to run only the commands with a given tag - a bit like a
+target in a Makefile.
+
+Progress is printed on standard output (stdout), prepended with '#',
+with information similar to GNU B<parallel>'s B<--bar>.
+
+=head3 EXAMPLES
+
+ $ cat testjobs.txt
+ #tag common sleeps classA
+ (sleep 4.495;echo "job 000")
+ :
+ (sleep 2.587;echo "job 016")
+
+ #tag common sleeps classB
+ (sleep 0.218;echo "job 017")
+ :
+ (sleep 2.269;echo "job 040")
+
+ #tag common sleeps classC
+ (sleep 2.586;echo "job 041")
+ :
+ (sleep 1.626;echo "job 099")
+
+ #tag lasthalf, sleeps, classB
+ (sleep 1.540;echo "job 100")
+ :
+ (sleep 2.001;echo "job 199")
+
+ 1$ parallelium -f testjobs.txt -l logdir -t classB,classC
+
+ 1$ cat testjobs.txt |
+ parallel --plus --results logdir/testjobs.txt_{0#}.output \
+ '{= if(/^#tag /) { @tag = split/,|\s+/ }
+ (grep /^(classB|classC)$/, @tag) or skip =}'
+
+https://github.com/beomagi/parallelium
+(Last checked: 2023-01)
+
+
+=head2 DIFFERENCES BETWEEN forkrun AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 - - - - - I7
+
+=item - - - - - -
+
+=item - O2 O3 - O5 - - - - O10
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+
+B<forkrun> blocks if it receives fewer jobs than slots:
+
+ echo | forkrun -p 2 echo
+
+or when it gets certain commands, e.g.:
+
+ f() { seq "$@" | pv -qL 3; }
+ seq 10 | forkrun f
+
+It is not clear why.
+
+It is faster than GNU B<parallel> (overhead: 1.2 ms/job vs 3 ms/job),
+but way slower than B<parallel-bash> (0.059 ms/job).
+
+Running jobs cannot be stopped by pressing CTRL-C.
+
+B<-k> is supposed to keep the output order but fails on the MIX test
+below. When used with B<-k>, it caches output in RAM.
+
+If B<forkrun> is killed, it leaves temporary files in
+B</tmp/.forkrun.*> that have to be cleaned up manually.
+
+=head3 EXAMPLES
+
+ 1$ time find ./ -type f |
+ forkrun -l512 -- sha256sum 2>/dev/null | wc -l
+ 1$ time find ./ -type f |
+ parallel -j28 -m -- sha256sum 2>/dev/null | wc -l
+
+ 2$ time find ./ -type f |
+ forkrun -l512 -k -- sha256sum 2>/dev/null | wc -l
+ 2$ time find ./ -type f |
+ parallel -j28 -k -m -- sha256sum 2>/dev/null | wc -l
+
+https://github.com/jkool702/forkrun
+(Last checked: 2023-02)
+
+
+=head2 DIFFERENCES BETWEEN parallel-sh AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 - I4 - - -
+
+=item M1 - - - - M6
+
+=item O1 O2 O3 - O5 O6 - - - O10
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<parallel-sh> buffers output in RAM. Buffering the data takes
+O(n^1.5) time for output of size n:
+
+  2MB=0.107s   4MB=0.175s   8MB=0.342s    16MB=0.766s
+  32MB=2.2s    64MB=6.7s    128MB=20s     256MB=64s
+  512MB=248s   1024MB=998s  2048MB=3756s
+
+This limits practical use to jobs outputting less than ~256 MB. GNU
+B<parallel> buffers on disk, yet is faster for jobs with outputs > 16
+MB, and is only limited by the free space in $TMPDIR.
+
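+A sketch of how such timings can be reproduced (assuming GNU time as
+B<\time -v>; B<-N0> makes GNU B<parallel> run the command once with
+no arguments appended):
+
+ # Time one job producing 64 MB through each tool
+ \time -v parallel-sh 'yes | head -c 64M' >/dev/null
+ seq 1 | \time -v parallel -N0 'yes | head -c 64M' >/dev/null
+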
+B<parallel-sh> can kill running jobs if a job fails (similar to
+B<--halt now,fail=1>).
+
+=head3 EXAMPLES
+
+ 1$ parallel-sh "sleep 2 && echo first" "sleep 1 && echo second"
+
+ 1$ parallel ::: "sleep 2 && echo first" "sleep 1 && echo second"
+
+ 2$ cat /tmp/commands
+ sleep 2 && echo first
+ sleep 1 && echo second
+
+ 2$ parallel-sh -f /tmp/commands
+
+ 2$ parallel -a /tmp/commands
+
+ 3$ echo -e 'sleep 2 && echo first\nsleep 1 && echo second' |
+ parallel-sh
+
+ 3$ echo -e 'sleep 2 && echo first\nsleep 1 && echo second' |
+ parallel
+
+https://github.com/thyrc/parallel-sh
+(Last checked: 2023-04)
+
+
+=head2 DIFFERENCES BETWEEN bash-parallel AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - I2 - - - - I7
+
+=item M1 - M3 - M5 M6
+
+=item - O2 O3 - - O6 - O8 - O10
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<bash-parallel> is not so much a command as a shell script that you
+have to alter. It requires you to change the shell function
+process_job that runs the job, and to set $MAX_POOL_SIZE to the
+number of jobs to run in parallel, as sketched below.
+
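+A minimal sketch of that pattern (not the verbatim upstream code; the
+names process_job and $MAX_POOL_SIZE are taken from the description
+above):
+
+ # Edit these inside the bash-parallel script:
+ MAX_POOL_SIZE=8              # number of jobs to run in parallel
+ process_job() {              # called once per job
+   echo "processing $1"
+ }
+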
+It is half as fast as GNU B<parallel> for short jobs.
+
+https://github.com/thilinaba/bash-parallel
+(Last checked: 2023-05)
+
+
+=head2 DIFFERENCES BETWEEN PaSH AND GNU Parallel
+
+Summary (see legend above): N/A
+
+B<pash> is quite different from GNU B<parallel>. It is not a general
+parallelizer. It takes a shell script, analyses it, and parallelizes
+parts of it by replacing those parts with commands that give the same
+result.
+
+This will replace B<sort> with a command that does pretty much the
+same as B<parsort --parallel=8> (except somewhat slower):
+
+ pa.sh --width 8 -c 'cat bigfile | sort'
+
+However, even a simple change will confuse B<pash> and you will get no
+parallelization:
+
+ pa.sh --width 8 -c 'mysort() { sort; }; cat bigfile | mysort'
+ pa.sh --width 8 -c 'cat bigfile | sort | md5sum'
+
+From the source it seems B<pash> only looks at: awk cat col comm cut
+diff grep head mkfifo mv rm sed seq sort tail tee tr uniq wc xargs
+
+For pipelines where these commands are bottlenecks, it might be worth
+testing if B<pash> is faster than GNU B<parallel>.
+
+B<pash> does not respect $TMPDIR but always uses /tmp. If B<pash>
+dies unexpectedly, it does not clean up.
+
+https://github.com/binpash/pash
+(Last checked: 2023-05)
+
+
+=head2 DIFFERENCES BETWEEN korovkin-parallel AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 - - - - - -
+
+=item M1 - - - - M6
+
+=item - - O3 - - - - x x -
+
+=item E1 - - - - - -
+
+=item R1 - - - - R6 x x -
+
+=item - -
+
+=back
+
+B<korovkin-parallel> prepends all lines with some info.
+
+The output is colored with 6 color combinations, so job 1 and 7 will
+get the same color.
+
+You can get similar output with:
+
+ (echo ...) |
+ parallel --color -j 10 --lb --tagstring \
+ '[l:{#}:{=$_=sprintf("%7.03f",::now()-$^T)=} {=$_=hh_mm_ss($^T)=} {%}]'
+
+Lines longer than 8192 chars are broken into lines shorter than
+8192. B<korovkin-parallel> loses the last char for lines exactly 8193
+chars long.
+
+Short lines from different jobs do not mix, but long lines do:
+
+ fun() {
+ perl -e '$a="'$1'"x1000000; for(1..'$2') { print $a };';
+ echo;
+ }
+ export -f fun
+ (echo fun a 100;echo fun b 100) | korovkin-parallel | tr -s abcdef
+ # Compare to:
+ (echo fun a 100;echo fun b 100) | parallel | tr -s abcdef
+
+There should be only one line of a's and one line of b's.
+
+Just like GNU B<parallel>, B<korovkin-parallel> offers a master/slave
+model, so workers on other servers can do some of the tasks. But
+contrary to GNU B<parallel>, you must manually start workers on these
+servers. The communication is neither authenticated nor encrypted.
+
+It caches output in RAM: a 1 GB line uses ~2.5 GB of RAM.
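+
+Reusing the B<fun> helper from above, a single ~1 GB line makes this
+easy to observe (a sketch; watch the RSS reported by B<\time -v>):
+
+ # One job printing a single ~1 GB line
+ echo fun a 1000 | \time -v korovkin-parallel >/dev/null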
+
+https://github.com/korovkin/parallel
+(Last checked: 2023-07)
+
+
+=head2 DIFFERENCES BETWEEN xe AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item I1 I2 - I4 - - I7
+
+=item M1 - M3 M4 - M6
+
+=item - O2 O3 - O5 O6 - O8 - O10
+
+=item E1 - - E4 - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<xe> has a peculiar limitation:
+
+ echo /bin/echo | xe {}      # OK
+ echo echo | xe /bin/{}      # fails
+
+
+=head3 EXAMPLES
+
+Compress all .c files in the current directory, using all CPU cores:
+
+ 1$ xe -a -j0 gzip -- *.c
+
+ 1$ parallel gzip ::: *.c
+
+Remove all empty files, using lr(1):
+
+ 2$ lr -U -t 'size == 0' | xe -N0 rm
+
+ 2$ lr -U -t 'size == 0' | parallel -X rm
+
+Convert .mp3 to .ogg, using all CPU cores:
+
+ 3$ xe -a -j0 -s 'ffmpeg -i "${1}" "${1%.mp3}.ogg"' -- *.mp3
+
+ 3$ parallel ffmpeg -i {} {.}.ogg ::: *.mp3
+
+Same, using percent rules:
+
+ 4$ xe -a -j0 -p %.mp3 ffmpeg -i %.mp3 %.ogg -- *.mp3
+
+ 4$ parallel --rpl '% s/\.mp3// or skip' ffmpeg -i %.mp3 %.ogg ::: *.mp3
+
+Similar, but hiding output of ffmpeg, instead showing spawned jobs:
+
+ 5$ xe -ap -j0 -vvq '%.{m4a,ogg,opus}' ffmpeg -y -i {} out/%.mp3 -- *
+
+ 5$ parallel -v --rpl '% s/\.(m4a|ogg|opus)// or skip' \
+ ffmpeg -y -i {} out/%.mp3 '2>/dev/null' ::: *
+
+ 5$ parallel -v ffmpeg -y -i {} out/{.}.mp3 '2>/dev/null' ::: *
+
+https://github.com/leahneukirchen/xe
+(Last checked: 2023-08)
+
+
+=head2 DIFFERENCES BETWEEN sp AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - I4 - - -
+
+=item M1 - M3 - - M6
+
+=item - O2 O3 - O5 (O6) - x x O10
+
+=item E1 - - - - - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<sp> has very few options.
+
+It can either be used like:
+
+ sp command {} option :: arg1 arg2 arg3
+
+which is similar to:
+
+ parallel command {} option ::: arg1 arg2 arg3
+
+Or:
+
+ sp command1 :: "command2 -option" :: "command3 foo bar"
+
+which is similar to:
+
+ parallel ::: command1 "command2 -option" "command3 foo bar"
+
+B<sp> deals badly with too many commands: it runs out of file
+handles, which causes data loss.
+
+For each command that fails, B<sp> will print an error message on
+stderr (standard error).
+
+You cannot use exported shell functions as commands.
+
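+To see this, compare (a sketch; GNU B<parallel> runs exported Bash
+functions, B<sp> does not):
+
+ myfunc() { echo got "$1"; }
+ export -f myfunc
+ sp myfunc {} :: 1 2 3          # fails: myfunc is not found
+ parallel myfunc {} ::: 1 2 3   # works
+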
+=head3 EXAMPLES
+
+ 1$ sp echo {} :: 1 2 3
+
+ 1$ parallel echo {} ::: 1 2 3
+
+ 2$ sp echo {} {} :: 1 2 3
+
+ 2$ parallel echo {} {} ::: 1 2 3
+
+ 3$ sp echo 1 :: echo 2 :: echo 3
+
+ 3$ parallel ::: 'echo 1' 'echo 2' 'echo 3'
+
+ 4$ sp a foo bar :: "b 'baz bar'" :: c
+
+ 4$ parallel ::: 'a foo bar' "b 'baz bar'" c
+
+https://github.com/SergioBenitez/sp
+(Last checked: 2023-10)
+
+
+=head2 DIFFERENCES BETWEEN repeater AND GNU Parallel
+
+Summary (see legend above):
+
+=over
+
+=item - - - - - - -
+
+=item - - - - - -
+
+=item - O2 O3 N/A - O6 - x x ?O10
+
+=item E1 - - - E5 - -
+
+=item - - - - - - - - -
+
+=item - -
+
+=back
+
+B<repeater> runs the same job repeatedly. In other words: it does not
+read arguments, so it is an alternative to GNU B<parallel> only for
+quite limited applications.
+
+B<repeater> has an overhead of around 0.23 ms/job. Compared to GNU
+B<parallel>'s 2-3 ms/job this is fast. Compared to B<parallel-bash>'s
+0.059 ms/job it is slow.
+
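+A sketch of how such per-job overheads can be compared (the
+B<repeater> flags are assumptions based on the examples below;
+B<true> is the cheapest possible job):
+
+ \time repeater -w 1 -n 1000 -reportFile ./run_output true >/dev/null
+ seq 1000 | \time parallel true >/dev/null
+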
+=head3 Memory use and run time for large output
+
+Output takes O(n^2) time for output of size n: 10 MB takes ~1 second,
+30 MB takes ~7 seconds, 100 MB takes ~60 seconds, 300 MB takes ~480
+seconds, and 1000 MB takes ~2.7 hours (see the measurements below).
+
+100 MB of output takes around 1 GB of RAM.
+
+ # Run time = 15 sec
+ # Memory use = 20 MB
+ # Output = 1 GB per job
+ \time -v parallel -j1 seq ::: 120000000 120000000 >/dev/null
+
+ # Run time = 4.7 sec
+ # Memory use = 95 MB
+ # Output = 8 MB per job
+ \time -v repeater -w 1 -n 2 -reportFile ./run_output seq 1200000 >/dev/null
+
+ # Run time = 42 sec
+ # Memory use = 277 MB
+ # Output = 27 MB per job
+ \time -v repeater -w 1 -n 2 -reportFile ./run_output seq 3600000 >/dev/null
+
+ # Run time = 530 sec
+ # Memory use = 1000 MB
+ # Output = 97 MB per job
+ \time -v repeater -w 1 -n 2 -reportFile ./run_output seq 12000000 >/dev/null
+
+ # Run time = 2h41m
+ # Memory use = 8.6 GB
+ # Output = 1 GB per job
+ \time -v repeater -w 1 -n 2 -reportFile ./run_output seq 120000000 >/dev/null
+
+Even for moderately sized outputs, GNU B<parallel> will be faster and
+use less memory.
+
+
+=head3 EXAMPLES
+
+ 1$ repeater -n 100 -w 10 -reportFile ./run_output
+ -output REPORT_FILE -progress BOTH curl example.com
+
+ 1$ seq 100 | parallel --joblog run.log --eta curl example.com > output
+
+ 2$ repeater -n 100 -increment -progress HIDDEN -reportFile foo
+ echo "this is increment: " INC
+ 2$ seq 100 | parallel echo {}
+ 2$ seq 100 | parallel echo '{= $_ = ++$myvar =}'
+
+https://github.com/baalimago/repeater
+(Last checked: 2023-12)
+
+
+=head2 Todo
+
+https://github.com/justanhduc/task-spooler
+
+https://manpages.ubuntu.com/manpages/xenial/man1/tsp.1.html
+
+https://www.npmjs.com/package/concurrently
+
+http://code.google.com/p/push/ (cannot compile)
+
+https://github.com/krashanoff/parallel
+
+https://github.com/Nukesor/pueue
+
+https://arxiv.org/pdf/2012.15443.pdf KumQuat
+
+https://github.com/JeiKeiLim/simple_distribute_job
+
+https://github.com/reggi/pkgrun - not obvious how to use
+
+https://github.com/benoror/better-npm-run - not obvious how to use
+
+https://github.com/bahmutov/with-package
+
+https://github.com/flesler/parallel
+
+https://github.com/Julian/Verge
+
+https://vicerveza.homeunix.net/~viric/soft/ts/
+
+https://github.com/chapmanjacobd/que
+
+
+
+=head1 TESTING OTHER TOOLS
+
+There are certain issues that are very common on parallelizing
+tools. Here are a few stress tests. Be warned: If the tool is badly
+coded it may overload your machine.
+
+
+=head2 MIX: Output mixes
+
+Output from 2 jobs should not mix. If the output is not used, this
+does not matter; but if the output I<is> used then it is important
+that you do not get half a line from one job followed by half a line
+from another job.
+
+If the tool does not buffer, output will most likely mix now and then.
+
+This test stresses whether output mixes.
+
+ #!/bin/bash
+
+ paralleltool="parallel -j 30"
+
+ cat <<-EOF > mycommand
+ #!/bin/bash
+
+ # If a, b, c, d, e, and f mix: Very bad
+ perl -e 'print STDOUT "a"x3000_000," "'
+ perl -e 'print STDERR "b"x3000_000," "'
+ perl -e 'print STDOUT "c"x3000_000," "'
+ perl -e 'print STDERR "d"x3000_000," "'
+ perl -e 'print STDOUT "e"x3000_000," "'
+ perl -e 'print STDERR "f"x3000_000," "'
+ echo
+ echo >&2
+ EOF
+ chmod +x mycommand
+
+ # Run 30 jobs in parallel
+ seq 30 |
+ $paralleltool ./mycommand > >(tr -s abcdef) 2> >(tr -s abcdef >&2)
+
+ # 'a c e' and 'b d f' should always stay together
+ # and there should only be a single line per job
+
+
+=head2 STDERRMERGE: Stderr is merged with stdout
+
+Output from stdout and stderr should not be merged, but kept separated.
+
+This test shows whether stdout is mixed with stderr.
+
+ #!/bin/bash
+
+ paralleltool="parallel -j0"
+
+ cat <<-EOF > mycommand
+ #!/bin/bash
+
+ echo stdout
+ echo stderr >&2
+ echo stdout
+ echo stderr >&2
+ EOF
+ chmod +x mycommand
+
+ # Run one job
+ echo |
+ $paralleltool ./mycommand > stdout 2> stderr
+ cat stdout
+ cat stderr
+
+
+=head2 RAM: Output limited by RAM
+
+Some tools cache output in RAM. This makes them extremely slow if the
+output is bigger than physical memory, and makes them crash if the
+output is bigger than the virtual memory.
+
+ #!/bin/bash
+
+ paralleltool="parallel -j0"
+
+ cat <<'EOF' > mycommand
+ #!/bin/bash
+
+ # Generate 1 GB output
+ yes "`perl -e 'print \"c\"x30_000'`" | head -c 1G
+ EOF
+ chmod +x mycommand
+
+ # Run 20 jobs in parallel
+ # Adjust 20 to be > physical RAM and < free space on /tmp
+ seq 20 | time $paralleltool ./mycommand | wc -c
+
+
+=head2 DISKFULL: Incomplete data if /tmp runs full
+
+If caching is done on disk, the disk can run full during the run. Not
+all programs discover this. GNU Parallel discovers it if the disk
+stays full for at least 2 seconds.
+
+ #!/bin/bash
+
+ paralleltool="parallel -j0"
+
+ # This should be a dir with less than 100 GB free space
+ smalldisk=/tmp/shm/parallel
+
+ TMPDIR="$smalldisk"
+ export TMPDIR
+
+ max_output() {
+ # Force worst case scenario:
+ # Make GNU Parallel only check once per second
+ sleep 10
+ # Generate 100 GB to fill $TMPDIR
+ # Adjust if /tmp is bigger than 100 GB
+ yes | head -c 100G >$TMPDIR/$$
+ # Generate 10 MB output that will not be buffered
+ # due to full disk
+ perl -e 'print "X"x10_000_000' | head -c 10M
+ echo This part is missing from incomplete output
+ sleep 2
+ rm $TMPDIR/$$
+ echo Final output
+ }
+
+ export -f max_output
+ seq 10 | $paralleltool max_output | tr -s X
+
+
+=head2 CLEANUP: Leaving tmp files at unexpected death
+
+Some tools do not clean up their tmp files if they are killed. This
+is especially a problem for tools that buffer on disk.
+
+ #!/bin/bash
+
+ paralleltool=parallel
+
+ ls /tmp >/tmp/before
+ seq 10 | $paralleltool sleep &
+ pid=$!
+ # Give the tool time to start up
+ sleep 1
+ # Kill it without giving it a chance to cleanup
+ kill -9 $pid
+ # Should be empty: No files should be left behind
+ diff <(ls /tmp) /tmp/before
+
+
+=head2 SPCCHAR: Dealing badly with special file names
+
+It is not uncommon for users to create files like:
+
+ My brother's 12" *** record (costs $$$).jpg
+
+Some tools break on this.
+
+ #!/bin/bash
+
+ paralleltool=parallel
+
+ touch "My brother's 12\" *** record (costs \$\$\$).jpg"
+ ls My*jpg | $paralleltool ls -l
+
+
+=head2 COMPOSED: Composed commands do not work
+
+Some tools require you to wrap composed commands in B<bash -c>.
+
+ echo bar | $paralleltool echo foo';' echo {}
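+
+With such tools the composed command must be wrapped (a sketch; the
+exact quoting needed depends on the tool):
+
+ # 'echo foo' and 'echo bar' run inside one bash -c invocation
+ echo bar | $paralleltool bash -c 'echo foo; echo "$1"' bash {}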
+
+
+=head2 ONEREP: Only one replacement string allowed
+
+Some tools can only insert the argument once.
+
+ echo bar | $paralleltool echo {} foo {}
+
+
+=head2 INPUTSIZE: Length of input should not be limited
+
+Some tools artificially limit the length of input lines for no good
+reason. GNU B<parallel> does not:
+
+ perl -e 'print "foo."."x"x100_000_000' | parallel echo {.}
+
+GNU B<parallel> limits the command to run to 128 KB due to execve(2):
+
+ perl -e 'print "x"x131_000' | parallel echo {} | wc
+
+
+=head2 NUMWORDS: Speed depends on number of words
+
+Some tools become very slow if output lines have many words.
+
+ #!/bin/bash
+
+ paralleltool=parallel
+
+ cat <<-EOF > mycommand
+ #!/bin/bash
+
+ # 10 MB of lines with 1000 words
+ yes "`seq 1000`" | head -c 10M
+ EOF
+ chmod +x mycommand
+
+ # Run 30 jobs in parallel
+ seq 30 | time $paralleltool -j0 ./mycommand > /dev/null
+
+=head2 4GB: Output with a line > 4GB should be OK
+
+ #!/bin/bash
+
+ paralleltool="parallel -j0"
+
+ cat <<-EOF > mycommand
+ #!/bin/bash
+
+ perl -e '\$a="a"x1000_000; for(1..5000) { print \$a }'
+ EOF
+ chmod +x mycommand
+
+ # Run 1 job
+ seq 1 | $paralleltool ./mycommand | LC_ALL=C wc
+
+
+=head1 AUTHOR
+
+When using GNU B<parallel> for a publication please cite:
+
+O. Tange (2011): GNU Parallel - The Command-Line Power Tool, ;login:
+The USENIX Magazine, February 2011:42-47.
+
+This helps funding further development; and it won't cost you a cent.
+If you pay 10000 EUR you should feel free to use GNU Parallel without citing.
+
+Copyright (C) 2007-10-18 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2008-2010 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+Parts of the manual concerning B<xargs> compatibility are inspired by
+the manual of B<xargs> from GNU findutils 4.4.2.
+
+
+=head1 LICENSE
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this
+documentation under the terms of the GNU Free Documentation License,
+Version 1.3 or any later version published by the Free Software
+Foundation; with no Invariant Sections, with no Front-Cover Texts, and
+with no Back-Cover Texts. A copy of the license is included in the
+file LICENSES/GFDL-1.3-or-later.txt.
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 2
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=back
+
+=over 9
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file
+LICENSES/CC-BY-SA-4.0.txt.
+
+
+=head1 DEPENDENCIES
+
+GNU B<parallel> uses Perl, and the Perl modules Getopt::Long,
+IPC::Open3, Symbol, IO::File, POSIX, and File::Temp. For remote usage
+it also uses rsync with ssh.
+
+
+=head1 SEE ALSO
+
+B<find>(1), B<xargs>(1), B<make>(1), B<pexec>(1), B<ppss>(1),
+B<xjobs>(1), B<prll>(1), B<dxargs>(1), B<mdm>(1)
+
+=cut
diff --git a/src/parallel_book.pod b/src/parallel_book.pod
new file mode 100644
index 0000000..2919af7
--- /dev/null
+++ b/src/parallel_book.pod
@@ -0,0 +1,403 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=encoding utf8
+
+=head1 Why should you read this book?
+
+If you write shell scripts to do the same processing for different
+input, then GNU B<parallel> will make your life easier and make your
+scripts run faster.
+
+The book is written so you get the juicy parts first: The goal is that
+you read just enough to get you going. GNU B<parallel> has an
+overwhelming amount of special features to help in different
+situations, and to avoid overloading you with information, the most
+used features are presented first.
+
+All the examples are tested in Bash, and most will work in other
+shells too, but there are a few exceptions. So it is recommended to
+use Bash while testing out the examples.
+
+
+=head1 Learn GNU Parallel in 5 minutes
+
+You just need to run commands in parallel. You do not care about fine
+tuning.
+
+To get going please run this to make some example files:
+
+ # If your system does not have 'seq', replace 'seq' with 'jot'
+ seq 5 | parallel seq {} '>' example.{}
+
+=head2 Input sources
+
+GNU B<parallel> reads values from input sources. One input source is
+the command line. The values are put after B<:::> :
+
+ parallel echo ::: 1 2 3 4 5
+
+This makes it easy to run the same program on some files:
+
+ parallel wc ::: example.*
+
+If you give multiple B<:::>s, GNU B<parallel> will generate all
+combinations:
+
+ parallel wc ::: -l -c ::: example.*
+
+GNU B<parallel> can also read the values from stdin (standard input):
+
+ seq 5 | parallel echo
+
+
+=head2 Building the command line
+
+The command line is put before the B<:::>. It can contain a command
+and options for the command:
+
+ parallel wc -l ::: example.*
+
+The command can contain multiple programs. Just remember to quote
+characters that are interpreted by the shell (such as B<;>):
+
+ parallel echo counting lines';' wc -l ::: example.*
+
+The value will normally be appended to the command, but can be placed
+anywhere by using the replacement string B<{}>:
+
+ parallel echo counting {}';' wc -l {} ::: example.*
+
+When using multiple input sources you use the positional replacement
+strings B<{1}> and B<{2}>:
+
+ parallel echo count {1} in {2}';' wc {1} {2} ::: -l -c ::: example.*
+
+You can check what will be run with B<--dry-run>:
+
+ parallel --dry-run echo count {1} in {2}';' wc {1} {2} ::: -l -c ::: example.*
+
+This is a good idea to do for every command until you are comfortable
+with GNU B<parallel>.
+
+=head2 Controlling the output
+
+The output will be printed as soon as the command completes. This
+means the output may come in a different order than the input:
+
+ parallel sleep {}';' echo {} done ::: 5 4 3 2 1
+
+You can force GNU B<parallel> to print in the order of the values with
+B<--keep-order>/B<-k>. This will still run the commands in parallel,
+but the output of later jobs will be delayed until the earlier jobs
+have been printed:
+
+ parallel -k sleep {}';' echo {} done ::: 5 4 3 2 1
+
+
+=head2 Controlling the execution
+
+If your jobs are compute intensive, you will most likely run one job
+for each core in the system. This is the default for GNU B<parallel>.
+
+But sometimes you want more jobs running. You control the number of
+job slots with B<-j>. Give B<-j> the number of jobs to run in
+parallel:
+
+ parallel -j50 \
+ wget https://ftpmirror.gnu.org/parallel/parallel-{1}{2}22.tar.bz2 \
+ ::: 2012 2013 2014 2015 2016 \
+ ::: 01 02 03 04 05 06 07 08 09 10 11 12
+
+
+=head2 Pipe mode
+
+GNU B<parallel> can also pass blocks of data to commands on stdin
+(standard input):
+
+ seq 1000000 | parallel --pipe wc
+
+This can be used to process big text files. By default GNU B<parallel>
+splits on \n (newline) and passes a block of around 1 MB to each job.
+
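+The block size can be changed with B<--block>:
+
+ seq 1000000 | parallel --pipe --block 10M wc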
+
+=head2 That's it
+
+You have now learned the basic use of GNU B<parallel>. This will
+probably cover most of your uses of GNU B<parallel>.
+
+The rest of this document will go into more details on each of the
+sections and cover special use cases.
+
+
+=head1 Learn GNU Parallel in an hour
+
+In this part we will dive deeper into what you learned in the first 5 minutes.
+
+To get going please run this to make some example files:
+
+ seq 6 > seq6
+ seq 6 -1 1 > seq-6
+
+=head2 Input sources
+
+On top of the command line, input sources can also be stdin (standard
+input or '-'), files, and fifos, and they can be mixed. Files are
+given after B<-a> or B<::::>. So these all do the same:
+
+ parallel echo Dice1={1} Dice2={2} ::: 1 2 3 4 5 6 ::: 6 5 4 3 2 1
+ parallel echo Dice1={1} Dice2={2} :::: <(seq 6) :::: <(seq 6 -1 1)
+ parallel echo Dice1={1} Dice2={2} :::: seq6 seq-6
+ parallel echo Dice1={1} Dice2={2} :::: seq6 :::: seq-6
+ parallel -a seq6 -a seq-6 echo Dice1={1} Dice2={2}
+ parallel -a seq6 echo Dice1={1} Dice2={2} :::: seq-6
+ parallel echo Dice1={1} Dice2={2} ::: 1 2 3 4 5 6 :::: seq-6
+ cat seq-6 | parallel echo Dice1={1} Dice2={2} :::: seq6 -
+
+If stdin (standard input) is the only input source, you do not need the '-':
+
+ cat seq6 | parallel echo Dice1={1}
+
+=head3 Linking input sources
+
+You can link multiple input sources with B<:::+> and B<::::+>:
+
+ parallel echo {1}={2} ::: I II III IV V VI :::+ 1 2 3 4 5 6
+ parallel echo {1}={2} ::: I II III IV V VI ::::+ seq6
+
+The B<:::+> (and B<::::+>) will link each value to the corresponding
+value in the previous input source, so value number 3 from the first
+input source will be linked to value number 3 from the second input
+source.
+
+You can combine B<:::+> and B<:::>, so you link 2 input sources, but
+generate all combinations with other input sources:
+
+ parallel echo Dice1={1}={2} Dice2={3}={4} ::: I II III IV V VI ::::+ seq6 \
+ ::: VI V IV III II I ::::+ seq-6
+
+
+=head2 Building the command line
+
+=head3 The command
+
+The command can be a script, a binary or a Bash function if the
+function is exported using B<export -f>:
+
+ # Works only in Bash
+ my_func() {
+ echo in my_func "$1"
+ }
+ export -f my_func
+ parallel my_func ::: 1 2 3
+
+If the command is complex, it often improves readability to make it
+into a function.
+
+
+=head3 The replacement strings
+
+GNU B<parallel> has some replacement strings to make it easier to
+refer to the input read from the input sources.
+
+If the input is B<mydir/mysubdir/myfile.myext> then:
+
+ {} = mydir/mysubdir/myfile.myext
+ {.} = mydir/mysubdir/myfile
+ {/} = myfile.myext
+ {//} = mydir/mysubdir
+ {/.} = myfile
+ {#} = the sequence number of the job
+ {%} = the job slot number
+
+When a job is started it gets a sequence number that starts at 1 and
+increases by 1 for each new job. The job also gets assigned a slot
+number. This number is from 1 to the number of jobs running in
+parallel. It is unique among the running jobs, but is re-used as soon
+as a job finishes.
+
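+For example, with 2 job slots the slot number is always 1 or 2, while
+the sequence number runs from 1 to 4:
+
+ parallel -j2 'echo job {#} of {} ran in slot {%}' ::: a b c d
+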
+=head4 The positional replacement strings
+
+The replacement strings have corresponding positional replacement
+strings. If the value from the 3rd input source is
+B<mydir/mysubdir/myfile.myext>:
+
+ {3} = mydir/mysubdir/myfile.myext
+ {3.} = mydir/mysubdir/myfile
+ {3/} = myfile.myext
+ {3//} = mydir/mysubdir
+ {3/.} = myfile
+
+So the number of the input source is simply prepended inside the {}'s.
+
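+For example, using a dummy first input source:
+
+ parallel echo {2/} is in {2//} ::: run ::: mydir/mysubdir/myfile.myext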
+
+=head1 Replacement strings
+
+--plus replacement strings
+
+change the replacement string (-I --extensionreplace
+--basenamereplace --dirnamereplace --basenameextensionreplace
+--seqreplace --slotreplace)
+
+--header with named replacement string
+
+{= =}
+
+Dynamic replacement strings
+
+=head2 Defining replacement strings
+
+
+
+
+=head2 Copying environment
+
+env_parallel
+
+=head2 Controlling the output
+
+=head3 parset
+
+B<parset> is a shell function to get the output from GNU B<parallel>
+into shell variables.
+
+B<parset> is fully supported for B<Bash/Zsh/Ksh> and partially supported
+for B<ash/dash>. I will assume you run B<Bash>.
+
+To activate B<parset> you have to run:
+
+ . `which env_parallel.bash`
+
+(replace B<bash> with your shell's name).
+
+Then you can run:
+
+ parset a,b,c seq ::: 4 5 6
+ echo "$c"
+
+or:
+
+ parset 'a b c' seq ::: 4 5 6
+ echo "$c"
+
+If you give a single variable, this will become an array:
+
+ parset arr seq ::: 4 5 6
+ echo "${arr[1]}"
+
+B<parset> has one limitation: If it reads from a pipe, the output will
+be lost.
+
+ echo This will not work | parset myarr echo
+ echo Nothing: "${myarr[*]}"
+
+Instead you can do this:
+
+ echo This will work > tempfile
+ parset myarr echo < tempfile
+ echo ${myarr[*]}
+
+sql
+cvs
+
+
+=head2 Controlling the execution
+
+--dryrun -v
+
+=head2 Remote execution
+
+For this section you must have B<ssh> access with no password to 2
+servers: B<$server1> and B<$server2>.
+
+ server1=server.example.com
+ server2=server2.example.net
+
+So you must be able to do this:
+
+ ssh $server1 echo works
+ ssh $server2 echo works
+
+It can be set up by running 'ssh-keygen -t dsa; ssh-copy-id $server1'
+and using an empty passphrase, or you can use B<ssh-agent>.
+
+=head3 Workers
+
+=head3 --transferfile
+
+B<--transferfile> I<filename> will transfer I<filename> to the
+worker. I<filename> can contain a replacement string:
+
+ parallel -S $server1,$server2 --transferfile {} wc ::: example.*
+ parallel -S $server1,$server2 --transferfile {2} \
+ echo count {1} in {2}';' wc {1} {2} ::: -l -c ::: example.*
+
+A shorthand for B<--transferfile {}> is B<--transfer>.
+
+=head3 --return
+
+
+
+=head3 --cleanup
+
+A shorthand for B<--transfer --return {} --cleanup> is B<--trc {}>.
+
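+For example (a sketch, assuming the example.* files from earlier and
+a working $server1):
+
+ # Transfer example.*, run wc remotely, return *.wc, clean up
+ parallel -S $server1 --trc {}.wc 'wc {} > {}.wc' ::: example.*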
+
+=head2 Pipe mode
+
+--pipepart
+
+
+=head2 That's it
+
+=head1 Advanced usage
+
+parset fifo, cmd substitution, arrayelements, array with var names and cmds, env_parset
+
+
+env_parallel
+
+Interfacing with R.
+
+Interfacing with JSON/jq
+
+ 4dl() {
+   board="$(printf -- '%s' "${1}" | cut -d '/' -f4)"
+   thread="$(printf -- '%s' "${1}" | cut -d '/' -f6)"
+   wget -qO- "https://a.4cdn.org/${board}/thread/${thread}.json" |
+     jq -r '
+       .posts
+       | map(select(.tim != null))
+       | map((.tim | tostring) + .ext)
+       | map("https://i.4cdn.org/'"${board}"'/"+.)[]
+     ' |
+     parallel --gnu -j 0 wget -nv
+ }
+
+Interfacing with XML/?
+
+Interfacing with HTML/?
+
+=head2 Controlling the execution
+
+--termseq
+
+
+=head2 Remote execution
+
+ seq 10 | parallel --sshlogin 'ssh -i "key.pem" a@b.com' echo
+
+ seq 10 | PARALLEL_SSH='ssh -i "key.pem"' parallel --sshlogin a@b.com echo
+
+ seq 10 | parallel --ssh 'ssh -i "key.pem"' --sshlogin a@b.com echo
+
+ssh-agent
+
+The sshlogin file format
+
+Check if servers are up
+
+
+
+=cut
diff --git a/src/parallel_cheat_bw.fodt b/src/parallel_cheat_bw.fodt
new file mode 100644
index 0000000..fb3e80f
--- /dev/null
+++ b/src/parallel_cheat_bw.fodt
@@ -0,0 +1,1001 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<office:document xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0" xmlns:style="urn:oasis:names:tc:opendocument:xmlns:style:1.0" xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0" xmlns:table="urn:oasis:names:tc:opendocument:xmlns:table:1.0" xmlns:draw="urn:oasis:names:tc:opendocument:xmlns:drawing:1.0" xmlns:fo="urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:meta="urn:oasis:names:tc:opendocument:xmlns:meta:1.0" xmlns:number="urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0" xmlns:svg="urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0" xmlns:chart="urn:oasis:names:tc:opendocument:xmlns:chart:1.0" xmlns:dr3d="urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0" xmlns:math="http://www.w3.org/1998/Math/MathML" xmlns:form="urn:oasis:names:tc:opendocument:xmlns:form:1.0" xmlns:script="urn:oasis:names:tc:opendocument:xmlns:script:1.0" xmlns:config="urn:oasis:names:tc:opendocument:xmlns:config:1.0" xmlns:ooo="http://openoffice.org/2004/office" xmlns:ooow="http://openoffice.org/2004/writer" xmlns:oooc="http://openoffice.org/2004/calc" xmlns:dom="http://www.w3.org/2001/xml-events" xmlns:xforms="http://www.w3.org/2002/xforms" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:rpt="http://openoffice.org/2005/report" xmlns:of="urn:oasis:names:tc:opendocument:xmlns:of:1.2" xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:grddl="http://www.w3.org/2003/g/data-view#" xmlns:officeooo="http://openoffice.org/2009/office" xmlns:tableooo="http://openoffice.org/2009/table" xmlns:drawooo="http://openoffice.org/2010/draw" xmlns:calcext="urn:org:documentfoundation:names:experimental:calc:xmlns:calcext:1.0" xmlns:loext="urn:org:documentfoundation:names:experimental:office:xmlns:loext:1.0" xmlns:field="urn:openoffice:names:experimental:ooo-ms-interop:xmlns:field:1.0" xmlns:formx="urn:openoffice:names:experimental:ooxml-odf-interop:xmlns:form:1.0" xmlns:css3t="http://www.w3.org/TR/css3-text/" office:version="1.2" office:mimetype="application/vnd.oasis.opendocument.text">
+ <office:meta><meta:creation-date>2019-02-27T22:13:36.781924718</meta:creation-date><dc:date>2019-03-12T01:40:23.357999303</dc:date><meta:editing-duration>P11DT5H49M15S</meta:editing-duration><meta:editing-cycles>40</meta:editing-cycles><meta:generator>LibreOffice/6.0.6.2$Linux_X86_64 LibreOffice_project/00m0$Build-2</meta:generator><meta:print-date>2019-03-08T23:06:56.779293422</meta:print-date><meta:document-statistic meta:table-count="9" meta:image-count="0" meta:object-count="0" meta:page-count="1" meta:paragraph-count="47" meta:word-count="337" meta:character-count="1950" meta:non-whitespace-character-count="1661"/></office:meta>
+ <office:settings>
+ <config:config-item-set config:name="ooo:view-settings">
+ <config:config-item config:name="ViewAreaTop" config:type="long">15875</config:config-item>
+ <config:config-item config:name="ViewAreaLeft" config:type="long">0</config:config-item>
+ <config:config-item config:name="ViewAreaWidth" config:type="long">22287</config:config-item>
+ <config:config-item config:name="ViewAreaHeight" config:type="long">11042</config:config-item>
+ <config:config-item config:name="ShowRedlineChanges" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="InBrowseMode" config:type="boolean">false</config:config-item>
+ <config:config-item-map-indexed config:name="Views">
+ <config:config-item-map-entry>
+ <config:config-item config:name="ViewId" config:type="string">view2</config:config-item>
+ <config:config-item config:name="ViewLeft" config:type="long">18724</config:config-item>
+ <config:config-item config:name="ViewTop" config:type="long">25474</config:config-item>
+ <config:config-item config:name="VisibleLeft" config:type="long">0</config:config-item>
+ <config:config-item config:name="VisibleTop" config:type="long">15875</config:config-item>
+ <config:config-item config:name="VisibleRight" config:type="long">22285</config:config-item>
+ <config:config-item config:name="VisibleBottom" config:type="long">26915</config:config-item>
+ <config:config-item config:name="ZoomType" config:type="short">0</config:config-item>
+ <config:config-item config:name="ViewLayoutColumns" config:type="short">1</config:config-item>
+ <config:config-item config:name="ViewLayoutBookMode" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="ZoomFactor" config:type="short">250</config:config-item>
+ <config:config-item config:name="IsSelectedFrame" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="AnchoredTextOverflowLegacy" config:type="boolean">false</config:config-item>
+ </config:config-item-map-entry>
+ </config:config-item-map-indexed>
+ </config:config-item-set>
+ <config:config-item-set config:name="ooo:configuration-settings">
+ <config:config-item config:name="PrintPaperFromSetup" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintFaxName" config:type="string"/>
+ <config:config-item config:name="PrintSingleJobs" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintProspectRTL" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintProspect" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintReversed" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintTextPlaceholder" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintTables" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="DoNotJustifyLinesWithManualBreak" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="AlignTabStopPosition" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="PrintLeftPages" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="IgnoreFirstLineIndentInNumbering" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrinterSetup" config:type="base64Binary">uAH+/1hlcm94X1BoYXNlcl82MTQwRE4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQ1VQUzpYZXJveF9QaGFzZXJfNjE0MEROAAAAAAAAAAAWAAMA2QAAAAAAAAAEAAhSAAAEdAAASm9iRGF0YSAxCnByaW50ZXI9WGVyb3hfUGhhc2VyXzYxNDBETgpvcmllbnRhdGlvbj1Qb3J0cmFpdApjb3BpZXM9MQpjb2xsYXRlPWZhbHNlCm1hcmdpbmRhanVzdG1lbnQ9MCwwLDAsMApjb2xvcmRlcHRoPTI0CnBzbGV2ZWw9MApwZGZkZXZpY2U9MQpjb2xvcmRldmljZT0wClBQRENvbnRleERhdGEKRHVwbGV4Ok5vbmUAUGFnZVNpemU6QTQASW5wdXRTbG90OkF1dG9TZWxlY3QAABIAQ09NUEFUX0RVUExFWF9NT0RFDwBEdXBsZXhNb2RlOjpPZmY=</config:config-item>
+ <config:config-item config:name="CollapseEmptyCellPara" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="RedlineProtectionKey" config:type="base64Binary"/>
+ <config:config-item config:name="UseOldPrinterMetrics" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="UseOldNumbering" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="AddExternalLeading" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="TreatSingleColumnBreakAsPageBreak" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="UpdateFromTemplate" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="IsLabelDocument" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="RsidRoot" config:type="int">816919</config:config-item>
+ <config:config-item config:name="ConsiderTextWrapOnObjPos" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="TableRowKeep" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="TabsRelativeToIndent" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="SaveVersionOnClose" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="UseFormerTextWrapping" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="ChartAutoUpdate" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="AddParaTableSpacingAtStart" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="AllowPrintJobCancel" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="AddParaTableSpacing" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="PrintDrawings" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="AddParaSpacingToTableCells" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="UseFormerLineSpacing" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="OutlineLevelYieldsNumbering" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="LinkUpdateMode" config:type="short">1</config:config-item>
+ <config:config-item config:name="DoNotResetParaAttrsForNumFont" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="ApplyUserData" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="StylesNoDefault" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="EmbeddedDatabaseName" config:type="string"/>
+ <config:config-item config:name="FloattableNomargins" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="BackgroundParaOverDrawings" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrinterName" config:type="string">Xerox_Phaser_6140DN</config:config-item>
+ <config:config-item config:name="UseFormerObjectPositioning" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="TabOverMargin" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="SaveGlobalDocumentLinks" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="CurrentDatabaseDataSource" config:type="string"/>
+ <config:config-item config:name="IsKernAsianPunctuation" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="CharacterCompressionType" config:type="short">0</config:config-item>
+ <config:config-item config:name="SmallCapsPercentage66" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="CurrentDatabaseCommand" config:type="string"/>
+ <config:config-item config:name="CurrentDatabaseCommandType" config:type="int">0</config:config-item>
+ <config:config-item config:name="FieldAutoUpdate" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="IgnoreTabsAndBlanksForLineCalculation" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="LoadReadonly" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="DoNotCaptureDrawObjsOnPage" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="ClipAsCharacterAnchoredWriterFlyFrames" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintBlackFonts" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="DisableOffPagePositioning" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="SurroundTextWrapSmall" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="UnxForceZeroExtLeading" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="TabAtLeftIndentForParagraphsInList" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintRightPages" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="Rsid" config:type="int">1662577</config:config-item>
+ <config:config-item config:name="MathBaselineAlignment" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="MsWordCompTrailingBlanks" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="InvertBorderSpacing" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="EmbedFonts" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrinterIndependentLayout" config:type="string">high-resolution</config:config-item>
+ <config:config-item config:name="TabOverflow" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="PrintGraphics" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="PropLineSpacingShrinksFirstLine" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="UnbreakableNumberings" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="AddFrameOffsets" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="ClippedPictures" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="EmbedSystemFonts" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="ApplyParagraphMarkFormatToNumbering" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="SubtractFlysAnchoredAtFlys" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="AddVerticalFrameOffsets" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="ProtectForm" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintEmptyPages" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintControls" config:type="boolean">true</config:config-item>
+ <config:config-item config:name="PrintHiddenText" config:type="boolean">false</config:config-item>
+ <config:config-item config:name="PrintAnnotationMode" config:type="short">0</config:config-item>
+ <config:config-item config:name="PrintPageBackground" config:type="boolean">true</config:config-item>
+ </config:config-item-set>
+ </office:settings>
+ <office:scripts>
+ <office:script script:language="ooo:Basic">
+ <ooo:libraries xmlns:ooo="http://openoffice.org/2004/office" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <ooo:library-embedded ooo:name="Standard"/>
+ </ooo:libraries>
+ </office:script>
+ </office:scripts>
+ <office:font-face-decls>
+ <style:font-face style:name="Lohit Devanagari1" svg:font-family="&apos;Lohit Devanagari&apos;"/>
+ <style:font-face style:name="monospace" svg:font-family="monospace"/>
+ <style:font-face style:name="DejaVu Sans Mono" svg:font-family="&apos;DejaVu Sans Mono&apos;" style:font-family-generic="modern" style:font-pitch="fixed"/>
+ <style:font-face style:name="Liberation Mono" svg:font-family="&apos;Liberation Mono&apos;" style:font-family-generic="modern" style:font-pitch="fixed"/>
+ <style:font-face style:name="Bitstream Vera Serif1" svg:font-family="&apos;Bitstream Vera Serif&apos;" style:font-family-generic="roman" style:font-pitch="variable"/>
+ <style:font-face style:name="Bitstream Vera Serif" svg:font-family="&apos;Bitstream Vera Serif&apos;" style:font-adornments="Roman" style:font-family-generic="roman" style:font-pitch="variable"/>
+ <style:font-face style:name="Liberation Serif" svg:font-family="&apos;Liberation Serif&apos;" style:font-family-generic="roman" style:font-pitch="variable"/>
+ <style:font-face style:name="Thorndale" svg:font-family="Thorndale" style:font-family-generic="roman" style:font-pitch="variable"/>
+ <style:font-face style:name="DejaVu Sans" svg:font-family="&apos;DejaVu Sans&apos;" style:font-family-generic="swiss" style:font-pitch="variable"/>
+ <style:font-face style:name="FreeSans" svg:font-family="FreeSans" style:font-adornments="Bold" style:font-family-generic="swiss" style:font-pitch="variable"/>
+ <style:font-face style:name="Liberation Sans" svg:font-family="&apos;Liberation Sans&apos;" style:font-family-generic="swiss" style:font-pitch="variable"/>
+ <style:font-face style:name="Lohit Devanagari" svg:font-family="&apos;Lohit Devanagari&apos;" style:font-family-generic="system" style:font-pitch="variable"/>
+ <style:font-face style:name="Noto Sans CJK SC Regular" svg:font-family="&apos;Noto Sans CJK SC Regular&apos;" style:font-family-generic="system" style:font-pitch="variable"/>
+ </office:font-face-decls>
+ <office:styles>
+ <style:default-style style:family="graphic">
+ <style:graphic-properties svg:stroke-color="#3465a4" draw:fill-color="#729fcf" fo:wrap-option="no-wrap" draw:shadow-offset-x="0.1181in" draw:shadow-offset-y="0.1181in" draw:start-line-spacing-horizontal="0.1114in" draw:start-line-spacing-vertical="0.1114in" draw:end-line-spacing-horizontal="0.1114in" draw:end-line-spacing-vertical="0.1114in" style:flow-with-text="false"/>
+ <style:paragraph-properties style:text-autospace="ideograph-alpha" style:line-break="strict" style:font-independent-line-spacing="false">
+ <style:tab-stops/>
+ </style:paragraph-properties>
+ <style:text-properties style:use-window-font-color="true" style:font-name="Liberation Serif" fo:font-size="12pt" fo:language="en" fo:country="US" style:letter-kerning="true" style:font-name-asian="Noto Sans CJK SC Regular" style:font-size-asian="10.5pt" style:language-asian="zh" style:country-asian="CN" style:font-name-complex="Lohit Devanagari" style:font-size-complex="12pt" style:language-complex="hi" style:country-complex="IN"/>
+ </style:default-style>
+ <style:default-style style:family="paragraph">
+ <style:paragraph-properties fo:orphans="2" fo:widows="2" fo:hyphenation-ladder-count="no-limit" style:text-autospace="ideograph-alpha" style:punctuation-wrap="hanging" style:line-break="strict" style:tab-stop-distance="0.4925in" style:writing-mode="page"/>
+ <style:text-properties style:use-window-font-color="true" style:font-name="Liberation Serif" fo:font-size="12pt" fo:language="en" fo:country="US" style:letter-kerning="true" style:font-name-asian="Noto Sans CJK SC Regular" style:font-size-asian="10.5pt" style:language-asian="zh" style:country-asian="CN" style:font-name-complex="Lohit Devanagari" style:font-size-complex="12pt" style:language-complex="hi" style:country-complex="IN" fo:hyphenate="false" fo:hyphenation-remain-char-count="2" fo:hyphenation-push-char-count="2"/>
+ </style:default-style>
+ <style:default-style style:family="table">
+ <style:table-properties table:border-model="collapsing"/>
+ </style:default-style>
+ <style:default-style style:family="table-row">
+ <style:table-row-properties fo:keep-together="auto"/>
+ </style:default-style>
+ <style:style style:name="Standard" style:family="paragraph" style:class="text"/>
+ <style:style style:name="Heading" style:family="paragraph" style:parent-style-name="Standard" style:next-style-name="Text_20_body" style:class="text">
+ <style:paragraph-properties fo:margin-top="0.1665in" fo:margin-bottom="0.0835in" loext:contextual-spacing="false" fo:keep-with-next="always"/>
+ <style:text-properties style:font-name="Liberation Sans" fo:font-family="&apos;Liberation Sans&apos;" style:font-family-generic="swiss" style:font-pitch="variable" fo:font-size="14pt" style:font-name-asian="Noto Sans CJK SC Regular" style:font-family-asian="&apos;Noto Sans CJK SC Regular&apos;" style:font-family-generic-asian="system" style:font-pitch-asian="variable" style:font-size-asian="14pt" style:font-name-complex="Lohit Devanagari" style:font-family-complex="&apos;Lohit Devanagari&apos;" style:font-family-generic-complex="system" style:font-pitch-complex="variable" style:font-size-complex="14pt"/>
+ </style:style>
+ <style:style style:name="Text_20_body" style:display-name="Text body" style:family="paragraph" style:parent-style-name="Standard" style:class="text">
+ <style:paragraph-properties fo:margin-top="0in" fo:margin-bottom="0.0972in" loext:contextual-spacing="false" fo:line-height="115%"/>
+ </style:style>
+ <style:style style:name="List" style:family="paragraph" style:parent-style-name="Text_20_body" style:class="list">
+ <style:text-properties style:font-size-asian="12pt" style:font-name-complex="Lohit Devanagari1" style:font-family-complex="&apos;Lohit Devanagari&apos;"/>
+ </style:style>
+ <style:style style:name="Caption" style:family="paragraph" style:parent-style-name="Standard" style:class="extra">
+ <style:paragraph-properties fo:margin-top="0.0835in" fo:margin-bottom="0.0835in" loext:contextual-spacing="false" text:number-lines="false" text:line-number="0"/>
+ <style:text-properties fo:font-size="12pt" fo:font-style="italic" style:font-size-asian="12pt" style:font-style-asian="italic" style:font-name-complex="Lohit Devanagari1" style:font-family-complex="&apos;Lohit Devanagari&apos;" style:font-size-complex="12pt" style:font-style-complex="italic"/>
+ </style:style>
+ <style:style style:name="Index" style:family="paragraph" style:parent-style-name="Standard" style:class="index">
+ <style:paragraph-properties text:number-lines="false" text:line-number="0"/>
+ <style:text-properties style:font-size-asian="12pt" style:font-name-complex="Lohit Devanagari1" style:font-family-complex="&apos;Lohit Devanagari&apos;"/>
+ </style:style>
+ <style:style style:name="Table_20_Contents" style:display-name="Table Contents" style:family="paragraph" style:parent-style-name="Standard" style:class="extra">
+ <loext:graphic-properties draw:fill="none" draw:fill-color="#729fcf"/>
+ <style:paragraph-properties fo:line-height="115%" fo:background-color="transparent" style:shadow="none" text:number-lines="false" text:line-number="0"/>
+ <style:text-properties style:font-name="Bitstream Vera Serif" fo:font-family="&apos;Bitstream Vera Serif&apos;" style:font-style-name="Roman" style:font-family-generic="roman" style:font-pitch="variable" fo:font-size="10pt" style:font-size-asian="10.5pt"/>
+ </style:style>
+ <style:style style:name="Header" style:family="paragraph" style:parent-style-name="Standard" style:class="extra">
+ <style:paragraph-properties text:number-lines="false" text:line-number="0">
+ <style:tab-stops>
+ <style:tab-stop style:position="3.3472in" style:type="center"/>
+ <style:tab-stop style:position="6.6953in" style:type="right"/>
+ </style:tab-stops>
+ </style:paragraph-properties>
+ </style:style>
+ <style:style style:name="Table_20_Heading" style:display-name="Table Heading" style:family="paragraph" style:parent-style-name="Table_20_Contents" style:class="extra">
+ <style:paragraph-properties fo:text-align="center" style:justify-single-word="false" text:number-lines="false" text:line-number="0"/>
+ <style:text-properties fo:font-weight="bold" style:font-weight-asian="bold" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="Preformatted_20_Text" style:display-name="Preformatted Text" style:family="paragraph" style:parent-style-name="Standard" style:class="html" style:master-page-name="">
+ <loext:graphic-properties draw:fill="none" draw:fill-color="#729fcf"/>
+ <style:paragraph-properties fo:margin-left="0.2in" fo:margin-right="0.2in" fo:margin-top="0in" fo:margin-bottom="0in" loext:contextual-spacing="true" fo:orphans="3" fo:widows="3" fo:text-indent="0in" style:auto-text-indent="false" style:page-number="auto" fo:background-color="transparent" style:shadow="none" fo:keep-with-next="auto" style:writing-mode="page"/>
+ <style:text-properties style:font-name="Liberation Mono" fo:font-family="&apos;Liberation Mono&apos;" style:font-family-generic="modern" style:font-pitch="fixed" fo:font-size="10pt" style:font-name-asian="DejaVu Sans Mono" style:font-family-asian="&apos;DejaVu Sans Mono&apos;" style:font-family-generic-asian="modern" style:font-pitch-asian="fixed" style:font-size-asian="10pt" style:font-name-complex="Liberation Mono" style:font-family-complex="&apos;Liberation Mono&apos;" style:font-family-generic-complex="modern" style:font-pitch-complex="fixed" style:font-size-complex="10pt"/>
+ </style:style>
+ <style:style style:name="Level1_20_body" style:display-name="Level1 body" style:family="paragraph" style:parent-style-name="Text_20_body">
+ <style:paragraph-properties fo:margin-top="0.1in" fo:margin-bottom="0.1in" loext:contextual-spacing="false" fo:padding="0.1in" fo:border-left="0.51pt solid #000000" fo:border-right="none" fo:border-top="none" fo:border-bottom="none" style:shadow="none" style:writing-mode="page"/>
+ </style:style>
+ <style:style style:name="Numbering_20_Symbols" style:display-name="Numbering Symbols" style:family="text"/>
+ <style:style style:name="Source_20_Text" style:display-name="Source Text" style:family="text">
+      <style:text-properties style:font-name="Liberation Mono" fo:font-family="&apos;Liberation Mono&apos;" style:font-family-generic="modern" style:font-pitch="fixed" fo:font-size="10.5pt" fo:font-weight="normal" fo:background-color="#dddddd" style:font-name-asian="DejaVu Sans Mono" style:font-family-asian="&apos;DejaVu Sans Mono&apos;" style:font-family-generic-asian="modern" style:font-pitch-asian="fixed" style:font-size-asian="9.6pt" style:font-weight-asian="normal" style:font-name-complex="Liberation Mono" style:font-family-complex="&apos;Liberation Mono&apos;" style:font-family-generic-complex="modern" style:font-pitch-complex="fixed" style:font-size-complex="11pt" style:font-weight-complex="normal"/>
+ </style:style>
+ <text:outline-style style:name="Outline">
+ <text:outline-level-style text:level="1" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ <text:outline-level-style text:level="2" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ <text:outline-level-style text:level="3" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ <text:outline-level-style text:level="4" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ <text:outline-level-style text:level="5" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ <text:outline-level-style text:level="6" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ <text:outline-level-style text:level="7" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ <text:outline-level-style text:level="8" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ <text:outline-level-style text:level="9" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ <text:outline-level-style text:level="10" style:num-format="">
+ <style:list-level-properties text:list-level-position-and-space-mode="label-alignment">
+ <style:list-level-label-alignment text:label-followed-by="listtab"/>
+ </style:list-level-properties>
+ </text:outline-level-style>
+ </text:outline-style>
+ <text:notes-configuration text:note-class="footnote" style:num-format="1" text:start-value="0" text:footnotes-position="page" text:start-numbering-at="document"/>
+ <text:notes-configuration text:note-class="endnote" style:num-format="i" text:start-value="0"/>
+ <text:linenumbering-configuration text:number-lines="false" text:offset="0.1965in" style:num-format="1" text:number-position="left" text:increment="5"/>
+ </office:styles>
+ <office:automatic-styles>
+ <style:style style:name="Table7" style:family="table">
+ <style:table-properties style:width="6.6951in" fo:margin-top="0in" fo:margin-bottom="0in" fo:break-before="auto" fo:break-after="auto" table:align="margins" fo:background-color="#dddddd" fo:keep-with-next="auto" style:may-break-between-rows="false" style:writing-mode="page" table:border-model="collapsing">
+ <style:background-image/>
+ </style:table-properties>
+ </style:style>
+ <style:style style:name="Table7.A" style:family="table-column">
+ <style:table-column-properties style:column-width="6.6951in" style:rel-column-width="65535*"/>
+ </style:style>
+ <style:style style:name="Table7.1" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:background-color="#000000" fo:keep-together="auto">
+ <style:background-image/>
+ </style:table-row-properties>
+ </style:style>
+ <style:style style:name="Table7.A1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="1pt solid #ffffff" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table7.2" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table7.A2" style:family="table-cell" style:data-style-name="N10100">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table8" style:family="table">
+ <style:table-properties style:width="6.6951in" fo:margin-top="0in" fo:margin-bottom="0in" fo:break-before="auto" fo:break-after="auto" table:align="margins" fo:background-color="#dddddd" fo:keep-with-next="auto" style:may-break-between-rows="false" style:writing-mode="page" table:border-model="collapsing">
+ <style:background-image/>
+ </style:table-properties>
+ </style:style>
+ <style:style style:name="Table8.A" style:family="table-column">
+ <style:table-column-properties style:column-width="6.6951in" style:rel-column-width="65535*"/>
+ </style:style>
+ <style:style style:name="Table8.1" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:background-color="#000000" fo:keep-together="auto">
+ <style:background-image/>
+ </style:table-row-properties>
+ </style:style>
+ <style:style style:name="Table8.A1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="1pt solid #ffffff" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table8.2" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table8.A2" style:family="table-cell" style:data-style-name="N10100">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table8.3" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table8.4" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table9" style:family="table">
+ <style:table-properties style:width="6.6951in" fo:margin-top="0in" fo:margin-bottom="0in" fo:break-before="auto" fo:break-after="auto" table:align="margins" fo:background-color="#dddddd" fo:keep-with-next="auto" style:may-break-between-rows="false" style:writing-mode="page" table:border-model="collapsing">
+ <style:background-image/>
+ </style:table-properties>
+ </style:style>
+ <style:style style:name="Table9.A" style:family="table-column">
+ <style:table-column-properties style:column-width="6.6951in" style:rel-column-width="65535*"/>
+ </style:style>
+ <style:style style:name="Table9.1" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:background-color="#000000" fo:keep-together="auto">
+ <style:background-image/>
+ </style:table-row-properties>
+ </style:style>
+ <style:style style:name="Table9.A1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="1pt solid #ffffff" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table9.2" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table9.A2" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table9.3" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table9.4" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table9.5" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table9.6" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table9.7" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table10" style:family="table">
+ <style:table-properties style:width="6.6951in" fo:margin-top="0in" fo:margin-bottom="0in" fo:break-before="auto" fo:break-after="auto" table:align="margins" fo:background-color="#dddddd" fo:keep-with-next="auto" style:may-break-between-rows="false" style:writing-mode="page" table:border-model="collapsing">
+ <style:background-image/>
+ </style:table-properties>
+ </style:style>
+ <style:style style:name="Table10.A" style:family="table-column">
+ <style:table-column-properties style:column-width="2.3736in" style:rel-column-width="3418*"/>
+ </style:style>
+ <style:style style:name="Table10.B" style:family="table-column">
+ <style:table-column-properties style:column-width="4.3215in" style:rel-column-width="6223*"/>
+ </style:style>
+ <style:style style:name="Table10.1" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:background-color="#000000" fo:keep-together="auto">
+ <style:background-image/>
+ </style:table-row-properties>
+ </style:style>
+ <style:style style:name="Table10.A1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="1pt solid #ffffff" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table10.B1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table10.2" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table10.A2" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table10.B2" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="1pt solid #ffffff" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table10.3" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table10.4" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table10.5" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table10.6" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table10.7" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table10.8" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table10.9" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table11" style:family="table">
+ <style:table-properties style:width="6.6951in" fo:margin-top="0in" fo:margin-bottom="0in" fo:break-before="auto" fo:break-after="auto" table:align="margins" fo:background-color="#dddddd" fo:keep-with-next="auto" style:may-break-between-rows="false" style:writing-mode="page" table:border-model="collapsing">
+ <style:background-image/>
+ </style:table-properties>
+ </style:style>
+ <style:style style:name="Table11.A" style:family="table-column">
+ <style:table-column-properties style:column-width="6.6951in" style:rel-column-width="65535*"/>
+ </style:style>
+ <style:style style:name="Table11.1" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:background-color="#000000" fo:keep-together="auto">
+ <style:background-image/>
+ </style:table-row-properties>
+ </style:style>
+ <style:style style:name="Table11.A1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="1pt solid #ffffff" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table11.2" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table11.A2" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table12" style:family="table">
+ <style:table-properties style:width="6.6951in" fo:margin-top="0in" fo:margin-bottom="0in" fo:break-before="auto" fo:break-after="auto" table:align="margins" fo:background-color="#dddddd" fo:keep-with-next="auto" style:may-break-between-rows="false" style:writing-mode="page" table:border-model="collapsing">
+ <style:background-image/>
+ </style:table-properties>
+ </style:style>
+ <style:style style:name="Table12.A" style:family="table-column">
+ <style:table-column-properties style:column-width="6.6951in" style:rel-column-width="65535*"/>
+ </style:style>
+ <style:style style:name="Table12.1" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:background-color="#000000" fo:keep-together="auto">
+ <style:background-image/>
+ </style:table-row-properties>
+ </style:style>
+ <style:style style:name="Table12.A1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="1pt solid #ffffff" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table12.2" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table12.A2" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table12.3" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table13" style:family="table">
+ <style:table-properties style:width="6.6951in" fo:margin-top="0in" fo:margin-bottom="0in" fo:break-before="auto" fo:break-after="auto" table:align="margins" fo:background-color="#dddddd" fo:keep-with-next="auto" style:may-break-between-rows="false" style:writing-mode="page" table:border-model="collapsing">
+ <style:background-image/>
+ </style:table-properties>
+ </style:style>
+ <style:style style:name="Table13.A" style:family="table-column">
+ <style:table-column-properties style:column-width="6.6951in" style:rel-column-width="65535*"/>
+ </style:style>
+ <style:style style:name="Table13.1" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:background-color="#000000" fo:keep-together="auto">
+ <style:background-image/>
+ </style:table-row-properties>
+ </style:style>
+ <style:style style:name="Table13.A1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="1pt solid #ffffff" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table13.2" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table13.A2" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table1" style:family="table">
+ <style:table-properties style:width="6.6951in" fo:margin-top="0in" fo:margin-bottom="0in" fo:break-before="auto" fo:break-after="auto" table:align="margins" fo:background-color="#dddddd" fo:keep-with-next="auto" style:may-break-between-rows="false" style:writing-mode="page" table:border-model="collapsing">
+ <style:background-image/>
+ </style:table-properties>
+ </style:style>
+ <style:style style:name="Table1.A" style:family="table-column">
+ <style:table-column-properties style:column-width="6.6951in" style:rel-column-width="65535*"/>
+ </style:style>
+ <style:style style:name="Table1.1" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:background-color="#000000" fo:keep-together="auto">
+ <style:background-image/>
+ </style:table-row-properties>
+ </style:style>
+ <style:style style:name="Table1.A1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="1pt solid #ffffff" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table1.2" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table1.A2" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table1.3" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table2" style:family="table">
+ <style:table-properties style:width="6.6951in" fo:margin-top="0in" fo:margin-bottom="0in" fo:break-before="auto" fo:break-after="auto" table:align="margins" fo:background-color="#dddddd" fo:keep-with-next="auto" style:may-break-between-rows="false" style:writing-mode="page" table:border-model="collapsing">
+ <style:background-image/>
+ </style:table-properties>
+ </style:style>
+ <style:style style:name="Table2.A" style:family="table-column">
+ <style:table-column-properties style:column-width="6.6951in" style:rel-column-width="65535*"/>
+ </style:style>
+ <style:style style:name="Table2.1" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:background-color="#000000" fo:keep-together="auto">
+ <style:background-image/>
+ </style:table-row-properties>
+ </style:style>
+ <style:style style:name="Table2.A1" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="1pt solid #ffffff" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table2.2" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="Table2.A2" style:family="table-cell">
+ <style:table-cell-properties style:vertical-align="middle" fo:background-color="transparent" fo:padding-left="0.1in" fo:padding-right="0in" fo:padding-top="0in" fo:padding-bottom="0in" fo:border-left="1pt solid #ffffff" fo:border-right="none" fo:border-top="none" fo:border-bottom="1pt solid #ffffff" style:writing-mode="lr-tb">
+ <style:background-image/>
+ </style:table-cell-properties>
+ </style:style>
+ <style:style style:name="Table2.3" style:family="table-row">
+ <style:table-row-properties style:min-row-height="0.1778in" fo:keep-together="auto"/>
+ </style:style>
+ <style:style style:name="P1" style:family="paragraph" style:parent-style-name="Standard">
+ <style:text-properties officeooo:paragraph-rsid="0015544e"/>
+ </style:style>
+ <style:style style:name="P2" style:family="paragraph" style:parent-style-name="Standard">
+ <style:text-properties fo:font-size="8pt" officeooo:paragraph-rsid="001483d1" style:font-size-asian="7pt" style:font-size-complex="8pt"/>
+ </style:style>
+ <style:style style:name="P3" style:family="paragraph" style:parent-style-name="Text_20_body">
+ <style:paragraph-properties fo:text-align="center" style:justify-single-word="false" fo:break-before="auto" fo:break-after="auto"/>
+ <style:text-properties fo:font-size="18pt" fo:font-weight="bold" officeooo:paragraph-rsid="001483d1" style:font-size-asian="18pt" style:font-weight-asian="bold" style:font-size-complex="18pt" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="P4" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:paragraph-properties fo:text-align="start" style:justify-single-word="false"/>
+ <style:text-properties fo:color="#000000" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" style:font-name="Thorndale" fo:font-size="12pt" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="bold" officeooo:rsid="0010bcb2" officeooo:paragraph-rsid="001483d1" style:font-size-asian="12pt" style:font-style-asian="normal" style:font-weight-asian="bold" style:font-size-complex="12pt" style:font-style-complex="normal" style:font-weight-complex="bold" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="P5" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties fo:font-weight="normal" officeooo:rsid="0012b9fe" officeooo:paragraph-rsid="001483d1" style:font-weight-asian="normal" style:font-weight-complex="normal"/>
+ </style:style>
+ <style:style style:name="P6" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties fo:font-weight="bold" style:font-weight-asian="bold" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="P7" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties fo:font-style="italic" officeooo:rsid="0012b9fe" officeooo:paragraph-rsid="001483d1" style:font-style-asian="italic" style:font-style-complex="italic"/>
+ </style:style>
+ <style:style style:name="P8" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties fo:font-style="italic" officeooo:paragraph-rsid="001483d1" style:font-style-asian="italic" style:font-style-complex="italic"/>
+ </style:style>
+ <style:style style:name="P9" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties officeooo:rsid="0012b9fe" officeooo:paragraph-rsid="001483d1"/>
+ </style:style>
+ <style:style style:name="P10" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties officeooo:rsid="00137669" officeooo:paragraph-rsid="001483d1"/>
+ </style:style>
+ <style:style style:name="P11" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties officeooo:paragraph-rsid="001483d1"/>
+ </style:style>
+ <style:style style:name="P12" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties officeooo:paragraph-rsid="0015544e"/>
+ </style:style>
+ <style:style style:name="P13" style:family="paragraph" style:parent-style-name="Table_20_Heading">
+ <style:paragraph-properties fo:text-align="start" style:justify-single-word="false"/>
+ <style:text-properties style:use-window-font-color="true" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" style:font-name="FreeSans" fo:font-size="12pt" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="600" officeooo:paragraph-rsid="001483d1" style:font-size-asian="12pt" style:font-style-asian="normal" style:font-weight-asian="600" style:font-size-complex="12pt" style:font-style-complex="normal" style:font-weight-complex="600" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="P14" style:family="paragraph" style:parent-style-name="Table_20_Heading">
+ <style:paragraph-properties fo:text-align="start" style:justify-single-word="false"/>
+ <style:text-properties style:use-window-font-color="true" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" style:font-name="FreeSans" fo:font-size="12pt" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="600" officeooo:paragraph-rsid="0015544e" style:font-size-asian="12pt" style:font-style-asian="normal" style:font-weight-asian="600" style:font-size-complex="12pt" style:font-style-complex="normal" style:font-weight-complex="600" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="P15" style:family="paragraph" style:parent-style-name="Table_20_Heading">
+ <style:text-properties style:font-name="DejaVu Sans" fo:font-size="18pt" style:font-size-asian="18pt" style:font-size-complex="18pt"/>
+ </style:style>
+ <style:style style:name="P16" style:family="paragraph" style:parent-style-name="Standard">
+ <style:text-properties officeooo:paragraph-rsid="0015544e"/>
+ </style:style>
+ <style:style style:name="P17" style:family="paragraph" style:parent-style-name="Standard">
+ <style:paragraph-properties fo:text-align="end" style:justify-single-word="false"/>
+ <style:text-properties fo:font-size="6pt" officeooo:rsid="00180262" officeooo:paragraph-rsid="00180262" style:font-size-asian="6pt" style:font-size-complex="6pt"/>
+ </style:style>
+ <style:style style:name="P18" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties officeooo:paragraph-rsid="0016fdaa"/>
+ </style:style>
+ <style:style style:name="P19" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:paragraph-properties fo:text-align="start" style:justify-single-word="false"/>
+ <style:text-properties fo:color="#000000" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" style:font-name="Thorndale" fo:font-size="12pt" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="bold" officeooo:rsid="0010bcb2" officeooo:paragraph-rsid="001483d1" style:font-size-asian="12pt" style:font-style-asian="normal" style:font-weight-asian="bold" style:font-size-complex="12pt" style:font-style-complex="normal" style:font-weight-complex="bold" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="P20" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties fo:font-weight="bold" officeooo:rsid="0016fdaa" officeooo:paragraph-rsid="0016fdaa" style:font-weight-asian="bold" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="P21" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties fo:font-weight="bold" officeooo:paragraph-rsid="0016fdaa" style:font-weight-asian="bold" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="P22" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties officeooo:rsid="0016fdaa" officeooo:paragraph-rsid="0016fdaa"/>
+ </style:style>
+ <style:style style:name="P23" style:family="paragraph" style:parent-style-name="Table_20_Contents">
+ <style:text-properties officeooo:paragraph-rsid="00180262"/>
+ </style:style>
+ <style:style style:name="P24" style:family="paragraph" style:parent-style-name="Table_20_Heading">
+ <style:paragraph-properties fo:text-align="start" style:justify-single-word="false"/>
+ <style:text-properties style:use-window-font-color="true" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" style:font-name="FreeSans" fo:font-size="12pt" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="600" officeooo:rsid="001483d1" officeooo:paragraph-rsid="001483d1" style:font-size-asian="12pt" style:font-style-asian="normal" style:font-weight-asian="600" style:font-size-complex="12pt" style:font-style-complex="normal" style:font-weight-complex="600" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="P25" style:family="paragraph" style:parent-style-name="Table_20_Heading">
+ <style:paragraph-properties fo:text-align="start" style:justify-single-word="false"/>
+ <style:text-properties style:use-window-font-color="true" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" style:font-name="FreeSans" fo:font-size="12pt" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="600" officeooo:rsid="0015544e" officeooo:paragraph-rsid="0015544e" style:font-size-asian="12pt" style:font-style-asian="normal" style:font-weight-asian="600" style:font-size-complex="12pt" style:font-style-complex="normal" style:font-weight-complex="600" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="P26" style:family="paragraph" style:parent-style-name="Table_20_Heading">
+ <style:paragraph-properties fo:text-align="start" style:justify-single-word="false"/>
+ <style:text-properties officeooo:paragraph-rsid="001483d1"/>
+ </style:style>
+ <style:style style:name="T1" style:family="text">
+ <style:text-properties fo:font-weight="bold" style:font-weight-asian="bold" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="T2" style:family="text">
+ <style:text-properties fo:font-weight="bold" officeooo:rsid="0015544e" style:font-weight-asian="bold" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="T3" style:family="text">
+ <style:text-properties fo:font-weight="bold" officeooo:rsid="00180262" style:font-weight-asian="bold" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="T4" style:family="text">
+ <style:text-properties officeooo:rsid="0012b9fe"/>
+ </style:style>
+ <style:style style:name="T5" style:family="text">
+ <style:text-properties fo:font-weight="normal" style:font-weight-asian="normal" style:font-weight-complex="normal"/>
+ </style:style>
+ <style:style style:name="T6" style:family="text">
+ <style:text-properties fo:font-weight="normal" officeooo:rsid="001483d1" style:font-weight-asian="normal" style:font-weight-complex="normal"/>
+ </style:style>
+ <style:style style:name="T7" style:family="text">
+ <style:text-properties fo:font-weight="normal" officeooo:rsid="0015544e" style:font-weight-asian="normal" style:font-weight-complex="normal"/>
+ </style:style>
+ <style:style style:name="T8" style:family="text">
+ <style:text-properties fo:font-weight="normal" officeooo:rsid="0016fdaa" style:font-weight-asian="normal" style:font-weight-complex="normal"/>
+ </style:style>
+ <style:style style:name="T9" style:family="text">
+ <style:text-properties officeooo:rsid="00137669"/>
+ </style:style>
+ <style:style style:name="T10" style:family="text">
+ <style:text-properties officeooo:rsid="001483d1"/>
+ </style:style>
+ <style:style style:name="T11" style:family="text">
+ <style:text-properties style:font-name="DejaVu Sans" officeooo:rsid="000ccaed"/>
+ </style:style>
+ <style:style style:name="T12" style:family="text">
+ <style:text-properties fo:color="#000000" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="normal" officeooo:rsid="0010bcb2" style:font-style-asian="normal" style:font-weight-asian="normal" style:font-style-complex="normal" style:font-weight-complex="normal" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="T13" style:family="text">
+ <style:text-properties fo:color="#000000" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="normal" officeooo:rsid="0015544e" style:font-style-asian="normal" style:font-weight-asian="normal" style:font-style-complex="normal" style:font-weight-complex="normal" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="T14" style:family="text">
+ <style:text-properties fo:color="#000000" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="normal" officeooo:rsid="001483d1" style:font-style-asian="normal" style:font-weight-asian="normal" style:font-style-complex="normal" style:font-weight-complex="normal" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="T15" style:family="text">
+ <style:text-properties officeooo:rsid="0015544e"/>
+ </style:style>
+ <style:style style:name="T16" style:family="text">
+ <style:text-properties style:use-window-font-color="true" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" style:font-name="FreeSans" fo:font-size="12pt" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="600" style:font-size-asian="12pt" style:font-style-asian="normal" style:font-weight-asian="600" style:font-size-complex="12pt" style:font-style-complex="normal" style:font-weight-complex="600" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="T17" style:family="text">
+ <style:text-properties style:use-window-font-color="true" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" style:font-name="FreeSans" fo:font-size="12pt" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="600" officeooo:rsid="001483d1" style:font-size-asian="12pt" style:font-style-asian="normal" style:font-weight-asian="600" style:font-size-complex="12pt" style:font-style-complex="normal" style:font-weight-complex="600" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="T18" style:family="text">
+ <style:text-properties style:use-window-font-color="true" style:text-outline="false" style:text-line-through-style="none" style:text-line-through-type="none" style:font-name="FreeSans" fo:font-size="12pt" fo:font-style="normal" fo:text-shadow="none" style:text-underline-style="none" fo:font-weight="600" officeooo:rsid="00195e71" style:font-size-asian="12pt" style:font-style-asian="normal" style:font-weight-asian="600" style:font-size-complex="12pt" style:font-style-complex="normal" style:font-weight-complex="600" style:text-overline-style="none" style:text-overline-color="font-color"/>
+ </style:style>
+ <style:style style:name="T19" style:family="text">
+ <style:text-properties style:font-name="Bitstream Vera Serif1" fo:font-weight="normal" officeooo:rsid="0015544e" style:font-weight-asian="normal" style:font-weight-complex="normal"/>
+ </style:style>
+ <style:style style:name="T20" style:family="text">
+ <style:text-properties style:font-name="Bitstream Vera Serif1" fo:font-weight="bold" officeooo:rsid="0015544e" style:font-weight-asian="bold" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="T21" style:family="text">
+ <style:text-properties officeooo:rsid="0016fdaa"/>
+ </style:style>
+ <style:style style:name="T22" style:family="text">
+ <style:text-properties fo:font-style="italic" style:font-style-asian="italic" style:font-style-complex="italic"/>
+ </style:style>
+ <style:style style:name="T23" style:family="text">
+ <style:text-properties fo:font-style="italic" fo:font-weight="normal" officeooo:rsid="0015544e" style:font-style-asian="italic" style:font-weight-asian="normal" style:font-style-complex="italic" style:font-weight-complex="normal"/>
+ </style:style>
+ <style:style style:name="T24" style:family="text">
+ <style:text-properties officeooo:rsid="00180262"/>
+ </style:style>
+ <style:style style:name="T25" style:family="text">
+ <style:text-properties fo:font-style="normal" style:font-style-asian="normal" style:font-style-complex="normal"/>
+ </style:style>
+ <style:style style:name="T26" style:family="text">
+ <style:text-properties fo:font-style="normal" fo:font-weight="bold" style:font-style-asian="normal" style:font-weight-asian="bold" style:font-style-complex="normal" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="T27" style:family="text">
+ <style:text-properties fo:font-style="normal" fo:font-weight="bold" officeooo:rsid="00180262" style:font-style-asian="normal" style:font-weight-asian="bold" style:font-style-complex="normal" style:font-weight-complex="bold"/>
+ </style:style>
+ <style:style style:name="T28" style:family="text">
+ <style:text-properties officeooo:rsid="00195e71"/>
+ </style:style>
+ <style:page-layout style:name="pm1">
+ <style:page-layout-properties fo:page-width="8.2701in" fo:page-height="11.6902in" style:num-format="1" style:print-orientation="portrait" fo:margin-top="0.7874in" fo:margin-bottom="0.7874in" fo:margin-left="0.7874in" fo:margin-right="0.7874in" style:writing-mode="lr-tb" style:layout-grid-color="#c0c0c0" style:layout-grid-lines="20" style:layout-grid-base-height="0.278in" style:layout-grid-ruby-height="0.139in" style:layout-grid-mode="none" style:layout-grid-ruby-below="false" style:layout-grid-print="false" style:layout-grid-display="false" style:footnote-max-height="0in">
+ <style:columns fo:column-count="1" fo:column-gap="0in"/>
+ <style:footnote-sep style:width="0.0071in" style:distance-before-sep="0.0398in" style:distance-after-sep="0.0398in" style:line-style="solid" style:adjustment="left" style:rel-width="25%" style:color="#000000"/>
+ </style:page-layout-properties>
+ <style:header-style/>
+ <style:footer-style/>
+ </style:page-layout>
+ <number:text-style style:name="N10100" number:language="en" number:country="US">
+ <number:text-content/>
+ </number:text-style>
+ </office:automatic-styles>
+ <office:master-styles>
+ <style:master-page style:name="Standard" style:page-layout-name="pm1"/>
+ </office:master-styles>
+ <office:body>
+ <office:text>
+ <office:forms form:automatic-focus="false" form:apply-design-mode="false"/>
+ <text:sequence-decls>
+ <text:sequence-decl text:display-outline-level="0" text:name="Illustration"/>
+ <text:sequence-decl text:display-outline-level="0" text:name="Table"/>
+ <text:sequence-decl text:display-outline-level="0" text:name="Text"/>
+ <text:sequence-decl text:display-outline-level="0" text:name="Drawing"/>
+ </text:sequence-decls>
+ <table:table table:name="Table7" table:style-name="Table7">
+ <table:table-column table:style-name="Table7.A"/>
+ <table:table-row table:style-name="Table7.1">
+ <table:table-cell table:style-name="Table7.A1" office:value-type="string">
+          <text:p text:style-name="P15">GNU Parallel Cheat Sheet</text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table7.2">
+ <table:table-cell table:style-name="Table7.A2" office:value-type="float" office:value="0">
+ <text:p text:style-name="P6">GNU Parallel is a replacement for <text:span text:style-name="Source_20_Text"><text:span text:style-name="T22">xargs</text:span></text:span> and <text:span text:style-name="Source_20_Text"><text:span text:style-name="T22">for</text:span></text:span> loops. It can also split a file or a stream into blocks and pass those to commands <text:span text:style-name="T15">running in parallel.</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ </table:table>
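+    <!-- Editorial sketch (annotation only, not rendered in the sheet): the
+         cell above says GNU Parallel replaces xargs and for loops. A minimal
+         shell illustration of that claim, assuming some *.html files with
+         simple names exist in the current directory:
+           for f in *.html; do gzip "$f"; done   # serial for loop
+           printf '%s\n' *.html | xargs gzip     # xargs, still one process at a time
+           parallel gzip ::: *.html              # same work, one job per CPU thread
+    -->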
+ <text:p text:style-name="P2"/>
+ <table:table table:name="Table8" table:style-name="Table8">
+ <table:table-column table:style-name="Table8.A"/>
+ <table:table-row table:style-name="Table8.1">
+ <table:table-cell table:style-name="Table8.A1" office:value-type="string">
+ <text:p text:style-name="P13">Example<text:span text:style-name="T10">s</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table8.2">
+ <table:table-cell table:style-name="Table8.A2" office:value-type="float" office:value="0">
+          <text:p text:style-name="P12"><text:span text:style-name="T1">Compress all *.html files – 2 </text:span><text:span text:style-name="T3">jobs </text:span><text:span text:style-name="T1">per CPU thread </text:span><text:span text:style-name="T3">in parallel</text:span><text:line-break/><text:span text:style-name="Source_20_Text"><text:span text:style-name="T12">parallel --</text:span></text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T13">jobs 200% </text:span></text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T12">gzip ::: *.html</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table8.3">
+ <table:table-cell table:style-name="Table8.A2" office:value-type="float" office:value="0">
+ <text:p text:style-name="P23"><text:span text:style-name="T1">Convert all *.wav to *.mp3 using </text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T22">lame</text:span></text:span><text:span text:style-name="T1"> – </text:span><text:span text:style-name="T3">1</text:span><text:span text:style-name="T1"> job per CPU thread in parallel </text:span><text:span text:style-name="T3">(default)</text:span><text:line-break/><text:span text:style-name="Source_20_Text"><text:span text:style-name="T12">parallel lame {} -o {.}.mp3 ::: *.wav</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table8.4">
+ <table:table-cell table:style-name="Table8.A2" office:value-type="float" office:value="0">
+ <text:p text:style-name="P12"><text:span text:style-name="T1">Chop bigfile into 1MB blocks and grep for the string foobar</text:span><text:line-break/><text:span text:style-name="Source_20_Text"><text:span text:style-name="T14">cat bigfile | </text:span></text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T12">parallel --pipe grep foobar</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ </table:table>
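+    <!-- Editorial sketch: the last example above relies on defaults. Spelled
+         out (1 MB is the documented default block size for pipe mode):
+           cat bigfile | parallel --pipe --block 1M grep foobar
+         parallel splits stdin at line boundaries within each block, so no
+         line is cut in half before it reaches grep.
+    -->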
+ <text:p text:style-name="P2"/>
+ <table:table table:name="Table9" table:style-name="Table9">
+ <table:table-column table:style-name="Table9.A"/>
+ <table:table-row table:style-name="Table9.1">
+ <table:table-cell table:style-name="Table9.A1" office:value-type="string">
+ <text:p text:style-name="P13">Input sources</text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table9.2">
+ <table:table-cell table:style-name="Table9.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T5">parallel echo ::: cmd line input source</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table9.3">
+ <table:table-cell table:style-name="Table9.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T5">cat input_from_stdin | parallel echo</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table9.4">
+ <table:table-cell table:style-name="Table9.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T5">parallel echo ::: multiple input source</text:span></text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T8">s ::: with </text:span></text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T5">values</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table9.5">
+ <table:table-cell table:style-name="Table9.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T5">parallel -a input_from_file echo</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table9.6">
+ <table:table-cell table:style-name="Table9.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T5">parallel echo :::: input_from_file</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table9.7">
+ <table:table-cell table:style-name="Table9.A2" office:value-type="string">
+ <text:p text:style-name="P10"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T5">parallel echo :::: input_from_file ::: and command line</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ </table:table>
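+    <!-- Editorial sketch: multiple input sources generate one job per
+         combination of values (a cross product):
+           parallel echo {1}-{2} ::: a b ::: 1 2
+         prints a-1, a-2, b-1, b-2 (four jobs; output order may vary,
+         since each job's output is printed as it finishes).
+    -->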
+ <text:p text:style-name="P2"/>
+ <table:table table:name="Table10" table:style-name="Table10">
+ <table:table-column table:style-name="Table10.A"/>
+ <table:table-column table:style-name="Table10.B"/>
+ <table:table-row table:style-name="Table10.1">
+ <table:table-cell table:style-name="Table10.A1" office:value-type="string">
+ <text:p text:style-name="P13">Replacement string</text:p>
+ </table:table-cell>
+ <table:table-cell table:style-name="Table10.B1" office:value-type="string">
+ <text:p text:style-name="P26"><text:span text:style-name="T16">Value </text:span><text:span text:style-name="T18">if input is mydir/mysubdir/myfile.myext</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table10.2">
+ <table:table-cell table:style-name="Table10.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text">{}</text:span></text:p>
+ </table:table-cell>
+ <table:table-cell table:style-name="Table10.B2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text">mydir/mysubdir/myfile.myext</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table10.3">
+ <table:table-cell table:style-name="Table10.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text">{.}</text:span></text:p>
+ </table:table-cell>
+ <table:table-cell table:style-name="Table10.B2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text">mydir/mysubdir/myfile</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table10.4">
+ <table:table-cell table:style-name="Table10.A2" office:value-type="string">
+ <text:p text:style-name="P18"><text:span text:style-name="Source_20_Text">{/}, {//}, </text:span><text:span text:style-name="Source_20_Text">{/.}</text:span></text:p>
+ </table:table-cell>
+ <table:table-cell table:style-name="Table10.B2" office:value-type="string">
+ <text:p text:style-name="P18"><text:span text:style-name="Source_20_Text">myfile.myext, mydir/mysubdir, </text:span><text:span text:style-name="Source_20_Text">myfile</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table10.5">
+ <table:table-cell table:style-name="Table10.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text">{#}</text:span></text:p>
+ </table:table-cell>
+ <table:table-cell table:style-name="Table10.B2" office:value-type="string">
+ <text:p text:style-name="P8"><text:span text:style-name="T4">T</text:span>he sequence number of the job</text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table10.6">
+ <table:table-cell table:style-name="Table10.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text">{%}</text:span></text:p>
+ </table:table-cell>
+ <table:table-cell table:style-name="Table10.B2" office:value-type="string">
+ <text:p text:style-name="P8"><text:span text:style-name="T4">T</text:span>he job slot number</text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table10.7">
+ <table:table-cell table:style-name="Table10.A2" office:value-type="string">
+ <text:p text:style-name="P5"><text:span text:style-name="Source_20_Text">{2}</text:span></text:p>
+ </table:table-cell>
+ <table:table-cell table:style-name="Table10.B2" office:value-type="string">
+ <text:p text:style-name="P7"><text:span text:style-name="T21">Value from the s</text:span>econd input source</text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table10.8">
+ <table:table-cell table:style-name="Table10.A2" office:value-type="string">
+ <text:p text:style-name="P5"><text:span text:style-name="Source_20_Text">{2.} </text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T21">{2/} {2//} {2/.}</text:span></text:span></text:p>
+ </table:table-cell>
+ <table:table-cell table:style-name="Table10.B2" office:value-type="string">
+ <text:p text:style-name="P7">Comb<text:span text:style-name="T21">ination </text:span>of {2} and {.} <text:span text:style-name="T21">{/} {//} {/.}</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table10.9">
+ <table:table-cell table:style-name="Table10.A2" office:value-type="string">
+ <text:p text:style-name="P9"><text:span text:style-name="Source_20_Text">{= perl </text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T15">expression</text:span></text:span><text:span text:style-name="Source_20_Text"> =}</text:span></text:p>
+ </table:table-cell>
+ <table:table-cell table:style-name="Table10.B2" office:value-type="string">
+ <text:p text:style-name="P7">Change $_ with perl expression</text:p>
+ </table:table-cell>
+ </table:table-row>
+ </table:table>
+ <text:p text:style-name="P2"/>
+ <table:table table:name="Table11" table:style-name="Table11">
+ <table:table-column table:style-name="Table11.A"/>
+ <table:table-row table:style-name="Table11.1">
+ <table:table-cell table:style-name="Table11.A1" office:value-type="string">
+ <text:p text:style-name="P13">Control the output – <text:span text:style-name="T10">keep the same order as the input, prepend with input value</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table11.2">
+ <table:table-cell table:style-name="Table11.A2" office:value-type="string">
+ <text:p text:style-name="P18"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T10">parallel --keep-order --tag &quot;sleep {}; echo {}&quot; ::: 5 4 3 2 1</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ </table:table>
+ <text:p text:style-name="P2"/>
+ <table:table table:name="Table12" table:style-name="Table12">
+ <table:table-column table:style-name="Table12.A"/>
+ <table:table-row table:style-name="Table12.1">
+ <table:table-cell table:style-name="Table12.A1" office:value-type="string">
+ <text:p text:style-name="P13">Control the execution</text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table12.3">
+ <table:table-cell table:style-name="Table12.A2" office:value-type="string">
+ <text:p text:style-name="P20">Run 2 jobs in parallel – <text:span text:style-name="T24">command is a composed command</text:span></text:p>
+ <text:p text:style-name="P18"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T10">parallel --jobs 2 &quot;sleep {}; echo {}&quot; ::: 5 4 3 2 1</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table12.3">
+ <table:table-cell table:style-name="Table12.A2" office:value-type="string">
+ <text:p text:style-name="P20">See what will be run</text:p>
+ <text:p text:style-name="P22"><text:span text:style-name="Source_20_Text">parallel --dryrun echo </text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T28">{2} {1} </text:span></text:span><text:span text:style-name="Source_20_Text">::: </text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T28">bird flower fish ::: </text:span></text:span><text:span text:style-name="Source_20_Text">Red Green Blue</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ </table:table>
+ <text:p text:style-name="P2"/>
+ <table:table table:name="Table13" table:style-name="Table13">
+ <table:table-column table:style-name="Table13.A"/>
+ <table:table-row table:style-name="Table13.1">
+ <table:table-cell table:style-name="Table13.A1" office:value-type="string">
+ <text:p text:style-name="P24">Remote execution</text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table13.2">
+ <table:table-cell table:style-name="Table13.A2" office:value-type="string">
+ <text:p text:style-name="P18"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T10">parallel -S server1 -S server2 &quot;hostname; echo {}&quot; ::: foo bar </text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ </table:table>
+ <text:p text:style-name="P2"/>
+ <table:table table:name="Table1" table:style-name="Table1">
+ <table:table-column table:style-name="Table1.A"/>
+ <table:table-row table:style-name="Table1.1">
+ <table:table-cell table:style-name="Table1.A1" office:value-type="string">
+ <text:p text:style-name="P24">Pipe mode</text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table1.2">
+ <table:table-cell table:style-name="Table1.A2" office:value-type="string">
+ <text:p text:style-name="P11"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T10">cat bigfile | parallel --pipe wc -l</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table1.3">
+ <table:table-cell table:style-name="Table1.A2" office:value-type="string">
+ <text:p text:style-name="P4"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T20">Chop bigfile into one block per CPU thread and </text:span></text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T23">grep</text:span></text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T20"> for foobar</text:span></text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T5"><text:line-break/>parallel -a bigfile --pipepart --block -1 grep foobar</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ </table:table>
+ <text:p text:style-name="P2"/>
+ <table:table table:name="Table2" table:style-name="Table2">
+ <table:table-column table:style-name="Table2.A"/>
+ <table:table-row table:style-name="Table2.1">
+ <table:table-cell table:style-name="Table2.A1" office:value-type="string">
+ <text:p text:style-name="P25">Read more – <text:span text:style-name="T21">Your command line will love you for it</text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table2.3">
+ <table:table-cell table:style-name="Table2.A2" office:value-type="string">
+ <text:p text:style-name="P21"><text:span text:style-name="Source_20_Text"><text:span text:style-name="T21">parallel --help; <text:s/></text:span></text:span><text:span text:style-name="Source_20_Text">man parallel; <text:s/></text:span><text:span text:style-name="Source_20_Text"><text:span text:style-name="T21">man parallel_tutorial; <text:s/>www.pi.dk/1</text:span></text:span></text:p>
+ </table:table-cell>
+ </table:table-row>
+ <table:table-row table:style-name="Table2.3">
+ <table:table-cell table:style-name="Table2.A2" office:value-type="string">
+ <text:p text:style-name="P6"><text:span text:style-name="T15">GNU Parallel 2018 </text:span>https://doi.org/10.5281/zenodo.1146014</text:p>
+ </table:table-cell>
+ </table:table-row>
+ </table:table>
+ <text:p text:style-name="P17">(CC-By-SA) 2019-03-<text:span text:style-name="T28">11</text:span> Ole Tange</text:p>
+ <text:p text:style-name="P1"/>
+ </office:text>
+ </office:body>
+</office:document> \ No newline at end of file
diff --git a/src/parallel_design.pod b/src/parallel_design.pod
new file mode 100644
index 0000000..0ba2502
--- /dev/null
+++ b/src/parallel_design.pod
@@ -0,0 +1,1480 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=encoding utf8
+
+
+=head1 Design of GNU Parallel
+
+This document describes design decisions made in the development of
+GNU B<parallel> and the reasoning behind them. It will give an
+overview of why some of the code looks the way it does, and will help
+new maintainers understand the code better.
+
+
+=head2 One file program
+
+GNU B<parallel> is a Perl script in a single file. It is object
+oriented, but contrary to normal Perl scripts each class is not in its
+own file. This is due to user experience: The goal is that in a pinch
+the user will be able to get GNU B<parallel> working simply by copying
+a single file: No need to mess around with environment variables like
+PERL5LIB.
+
+
+=head2 Choice of programming language
+
+GNU B<parallel> is designed to be able to run on old systems. That
+means that it cannot depend on a compiler being installed - and
+especially not a compiler for a language that is younger than 20 years
+old.
+
+The goal is that you can use GNU B<parallel> on any system, even if
+you are not allowed to install additional software.
+
+Of all the systems I have experienced, I have yet to see a system that
+had GCC installed that did not have Perl. The same goes for Rust, Go,
+Haskell, and other younger languages. I have, however, seen systems
+with Perl without any of the mentioned compilers.
+
+Most modern systems also have either Python2 or Python3 installed, but
+you still cannot be certain which version, and since Python2 code
+cannot run under Python3, Python is not an option.
+
+Perl has the added benefit that implementing the {= perlexpr =}
+replacement string was fairly easy.
+
+The primary drawback is that Perl is slow. So there is an overhead of
+3-10 ms/job and 1 ms/MB output (and even more if you use B<--tag>).
+
+
+=head2 Old Perl style
+
+GNU B<parallel> uses some old, deprecated constructs. This is due to a
+goal of being able to run on old installations. Currently the target
+is CentOS 3.9 and Perl 5.8.0.
+
+
+=head2 Scalability up and down
+
+The smallest system GNU B<parallel> is tested on is a 32 MB ASUS
+WL500gP. The largest is a 128-core machine with 2 TB RAM. It scales up to
+around 100 machines - depending on the duration of each job.
+
+
+=head2 Exponential back-off
+
+GNU B<parallel> busy waits. This is because the reason a job is not
+started may be the load average (when using B<--load>), and thus it
+does not make sense to just wait for a job to finish. Instead the
+load average must be rechecked regularly. Load average is not the
+only reason: B<--timeout> has a similar problem.
+
+To not burn up too much CPU GNU B<parallel> sleeps exponentially
+longer and longer if nothing happens, maxing out at 1 second.
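+
+The back-off itself is a one-liner (the same pattern appears in the
+remote system wrapper shown later):
+
+  # Exponential sleep, capping at around 1 second
+  $s = $s < 1 ? 0.001 + $s * 1.03 : $s;
+  select(undef, undef, undef, $s);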
+
+
+=head2 Shell compatibility
+
+It is a goal to have GNU B<parallel> work equally well in any
+shell. However, in practice GNU B<parallel> is being developed in
+B<bash> and thus testing in other shells is limited to reported bugs.
+
+When an incompatibility is found there is often not an easy fix:
+Fixing the problem in B<csh> often breaks it in B<bash>. In these
+cases the fix is often to use a small Perl script and call that.
+
+
+=head2 env_parallel
+
+B<env_parallel> is a dummy shell script that runs if B<env_parallel>
+is not an alias or a function, and it tells the user how to activate
+the alias/function for the supported shells.
+
+The alias or function will copy the current environment and run the
+command with GNU B<parallel> in the copy of the environment.
+
+The problem is that you cannot access all of the current environment
+inside Perl. E.g. aliases, functions and unexported shell variables.
+
+The idea is therefore to take the environment and put it in
+B<$PARALLEL_ENV> which GNU B<parallel> prepends to every command.
+
+The only way to have access to the environment is directly from the
+shell, so the program must be written as a shell script that will be
+sourced, and it has to deal with the dialect of the relevant shell.
+
+
+=head3 env_parallel.*
+
+These are the files that implement the alias or function
+B<env_parallel> for a given shell. It could be argued that these
+should be put in some obscure place under /usr/lib, but by putting
+them in your path it becomes trivial to find the path to them and
+B<source> them:
+
+ source `which env_parallel.foo`
+
+The beauty is that they can be put anywhere in the path without the
+user having to know the location. So if the user's path includes
+/afs/bin/i386_fc5 or /usr/pkg/parallel/bin or
+/usr/local/parallel/20161222/sunos5.6/bin the files can be put in the
+dir that makes most sense for the sysadmin.
+
+
+=head3 env_parallel.bash / env_parallel.sh / env_parallel.ash /
+env_parallel.dash / env_parallel.zsh / env_parallel.ksh /
+env_parallel.mksh
+
+B<env_parallel.(bash|sh|ash|dash|ksh|mksh|zsh)> defines the function
+B<env_parallel>. It uses B<alias> and B<typeset> to dump the
+configuration (with a few exceptions) into B<$PARALLEL_ENV> before
+running GNU B<parallel>.
+
+After GNU B<parallel> is finished, B<$PARALLEL_ENV> is deleted.
+
+
+=head3 env_parallel.csh
+
+B<env_parallel.csh> has two purposes: If B<env_parallel> is not an
+alias: make it into an alias that sets B<$PARALLEL> with arguments
+and calls B<env_parallel.csh>.
+
+If B<env_parallel> is an alias, then B<env_parallel.csh> uses
+B<$PARALLEL> as the arguments for GNU B<parallel>.
+
+It exports the environment by writing a variable definition to a file
+for each variable. The definitions of aliases are appended to this
+file. Finally the file is put into B<$PARALLEL_ENV>.
+
+GNU B<parallel> is then run and B<$PARALLEL_ENV> is deleted.
+
+
+=head3 env_parallel.fish
+
+First all function definitions are generated using a loop and
+B<functions>.
+
+Dumping the scalar variable definitions is harder.
+
+B<fish> can represent non-printable characters in (at least) 2
+ways. To avoid problems all scalars are converted to \XX quoting.
+
+Then commands to generate the definitions are made and separated by
+NUL.
+
+This is then piped into a Perl script that quotes all values. List
+elements will be appended using two spaces.
+
+Finally \n is converted into \1 because B<fish> variables cannot
+contain \n. GNU B<parallel> will later convert all \1 from
+B<$PARALLEL_ENV> into \n.
+
+This is then all saved in B<$PARALLEL_ENV>.
+
+GNU B<parallel> is called, and B<$PARALLEL_ENV> is deleted.
+
+
+=head2 parset (supported in sh, ash, dash, bash, zsh, ksh, mksh)
+
+B<parset> is a shell function. This is the reason why B<parset> can
+set variables: It runs in the shell which is calling it.
+
+It is also the reason why B<parset> does not work, when data is piped
+into it: B<... | parset ...> makes B<parset> start in a subshell, and
+any changes in environment can therefore not make it back to the
+calling shell.
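+
+For example (B<a>, B<b>, and B<c> are just example variable names):
+
+  # Works: parset runs as a function in the current shell
+  parset a,b,c echo ::: 1 2 3
+  echo "$a $b $c"
+
+  # Fails: the pipe starts parset in a subshell, so the variables
+  # never reach the calling shell
+  seq 3 | parset a,b,c echo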
+
+
+=head2 Job slots
+
+The easiest way to explain what GNU B<parallel> does is to assume that
+there are a number of job slots, and when a slot becomes available a
+job from the queue will be run in that slot. But originally GNU
+B<parallel> did not model job slots in the code. Job slots have been
+added to make it possible to use B<{%}> as a replacement string.
+
+While the job sequence number can be computed in advance, the job slot
+can only be computed the moment a slot becomes available. So it has
+been implemented as a stack with lazy evaluation: Draw one from an
+empty stack and the stack is extended by one. When a job is done, push
+the available job slot back on the stack.
+
+This implementation also means that if you re-run the same jobs, you
+cannot assume jobs will get the same slots. And if you use remote
+executions, you cannot assume that a given job slot will remain on the
+same remote server. This goes double since the number of job slots can be
+adjusted on the fly (by giving B<--jobs> a file name).
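+
+A minimal sketch of the lazy stack (illustrative, not the actual
+code):
+
+  my @free_slots;       # job slots pushed back by finished jobs
+  my $max_slot = 0;     # highest slot handed out so far
+  sub get_slot {
+      # Drawing from an empty stack extends the stack by one
+      return @free_slots ? pop @free_slots : ++$max_slot;
+  }
+  sub release_slot {
+      # When a job is done, push the freed slot back
+      push @free_slots, shift;
+  }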
+
+
+=head2 Rsync protocol version
+
+B<rsync> 3.1.x uses protocol 31 which is unsupported by version
+2.5.7. That means that you cannot push a file to a remote system using
+B<rsync> protocol 31, if the remote system uses 2.5.7. B<rsync> does
+not automatically downgrade to protocol 30.
+
+GNU B<parallel> does not require protocol 31, so if the B<rsync>
+version is >= 3.1.0 then B<--protocol 30> is added to force newer
+B<rsync>s to talk to version 2.5.7.
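+
+A sketch of the version test (the real detection is more careful):
+
+  my ($ver) = `rsync --version` =~ /version (\d+\.\d+)/;
+  # Force protocol 30 so rsync >= 3.1.0 can talk to rsync 2.5.7
+  my @protocol_opt = $ver >= 3.1 ? ("--protocol", "30") : ();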
+
+
+=head2 Compression
+
+GNU B<parallel> buffers output in temporary files. B<--compress>
+compresses the buffered data. This is a bit tricky because there
+should be no files to clean up if GNU B<parallel> is killed by a power
+outage.
+
+GNU B<parallel> first selects a compression program. If the user has
+not selected one, the first of these that is in $PATH is used: B<pzstd
+lbzip2 pbzip2 zstd pixz lz4 pigz lzop plzip lzip gzip lrz pxz bzip2
+lzma xz clzip>. They are sorted by speed on a 128 core machine.
+
+Schematically the setup is as follows:
+
+ command started by parallel | compress > tmpfile
+ cattail tmpfile | uncompress | parallel which reads the output
+
+The setup is duplicated for both standard output (stdout) and standard
+error (stderr).
+
+GNU B<parallel> pipes output from the command run into the compression
+program which saves to a tmpfile. GNU B<parallel> records the pid of
+the compress program. At the same time a small Perl script (called
+B<cattail> above) is started: It basically does B<cat> followed by
+B<tail -f>, but it also removes the tmpfile as soon as the first byte
+is read, and it continuously checks if the pid of the compression
+program is dead. If the compress program is dead, B<cattail> reads the
+rest of tmpfile and exits.
+
+As most compression programs write out a header when they start, the
+tmpfile in practice is removed by B<cattail> after around 40 ms.
+
+More detailed it works like this:
+
+ bash ( command ) |
+ sh ( emptywrapper ( bash ( compound compress ) ) >tmpfile )
+ cattail ( rm tmpfile; compound decompress ) < tmpfile
+
+This complex setup is to make sure the compress program is only started if
+there is input. This means each job will cause 8 processes to run. If
+combined with B<--keep-order> these processes will run until the job
+has been printed.
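+
+A stripped-down sketch of B<cattail> (the real script handles more
+bookkeeping):
+
+  my ($compress_pid, $tmpfile) = @ARGV;
+  open(my $fh, "<", $tmpfile) || die;
+  my $removed = 0;
+  while(1) {
+      while(sysread($fh, my $buf, 131072)) {
+          # Remove the tmpfile as soon as the first byte is read
+          $removed ||= unlink $tmpfile;
+          syswrite(STDOUT, $buf);
+      }
+      # Compress program dead: read the rest and exit
+      last if not kill(0, $compress_pid);
+      select(undef, undef, undef, 0.01);   # tail -f style wait
+  }
+  while(sysread($fh, my $buf, 131072)) { syswrite(STDOUT, $buf) }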
+
+
+=head2 Wrapping
+
+The command given by the user can be wrapped in multiple
+templates. Templates can be wrapped in other templates.
+
+
+
+=over 15
+
+=item B<$COMMAND>
+
+the command to run.
+
+
+=item B<$INPUT>
+
+the input to run.
+
+
+=item B<$SHELL>
+
+the shell that started GNU Parallel.
+
+
+=item B<$SSHLOGIN>
+
+the sshlogin.
+
+
+=item B<$WORKDIR>
+
+the working dir.
+
+
+=item B<$FILE>
+
+the file to read parts from.
+
+
+=item B<$STARTPOS>
+
+the first byte position to read from B<$FILE>.
+
+
+=item B<$LENGTH>
+
+the number of bytes to read from B<$FILE>.
+
+
+=item --shellquote
+
+echo I<Double quoted $INPUT>
+
+
+=item --nice I<pri>
+
+Remote: See B<The remote system wrapper>.
+
+Local: B<setpriority(0,0,$nice)>
+
+=item --cat
+
+ cat > {}; $COMMAND {};
+ perl -e '$bash = shift;
+ $csh = shift;
+ for(@ARGV) { unlink;rmdir; }
+ if($bash =~ s/h//) { exit $bash; }
+ exit $csh;' "$?h" "$status" {};
+
+{} is set to B<$PARALLEL_TMP> which is a tmpfile. The Perl script
+saves the exit value, unlinks the tmpfile, and returns the exit value
+- no matter if the shell is B<bash>/B<ksh>/B<zsh> (using $?) or
+B<*csh>/B<fish> (using $status).
+
+=item --fifo
+
+ perl -e '($s,$c,$f) = @ARGV;
+ # mkfifo $PARALLEL_TMP
+ system "mkfifo", $f;
+ # spawn $shell -c $command &
+ $pid = fork || exec $s, "-c", $c;
+ open($o,">",$f) || die $!;
+ # cat > $PARALLEL_TMP
+ while(sysread(STDIN,$buf,131072)){
+ syswrite $o, $buf;
+ }
+ close $o;
+ # waitpid to get the exit code from $command
+ waitpid $pid,0;
+ # Cleanup
+ unlink $f;
+ exit $?/256;' $SHELL -c $COMMAND $PARALLEL_TMP
+
+This is an elaborate way of: mkfifo {}; run B<$COMMAND> in the
+background using B<$SHELL>; copying STDIN to {}; waiting for background
+to complete; remove {} and exit with the exit code from B<$COMMAND>.
+
+It is made this way to be compatible with B<*csh>/B<fish>.
+
+=item --pipepart
+
+
+ < $FILE perl -e 'while(@ARGV) {
+ sysseek(STDIN,shift,0) || die;
+ $left = shift;
+ while($read =
+ sysread(STDIN,$buf,
+ ($left > 131072 ? 131072 : $left))){
+ $left -= $read;
+ syswrite(STDOUT,$buf);
+ }
+ }' $STARTPOS $LENGTH
+
+This will read B<$LENGTH> bytes from B<$FILE> starting at B<$STARTPOS>
+and send it to STDOUT.
+
+=item --sshlogin $SSHLOGIN
+
+ ssh $SSHLOGIN "$COMMAND"
+
+=item --transfer
+
+ ssh $SSHLOGIN mkdir -p ./$WORKDIR;
+ rsync --protocol 30 -rlDzR \
+ -essh ./{} $SSHLOGIN:./$WORKDIR;
+ ssh $SSHLOGIN "$COMMAND"
+
+Read about B<--protocol 30> in the section B<Rsync protocol version>.
+
+=item --transferfile I<file>
+
+<<todo>>
+
+=item --basefile
+
+<<todo>>
+
+=item --return I<file>
+
+ $COMMAND; _EXIT_status=$?; mkdir -p $WORKDIR;
+ rsync --protocol 30 \
+ --rsync-path=cd\ ./$WORKDIR\;\ rsync \
+ -rlDzR -essh $SSHLOGIN:./$FILE ./$WORKDIR;
+ exit $_EXIT_status;
+
+The B<--rsync-path=cd ...> is needed because old versions of B<rsync>
+do not support B<--no-implied-dirs>.
+
+The B<$_EXIT_status> trick is to postpone the exit value. This makes it
+incompatible with B<*csh> and should be fixed in the future. Maybe a
+wrapping 'sh -c' is enough?
+
+=item --cleanup
+
+$RETURN is the wrapper from B<--return>
+
+ $COMMAND; _EXIT_status=$?; $RETURN;
+ ssh $SSHLOGIN \(rm\ -f\ ./$WORKDIR/{}\;\
+ rmdir\ ./$WORKDIR\ \>\&/dev/null\;\);
+ exit $_EXIT_status;
+
+B<$_EXIT_status>: see B<--return> above.
+
+
+=item --pipe
+
+ perl -e 'if(sysread(STDIN, $buf, 1)) {
+ open($fh, "|-", "@ARGV") || die;
+ syswrite($fh, $buf);
+ # Align up to 128k block
+ if($read = sysread(STDIN, $buf, 131071)) {
+ syswrite($fh, $buf);
+ }
+ while($read = sysread(STDIN, $buf, 131072)) {
+ syswrite($fh, $buf);
+ }
+ close $fh;
+ exit ($?&127 ? 128+($?&127) : 1+$?>>8)
+ }' $SHELL -c $COMMAND
+
+This small wrapper makes sure that B<$COMMAND> will never be run if
+there is no data.
+
+=item --tmux
+
+<<TODO Fixup with '-quoting>>
+  mkfifo /tmp/tmx3cMEV &&
+  sh -c 'tmux -S /tmp/tmsaKpv1 new-session -s p334310 -d "sleep .2" >/dev/null 2>&1';
+  tmux -S /tmp/tmsaKpv1 new-window -t p334310 -n wc\ 10 \(wc\ 10\)\;\ perl\ -e\ \'while\(\$t++\<3\)\{\ print\ \$ARGV\[0\],\"\\n\"\ \}\'\ \$\?h/\$status\ \>\>\ /tmp/tmx3cMEV\&echo\ wc\\\ 10\;\ echo\ \Job\ finished\ at:\ \`date\`\;sleep\ 10;
+  exec perl -e '$/="/";$_=<>;$c=<>;unlink $ARGV; /(\d+)h/ and exit($1);exit$c' /tmp/tmx3cMEV
+
+
+mkfifo I<tmpfile.tmx>;
+tmux -S <tmpfile.tms> new-session -s pI<PID> -d 'sleep .2' >&/dev/null;
+tmux -S <tmpfile.tms> new-window -t pI<PID> -n <<shell quoted input>> \(<<shell quoted input>>\)\;\ perl\ -e\ \'while\(\$t++\<3\)\{\ print\ \$ARGV\[0\],\"\\n\"\ \}\'\ \$\?h/\$status\ \>\>\ I<tmpfile.tmx>\&echo\ <<shell double quoted input>>\;echo\ \Job\ finished\ at:\ \`date\`\;sleep\ 10;
+exec perl -e '$/="/";$_=<>;$c=<>;unlink $ARGV; /(\d+)h/ and exit($1);exit$c' I<tmpfile.tmx>
+
+First a FIFO is made (.tmx). It is used for communicating exit
+value. Next a new tmux session is made. This may fail if there is
+already a session, so the output is ignored. If all job slots finish
+at the same time, then B<tmux> will close the session. A temporary
+socket is made (.tms) to avoid a race condition in B<tmux>. It is
+cleaned up when GNU B<parallel> finishes.
+
+The input is used as the name of the windows in B<tmux>. When the job
+inside B<tmux> finishes, the exit value is printed to the FIFO (.tmx).
+This FIFO is opened by B<perl> outside B<tmux>, and B<perl> then
+removes the FIFO. B<Perl> blocks until the first value is read from
+the FIFO, and this value is used as exit value.
+
+To make it compatible with B<csh> and B<bash> the exit value is
+printed as: $?h/$status and this is parsed by B<perl>.
+
+There is a bug that makes it necessary to print the exit value 3
+times.
+
+Another bug in B<tmux> requires the length of the tmux title and
+command to stay outside certain limits. When inside these limits, 75
+'\ ' are added to the title to force it outside the limits.
+
+You can map the bad limits using:
+
+ perl -e 'sub r { int(rand(shift)).($_[0] && "\t".r(@_)) } print map { r(@ARGV)."\n" } 1..10000' 1600 1500 90 |
+ perl -ane '$F[0]+$F[1]+$F[2] < 2037 and print ' |
+ parallel --colsep '\t' --tagstring '{1}\t{2}\t{3}' tmux -S /tmp/p{%}-'{=3 $_="O"x$_ =}' \
+ new-session -d -n '{=1 $_="O"x$_ =}' true'\ {=2 $_="O"x$_ =};echo $?;rm -f /tmp/p{%}-O*'
+
+ perl -e 'sub r { int(rand(shift)).($_[0] && "\t".r(@_)) } print map { r(@ARGV)."\n" } 1..10000' 17000 17000 90 |
+ parallel --colsep '\t' --tagstring '{1}\t{2}\t{3}' \
+ tmux -S /tmp/p{%}-'{=3 $_="O"x$_ =}' new-session -d -n '{=1 $_="O"x$_ =}' true'\ {=2 $_="O"x$_ =};echo $?;rm /tmp/p{%}-O*'
+ > value.csv 2>/dev/null
+
+ R -e 'a<-read.table("value.csv");X11();plot(a[,1],a[,2],col=a[,4]+5,cex=0.1);Sys.sleep(1000)'
+
+For B<tmux 1.8> 17000 can be lowered to 2100.
+
+The interesting areas are title 0..1000 with (title + whole command)
+in 996..1127 and 9331..9636.
+
+=back
+
+The ordering of the wrapping is important:
+
+=over 5
+
+=item *
+
+$PARALLEL_ENV which is set in env_parallel.* must be prepended to the
+command first, as the command may contain exported variables or
+functions.
+
+=item *
+
+B<--nice>/B<--cat>/B<--fifo> should be done on the remote machine
+
+=item *
+
+B<--pipepart>/B<--pipe> should be done on the local machine inside B<--tmux>
+
+=back
+
+
+=head2 Convenience options --nice --basefile --transfer --return
+--cleanup --tmux --group --compress --cat --fifo --workdir --tag
+--tagstring
+
+These are all convenience options that make it easier to do a
+task. But more importantly: They are tested to work on corner cases,
+too. Take B<--nice> as an example:
+
+ nice parallel command ...
+
+will work just fine. But when run remotely, you need to move the
+B<nice> command so it is run on the server:
+
+ parallel -S server nice command ...
+
+And this will again work just fine, as long as you are running a
+single command. When you are running a composed command you need nice
+to apply to the whole command, and it gets harder still:
+
+ parallel -S server -q nice bash -c 'command1 ...; cmd2 | cmd3'
+
+It is not impossible, but by using B<--nice> GNU B<parallel> will do
+the right thing for you. Similarly when transferring files: It starts
+to get hard when the file names contain space, :, `, *, or other
+special characters.
+
+To run the commands in a B<tmux> session you basically just need to
+quote the command. For simple commands that is easy, but when commands
+contain special characters, it gets much harder to get right.
+
+B<--compress> not only compresses standard output (stdout) but also
+standard error (stderr); and it does so into files that are open but
+deleted, so a crash will not leave these files around.
+
+B<--cat> and B<--fifo> are easy to do by hand, until you want to clean
+up the tmpfile and keep the exit code of the command.
+
+The real killer comes when you try to combine several of these: Doing
+that correctly for all corner cases is next to impossible to do by
+hand.
+
+=head2 --shard
+
+The simple way to implement sharding would be to:
+
+=over 5
+
+=item 1
+
+start n jobs,
+
+=item 2
+
+split each line into columns,
+
+=item 3
+
+select the data from the relevant column
+
+=item 4
+
+compute a hash value from the data
+
+=item 5
+
+take the modulo n of the hash value
+
+=item 6
+
+pass the full line to the jobslot that has the computed value
+
+=back
+
+Unfortunately Perl is rather slow at computing the hash value (and
+somewhat slow at splitting into columns).
+
+One solution is to use a compiled language for the splitting and
+hashing, but that would go against the design criteria of not
+depending on a compiler.
+
+Luckily those tasks can be parallelized. So GNU B<parallel> starts n
+sharders that do step 2-6, and passes blocks of 100k to each of those
+in a round robin manner. To make sure these sharders compute the hash
+the same way, $PERL_HASH_SEED is set to the same value for all sharders.
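+
+A sketch of what each sharder does per line (steps 2-6 above;
+B<B::hash> exposes Perl's internal hash, which $PERL_HASH_SEED
+controls; the variable names are illustrative):
+
+  use B ();
+  # $shard_col, $n_jobslots, and @out_fh are set up elsewhere
+  while(my $line = <STDIN>) {
+      my @col = split /\t/, $line;       # step 2: split into columns
+      my $val = $col[$shard_col];        # step 3: select the column
+      my $hash = hex(B::hash($val));     # step 4: compute hash value
+      my $slot = $hash % $n_jobslots;    # step 5: modulo n
+      print { $out_fh[$slot] } $line;    # step 6: pass the line on
+  }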
+
+Running n sharders poses a new problem: Instead of having n outputs
+(one for each computed value) you now have n outputs for each of the n
+values, so in total n*n outputs; and you need to merge these n*n
+outputs together into n outputs.
+
+This can be done by simply running 'parallel -j0 --lb cat :::
+outputs_for_one_value', but that is rather inefficient, as it spawns a
+process for each file. Instead the core code from 'parcat' is run,
+which is also a bit faster.
+
+All the sharders and parcats communicate through named pipes that are
+unlinked as soon as they are opened.
+
+
+=head2 Shell shock
+
+The shell shock bug in B<bash> did not affect GNU B<parallel>, but the
+solutions did. B<bash> first introduced functions in variables named:
+I<BASH_FUNC_myfunc()> and later changed that to
+I<BASH_FUNC_myfunc%%>. When transferring functions GNU B<parallel>
+reads off the function and changes that into a function definition,
+which is copied to the remote system and executed before the actual
+command is executed. Therefore GNU B<parallel> needs to know how to
+read the function.
+
+From version 20150122 GNU B<parallel> tries both the ()-version and
+the %%-version, and the function definition works on both pre- and
+post-shell shock versions of B<bash>.
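+
+The two variants look roughly like this in the environment (B<myfunc>
+is a hypothetical function):
+
+  BASH_FUNC_myfunc()=() { echo hello; }     # pre shell shock
+  BASH_FUNC_myfunc%%=() { echo hello; }     # post shell shock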
+
+
+=head2 The remote system wrapper
+
+The remote system wrapper does some initialization before starting the
+command on the remote system.
+
+=head3 Make quoting unnecessary by hex encoding everything
+
+When you run B<ssh server foo> then B<foo> has to be quoted once:
+
+ ssh server "echo foo; echo bar"
+
+If you run B<ssh server1 ssh server2 foo> then B<foo> has to be quoted
+twice:
+
+ ssh server1 ssh server2 \'"echo foo; echo bar"\'
+
+GNU B<parallel> avoids this by packing everything into hex values and
+running a command that does not need quoting:
+
+ perl -X -e GNU_Parallel_worker,eval+pack+q/H10000000/,join+q//,@ARGV
+
+This command reads hex from the command line and converts that to
+bytes that are then eval'ed as a Perl expression.
+
+The string B<GNU_Parallel_worker> is not needed. It is simply there to
+let the user know, that this process is GNU B<parallel> working.
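+
+A toy demonstration of the principle (not the actual worker):
+
+  # Encode a Perl snippet as hex:
+  perl -e 'print unpack "H*", q{system "echo foo"}'
+  # => 73797374656d20226563686f20666f6f22
+  # Decode and eval it - no shell quoting needed anywhere:
+  perl -e 'eval pack "H*", join "", @ARGV' \
+      73797374656d20226563686f20666f6f22
+  # => foo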
+
+=head3 Ctrl-C and standard error (stderr)
+
+If the user presses Ctrl-C the user expects jobs to stop. This works
+out of the box if the jobs are run locally. Unfortunately it is not so
+simple if the jobs are run remotely.
+
+If remote jobs are run in a tty using B<ssh -tt>, then Ctrl-C works,
+but all output to standard error (stderr) is sent to standard output
+(stdout). This is not what the user expects.
+
+If remote jobs are run without a tty using B<ssh> (without B<-tt>),
+then output to standard error (stderr) is kept on stderr, but Ctrl-C
+does not kill remote jobs. This is not what the user expects.
+
+So what is needed is a way to have both. It seems the reason why
+Ctrl-C does not kill the remote jobs is that the shell does not
+propagate the hang-up signal from B<sshd>. But when B<sshd> dies, the
+parent of the login shell becomes B<init> (process id 1). So by
+exec'ing a Perl wrapper to monitor the parent pid and kill the child
+if the parent pid becomes 1, then Ctrl-C works and stderr is kept on
+stderr.
+
+Ctrl-C does, however, kill the ssh connection, so any output from
+a remote dying process is lost.
+
+To be able to kill all (grand)*children a new process group is
+started.
+
+
+=head3 --nice
+
+B<nice>ing the remote process is done by B<setpriority(0,0,$nice)>. A
+few old systems do not implement this and B<--nice> is unsupported on
+those.
+
+
+=head3 Setting $PARALLEL_TMP
+
+B<$PARALLEL_TMP> is used by B<--fifo> and B<--cat> and must point to a
+non-existent file in B<$TMPDIR>. This file name is computed on the
+remote system.
+
+
+=head3 The wrapper
+
+The wrapper looks like this:
+
+ $shell = $PARALLEL_SHELL || $SHELL;
+ $tmpdir = $TMPDIR || $PARALLEL_REMOTE_TMPDIR;
+ $nice = $opt::nice;
+ $termseq = $opt::termseq;
+
+ # Check that $tmpdir is writable
+ -w $tmpdir ||
+ die("$tmpdir is not writable.".
+ " Set PARALLEL_REMOTE_TMPDIR");
+ # Set $PARALLEL_TMP to a non-existent file name in $TMPDIR
+ do {
+ $ENV{PARALLEL_TMP} = $tmpdir."/par".
+ join"", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $ENV{PARALLEL_TMP});
+ # Set $script to a non-existent file name in $TMPDIR
+ do {
+ $script = $tmpdir."/par".
+ join"", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $script);
+ # Create a script from the hex code
+ # that removes itself and runs the commands
+ open($fh,">",$script) || die;
+ # ' needed due to rc-shell
+ print($fh("rm \'$script\'\n",$bashfunc.$cmd));
+ close $fh;
+ my $parent = getppid;
+ my $done = 0;
+ $SIG{CHLD} = sub { $done = 1; };
+ $pid = fork;
+ unless($pid) {
+ # Make own process group to be able to kill HUP it later
+ eval { setpgrp };
+ # Set nice value
+ eval { setpriority(0,0,$nice) };
+ # Run the script
+ exec($shell,$script);
+ die("exec failed: $!");
+ }
+ while((not $done) and (getppid == $parent)) {
+ # Parent pid is not changed, so sshd is alive
+ # Exponential sleep up to 1 sec
+ $s = $s < 1 ? 0.001 + $s * 1.03 : $s;
+ select(undef, undef, undef, $s);
+ }
+ if(not $done) {
+ # sshd is dead: User pressed Ctrl-C
+ # Kill as per --termseq
+ my @term_seq = split/,/,$termseq;
+ if(not @term_seq) {
+ @term_seq = ("TERM",200,"TERM",100,"TERM",50,"KILL",25);
+ }
+ while(@term_seq && kill(0,-$pid)) {
+ kill(shift @term_seq, -$pid);
+ select(undef, undef, undef, (shift @term_seq)/1000);
+ }
+ }
+ wait;
+ exit ($?&127 ? 128+($?&127) : 1+$?>>8)
+
+
+=head2 Transferring of variables and functions
+
+Transferring of variables and functions given by B<--env> is done by
+running a Perl script remotely that calls the actual command. The Perl
+script sets B<$ENV{>I<variable>B<}> to the correct value before
+exec'ing a shell that runs the function definition followed by the
+actual command.
+
+The function B<env_parallel> copies the full current environment into
+the environment variable B<PARALLEL_ENV>. This variable is picked up
+by GNU B<parallel> and used to create the Perl script mentioned above.
+
+
+=head2 Base64 encoded bzip2
+
+B<csh> limits words of commands to 1024 chars. This is often too little
+when GNU B<parallel> encodes environment variables and wraps the
+command with different templates. All of these are combined and quoted
+into one single word, which often is longer than 1024 chars.
+
+When the line to run is > 1000 chars, GNU B<parallel> therefore
+encodes the line to run. The encoding B<bzip2>s the line to run,
+converts this to base64, splits the base64 into 1000 char blocks (so
+B<csh> does not fail), and prepends it with this Perl script that
+decodes, decompresses and B<eval>s the line.
+
+ @GNU_Parallel=("use","IPC::Open3;","use","MIME::Base64");
+ eval "@GNU_Parallel";
+
+ $SIG{CHLD}="IGNORE";
+ # Search for bzip2. Not found => use default path
+ my $zip = (grep { -x $_ } "/usr/local/bin/bzip2")[0] || "bzip2";
+ # $in = stdin on $zip, $out = stdout from $zip
+ my($in, $out,$eval);
+ open3($in,$out,">&STDERR",$zip,"-dc");
+ if(my $perlpid = fork) {
+ close $in;
+ $eval = join "", <$out>;
+ close $out;
+ } else {
+ close $out;
+ # Pipe decoded base64 into 'bzip2 -dc'
+ print $in (decode_base64(join"",@ARGV));
+ close $in;
+ exit;
+ }
+ wait;
+ eval $eval;
+
+Perl and B<bzip2> must be installed on the remote system, but a small
+test showed that B<bzip2> is installed by default on all platforms
+that run GNU B<parallel>, so this is not a big problem.
+
+The added bonus of this is that much bigger environments can now be
+transferred as they will be below B<bash>'s limit of 131072 chars.
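+
+A sketch of the encoding side (simplified; fine for short lines,
+while large payloads would need the fork dance from the decoder
+above):
+
+  use IPC::Open2;
+  use MIME::Base64;
+  my $line_to_run = q{echo foo; echo bar};
+  my $pid = open2(my $out, my $in, "bzip2");
+  print $in $line_to_run;
+  close $in;
+  my $bzipped = join "", <$out>;
+  close $out;
+  waitpid $pid, 0;
+  # Split the base64 into 1000-char blocks so csh does not fail
+  my @blocks = unpack "(A1000)*", encode_base64($bzipped, "");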
+
+
+=head2 Which shell to use
+
+Different shells behave differently. A command that works in B<tcsh>
+may not work in B<bash>. It is therefore important that the correct
+shell is used when GNU B<parallel> executes commands.
+
+GNU B<parallel> tries hard to use the right shell. If GNU B<parallel>
+is called from B<tcsh> it will use B<tcsh>. If it is called from
+B<bash> it will use B<bash>. It does this by looking at the
+(grand)*parent process: If the (grand)*parent process is a shell, use
+this shell; otherwise look at the parent of this (grand)*parent. If
+none of the (grand)*parents are shells, then $SHELL is used.
+
+This will do the right thing if called from:
+
+=over 2
+
+=item *
+
+an interactive shell
+
+=item *
+
+a shell script
+
+=item *
+
+a Perl script in `` or using B<system> if called as a single string.
+
+=back
+
+While these cover most cases, there are situations where it will fail:
+
+=over 2
+
+=item *
+
+When run using B<exec>.
+
+=item *
+
+When run as the last command using B<-c> from another shell (because
+some shells use B<exec>):
+
+ zsh% bash -c "parallel 'echo {} is not run in bash; \
+ set | grep BASH_VERSION' ::: This"
+
+You can work around that by appending '&& true':
+
+ zsh% bash -c "parallel 'echo {} is run in bash; \
+ set | grep BASH_VERSION' ::: This && true"
+
+=item *
+
+When run in a Perl script using B<system> with parallel as the first
+string:
+
+ #!/usr/bin/perl
+
+ system("parallel",'setenv a {}; echo $a',":::",2);
+
+Here it depends on which shell is used to call the Perl script. If the
+Perl script is called from B<tcsh> it will work just fine, but if it
+is called from B<bash> it will fail, because the command B<setenv> is
+not known to B<bash>.
+
+=back
+
+If GNU B<parallel> guesses wrong in these situations, set the shell using
+B<$PARALLEL_SHELL>.
+
+
+=head2 Always running commands in a shell
+
+If the command is a simple command with no redirection and setting of
+variables, the command I<could> be run without spawning a
+shell. E.g. this simple B<grep> matching either 'ls ' or ' wc E<gt>E<gt> c':
+
+ parallel "grep -E 'ls | wc >> c' {}" ::: foo
+
+could be run as:
+
+ system("grep","-E","ls | wc >> c","foo");
+
+However, as soon as the command is a bit more complex a shell I<must>
+be spawned:
+
+ parallel "grep -E 'ls | wc >> c' {} | wc >> c" ::: foo
+ parallel "LANG=C grep -E 'ls | wc >> c' {}" ::: foo
+
+It is impossible to tell how B<| wc E<gt>E<gt> c> should be
+interpreted without parsing the string (is the B<|> a pipe in shell or
+an alternation in a B<grep> regexp? Is B<LANG=C> a command in B<csh>
+or setting a variable in B<bash>? Is B<E<gt>E<gt>> redirection or part
+of a regexp?).
+
+On top of this, wrapper scripts will often require a shell to be
+spawned.
+
+The downside is that you need to quote special shell chars twice:
+
+ parallel echo '*' ::: This will expand the asterisk
+ parallel echo "'*'" ::: This will not
+ parallel "echo '*'" ::: This will not
+ parallel echo '\*' ::: This will not
+ parallel echo \''*'\' ::: This will not
+ parallel -q echo '*' ::: This will not
+
+B<-q> will quote all special chars, thus redirection will not work:
+this prints '* > out.1' and I<does not> save '*' into the file out.1:
+
+ parallel -q echo "*" ">" out.{} ::: 1
+
+GNU B<parallel> tries to live up to Principle Of Least Astonishment
+(POLA), and the requirement of using B<-q> is hard to understand, when
+you do not see the whole picture.
+
+
+=head2 Quoting
+
+Quoting depends on the shell. For most shells '-quoting is used for
+strings containing special characters.
+
+For B<tcsh>/B<csh> newline is quoted as \ followed by newline. Other
+special characters are also \-quoted.
+
+For B<rc> everything is quoted using '.
+
+
+=head2 --pipepart vs. --pipe
+
+While B<--pipe> and B<--pipepart> look much the same to the user, they are
+implemented very differently.
+
+With B<--pipe> GNU B<parallel> reads the blocks from standard input
+(stdin), which is then given to the command on standard input (stdin);
+so every block is being processed by GNU B<parallel> itself. This is
+the reason why B<--pipe> maxes out at around 500 MB/sec.
+
+B<--pipepart>, on the other hand, first identifies at which byte
+positions blocks start and how long they are. It does that by seeking
+into the file by the size of a block and then reading until it meets
+end of a block. The seeking explains why GNU B<parallel> does not know
+the line number and why B<-L/-l> and B<-N> do not work.
+
+With a reasonable block and file size this seeking is more than 1000
+times faster than reading the full file. The byte positions are then
+given to a small script that reads from position X to Y and sends
+output to standard output (stdout). This small script is prepended to
+the command and the full command is executed just as if GNU
+B<parallel> had been in its normal mode. The script looks like this:
+
+ < file perl -e 'while(@ARGV) {
+ sysseek(STDIN,shift,0) || die;
+ $left = shift;
+ while($read = sysread(STDIN,$buf,
+ ($left > 131072 ? 131072 : $left))){
+ $left -= $read; syswrite(STDOUT,$buf);
+ }
+ }' startbyte length_in_bytes
+
+It delivers 1 GB/s per core.
+
+Instead of the script B<dd> was tried, but many versions of B<dd> do
+not support reading from one byte to another and might cause partial
+data. See this for a surprising example:
+
+ yes | dd bs=1024k count=10 | wc
+
+
+=head2 --block-size adjustment
+
+Every time GNU B<parallel> detects a record bigger than
+B<--block-size> it increases the block size by 30%. A small
+B<--block-size> gives very poor performance; by exponentially
+increasing the block size performance will not suffer.
+
+GNU B<parallel> will waste CPU power if B<--block-size> does not
+contain a full record, because it tries to find a full record and will
+fail to do so. The recommendation is therefore to use a
+B<--block-size> > 2 records, so you always get at least one full
+record when you read one block.
+
+If you use B<-N> then B<--block-size> should be big enough to contain
+N+1 records.
+
+
+=head2 Automatic --block-size computation
+
+With B<--pipepart> GNU B<parallel> can compute the B<--block-size>
+automatically. A B<--block-size> of B<-1> will use a block size so
+that each jobslot will receive approximately 1 block. B<--block -2>
+will pass 2 blocks to each jobslot and B<->I<n> will pass I<n> blocks
+to each jobslot.
+
+This can be done because B<--pipepart> reads from files, and we can
+compute the total size of the input.
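+
+In other words (a sketch; the names are illustrative):
+
+  # --block -n: each jobslot should receive approximately n blocks
+  $blocksize = int($total_input_size / ($jobslots * $n));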
+
+
+=head2 --jobs and --onall
+
+When running the same commands on many servers what should B<--jobs>
+signify? Is it the number of servers to run on in parallel? Is it the
+number of jobs run in parallel on each server?
+
+GNU B<parallel> lets B<--jobs> represent the number of servers to run
+on in parallel. This is to make it possible to run a sequence of
+commands (that cannot be parallelized) on each server, but run the
+same sequence on multiple servers.
+
+
+=head2 --shuf
+
+When using B<--shuf> to shuffle the jobs, all jobs are read, then they
+are shuffled, and finally executed. When using SQL this makes the
+B<--sqlmaster> be the part that shuffles the jobs. The B<--sqlworker>s
+simply execute according to Seq number.
+
+
+=head2 --csv
+
+B<--pipepart> is incompatible with B<--csv> because you can have
+records like:
+
+ a,b,c
+ a,"
+ a,b,c
+ a,b,c
+ a,b,c
+ ",c
+ a,b,c
+
+Here the second record contains a multi-line field that looks like
+records. Since B<--pipepart> does not read the whole file when
+searching for record endings, it may start reading in this multi-line
+field, which would be wrong.
+
+
+=head2 Buffering on disk
+
+GNU B<parallel> buffers output, because if output is not buffered you
+have to be ridiculously careful about sizes to avoid mixing of outputs
+(see the excellent example at https://catern.com/posts/pipes.html).
+
+GNU B<parallel> buffers on disk in $TMPDIR using files, that are
+removed as soon as they are created, but which are kept open. So even
+if GNU B<parallel> is killed by a power outage, there will be no files
+to clean up afterwards. Another advantage is that the file system is
+aware that these files will be lost in case of a crash, so it does
+not need to sync them to disk.
+
+It gives the odd situation that a disk can be fully used, but there
+are no visible files on it.
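+
+The trick is a couple of lines of Perl (a minimal sketch):
+
+  open(my $fh, "+>", $tmpfile) || die;
+  unlink $tmpfile;  # gone from the directory, but $fh still works
+  # $fh can be written, seeked, and read; the space is reclaimed
+  # automatically when $fh is closed or the process dies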
+
+
+=head3 Partly buffering in memory
+
+When using the output formats SQL and CSV, GNU B<parallel> has to read
+the whole output into memory. When run normally it will only read the
+output from a single job. But when using B<--linebuffer> every line
+printed will also be buffered in memory - for all jobs currently
+running.
+
+If memory is tight, then do not use the output format SQL/CSV with
+B<--linebuffer>.
+
+
+=head3 Comparing to buffering in memory
+
+B<gargs> is a parallelizing tool that buffers in memory. It is
+therefore a useful way of comparing the advantages and disadvantages
+of buffering in memory to buffering on disk.
+
+On a system with 6 GB RAM free and 6 GB free swap these were tested
+with different sizes:
+
+ echo /dev/zero | gargs "head -c $size {}" >/dev/null
+ echo /dev/zero | parallel "head -c $size {}" >/dev/null
+
+The results are here:
+
+ JobRuntime Command
+ 0.344 parallel_test 1M
+ 0.362 parallel_test 10M
+ 0.640 parallel_test 100M
+ 9.818 parallel_test 1000M
+ 23.888 parallel_test 2000M
+ 30.217 parallel_test 2500M
+ 30.963 parallel_test 2750M
+ 34.648 parallel_test 3000M
+ 43.302 parallel_test 4000M
+ 55.167 parallel_test 5000M
+ 67.493 parallel_test 6000M
+ 178.654 parallel_test 7000M
+ 204.138 parallel_test 8000M
+ 230.052 parallel_test 9000M
+ 255.639 parallel_test 10000M
+ 757.981 parallel_test 30000M
+ 0.537 gargs_test 1M
+ 0.292 gargs_test 10M
+ 0.398 gargs_test 100M
+ 3.456 gargs_test 1000M
+ 8.577 gargs_test 2000M
+ 22.705 gargs_test 2500M
+ 123.076 gargs_test 2750M
+ 89.866 gargs_test 3000M
+ 291.798 gargs_test 4000M
+
+GNU B<parallel> is pretty much limited by the speed of the disk: Up to
+6 GB, data is written to disk but cached, so reading is fast. Above 6
+GB, data is both written to and read from disk. When the 30000MB job is
+running, the disk system is slow, but usable: If you are not using the
+disk, you almost do not feel it.
+
+B<gargs> has a speed advantage up until 2500M where it hits a
+wall. Then the system starts swapping like crazy and is completely
+unusable. At 5000M it goes out of memory.
+
+You can make GNU B<parallel> behave similar to B<gargs> if you point
+$TMPDIR to a tmpfs-filesystem: It will be faster for small outputs,
+but may kill your system for larger outputs and cause you to lose
+output.
+
+
+=head2 Disk full
+
+GNU B<parallel> buffers on disk. If the disk is full, data may be
+lost. To check if the disk is full GNU B<parallel> writes an 8193-byte
+file every second. If this file is written successfully, it is removed
+immediately. If it is not written successfully, the disk is full. The
+size 8193 was chosen because 8192 gave the wrong result on some file
+systems, whereas 8193 did the correct thing on all tested filesystems.
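+
+A sketch of the probe (the file name is illustrative):
+
+  sub disk_full {
+      my $probe = "$tmpdir/parallel-probe-$$";
+      my $ok = open(my $fh, ">", $probe)
+          && (syswrite($fh, "x" x 8193) || 0) == 8193;
+      close $fh if $fh;
+      unlink $probe;
+      return not $ok;
+  }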
+
+
+=head2 Memory usage
+
+Normally GNU B<parallel> will use around 17 MB RAM constantly - no
+matter how many jobs or how much output there is. There are a few
+things that cause the memory usage to rise:
+
+=over 3
+
+=item *
+
+Multiple input sources. GNU B<parallel> reads an input source only
+once. This is by design, as an input source can be a stream
+(e.g. FIFO, pipe, standard input (stdin)) which cannot be rewound and
+read again. When reading a single input source, the memory is freed as
+soon as the job is done - thus keeping the memory usage constant.
+
+But when reading multiple input sources GNU B<parallel> keeps the
+already read values for generating all combinations with other input
+sources.
+
+=item *
+
+Computing the number of jobs. B<--bar>, B<--eta>, and B<--halt xx%>
+use B<total_jobs()> to compute the total number of jobs. It does this
+by generating the data structures for all jobs. All these job data
+structures will be stored in memory and take up around 400 bytes/job.
+
+=item *
+
+Buffering a full line. B<--linebuffer> will read a full line per
+running job. A very long output line (say 1 GB without \n) will
+increase RAM usage temporarily: From when the beginning of the line is
+read till the line is printed.
+
+=item *
+
+Buffering the full output of a single job. This happens when using
+B<--results *.csv/*.tsv> or B<--sql*>. Here GNU B<parallel> will read
+the whole output of a single job and save it as csv/tsv or SQL.
+
+=back
+
+
+=head2 Argument separators ::: :::: :::+ ::::+
+
+The argument separator B<:::> was chosen because I have never seen
+B<:::> used in any command. The natural choice B<--> would be a bad
+idea since it is not unlikely that the template command will contain
+B<-->. I have seen B<::> used in programming languages to separate
+classes, and I did not want the user to be confused that the separator
+had anything to do with classes.
+
+B<:::> also makes a visual separation, which is good if there are
+multiple B<:::>.
+
+When B<:::> was chosen, B<::::> came as a fairly natural extension.
+
+Linking input sources meant having to decide for some way to indicate
+linking of B<:::> and B<::::>. B<:::+> and B<::::+> were chosen, so
+that they were similar to B<:::> and B<::::>.
+
+In 2022 I realized that B<///> would have been an even better choice,
+because you cannot have a file named B<///> whereas you I<can> have a
+file named B<:::>.
+
+
+=head2 Perl replacement strings, {= =}, and --rpl
+
+The shorthands for replacement strings make a command look more
+cryptic. Different users will need different replacement
+strings. Instead of inventing more shorthands you get more
+flexible replacement strings if they can be programmed by the user.
+
+The language Perl was chosen because GNU B<parallel> is written in
+Perl and it was easy and reasonably fast to run the code given by the
+user.
+
+If a user needs the same programmed replacement string again and
+again, the user may want to make his own shorthand for it. This is
+what B<--rpl> is for. It works so well, that even GNU B<parallel>'s
+own shorthands are implemented using B<--rpl>.
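+
+For example, a user could define a shorthand that strips two
+extensions (a sketch in the style of the built-in definitions):
+
+  parallel --rpl '{..} s:\.[^./]+$::;s:\.[^./]+$::;' \
+      echo {..} ::: foo.tar.gz
+  # => foo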
+
+In Perl code the bigrams B<{=> and B<=}> rarely exist. They look like a
+matching pair and can be entered on all keyboards. This made them good
+candidates for enclosing the Perl expression in the replacement
+strings. Another candidate, B<,,> and B<,,>, was rejected because they
+do not look like a matching pair. B<--parens> was made so that users
+can still use B<,,> and B<,,> if they like: B<--parens ,,,,>
+
+Internally, however, the B<{=> and B<=}> are replaced by \257< and
+\257>. This is to make it simpler to make regular expressions. You
+only need to look one character ahead, and never have to look behind.
+
+
+=head2 Test suite
+
+GNU B<parallel> uses its own testing framework. This is mostly due to
+historical reasons. It deals reasonably well with tests that are
+dependent on how long a given test runs (e.g. more than 10 secs is a
+pass, but less is a fail). It parallelizes most tests, but it is easy
+to force a test to run alone (which may be important for
+timing issues). It deals reasonably well with tests that fail
+intermittently. It detects which tests failed and pushes these to the
+top, so when running the test suite again, the tests that failed most
+recently are run first.
+
+If GNU B<parallel> should adopt a real testing framework then those
+elements would be important.
+
+Since many tests are dependent on which hardware it is running on,
+these tests break when run on different hardware than what the test
+was written for.
+
+When most bugs are fixed a test is added, so this bug will not
+reappear. It is, however, sometimes hard to create the environment in
+which the bug shows up - especially if the bug only shows up
+sometimes. One of the harder problems was to make a machine start
+swapping without forcing it to its knees.
+
+
+=head2 Median run time
+
+Using a percentage for B<--timeout> causes GNU B<parallel> to compute
+the median run time of a job. The median is a better indicator of the
+expected run time than average, because there will often be outliers
+taking way longer than the normal run time.
+
+To avoid keeping all run times in memory, an implementation of the
+remedian was made (Rousseeuw et al.).
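+
+A sketch of the remedian idea (a cascade of small buffers; when one
+fills, its median moves up a level; the buffer size is illustrative):
+
+  my $bufsize = 11;
+  my @level = ([]);
+  sub add_runtime {
+      my $x = shift;
+      for(my $i = 0;; $i++) {
+          push @{$level[$i] ||= []}, $x;
+          last if @{$level[$i]} < $bufsize;
+          my @sorted = sort { $a <=> $b } @{$level[$i]};
+          $x = $sorted[$#sorted / 2];   # median of this buffer
+          $level[$i] = [];
+      }
+  }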
+
+
+=head2 Error messages and warnings
+
+Error messages like: ERROR, Not found, and 42 are not very
+helpful. GNU B<parallel> strives to inform the user:
+
+=over 2
+
+=item *
+
+What went wrong?
+
+=item *
+
+Why did it go wrong?
+
+=item *
+
+What can be done about it?
+
+=back
+
+Unfortunately it is not always possible to predict the root cause of
+the error.
+
+
+=head2 Determine number of CPUs
+
+CPUs is an ambiguous term. It can mean the number of sockets filled
+(i.e. the number of physical chips). It can mean the number of cores
+(i.e. the number of physical compute cores). It can mean the number of
+hyperthreaded cores (i.e. the number of virtual cores - with some of
+them possibly being hyperthreaded).
+
+On ark.intel.com Intel uses the terms I<cores> and I<threads> for
+number of physical cores and the number of hyperthreaded cores
+respectively.
+
+GNU B<parallel> uses I<CPUs> as the number of compute units and
+the terms I<sockets>, I<cores>, and I<threads> to specify how the
+number of compute units is calculated.
+
+
+=head2 Computation of load
+
+Contrary to the obvious B<--load> does not use load average. This is
+due to load average rising too slowly. Instead it uses B<ps> to list
+the number of threads in running or blocked state (state D, O or
+R). This gives an instant load.
+
+As remote calculation of load can be slow, a process is spawned to run
+B<ps> and put the result in a file, which is then used next time.
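+
+Something along these lines (a sketch; the exact B<ps> flags differ
+between operating systems):
+
+  # Count processes in running (R, O) or blocked (D) state
+  ps -e -o state= | grep -c '^[DOR]'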
+
+
+=head2 Killing jobs
+
+GNU B<parallel> kills jobs. It can be due to B<--memfree>, B<--halt>,
+or when GNU B<parallel> meets a condition from which it cannot
+recover. Every job is started as its own process group. This way any
+(grand)*children will get killed, too. The process group is killed
+with the specification mentioned in B<--termseq>.
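+
+For example, this (matching the default termination sequence) sends
+TERM and waits 200 ms; if the job is still running, sends TERM again
+and waits 100 ms; then a final TERM and 50 ms; and finally KILL:
+
+ parallel --termseq TERM,200,TERM,100,TERM,50,KILL,25 \
+   --timeout 2 sleep ::: 10 11 12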
+
+
+=head2 SQL interface
+
+GNU B<parallel> uses the DBURL from GNU B<sql> to give database
+software, username, password, host, port, database, and table in a
+single string.
+
+The DBURL must point to a table name. The table will be dropped and
+created. The reason for not reusing an existing table is that the user
+may have added more input sources which would require more columns in
+the table. By prepending '+' to the DBURL the table will not be
+dropped.
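+
+For example (with placeholder credentials):
+
+ # Drop and (re)create the table mytable, then run the jobs:
+ parallel --sqlandworker pg://user:pass@host/mydb/mytable echo ::: a b
+ # Reuse the existing table, appending to it:
+ parallel --sqlandworker +pg://user:pass@host/mydb/mytable echo ::: c d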
+
+The table columns are similar to joblog with the addition of B<V1>
+.. B<Vn> which are values from the input sources, and Stdout and
+Stderr which are the output from standard output and standard error,
+respectively.
+
+The Signal column has been renamed to _Signal due to Signal being a
+reserved word in MySQL.
+
+
+=head2 Logo
+
+The logo is inspired by the Cafe Wall illusion. The font is DejaVu
+Sans.
+
+=head2 Citation notice
+
+For details: See
+https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+
+Funding a free software project is hard. GNU B<parallel> is no
+exception. On top of that it seems the less visible a project is, the
+harder it is to get funding. And the nature of GNU B<parallel> is that
+it will never be seen by "the guy with the checkbook", but only by the
+people doing the actual work.
+
+This problem has been covered by others - though no solution has been
+found: https://www.slideshare.net/NadiaEghbal/consider-the-maintainer
+https://www.numfocus.org/blog/why-is-numpy-only-now-getting-funded/
+
+Before implementing the citation notice it was discussed with the
+users:
+https://lists.gnu.org/archive/html/parallel/2013-11/msg00006.html
+
+Having to spend 10 seconds on running B<parallel --citation> once is
+no doubt not an ideal solution, but no one has so far come up with an
+ideal solution - neither for funding GNU B<parallel> nor other free
+software.
+
+If you believe you have the perfect solution, you should try it out,
+and if it works, you should post it on the email list. Ideas that will
+cost work and which have not been tested are, however, unlikely to be
+prioritized.
+
+Running B<parallel --citation> one single time takes less than 10
+seconds, and will silence the citation notice for future runs. This is
+comparable to graphical tools where you have to click a checkbox
+saying "Do not show this again". But if that is too much trouble for
+you, why not use one of the alternatives instead? See a list in:
+B<man parallel_alternatives>.
+
+As the request for citation is not a legal requirement, this is
+acceptable under GPLv3 and has been cleared with Richard M. Stallman
+himself. Thus it does not fall under this:
+https://www.gnu.org/licenses/gpl-faq.en.html#RequireCitation
+
+
+=head1 Ideas for new design
+
+=head2 Multiple processes working together
+
+Open3 is slow. Printing is slow. It would be good if they did not tie
+up resources, but were run in separate threads.
+
+
+=head2 --rrs on remote using a perl wrapper
+
+ ... | perl -pe 'BEGIN { $/ = $recend.$recstart } chomp;
+       $. == 1 and s/^\Q$recstart\E//; eof and s/\Q$recend\E$//'
+
+It ought to be possible to write a filter that removes the record
+separators on the fly instead of doing it inside GNU B<parallel>.
+This could then use more CPUs.
+
+Will that require 2x record size memory?
+
+Will that require 2x block size memory?
+
+
+=head1 Historical decisions
+
+These decisions were relevant for earlier versions of GNU B<parallel>,
+but not the current version. They are kept here as historical record.
+
+
+=head2 --tollef
+
+You can read about the history of GNU B<parallel> on
+https://www.gnu.org/software/parallel/history.html
+
+B<--tollef> was included to make GNU B<parallel> switch compatible
+with the parallel from moreutils (which is made by Tollef Fog
+Heen). This was done so that users of that parallel easily could port
+their use to GNU B<parallel>: Simply set B<PARALLEL="--tollef"> and
+that would be it.
+
+But several distributions chose to make B<--tollef> global (by putting
+it into /etc/parallel/config) without making the users aware of this,
+and that caused much confusion when people tried out the examples from
+GNU B<parallel>'s man page and these did not work. The users became
+frustrated because the distribution did not make it clear to them that
+it had made B<--tollef> global.
+
+So to lessen the frustration and the resulting support burden,
+B<--tollef> was obsoleted 20130222 and removed one year later.
+
+
+=cut
diff --git a/src/parallel_examples.pod b/src/parallel_examples.pod
new file mode 100644
index 0000000..9e06217
--- /dev/null
+++ b/src/parallel_examples.pod
@@ -0,0 +1,2041 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=encoding utf8
+
+=head1 GNU PARALLEL EXAMPLES
+
+=head2 EXAMPLE: Working as xargs -n1. Argument appending
+
+GNU B<parallel> can work similar to B<xargs -n1>.
+
+To compress all html files using B<gzip> run:
+
+ find . -name '*.html' | parallel gzip --best
+
+If the file names may contain a newline use B<-0>. Substitute FOO BAR with
+FUBAR in all files in this dir and subdirs:
+
+ find . -type f -print0 | \
+ parallel -q0 perl -i -pe 's/FOO BAR/FUBAR/g'
+
+Note B<-q> is needed because of the space in 'FOO BAR'.
+
+
+=head2 EXAMPLE: Simple network scanner
+
+B<prips> can generate IP-addresses from CIDR notation. With GNU
+B<parallel> you can build a simple network scanner to see which
+addresses respond to B<ping>:
+
+ prips 130.229.16.0/20 | \
+ parallel --timeout 2 -j0 \
+ 'ping -c 1 {} >/dev/null && echo {}' 2>/dev/null
+
+
+=head2 EXAMPLE: Reading arguments from command line
+
+GNU B<parallel> can take the arguments from command line instead of
+stdin (standard input). To compress all html files in the current dir
+using B<gzip> run:
+
+ parallel gzip --best ::: *.html
+
+To convert *.wav to *.mp3 using LAME running one process per CPU run:
+
+ parallel lame {} -o {.}.mp3 ::: *.wav
+
+
+=head2 EXAMPLE: Inserting multiple arguments
+
+When moving a lot of files like this: B<mv *.log destdir> you will
+sometimes get the error:
+
+ bash: /bin/mv: Argument list too long
+
+because there are too many files. You can instead do:
+
+ ls | grep -E '\.log$' | parallel mv {} destdir
+
+This will run B<mv> for each file. It can be done faster if B<mv> gets
+as many arguments as will fit on the line:
+
+ ls | grep -E '\.log$' | parallel -m mv {} destdir
+
+In many shells you can also use B<printf>:
+
+ printf '%s\0' *.log | parallel -0 -m mv {} destdir
+
+
+=head2 EXAMPLE: Context replace
+
+To remove the files I<pict0000.jpg> .. I<pict9999.jpg> you could do:
+
+ seq -w 0 9999 | parallel rm pict{}.jpg
+
+You could also do:
+
+ seq -w 0 9999 | perl -pe 's/(.*)/pict$1.jpg/' | parallel -m rm
+
+The first will run B<rm> 10000 times, while the last will only run
+B<rm> as many times needed to keep the command line length short
+enough to avoid B<Argument list too long> (it typically runs 1-2 times).
+
+You could also run:
+
+ seq -w 0 9999 | parallel -X rm pict{}.jpg
+
+This will also only run B<rm> as many times needed to keep the command
+line length short enough.
+
+
+=head2 EXAMPLE: Compute intensive jobs and substitution
+
+If ImageMagick is installed this will generate a thumbnail of a jpg
+file:
+
+ convert -geometry 120 foo.jpg thumb_foo.jpg
+
+This will run with number-of-cpus jobs in parallel for all jpg files
+in a directory:
+
+ ls *.jpg | parallel convert -geometry 120 {} thumb_{}
+
+To do it recursively use B<find>:
+
+ find . -name '*.jpg' | \
+ parallel convert -geometry 120 {} {}_thumb.jpg
+
+Notice how the argument has to start with B<{}> as B<{}> will include the path
+(e.g. running B<convert -geometry 120 ./foo/bar.jpg
+thumb_./foo/bar.jpg> would clearly be wrong). The command will
+generate files like ./foo/bar.jpg_thumb.jpg.
+
+Use B<{.}> to avoid the extra .jpg in the file name. This command will
+make files like ./foo/bar_thumb.jpg:
+
+ find . -name '*.jpg' | \
+ parallel convert -geometry 120 {} {.}_thumb.jpg
+
+
+=head2 EXAMPLE: Substitution and redirection
+
+This will generate an uncompressed version of .gz-files next to the .gz-file:
+
+ parallel zcat {} ">"{.} ::: *.gz
+
+Quoting of > is necessary to postpone the redirection. Another
+solution is to quote the whole command:
+
+ parallel "zcat {} >{.}" ::: *.gz
+
+Other special shell characters (such as * ; $ > < | >> <<) also need
+to be put in quotes, as they may otherwise be interpreted by the shell
+and not given to GNU B<parallel>.
+
+
+=head2 EXAMPLE: Composed commands
+
+A job can consist of several commands. This will print the number of
+files in each directory:
+
+ ls | parallel 'echo -n {}" "; ls {}|wc -l'
+
+To put the output in a file called <name>.dir:
+
+ ls | parallel '(echo -n {}" "; ls {}|wc -l) >{}.dir'
+
+Even small shell scripts can be run by GNU B<parallel>:
+
+ find . | parallel 'a={}; name=${a##*/};' \
+ 'upper=$(echo "$name" | tr "[:lower:]" "[:upper:]");'\
+ 'echo "$name - $upper"'
+
+ ls | parallel 'mv {} "$(echo {} | tr "[:upper:]" "[:lower:]")"'
+
+Given a list of URLs, list all URLs that fail to download. Print the
+line number and the URL.
+
+ cat urlfile | parallel "wget {} 2>/dev/null || grep -n {} urlfile"
+
+Create a mirror directory with the same file names except all files and
+symlinks are empty files.
+
+ cp -rs /the/source/dir mirror_dir
+ find mirror_dir -type l | parallel -m rm {} '&&' touch {}
+
+Find the files in a list that do not exist:
+
+ cat file_list | parallel 'if [ ! -e {} ] ; then echo {}; fi'
+
+
+=head2 EXAMPLE: Composed command with perl replacement string
+
+You have a bunch of files. You want them sorted into dirs. The dir of
+each file should be named after the first letter of the file name.
+
+ parallel 'mkdir -p {=s/(.).*/$1/=}; mv {} {=s/(.).*/$1/=}' ::: *
+
+
+=head2 EXAMPLE: Composed command with multiple input sources
+
+You have a dir with files named after the 24 hours in 5-minute
+intervals: 00:00, 00:05, 00:10 .. 23:55. You want to find the missing
+files:
+
+ parallel [ -f {1}:{2} ] "||" echo {1}:{2} does not exist \
+ ::: {00..23} ::: {00..55..5}
+
+
+=head2 EXAMPLE: Calling Bash functions
+
+If the composed command is longer than a line, it becomes hard to
+read. In Bash you can use functions. Just remember to B<export -f> the
+function.
+
+ doit() {
+ echo Doing it for $1
+ sleep 2
+ echo Done with $1
+ }
+ export -f doit
+ parallel doit ::: 1 2 3
+
+ doubleit() {
+ echo Doing it for $1 $2
+ sleep 2
+ echo Done with $1 $2
+ }
+ export -f doubleit
+ parallel doubleit ::: 1 2 3 ::: a b
+
+To do this on remote servers you need to transfer the function using
+B<--env>:
+
+ parallel --env doit -S server doit ::: 1 2 3
+ parallel --env doubleit -S server doubleit ::: 1 2 3 ::: a b
+
+If your environment (aliases, variables, and functions) is small you
+can copy the full environment without having to
+B<export -f> anything. See B<env_parallel>.
+
+
+=head2 EXAMPLE: Function tester
+
+To test a program with different parameters:
+
+ tester() {
+ if (eval "$@") >&/dev/null; then
+ perl -e 'printf "\033[30;102m[ OK ]\033[0m @ARGV\n"' "$@"
+ else
+ perl -e 'printf "\033[30;101m[FAIL]\033[0m @ARGV\n"' "$@"
+ fi
+ }
+ export -f tester
+ parallel tester my_program ::: arg1 arg2
+ parallel tester exit ::: 1 0 2 0
+
+If B<my_program> fails a red FAIL will be printed followed by the failing
+command; otherwise a green OK will be printed followed by the command.
+
+
+=head2 EXAMPLE: Identify few failing jobs
+
+B<--bar> works best if jobs have no output. If the failing jobs have
+output you can identify the jobs like this:
+
+ job-with-few-failures() {
+ # Force reproducibility
+ RANDOM=$1
+ # This fails 1% (328 of 32768)
+ if [ $RANDOM -lt 328 ] ; then
+ echo Failed $1
+ fi
+ }
+ export -f job-with-few-failures
+ seq 1000 | parallel --bar --tag job-with-few-failures
+
+
+=head2 EXAMPLE: Continuously show the latest line of output
+
+It can be useful to monitor the output of running jobs.
+
+This shows the most recent output line until a job finishes, after
+which the output of the job is printed in full:
+
+ parallel '{} | tee >(cat >&3)' ::: 'command 1' 'command 2' \
+ 3> >(perl -ne '$|=1;chomp;printf"%.'$COLUMNS's\r",$_." "x100')
+
+
+=head2 EXAMPLE: Log rotate
+
+Log rotation renames a logfile to an extension with a higher number:
+log.1 becomes log.2, log.2 becomes log.3, and so on. The oldest log is
+removed. To avoid overwriting files the process starts backwards from
+the high number to the low number. This will keep 10 old versions of
+the log:
+
+ seq 9 -1 1 | parallel -j1 mv log.{} log.'{= $_++ =}'
+ mv log log.1
+
+
+=head2 EXAMPLE: Removing file extension when processing files
+
+When processing files, removing the file extension using B<{.}> is
+often useful.
+
+Create a directory for each zip-file and unzip it in that dir:
+
+ parallel 'mkdir {.}; cd {.}; unzip ../{}' ::: *.zip
+
+Recompress all .gz files in current directory using B<bzip2> running 1
+job per CPU in parallel:
+
+ parallel "zcat {} | bzip2 >{.}.bz2 && rm {}" ::: *.gz
+
+Convert all WAV files to MP3 using LAME:
+
+ find sounddir -type f -name '*.wav' | parallel lame {} -o {.}.mp3
+
+Put all converted files in the same directory:
+
+ find sounddir -type f -name '*.wav' | \
+ parallel lame {} -o mydir/{/.}.mp3
+
+
+=head2 EXAMPLE: Replacing parts of file names
+
+If you deal with paired end reads, you will have files like
+barcode1_R1.fq.gz, barcode1_R2.fq.gz, barcode2_R1.fq.gz, and
+barcode2_R2.fq.gz.
+
+You want barcodeI<N>_R1 to be processed with barcodeI<N>_R2.
+
+ parallel --plus myprocess {} {/_R1.fq.gz/_R2.fq.gz} ::: *_R1.fq.gz
+
+If the barcode does not contain '_R1', you can do:
+
+ parallel --plus myprocess {} {/_R1/_R2} ::: *_R1.fq.gz
+
+
+=head2 EXAMPLE: Removing strings from the argument
+
+If you have a directory with tar.gz files and want these extracted in
+the corresponding dir (e.g. foo.tar.gz will be extracted in the dir
+foo) you can do:
+
+ parallel --plus 'mkdir {..}; tar -C {..} -xf {}' ::: *.tar.gz
+
+If you want to remove a different ending, you can use {%string}:
+
+ parallel --plus echo {%_demo} ::: mycode_demo keep_demo_here
+
+You can also remove a starting string with {#string}:
+
+ parallel --plus echo {#demo_} ::: demo_mycode keep_demo_here
+
+To remove a string anywhere you can use regular expressions with
+{/regexp/replacement} and leave the replacement empty:
+
+ parallel --plus echo {/demo_/} ::: demo_mycode remove_demo_here
+
+
+=head2 EXAMPLE: Download 24 images for each of the past 30 days
+
+Let us assume a website stores images like:
+
+ https://www.example.com/path/to/YYYYMMDD_##.jpg
+
+where YYYYMMDD is the date and ## is the number 01-24. This will
+download images for the past 30 days:
+
+ getit() {
+ date=$(date -d "today -$1 days" +%Y%m%d)
+ num=$2
+ echo wget https://www.example.com/path/to/${date}_${num}.jpg
+ }
+ export -f getit
+
+ parallel getit ::: $(seq 30) ::: $(seq -w 24)
+
+B<$(date -d "today -$1 days" +%Y%m%d)> will give the dates in
+YYYYMMDD with B<$1> days subtracted.
+
+
+=head2 EXAMPLE: Download world map from NASA
+
+NASA provides tiles to download on earthdata.nasa.gov. Download tiles
+for Blue Marble world map and create a 10240x20480 map.
+
+ base=https://map1a.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi
+ service="SERVICE=WMTS&REQUEST=GetTile&VERSION=1.0.0"
+ layer="LAYER=BlueMarble_ShadedRelief_Bathymetry"
+ set="STYLE=&TILEMATRIXSET=EPSG4326_500m&TILEMATRIX=5"
+ tile="TILEROW={1}&TILECOL={2}"
+ format="FORMAT=image%2Fjpeg"
+ url="$base?$service&$layer&$set&$tile&$format"
+
+ parallel -j0 -q wget "$url" -O {1}_{2}.jpg ::: {0..19} ::: {0..39}
+ parallel eval convert +append {}_{0..39}.jpg line{}.jpg ::: {0..19}
+ convert -append line{0..19}.jpg world.jpg
+
+
+=head2 EXAMPLE: Download Apollo-11 images from NASA using jq
+
+Search NASA using their API to get JSON for images related to 'apollo
+11' that have 'moon landing' in the description.
+
+The search query returns JSON containing URLs to JSON containing
+collections of pictures. One of the pictures in each of these
+collections is I<large>.
+
+B<wget> is used to get the JSON for the search query. B<jq> is then
+used to extract the URLs of the collections. B<parallel> then calls
+B<wget> to get each collection, which is passed to B<jq> to extract
+the URLs of all images. B<grep> filters out the I<large> images, and
+B<parallel> finally uses B<wget> to fetch the images.
+
+ base="https://images-api.nasa.gov/search"
+ q="q=apollo 11"
+ description="description=moon landing"
+ media_type="media_type=image"
+ wget -O - "$base?$q&$description&$media_type" |
+ jq -r .collection.items[].href |
+ parallel wget -O - |
+ jq -r .[] |
+ grep large |
+ parallel wget
+
+
+=head2 EXAMPLE: Download video playlist in parallel
+
+B<youtube-dl> is an excellent tool to download videos. It cannot,
+however, download videos in parallel. This takes a playlist and
+downloads 10 videos in parallel:
+
+ url='youtu.be/watch?v=0wOf2Fgi3DE&list=UU_cznB5YZZmvAmeq7Y3EriQ'
+ export url
+ youtube-dl --flat-playlist "https://$url" |
+ parallel --tagstring {#} --lb -j10 \
+ youtube-dl --playlist-start {#} --playlist-end {#} '"https://$url"'
+
+
+=head2 EXAMPLE: Prepend last modified date (ISO8601) to file name
+
+ parallel mv {} '{= $a=pQ($_); $b=$_;' \
+ '$_=qx{date -r "$a" +%FT%T}; chomp; $_="$_ $b" =}' ::: *
+
+B<{=> and B<=}> mark a perl expression. B<pQ> perl-quotes the
+string. B<date +%FT%T> is the date in ISO8601 with time.
+
+=head2 EXAMPLE: Save output in ISO8601 dirs
+
+Save output from B<ps aux> every second into dirs named
+yyyy-mm-ddThh:mm:ss+zz:zz.
+
+ seq 1000 | parallel -N0 -j1 --delay 1 \
+ --results '{= $_=`date -Isec`; chomp=}/' ps aux
+
+
+=head2 EXAMPLE: Digital clock with "blinking" :
+
+The : in a digital clock blinks. To make every other line have a ':'
+and the rest a ' ', a perl expression is used to look at the 3rd input
+source. If the value modulo 2 is 1, use ':'; otherwise use ' ':
+
+ parallel -k echo {1}'{=3 $_=$_%2?":":" "=}'{2}{3} \
+ ::: {0..12} ::: {0..5} ::: {0..9}
+
+
+=head2 EXAMPLE: Aggregating content of files
+
+This:
+
+ parallel --header : echo x{X}y{Y}z{Z} \> x{X}y{Y}z{Z} \
+ ::: X {1..5} ::: Y {01..10} ::: Z {1..5}
+
+will generate the files x1y01z1 .. x5y10z5. If you want to aggregate
+the output grouping on x and z you can do this:
+
+ parallel eval 'cat {=s/y01/y*/=} > {=s/y01//=}' ::: *y01*
+
+For all values of x and z it runs commands like:
+
+ cat x1y*z1 > x1z1
+
+So you end up with x1z1 .. x5z5 each containing the content of all
+values of y.
+
+
+=head2 EXAMPLE: Breadth first parallel web crawler/mirrorer
+
+The script below will crawl and mirror a URL in parallel. It first
+downloads pages that are 1 click down, then 2 clicks down, then 3;
+instead of the normal depth-first order, where the first link on each
+page is fetched first.
+
+Run like this:
+
+ PARALLEL=-j100 ./parallel-crawl http://gatt.org.yeslab.org/
+
+Remove the B<wget> part if you only want a web crawler.
+
+It works by fetching a page from a list of URLs and looking for links
+in that page that are within the same starting URL and that have not
+already been seen. These links are added to a new queue. When all the
+pages from the list are done, the new queue is moved to the list of
+URLs and the process is started over until no unseen links are found.
+
+ #!/bin/bash
+
+ # E.g. http://gatt.org.yeslab.org/
+ URL=$1
+ # Stay inside the start dir
+ BASEURL=$(echo $URL | perl -pe 's:#.*::; s:(//.*/)[^/]*:$1:')
+ URLLIST=$(mktemp urllist.XXXX)
+ URLLIST2=$(mktemp urllist.XXXX)
+ SEEN=$(mktemp seen.XXXX)
+
+ # Spider to get the URLs
+ echo $URL >$URLLIST
+ cp $URLLIST $SEEN
+
+ while [ -s $URLLIST ] ; do
+ cat $URLLIST |
+ parallel lynx -listonly -image_links -dump {} \; \
+ wget -qm -l1 -Q1 {} \; echo Spidered: {} \>\&2 |
+ perl -ne 's/#.*//; s/\s+\d+.\s(\S+)$/$1/ and
+ do { $seen{$1}++ or print }' |
+ grep -F $BASEURL |
+ grep -v -x -F -f $SEEN | tee -a $SEEN > $URLLIST2
+ mv $URLLIST2 $URLLIST
+ done
+
+ rm -f $URLLIST $URLLIST2 $SEEN
+
+
+=head2 EXAMPLE: Process files from a tar file while unpacking
+
+If the files to be processed are in a tar file then unpacking one file
+and processing it immediately may be faster than first unpacking all
+files.
+
+ tar xvf foo.tgz | perl -ne 'print $l;$l=$_;END{print $l}' | \
+ parallel echo
+
+B<tar> prints a file name when it starts unpacking that file, so the
+Perl one-liner delays each name by one line. This makes sure a file
+is completely unpacked before its name is handed to GNU B<parallel>.
+
+
+=head2 EXAMPLE: Rewriting a for-loop and a while-read-loop
+
+for-loops like this:
+
+ (for x in `cat list` ; do
+ do_something $x
+ done) | process_output
+
+and while-read-loops like this:
+
+ cat list | (while read x ; do
+ do_something $x
+ done) | process_output
+
+can be written like this:
+
+ cat list | parallel do_something | process_output
+
+For example: Find which host name in a list has IP address 1.2.3.4:
+
+ cat hosts.txt | parallel -P 100 host | grep 1.2.3.4
+
+If the processing requires more steps the for-loop like this:
+
+ (for x in `cat list` ; do
+ no_extension=${x%.*};
+ do_step1 $x scale $no_extension.jpg
+ do_step2 <$x $no_extension
+ done) | process_output
+
+and while-loops like this:
+
+ cat list | (while read x ; do
+ no_extension=${x%.*};
+ do_step1 $x scale $no_extension.jpg
+ do_step2 <$x $no_extension
+ done) | process_output
+
+can be written like this:
+
+ cat list | parallel "do_step1 {} scale {.}.jpg ; do_step2 <{} {.}" |\
+ process_output
+
+If the body of the loop is bigger, it improves readability to use a function:
+
+ (for x in `cat list` ; do
+ do_something $x
+ [... 100 lines that do something with $x ...]
+ done) | process_output
+
+ cat list | (while read x ; do
+ do_something $x
+ [... 100 lines that do something with $x ...]
+ done) | process_output
+
+can both be rewritten as:
+
+ doit() {
+ x=$1
+ do_something $x
+ [... 100 lines that do something with $x ...]
+ }
+ export -f doit
+ cat list | parallel doit
+
+=head2 EXAMPLE: Rewriting nested for-loops
+
+Nested for-loops like this:
+
+ (for x in `cat xlist` ; do
+ for y in `cat ylist` ; do
+ do_something $x $y
+ done
+ done) | process_output
+
+can be written like this:
+
+ parallel do_something {1} {2} :::: xlist ylist | process_output
+
+Nested for-loops like this:
+
+ (for colour in red green blue ; do
+ for size in S M L XL XXL ; do
+ echo $colour $size
+ done
+ done) | sort
+
+can be written like this:
+
+ parallel echo {1} {2} ::: red green blue ::: S M L XL XXL | sort
+
+
+=head2 EXAMPLE: Finding the lowest difference between files
+
+B<diff> is good for finding differences in text files. B<diff | wc -l>
+gives an indication of the size of the difference. To find the
+differences between all files in the current dir do:
+
+ parallel --tag 'diff {1} {2} | wc -l' ::: * ::: * | sort -nk3
+
+This way it is possible to see if some files are closer to other
+files.
+
+
+=head2 EXAMPLE: for-loops with column names
+
+When doing multiple nested for-loops it can be easier to keep track
+of the loop variable if it is named instead of just having a number.
+Use B<--header :> to let the first argument be a named alias for the
+positional replacement string:
+
+ parallel --header : echo {colour} {size} \
+ ::: colour red green blue ::: size S M L XL XXL
+
+This also works if the input file is a file with columns:
+
+ cat addressbook.tsv | \
+ parallel --colsep '\t' --header : echo {Name} {E-mail address}
+
+
+=head2 EXAMPLE: All combinations in a list
+
+GNU B<parallel> makes all combinations when given two lists.
+
+To make all combinations in a single list with unique values, you
+repeat the list and use replacement string B<{choose_k}>:
+
+ parallel --plus echo {choose_k} ::: A B C D ::: A B C D
+
+ parallel --plus echo 2{2choose_k} 1{1choose_k} ::: A B C D ::: A B C D
+
+B<{choose_k}> works for any number of input sources:
+
+ parallel --plus echo {choose_k} ::: A B C D ::: A B C D ::: A B C D
+
+Where B<{choose_k}> does not care about order, B<{uniq}> cares about
+order. It simply skips jobs where values from different input sources
+are the same:
+
+ parallel --plus echo {uniq} ::: A B C ::: A B C ::: A B C
+ parallel --plus echo {1uniq}+{2uniq}+{3uniq} \
+ ::: A B C ::: A B C ::: A B C
+
+The behaviour of B<{choose_k}> is undefined if the input sources do
+not contain the same values.
+
+
+=head2 EXAMPLE: Which git branches are the most similar
+
+If you have a ton of branches in git, it may be useful to see how
+similar the branches are. This gives a rough estimate:
+
+ parallel --trim rl --plus --tag 'git diff {choose_k} | wc -c' \
+ :::: <(git branch | grep -v '*') <(git branch | grep -v '*') |
+ sort -k3n
+
+
+=head2 EXAMPLE: From a to b and b to c
+
+Assume you have input like:
+
+ aardvark
+ babble
+ cab
+ dab
+ each
+
+and want to run combinations like:
+
+ aardvark babble
+ babble cab
+ cab dab
+ dab each
+
+If the input is in the file in.txt:
+
+ parallel echo {1} - {2} ::::+ <(head -n -1 in.txt) <(tail -n +2 in.txt)
+
+If the input is in the array $a here are two solutions:
+
+ seq $((${#a[@]}-1)) | \
+ env_parallel --env a echo '${a[{=$_--=}]} - ${a[{}]}'
+ parallel echo {1} - {2} ::: "${a[@]::${#a[@]}-1}" :::+ "${a[@]:1}"
+
+
+=head2 EXAMPLE: Count the differences between all files in a dir
+
+Using B<--results> the results are saved in /tmp/diffcount*.
+
+ parallel --results /tmp/diffcount "diff -U 0 {1} {2} | \
+ tail -n +3 |grep -v '^@'|wc -l" ::: * ::: *
+
+To see the difference between file A and file B look at the file
+'/tmp/diffcount/1/A/2/B'.
+
+
+=head2 EXAMPLE: Speeding up fast jobs
+
+Starting a job on the local machine takes around 3-10 ms. This can be
+a big overhead if the job takes very few ms to run. Often you can
+group small jobs together using B<-X> which will make the overhead
+less significant. Compare the speed of these:
+
+ seq -w 0 9999 | parallel touch pict{}.jpg
+ seq -w 0 9999 | parallel -X touch pict{}.jpg
+
+If your program cannot take multiple arguments, then you can use GNU
+B<parallel> to spawn multiple GNU B<parallel>s:
+
+ seq -w 0 9999999 | \
+ parallel -j10 -q -I,, --pipe parallel -j0 touch pict{}.jpg
+
+If B<-j0> normally spawns 252 jobs, then the above will try to spawn
+2520 jobs. On a normal GNU/Linux system you can spawn 32000 jobs using
+this technique with no problems. To raise the 32000 jobs limit raise
+/proc/sys/kernel/pid_max to 4194303.
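+
+On GNU/Linux that could be done like this (as root; one of several
+equivalent ways):
+
+ sysctl -w kernel.pid_max=4194303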
+
+If you do not need GNU B<parallel> to have control over each job (so
+no need for B<--retries> or B<--joblog> or similar), then it can be
+even faster if you can generate the command lines and pipe those to a
+shell. So if you can do this:
+
+ mygenerator | sh
+
+Then that can be parallelized like this:
+
+ mygenerator | parallel --pipe --block 10M sh
+
+E.g.
+
+ mygenerator() {
+ seq 10000000 | perl -pe 'print "echo This is fast job number "';
+ }
+ mygenerator | parallel --pipe --block 10M sh
+
+The overhead is 100000 times smaller, namely around 100 nanoseconds
+per job.
+
+
+=head2 EXAMPLE: Using shell variables
+
+When using shell variables you need to quote them correctly as they
+may otherwise be interpreted by the shell.
+
+Notice the difference between:
+
+ ARR=("My brother's 12\" records are worth <\$\$\$>"'!' Foo Bar)
+ parallel echo ::: ${ARR[@]} # This is probably not what you want
+
+and:
+
+ ARR=("My brother's 12\" records are worth <\$\$\$>"'!' Foo Bar)
+ parallel echo ::: "${ARR[@]}"
+
+When using variables in the actual command that contains special
+characters (e.g. space) you can quote them using B<'"$VAR"'> or using
+"'s and B<-q>:
+
+ VAR="My brother's 12\" records are worth <\$\$\$>"
+ parallel -q echo "$VAR" ::: '!'
+ export VAR
+ parallel echo '"$VAR"' ::: '!'
+
+If B<$VAR> does not contain ' then B<"'$VAR'"> will also work
+(and does not need B<export>):
+
+ VAR="My 12\" records are worth <\$\$\$>"
+ parallel echo "'$VAR'" ::: '!'
+
+If you use them in a function you just quote as you normally would do:
+
+ VAR="My brother's 12\" records are worth <\$\$\$>"
+ export VAR
+ myfunc() { echo "$VAR" "$1"; }
+ export -f myfunc
+ parallel myfunc ::: '!'
+
+
+=head2 EXAMPLE: Group output lines
+
+When running jobs that output data, you often do not want the output
+of multiple jobs to run together. GNU B<parallel> defaults to grouping
+the output of each job, so the output is printed when the job
+finishes. If you want full lines to be printed while the job is
+running you can use B<--line-buffer>. If you want output to be
+printed as soon as possible you can use B<-u>.
+
+Compare the output of:
+
+ parallel wget --progress=dot --limit-rate=100k \
+ https://ftpmirror.gnu.org/parallel/parallel-20{}0822.tar.bz2 \
+ ::: {12..16}
+ parallel --line-buffer wget --progress=dot --limit-rate=100k \
+ https://ftpmirror.gnu.org/parallel/parallel-20{}0822.tar.bz2 \
+ ::: {12..16}
+ parallel --latest-line wget --progress=dot --limit-rate=100k \
+ https://ftpmirror.gnu.org/parallel/parallel-20{}0822.tar.bz2 \
+ ::: {12..16}
+ parallel -u wget --progress=dot --limit-rate=100k \
+ https://ftpmirror.gnu.org/parallel/parallel-20{}0822.tar.bz2 \
+ ::: {12..16}
+
+=head2 EXAMPLE: Tag output lines
+
+GNU B<parallel> groups the output lines, but it can be hard to see
+where the different jobs begin. B<--tag> prepends the argument to make
+that more visible:
+
+ parallel --tag wget --limit-rate=100k \
+ https://ftpmirror.gnu.org/parallel/parallel-20{}0822.tar.bz2 \
+ ::: {12..16}
+
+B<--tag> works with B<--line-buffer> but not with B<-u>:
+
+ parallel --tag --line-buffer wget --limit-rate=100k \
+ https://ftpmirror.gnu.org/parallel/parallel-20{}0822.tar.bz2 \
+ ::: {12..16}
+
+Check the uptime of the servers in I<~/.parallel/sshloginfile>:
+
+ parallel --tag -S .. --nonall uptime
+
+
+=head2 EXAMPLE: Colorize output
+
+Give each job a new color. Most terminals support ANSI colors with the
+escape code "\033[30;3Xm" where 0 <= X <= 7:
+
+ seq 10 | \
+ parallel --tagstring '\033[30;3{=$_=++$::color%8=}m' seq {}
+ parallel --rpl '{color} $_="\033[30;3".(++$::color%8)."m"' \
+ --tagstring {color} seq {} ::: {1..10}
+
+To get rid of the initial \t (which comes from B<--tagstring>):
+
+ ... | perl -pe 's/\t//'
+
+
+=head2 EXAMPLE: Keep order of output same as order of input
+
+Normally the output of a job will be printed as soon as it
+completes. Sometimes you want the order of the output to remain the
+same as the order of the input. This is often important, if the output
+is used as input for another system. B<-k> will make sure the order of
+output will be in the same order as input even if later jobs end
+before earlier jobs.
+
+Append a string to every line in a text file:
+
+ cat textfile | parallel -k echo {} append_string
+
+If you remove B<-k> some of the lines may come out in the wrong order.
+
+Another example is B<traceroute>:
+
+ parallel traceroute ::: qubes-os.org debian.org freenetproject.org
+
+will give traceroute of qubes-os.org, debian.org and
+freenetproject.org, but it will be sorted according to which job
+completed first.
+
+To keep the order the same as input run:
+
+ parallel -k traceroute ::: qubes-os.org debian.org freenetproject.org
+
+This will make sure the traceroute to qubes-os.org will be printed
+first.
+
+A bit more complex example is downloading a huge file in chunks in
+parallel: Some internet connections will deliver more data if you
+download files in parallel. For downloading files in parallel see:
+"EXAMPLE: Download 10 images for each of the past 30 days". But if you
+are downloading a big file you can download the file in chunks in
+parallel.
+
+To download byte 10000000-19999999 you can use B<curl>:
+
+ curl -r 10000000-19999999 https://example.com/the/big/file >file.part
+
+To download a 1 GB file we need 100 10MB chunks downloaded and
+combined in the correct order.
+
+ seq 0 99 | parallel -k curl -r \
+ {}0000000-{}9999999 https://example.com/the/big/file > file
+
+
+=head2 EXAMPLE: Parallel grep
+
+B<grep -r> greps recursively through directories. GNU B<parallel> can
+often speed this up.
+
+ find . -type f | parallel -k -j150% -n 1000 -m grep -H -n STRING {}
+
+This will run 1.5 job per CPU, and give 1000 arguments to B<grep>.
+
+There are situations where the above will be slower than B<grep -r>:
+
+=over 2
+
+=item *
+
+If data is already in RAM. The overhead of starting jobs and buffering
+output may outweigh the benefit of running in parallel.
+
+=item *
+
+If the files are big. If a file cannot be read in a single seek, the
+disk may start thrashing.
+
+=back
+
+The speedup is caused by two factors:
+
+=over 2
+
+=item *
+
+On rotating harddisks small files often require a seek for each
+file. By searching for more files in parallel, the arm may pass
+another wanted file on its way.
+
+=item *
+
+NVMe drives often perform better when multiple commands run in
+parallel.
+
+=back
+
+
+=head2 EXAMPLE: Grepping n lines for m regular expressions
+
+The simplest solution to grep a big file for a lot of regexps is:
+
+ grep -f regexps.txt bigfile
+
+Or if the regexps are fixed strings:
+
+ grep -F -f regexps.txt bigfile
+
+There are 3 limiting factors: CPU, RAM, and disk I/O.
+
+RAM is easy to measure: If the B<grep> process takes up most of your
+free memory (e.g. when running B<top>), then RAM is a limiting factor.
+
+CPU is also easy to measure: If the B<grep> takes >90% CPU in B<top>,
+then the CPU is a limiting factor, and parallelization will speed this
+up.
+
+It is harder to see if disk I/O is the limiting factor, and depending
+on the disk system it may be faster or slower to parallelize. The only
+way to know for certain is to test and measure.
+
+
+=head3 Limiting factor: RAM
+
+The normal B<grep -f regexps.txt bigfile> works no matter the size of
+bigfile, but if regexps.txt is so big it cannot fit into memory, then
+you need to split this.
+
+B<grep -F> takes around 100 bytes of RAM and B<grep> takes about 500
+bytes of RAM per 1 byte of regexp. So if regexps.txt is 1% of your
+RAM, then it may be too big.
+
+If you can convert your regexps into fixed strings, do that. E.g. if
+the lines you are looking for in bigfile all look like:
+
+ ID1 foo bar baz Identifier1 quux
+ fubar ID2 foo bar baz Identifier2
+
+then your regexps.txt can be converted from:
+
+ ID1.*Identifier1
+ ID2.*Identifier2
+
+into:
+
+ ID1 foo bar baz Identifier1
+ ID2 foo bar baz Identifier2
+
+This way you can use B<grep -F> which takes around 80% less memory and
+is much faster.
+
+If it still does not fit in memory you can do this:
+
+ parallel --pipe-part -a regexps.txt --block 1M grep -F -f - -n bigfile | \
+ sort -un | perl -pe 's/^\d+://'
+
+The 1M should be your free memory divided by the number of CPU threads and
+divided by 200 for B<grep -F> and by 1000 for normal B<grep>. On
+GNU/Linux you can do:
+
+ free=$(awk '/^((Swap)?Cached|MemFree|Buffers):/ { sum += $2 }
+ END { print sum }' /proc/meminfo)
+ percpu=$((free / 200 / $(parallel --number-of-threads)))k
+
+ parallel --pipe-part -a regexps.txt --block $percpu --compress \
+ grep -F -f - -n bigfile | \
+ sort -un | perl -pe 's/^\d+://'
+
+If you can live with duplicated lines and wrong order, it is faster to do:
+
+ parallel --pipe-part -a regexps.txt --block $percpu --compress \
+ grep -F -f - bigfile
+
+=head3 Limiting factor: CPU
+
+If the CPU is the limiting factor parallelization should be done on
+the regexps:
+
+ cat regexps.txt | parallel --pipe -L1000 --round-robin --compress \
+ grep -f - -n bigfile | \
+ sort -un | perl -pe 's/^\d+://'
+
+The command will start one B<grep> per CPU and read I<bigfile> one
+time per CPU, but as that is done in parallel, all reads except the
+first will be cached in RAM. Depending on the size of I<regexps.txt> it
+may be faster to use B<--block 10m> instead of B<-L1000>.
+
+Some storage systems perform better when reading multiple chunks in
+parallel. This is true for some RAID systems and for some network file
+systems. To parallelize the reading of I<bigfile>:
+
+ parallel --pipe-part --block 100M -a bigfile -k --compress \
+ grep -f regexps.txt
+
+This will split I<bigfile> into 100MB chunks and run B<grep> on each of
+these chunks. To parallelize both reading of I<bigfile> and I<regexps.txt>
+combine the two using B<--cat>:
+
+ parallel --pipe-part --block 100M -a bigfile --cat cat regexps.txt \
+ \| parallel --pipe -L1000 --round-robin grep -f - {}
+
+If a line matches multiple regexps, the line may be duplicated.
+
+=head3 Bigger problem
+
+If the problem is too big to be solved by this, you are probably ready
+for Lucene.
+
+
+=head2 EXAMPLE: Using remote computers
+
+To run commands on a remote computer SSH needs to be set up and you
+must be able to login without entering a password (The commands
+B<ssh-copy-id>, B<ssh-agent>, and B<sshpass> may help you do that).
+
+If you need to login to a whole cluster, you typically do not want to
+accept the host key for every host. You want to accept them the first
+time and be warned if they are ever changed. To do that:
+
+ # Add the servers to the sshloginfile
+ (echo servera; echo serverb) > .parallel/my_cluster
+ # Make sure .ssh/config exist
+ touch .ssh/config
+ cp .ssh/config .ssh/config.backup
+ # Disable StrictHostKeyChecking temporarily
+ (echo 'Host *'; echo StrictHostKeyChecking no) >> .ssh/config
+ parallel --slf my_cluster --nonall true
+ # Remove the disabling of StrictHostKeyChecking
+ mv .ssh/config.backup .ssh/config
+
+The servers in B<.parallel/my_cluster> are now added in B<.ssh/known_hosts>.
+
+To run B<echo> on B<server.example.com>:
+
+ seq 10 | parallel --sshlogin server.example.com echo
+
+To run commands on more than one remote computer run:
+
+ seq 10 | parallel --sshlogin s1.example.com,s2.example.net echo
+
+Or:
+
+ seq 10 | parallel --sshlogin server.example.com \
+ --sshlogin server2.example.net echo
+
+If the login username is I<foo> on I<server2.example.net> use:
+
+ seq 10 | parallel --sshlogin server.example.com \
+ --sshlogin foo@server2.example.net echo
+
+If your list of hosts is I<server1-88.example.net> with login I<foo>:
+
+ seq 10 | parallel -Sfoo@server{1..88}.example.net echo
+
+To distribute the commands to a list of computers, make a file
+I<mycomputers> with all the computers:
+
+ server.example.com
+ foo@server2.example.com
+ server3.example.com
+
+Then run:
+
+ seq 10 | parallel --sshloginfile mycomputers echo
+
+To include the local computer add the special sshlogin ':' to the list:
+
+ server.example.com
+ foo@server2.example.com
+ server3.example.com
+ :
+
+GNU B<parallel> will try to determine the number of CPUs on each of
+the remote computers, and run one job per CPU - even if the remote
+computers do not have the same number of CPUs.
+
+If the number of CPUs on a remote computer is not identified
+correctly, the number of CPUs can be added in front. Here the
+computer has 8 CPUs:
+
+ seq 10 | parallel --sshlogin 8/server.example.com echo
+
+
+=head2 EXAMPLE: Transferring of files
+
+To recompress gzipped files with B<bzip2> using a remote computer run:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com \
+ --transfer "zcat {} | bzip2 -9 >{.}.bz2"
+
+This will list the .gz-files in the I<logs> directory and all
+directories below. Then it will transfer the files to
+I<server.example.com> to the corresponding directory in
+I<$HOME/logs>. On I<server.example.com> the file will be recompressed
+using B<zcat> and B<bzip2> resulting in the corresponding file with
+I<.gz> replaced with I<.bz2>.
+
+If you want the resulting bz2-file to be transferred back to the local
+computer add I<--return {.}.bz2>:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com \
+ --transfer --return {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2"
+
+After the recompressing is done the I<.bz2>-file is transferred back to
+the local computer and put next to the original I<.gz>-file.
+
+If you want to delete the transferred files on the remote computer add
+I<--cleanup>. This will remove both the file transferred to the remote
+computer and the files transferred from the remote computer:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com \
+ --transfer --return {.}.bz2 --cleanup "zcat {} | bzip2 -9 >{.}.bz2"
+
+If you want to run on several computers, add the computers to
+I<--sshlogin> either using ',' or multiple I<--sshlogin>s:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com,server2.example.com \
+ --sshlogin server3.example.com \
+ --transfer --return {.}.bz2 --cleanup "zcat {} | bzip2 -9 >{.}.bz2"
+
+You can add the local computer using I<--sshlogin :>. This will disable the
+removing and transferring for the local computer only:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com,server2.example.com \
+ --sshlogin server3.example.com \
+ --sshlogin : \
+ --transfer --return {.}.bz2 --cleanup "zcat {} | bzip2 -9 >{.}.bz2"
+
+Often I<--transfer>, I<--return> and I<--cleanup> are used together. They can be
+shortened to I<--trc>:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com,server2.example.com \
+ --sshlogin server3.example.com \
+ --sshlogin : \
+ --trc {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2"
+
+With the file I<mycomputers> containing the list of computers it becomes:
+
+ find logs/ -name '*.gz' | parallel --sshloginfile mycomputers \
+ --trc {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2"
+
+If the file I<~/.parallel/sshloginfile> contains the list of computers
+the special short hand I<-S ..> can be used:
+
+ find logs/ -name '*.gz' | parallel -S .. \
+ --trc {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2"
+
+
+=head2 EXAMPLE: Advanced file transfer
+
+Assume you have files in I<in/*>, want them processed on I<server>,
+and the results transferred back into I</other/dir>:
+
+ parallel -S server --trc /other/dir/./{/}.out \
+ cp {/} {/}.out ::: in/./*
+
+
+=head2 EXAMPLE: Distributing work to local and remote computers
+
+Convert *.mp3 to *.ogg running one process per CPU on local computer
+and server2:
+
+ parallel --trc {.}.ogg -S server2,: \
+ 'mpg321 -w - {} | oggenc -q0 - -o {.}.ogg' ::: *.mp3
+
+
+=head2 EXAMPLE: Running the same command on remote computers
+
+To run the command B<uptime> on remote computers you can do:
+
+ parallel --tag --nonall -S server1,server2 uptime
+
+B<--nonall> reads no arguments. If you have a list of jobs you want
+to run on each computer you can do:
+
+ parallel --tag --onall -S server1,server2 echo ::: 1 2 3
+
+Remove B<--tag> if you do not want the sshlogin added before the
+output.
+
+If you have a lot of hosts use '-j0' to access more hosts in parallel.
+
+
+=head2 EXAMPLE: Running 'sudo' on remote computers
+
+Put the password into passwordfile then run:
+
+ parallel --ssh 'cat passwordfile | ssh' --nonall \
+ -S user@server1,user@server2 sudo -S ls -l /root
+
+
+=head2 EXAMPLE: Using remote computers behind NAT wall
+
+If the workers are behind a NAT wall, you need some trickery to get to
+them.
+
+If you can B<ssh> to a jumphost, and reach the workers from there,
+then the obvious solution would be this, but it B<does not work>:
+
+ parallel --ssh 'ssh jumphost ssh' -S host1 echo ::: DOES NOT WORK
+
+It does not work because the command is dequoted by B<ssh> twice,
+whereas GNU B<parallel> only expects it to be dequoted once.
+
+You can use a bash function and have GNU B<parallel> quote the command:
+
+ jumpssh() { ssh -A jumphost ssh $(parallel --shellquote ::: "$@"); }
+ export -f jumpssh
+ parallel --ssh jumpssh -S host1 echo ::: this works
+
+Or you can instead put this in B<~/.ssh/config>:
+
+ Host host1 host2 host3
+ ProxyCommand ssh jumphost.domain nc -w 1 %h 22
+
+It requires B<nc> (netcat) to be installed on the jumphost. With
+this you can simply run:
+
+ parallel -S host1,host2,host3 echo ::: This does work
+
+=head3 No jumphost, but port forwards
+
+If there is no jumphost but each server has port 22 forwarded from the
+firewall (e.g. the firewall's port 22001 = port 22 on host1, 22002 = host2,
+22003 = host3) then you can use B<~/.ssh/config>:
+
+ Host host1.v
+ Port 22001
+ Host host2.v
+ Port 22002
+ Host host3.v
+ Port 22003
+ Host *.v
+ Hostname firewall
+
+And then use host{1..3}.v as normal hosts:
+
+ parallel -S host1.v,host2.v,host3.v echo ::: a b c
+
+=head3 No jumphost, no port forwards
+
+If ports cannot be forwarded, you need some sort of VPN to traverse
+the NAT-wall. TOR is one option for that, as it is very easy to get
+working.
+
+You need to install TOR and setup a hidden service. In B<torrc> put:
+
+ HiddenServiceDir /var/lib/tor/hidden_service/
+ HiddenServicePort 22 127.0.0.1:22
+
+Then start TOR: B</etc/init.d/tor restart>
+
+The TOR hostname is now in B</var/lib/tor/hidden_service/hostname> and
+is something similar to B<izjafdceobowklhz.onion>. Now you simply
+prepend B<torsocks> to B<ssh>:
+
+ parallel --ssh 'torsocks ssh' -S izjafdceobowklhz.onion \
+ -S zfcdaeiojoklbwhz.onion,auclucjzobowklhi.onion echo ::: a b c
+
+If not all hosts are accessible through TOR:
+
+ parallel -S 'torsocks ssh izjafdceobowklhz.onion,host2,host3' \
+ echo ::: a b c
+
+See more B<ssh> tricks on https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Proxies_and_Jump_Hosts
+
+
+=head2 EXAMPLE: Use sshpass with ssh
+
+If you cannot use passwordless login, you may be able to use B<sshpass>:
+
+ seq 10 | parallel -S user-with-password:MyPassword@server echo
+
+or:
+
+ export SSHPASS='MyPa$$w0rd'
+ seq 10 | parallel -S user-with-password:@server echo
+
+
+=head2 EXAMPLE: Use outrun instead of ssh
+
+B<outrun> lets you run a command on a remote server. B<outrun> sets up
+a connection to access files at the source server, and automatically
+transfers files. B<outrun> must be installed on the remote system.
+
+You can use B<outrun> in an sshlogin this way:
+
+ parallel -S 'outrun user@server' command
+
+or:
+
+ parallel --ssh outrun -S server command
+
+
+=head2 EXAMPLE: Slurm cluster
+
+The Slurm Workload Manager is used in many clusters.
+
+Here is a simple example of using GNU B<parallel> to call B<srun>:
+
+ #!/bin/bash
+
+ #SBATCH --time 00:02:00
+ #SBATCH --ntasks=4
+ #SBATCH --job-name GnuParallelDemo
+ #SBATCH --output gnuparallel.out
+
+ module purge
+ module load gnu_parallel
+
+ my_parallel="parallel --delay .2 -j $SLURM_NTASKS"
+ my_srun="srun --export=all --exclusive -n1"
+ my_srun="$my_srun --cpus-per-task=1 --cpu-bind=cores"
+ $my_parallel "$my_srun" echo This is job {} ::: {1..20}
+
+
+=head2 EXAMPLE: Parallelizing rsync
+
+B<rsync> is a great tool, but sometimes it will not fill up the
+available bandwidth. Running multiple B<rsync> in parallel can fix
+this.
+
+ cd src-dir
+ find . -type f |
+ parallel -j10 -X rsync -zR -Ha ./{} fooserver:/dest-dir/
+
+Adjust B<-j10> until you find the optimal number.
+
+B<rsync -R> will create the needed subdirectories, so the files are
+not all put into a single dir. The B<./> is needed so the resulting
+command looks similar to:
+
+ rsync -zR ././sub/dir/file fooserver:/dest-dir/
+
+The B</./> is what B<rsync -R> works on.
+
+If you are unable to push data, but need to pull them and the files
+are called digits.png (e.g. 000000.png) you might be able to do:
+
+ seq -w 0 99 | parallel rsync -Havessh fooserver:src/*{}.png destdir/
+
+
+=head2 EXAMPLE: Use multiple inputs in one command
+
+Copy files like foo.es.ext to foo.ext:
+
+ ls *.es.* | perl -pe 'print; s/\.es//' | parallel -N2 cp {1} {2}
+
+The perl command spits out 2 lines for each input. GNU B<parallel>
+takes 2 inputs (using B<-N2>) and replaces {1} and {2} with the inputs.
+
+Count in binary:
+
+ parallel -k echo ::: 0 1 ::: 0 1 ::: 0 1 ::: 0 1 ::: 0 1 ::: 0 1
+
+Print the number on the opposing sides of a six sided die:
+
+ parallel --link -a <(seq 6) -a <(seq 6 -1 1) echo
+ parallel --link echo :::: <(seq 6) <(seq 6 -1 1)
+
+Convert files from all subdirs to PNG-files with consecutive numbers
+(useful for making input PNG's for B<ffmpeg>):
+
+ parallel --link -a <(find . -type f | sort) \
+ -a <(seq $(find . -type f|wc -l)) convert {1} {2}.png
+
+Alternative version:
+
+ find . -type f | sort | parallel convert {} {#}.png
+
+
+=head2 EXAMPLE: Use a table as input
+
+Content of table_file.tsv:
+
+ foo<TAB>bar
+ baz <TAB> quux
+
+To run:
+
+ cmd -o bar -i foo
+ cmd -o quux -i baz
+
+you can run:
+
+ parallel -a table_file.tsv --colsep '\t' cmd -o {2} -i {1}
+
+Note: The default for GNU B<parallel> is to remove the spaces around
+the columns. To keep the spaces:
+
+ parallel -a table_file.tsv --trim n --colsep '\t' cmd -o {2} -i {1}
+
+
+=head2 EXAMPLE: Output to database
+
+GNU B<parallel> can output to a database table and a CSV-file:
+
+ dburl=csv:///%2Ftmp%2Fmydir
+ dbtableurl=$dburl/mytable.csv
+ parallel --sqlandworker $dbtableurl seq ::: {1..10}
+
+It is rather slow and takes up a lot of CPU time because GNU
+B<parallel> parses the whole CSV file for each update.
+
+A better approach is to use an SQLite-base and then convert that to CSV:
+
+ dburl=sqlite3:///%2Ftmp%2Fmy.sqlite
+ dbtableurl=$dburl/mytable
+ parallel --sqlandworker $dbtableurl seq ::: {1..10}
+ sql $dburl '.headers on' '.mode csv' 'SELECT * FROM mytable;'
+
+This takes around a second per job.
+
+If you have access to a real database system, such as PostgreSQL, it
+is even faster:
+
+ dburl=pg://user:pass@host/mydb
+ dbtableurl=$dburl/mytable
+ parallel --sqlandworker $dbtableurl seq ::: {1..10}
+ sql $dburl \
+ "COPY (SELECT * FROM mytable) TO stdout DELIMITER ',' CSV HEADER;"
+
+Or MySQL:
+
+ dburl=mysql://user:pass@host/mydb
+ dbtableurl=$dburl/mytable
+ parallel --sqlandworker $dbtableurl seq ::: {1..10}
+ sql -p -B $dburl "SELECT * FROM mytable;" > mytable.tsv
+ perl -pe 's/"/""/g; s/\t/","/g; s/^/"/; s/$/"/;
+ %s=("\\" => "\\", "t" => "\t", "n" => "\n");
+ s/\\([\\tn])/$s{$1}/g;' mytable.tsv
+
+
+=head2 EXAMPLE: Output to CSV-file for R
+
+If you have no need for the advanced job distribution control that a
+database provides, but you simply want output into a CSV file that you
+can read into R or LibreCalc, then you can use B<--results>:
+
+ parallel --results my.csv seq ::: 10 20 30
+ R
+ > mydf <- read.csv("my.csv");
+ > print(mydf[2,])
+ > write(as.character(mydf[2,c("Stdout")]),'')
+
+
+=head2 EXAMPLE: Use XML as input
+
+The show Aflyttet on Radio 24syv publishes an RSS feed with their audio
+podcasts on: http://arkiv.radio24syv.dk/audiopodcast/channel/4466232
+
+Using B<xpath> you can extract the URLs for 2019 and download them
+using GNU B<parallel>:
+
+ wget -O - http://arkiv.radio24syv.dk/audiopodcast/channel/4466232 | \
+ xpath -e "//pubDate[contains(text(),'2019')]/../enclosure/@url" | \
+ parallel -u wget '{= s/ url="//; s/"//; =}'
+
+
+=head2 EXAMPLE: Run the same command 10 times
+
+If you want to run the same command with the same arguments 10 times
+in parallel you can do:
+
+ seq 10 | parallel -n0 my_command my_args
+
+
+=head2 EXAMPLE: Working as cat | sh. Resource inexpensive jobs and evaluation
+
+GNU B<parallel> can work similar to B<cat | sh>.
+
+A resource inexpensive job is a job that takes very little CPU, disk
+I/O and network I/O. B<ping> is an example of a resource inexpensive
+job. So is B<wget> - if the webpages are small.
+
+The content of the file jobs_to_run:
+
+ ping -c 1 10.0.0.1
+ wget http://example.com/status.cgi?ip=10.0.0.1
+ ping -c 1 10.0.0.2
+ wget http://example.com/status.cgi?ip=10.0.0.2
+ ...
+ ping -c 1 10.0.0.255
+ wget http://example.com/status.cgi?ip=10.0.0.255
+
+To run 100 processes simultaneously do:
+
+ parallel -j 100 < jobs_to_run
+
+As there is no I<command>, the jobs will be evaluated by the shell.
+
+
+=head2 EXAMPLE: Call program with FASTA sequence
+
+FASTA files have the format:
+
+ >Sequence name1
+ sequence
+ sequence continued
+ >Sequence name2
+ sequence
+ sequence continued
+ more sequence
+
+To call B<myprog> with the sequence as argument run:
+
+ cat file.fasta |
+ parallel --pipe -N1 --recstart '>' --rrs \
+ 'read a; echo Name: "$a"; myprog $(tr -d "\n")'
+
+
+=head2 EXAMPLE: Call program with interleaved FASTQ records
+
+FASTQ files have the format:
+
+ @M10991:61:000000000-A7EML:1:1101:14011:1001 1:N:0:28
+ CTCCTAGGTCGGCATGATGGGGGAAGGAGAGCATGGGAAGAAATGAGAGAGTAGCAAGG
+ +
+ #8BCCGGGGGFEFECFGGGGGGGGG@;FFGGGEG@FF<EE<@FFC,CEGCCGGFF<FGF
+
+Interleaved FASTQ starts with a line like these:
+
+ @HWUSI-EAS100R:6:73:941:1973#0/1
+ @EAS139:136:FC706VJ:2:2104:15343:197393 1:Y:18:ATCACG
+ @EAS139:136:FC706VJ:2:2104:15343:197393 1:N:18:1
+
+where '/1' and ' 1:' determine this is read 1.
+
+This will cut big.fq into one chunk per CPU thread and pass it on
+stdin (standard input) to the program fastq-reader:
+
+ parallel --pipe-part -a big.fq --block -1 --regexp \
+ --recend '\n' --recstart '@.*(/1| 1:.*)\n[A-Za-z\n\.~]' \
+ fastq-reader
+
+
+=head2 EXAMPLE: Processing a big file using more CPUs
+
+To process a big file or some output you can use B<--pipe> to split up
+the data into blocks and pipe the blocks into the processing program.
+
+If the program is B<gzip -9> you can do:
+
+ cat bigfile | parallel --pipe --recend '' -k gzip -9 > bigfile.gz
+
+This will split B<bigfile> into blocks of 1 MB and pass that to B<gzip
+-9> in parallel. One B<gzip> will be run per CPU. The output of B<gzip
+-9> will be kept in order and saved to B<bigfile.gz>.
+
+B<gzip> works fine if the output is appended, but some processing does
+not work like that - for example sorting. For this GNU B<parallel> can
+put the output of each command into a file. This will sort a big file
+in parallel:
+
+ cat bigfile | parallel --pipe --files sort |\
+ parallel -Xj1 sort -m {} ';' rm {} >bigfile.sort
+
+Here B<bigfile> is split into blocks of around 1MB, each block ending
+in '\n' (which is the default for B<--recend>). Each block is passed
+to B<sort> and the output from B<sort> is saved into files. These
+files are passed to the second B<parallel> that runs B<sort -m> on the
+files before it removes the files. The output is saved to
+B<bigfile.sort>.
+
+GNU B<parallel>'s B<--pipe> maxes out at around 100 MB/s because every
+byte has to be copied through GNU B<parallel>. But if B<bigfile> is a
+real (seekable) file GNU B<parallel> can bypass the copying and send
+the parts directly to the program:
+
+ parallel --pipe-part --block 100m -a bigfile --files sort |\
+ parallel -Xj1 sort -m {} ';' rm {} >bigfile.sort
+
+
+=head2 EXAMPLE: Grouping input lines
+
+When processing with B<--pipe> you may have lines grouped by a
+value. Here is I<my.csv>:
+
+ Transaction Customer Item
+ 1 a 53
+ 2 b 65
+ 3 b 82
+ 4 c 96
+ 5 c 67
+ 6 c 13
+ 7 d 90
+ 8 d 43
+ 9 d 91
+ 10 d 84
+ 11 e 72
+ 12 e 102
+ 13 e 63
+ 14 e 56
+ 15 e 74
+
+Let us assume you want GNU B<parallel> to process each customer. In
+other words: You want all the transactions for a single customer to be
+treated as a single record.
+
+To do this we preprocess the data with a program that inserts a record
+separator before each customer (column 2 = $F[1]). Here we first make
+a 50 character random string, which we then use as the separator:
+
+ sep=`perl -e 'print map { ("a".."z","A".."Z")[rand(52)] } (1..50);'`
+ cat my.csv | \
+ perl -ape '$F[1] ne $l and print "'$sep'"; $l = $F[1]' | \
+ parallel --recend $sep --rrs --pipe -N1 wc
+
+If your program can process multiple customers replace B<-N1> with a
+reasonable B<--blocksize>.
+
+
+=head2 EXAMPLE: Running more than 250 jobs workaround
+
+If you need to run a massive amount of jobs in parallel, then you will
+likely hit the filehandle limit which is often around 250 jobs. If you
+are super user you can raise the limit in /etc/security/limits.conf
+but you can also use this workaround. The filehandle limit is per
+process. That means that if you just spawn more GNU B<parallel>s then
+each of them can run 250 jobs. This will spawn up to 2500 jobs:
+
+ cat myinput |\
+ parallel --pipe -N 50 --round-robin -j50 parallel -j50 your_prg
+
+This will spawn up to 62500 jobs (use with caution - you need 64 GB
+RAM to do this, and you may need to increase /proc/sys/kernel/pid_max):
+
+ cat myinput |\
+ parallel --pipe -N 250 --round-robin -j250 parallel -j250 your_prg
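+
+If you prefer raising the filehandle limit in
+/etc/security/limits.conf instead, the lines could look like this (a
+sketch; the values are illustrative and a re-login is needed for them
+to take effect):
+
+ * soft nofile 1048576
+ * hard nofile 1048576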
+
+
+=head2 EXAMPLE: Working as mutex and counting semaphore
+
+The command B<sem> is an alias for B<parallel --semaphore>.
+
+A counting semaphore will allow a given number of jobs to be started
+in the background. When that number of jobs is running in the
+background, GNU B<sem> will wait for one of these to complete before
+starting another command. B<sem --wait> will wait for all jobs to
+complete.
+
+Run 10 jobs concurrently in the background:
+
+ for i in *.log ; do
+ echo $i
+ sem -j10 gzip $i ";" echo done
+ done
+ sem --wait
+
+A mutex is a counting semaphore allowing only one job to run. This
+will edit the file I<myfile> and prepend it with lines containing the
+numbers 1 to 3:
+
+ seq 3 | parallel sem sed -i -e '1i{}' myfile
+
+As I<myfile> can be very big, it is important that only one process
+edits the file at a time.
+
+Name the semaphore to have multiple different semaphores active at the
+same time:
+
+ seq 3 | parallel sem --id mymutex sed -i -e '1i{}' myfile
+
+
+=head2 EXAMPLE: Mutex for a script
+
+Assume a script is called from cron or from a web service, but only
+one instance can be run at a time. With B<sem> and B<--shebang-wrap>
+the script can be made to wait for other instances to finish. Here in
+B<bash>:
+
+ #!/usr/bin/sem --shebang-wrap -u --id $0 --fg /bin/bash
+
+ echo This will run
+ sleep 5
+ echo exclusively
+
+Here B<perl>:
+
+ #!/usr/bin/sem --shebang-wrap -u --id $0 --fg /usr/bin/perl
+
+ print "This will run ";
+ sleep 5;
+ print "exclusively\n";
+
+Here B<python>:
+
+ #!/usr/local/bin/sem --shebang-wrap -u --id $0 --fg /usr/bin/python
+
+ import time
+ print("This will run ")
+ time.sleep(5)
+ print("exclusively")
+
+
+=head2 EXAMPLE: Start editor with file names from stdin (standard input)
+
+You can use GNU B<parallel> to start interactive programs like emacs or vi:
+
+ cat filelist | parallel --tty -X emacs
+ cat filelist | parallel --tty -X vi
+
+If there are more files than will fit on a single command line, the
+editor will be started again with the remaining files.
+
+
+=head2 EXAMPLE: Running sudo
+
+B<sudo> requires a password to run a command as root. It caches the
+authentication, so you only need to enter the password again if you
+have not used B<sudo> for a while.
+
+The command:
+
+ parallel sudo echo ::: This is a bad idea
+
+is no good, as you would be prompted for the sudo password for each of
+the jobs. Instead do:
+
+ sudo parallel echo ::: This is a good idea
+
+This way you only have to enter the sudo password once.
+
+=head2 EXAMPLE: Run ping in parallel
+
+B<ping> prints out statistics when killed with CTRL-C.
+
+Unfortunately, CTRL-C will also normally kill GNU B<parallel>.
+
+But by using B<--open-tty> and ignoring SIGINT you can get the
+desired effect:
+
+ parallel -j0 --open-tty --lb --tag ping '{= $SIG{INT}=sub {} =}' \
+ ::: 1.1.1.1 8.8.8.8 9.9.9.9 21.21.21.21 80.80.80.80 88.88.88.88
+
+B<--open-tty> will make the B<ping>s receive SIGINT (from CTRL-C).
+CTRL-C will not kill GNU B<parallel>, so it will only exit after the
+B<ping>s are done.
+
+
+=head2 EXAMPLE: GNU Parallel as queue system/batch manager
+
+GNU B<parallel> can work as a simple job queue system or batch manager.
+The idea is to put the jobs into a file and have GNU B<parallel> read
+from that continuously. As GNU B<parallel> will stop at end of file we
+use B<tail> to continue reading:
+
+ true >jobqueue; tail -n+0 -f jobqueue | parallel
+
+To submit your jobs to the queue:
+
+ echo my_command my_arg >> jobqueue
+
+You can of course use B<-S> to distribute the jobs to remote
+computers:
+
+ true >jobqueue; tail -n+0 -f jobqueue | parallel -S ..
+
+Output will only be printed when the next input is read after a job
+has finished: so you need to submit a job after the first has
+finished to see the output from the first job.
+
+If you keep this running for a long time, jobqueue will grow. A way of
+removing the jobs already run is by making GNU B<parallel> stop when
+it hits a special value and then restart. To use B<--eof> to make GNU
+B<parallel> exit, B<tail> also needs to be forced to exit:
+
+ true >jobqueue;
+ while true; do
+ tail -n+0 -f jobqueue |
+ (parallel -E StOpHeRe -S ..; echo GNU Parallel is now done;
+ perl -e 'while(<>){/StOpHeRe/ and last};print <>' jobqueue > j2;
+ (seq 1000 >> jobqueue &);
+ echo Done appending dummy data forcing tail to exit)
+ echo tail exited;
+ mv j2 jobqueue
+ done
+
+In some cases you can run on more CPUs and computers during the night:
+
+ # Day time
+ echo 50% > jobfile
+ cp day_server_list ~/.parallel/sshloginfile
+ # Night time
+ echo 100% > jobfile
+ cp night_server_list ~/.parallel/sshloginfile
+ tail -n+0 -f jobqueue | parallel --jobs jobfile -S ..
+
+GNU B<parallel> discovers if B<jobfile> or B<~/.parallel/sshloginfile>
+changes.
+
+
+=head2 EXAMPLE: GNU Parallel as dir processor
+
+If you have a dir in which users drop files that need to be
+processed, you can do this on GNU/Linux (if you know what
+B<inotifywait> is called on other platforms, file a bug report):
+
+ inotifywait -qmre MOVED_TO -e CLOSE_WRITE --format %w%f my_dir |\
+ parallel -u echo
+
+This will run the command B<echo> on each file put into B<my_dir> or
+subdirs of B<my_dir>.
+
+You can of course use B<-S> to distribute the jobs to remote
+computers:
+
+ inotifywait -qmre MOVED_TO -e CLOSE_WRITE --format %w%f my_dir |\
+ parallel -S .. -u echo
+
+If the files to be processed are in a tar file then unpacking one file
+and processing it immediately may be faster than first unpacking all
+files. Set up the dir processor as above and unpack into the dir.
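+
+A minimal sketch, assuming the archive is called I<big.tar>: the
+watcher from above runs in the background, and each file is processed
+as soon as B<tar> has unpacked it into B<my_dir>:
+
+ inotifywait -qmre MOVED_TO -e CLOSE_WRITE --format %w%f my_dir |\
+   parallel -u echo &
+ tar xf big.tar -C my_dir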
+
+Using GNU B<parallel> as dir processor has the same limitations as
+using GNU B<parallel> as queue system/batch manager.
+
+
+=head2 EXAMPLE: Locate the missing package
+
+If you have downloaded source and tried compiling it, you may have seen:
+
+ $ ./configure
+ [...]
+ checking for something.h... no
+ configure: error: "libsomething not found"
+
+Often it is not obvious which package you should install to get that
+file. Debian has B<apt-file> to search for a file. B<tracefile> from
+https://codeberg.org/tange/tangetools can tell which files a program
+tried to access. In this case we are interested in one of the last
+files:
+
+ $ tracefile -un ./configure | tail | parallel -j0 apt-file search
+
+
+=head1 AUTHOR
+
+When using GNU B<parallel> for a publication please cite:
+
+O. Tange (2011): GNU Parallel - The Command-Line Power Tool, ;login:
+The USENIX Magazine, February 2011:42-47.
+
+This helps funding further development; and it won't cost you a cent.
+If you pay 10000 EUR you should feel free to use GNU Parallel without citing.
+
+Copyright (C) 2007-10-18 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2008-2010 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+Parts of the manual concerning B<xargs> compatibility are inspired by
+the manual of B<xargs> from GNU findutils 4.4.2.
+
+
+=head1 LICENSE
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+at your option any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this
+documentation under the terms of the GNU Free Documentation License,
+Version 1.3 or any later version published by the Free Software
+Foundation; with no Invariant Sections, with no Front-Cover Texts, and
+with no Back-Cover Texts. A copy of the license is included in the
+file LICENSES/GFDL-1.3-or-later.txt.
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 2
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=back
+
+=over 9
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file
+LICENSES/CC-BY-SA-4.0.txt.
+
+
+=head1 SEE ALSO
+
+B<parallel>(1), B<parallel_tutorial>(7), B<env_parallel>(1),
+B<parset>(1), B<parsort>(1), B<parallel_alternatives>(7),
+B<parallel_design>(7), B<niceload>(1), B<sql>(1), B<ssh>(1),
+B<ssh-agent>(1), B<sshpass>(1), B<ssh-copy-id>(1), B<rsync>(1)
+
+=cut
diff --git a/src/parallel_tutorial.pod b/src/parallel_tutorial.pod
new file mode 100644
index 0000000..90a3663
--- /dev/null
+++ b/src/parallel_tutorial.pod
@@ -0,0 +1,3177 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=head1 GNU Parallel Tutorial
+
+This tutorial shows off much of GNU B<parallel>'s functionality. The
+tutorial is meant to teach you the options and syntax of GNU
+B<parallel>. The tutorial is B<not> meant to show realistic examples
+from the real world.
+
+=head2 Reader's guide
+
+If you prefer reading a book, buy B<GNU Parallel 2018> at
+https://www.lulu.com/shop/ole-tange/gnu-parallel-2018/paperback/product-23558902.html
+or download it at: https://doi.org/10.5281/zenodo.1146014
+
+Otherwise start by watching the intro videos for a quick introduction:
+https://www.youtube.com/playlist?list=PL284C9FF2488BC6D1
+
+Then browse through the examples (B<man parallel_examples>). That will give
+you an idea of what GNU B<parallel> is capable of.
+
+If you want to dive even deeper: spend a couple of hours walking
+through the tutorial (B<man parallel_tutorial>). Your command line
+will love you for it.
+
+Finally you may want to look at the rest of the manual (B<man
+parallel>) if you have special needs not already covered.
+
+If you want to know the design decisions behind GNU B<parallel>, try:
+B<man parallel_design>. This is also a good intro if you intend to
+change GNU B<parallel>.
+
+
+
+=head1 Prerequisites
+
+To run this tutorial you must have the following:
+
+=over 9
+
+=item parallel >= version 20160822
+
+Install the newest version using your package manager (recommended for
+security reasons), the way described in README, or with this command:
+
+ $ (wget -O - pi.dk/3 || lynx -source pi.dk/3 || curl pi.dk/3/ || \
+ fetch -o - http://pi.dk/3 ) > install.sh
+ $ sha1sum install.sh
+ 12345678 51621b7f 1ee103c0 0783aae4 ef9889f8
+ $ md5sum install.sh
+ 62eada78 703b5500 241b8e50 baf62758
+ $ sha512sum install.sh
+ 160d3159 9480cf5c a101512f 150b7ac0 206a65dc 86f2bb6b bdf1a2bc 96bc6d06
+ 7f8237c2 0964b67f bccf8a93 332528fa 11e5ab43 2a6226a6 ceb197ab 7f03c061
+ $ bash install.sh
+
+This will also install the newest version of the tutorial which you
+can see by running this:
+
+ man parallel_tutorial
+
+Most of the tutorial will work on older versions, too.
+
+
+=item abc-file:
+
+The file can be generated by this command:
+
+ parallel -k echo ::: A B C > abc-file
+
+=item def-file:
+
+The file can be generated by this command:
+
+ parallel -k echo ::: D E F > def-file
+
+=item abc0-file:
+
+The file can be generated by this command:
+
+ perl -e 'printf "A\0B\0C\0"' > abc0-file
+
+=item abc_-file:
+
+The file can be generated by this command:
+
+ perl -e 'printf "A_B_C_"' > abc_-file
+
+=item tsv-file.tsv
+
+The file can be generated by this command:
+
+ perl -e 'printf "f1\tf2\nA\tB\nC\tD\n"' > tsv-file.tsv
+
+=item num8
+
+The file can be generated by this command:
+
+ perl -e 'for(1..8){print "$_\n"}' > num8
+
+=item num128
+
+The file can be generated by this command:
+
+ perl -e 'for(1..128){print "$_\n"}' > num128
+
+=item num30000
+
+The file can be generated by this command:
+
+ perl -e 'for(1..30000){print "$_\n"}' > num30000
+
+=item num1000000
+
+The file can be generated by this command:
+
+ perl -e 'for(1..1000000){print "$_\n"}' > num1000000
+
+=item num_%header
+
+The file can be generated by this command:
+
+ (echo %head1; echo %head2; \
+ perl -e 'for(1..10){print "$_\n"}') > num_%header
+
+=item fixedlen
+
+The file can be generated by this command:
+
+ perl -e 'print "HHHHAAABBBCCC"' > fixedlen
+
+=item For remote running: passwordless ssh login to the 2 servers
+given in $SERVER1 and $SERVER2 must work.
+
+ SERVER1=server.example.com
+ SERVER2=server2.example.net
+
+So you must be able to do this without entering a password:
+
+ ssh $SERVER1 echo works
+ ssh $SERVER2 echo works
+
+It can be set up by running B<ssh-keygen; ssh-copy-id $SERVER1> and
+using an empty passphrase, or you can use B<ssh-agent>.
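+
+A minimal sketch of the B<ssh-agent> alternative (the key file name
+is illustrative):
+
+ eval `ssh-agent`
+ ssh-add ~/.ssh/id_ed25519
+ ssh $SERVER1 echo works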
+
+=back
+
+
+=head1 Input sources
+
+GNU B<parallel> reads input from input sources. These can be files, the
+command line, and stdin (standard input or a pipe).
+
+=head2 A single input source
+
+Input can be read from the command line:
+
+ parallel echo ::: A B C
+
+Output (the order may be different because the jobs are run in
+parallel):
+
+ A
+ B
+ C
+
+The input source can be a file:
+
+ parallel -a abc-file echo
+
+Output: Same as above.
+
+STDIN (standard input) can be the input source:
+
+ cat abc-file | parallel echo
+
+Output: Same as above.
+
+
+=head2 Multiple input sources
+
+GNU B<parallel> can take multiple input sources given on the command
+line. GNU B<parallel> then generates all combinations of the input
+sources:
+
+ parallel echo ::: A B C ::: D E F
+
+Output (the order may be different):
+
+ A D
+ A E
+ A F
+ B D
+ B E
+ B F
+ C D
+ C E
+ C F
+
+The input sources can be files:
+
+ parallel -a abc-file -a def-file echo
+
+Output: Same as above.
+
+STDIN (standard input) can be one of the input sources using B<->:
+
+ cat abc-file | parallel -a - -a def-file echo
+
+Output: Same as above.
+
+Instead of B<-a> files can be given after B<::::>:
+
+ cat abc-file | parallel echo :::: - def-file
+
+Output: Same as above.
+
+::: and :::: can be mixed:
+
+ parallel echo ::: A B C :::: def-file
+
+Output: Same as above.
+
+=head3 Linking arguments from input sources
+
+With B<--link> you can link the input sources and get one argument
+from each input source:
+
+ parallel --link echo ::: A B C ::: D E F
+
+Output (the order may be different):
+
+ A D
+ B E
+ C F
+
+If one of the input sources is too short, its values will wrap:
+
+ parallel --link echo ::: A B C D E ::: F G
+
+Output (the order may be different):
+
+ A F
+ B G
+ C F
+ D G
+ E F
+
+For more flexible linking you can use B<:::+> and B<::::+>. They work
+like B<:::> and B<::::> except they link the previous input source to
+this input source.
+
+This will link ABC to GHI:
+
+ parallel echo :::: abc-file :::+ G H I :::: def-file
+
+Output (the order may be different):
+
+ A G D
+ A G E
+ A G F
+ B H D
+ B H E
+ B H F
+ C I D
+ C I E
+ C I F
+
+This will link GHI to DEF:
+
+ parallel echo :::: abc-file ::: G H I ::::+ def-file
+
+Output (the order may be different):
+
+ A G D
+ A H E
+ A I F
+ B G D
+ B H E
+ B I F
+ C G D
+ C H E
+ C I F
+
+If one of the input sources is too short when using B<:::+> or
+B<::::+>, the rest will be ignored:
+
+ parallel echo ::: A B C D E :::+ F G
+
+Output (the order may be different):
+
+ A F
+ B G
+
+
+=head2 Changing the argument separator
+
+GNU B<parallel> can use other separators than B<:::> or B<::::>. This is
+typically useful if B<:::> or B<::::> is used in the command to run:
+
+ parallel --arg-sep ,, echo ,, A B C :::: def-file
+
+Output (the order may be different):
+
+ A D
+ A E
+ A F
+ B D
+ B E
+ B F
+ C D
+ C E
+ C F
+
+Changing the argument file separator:
+
+ parallel --arg-file-sep // echo ::: A B C // def-file
+
+Output: Same as above.
+
+
+=head2 Changing the argument delimiter
+
+GNU B<parallel> will normally treat a full line as a single argument: It
+uses B<\n> as argument delimiter. This can be changed with B<-d>:
+
+ parallel -d _ echo :::: abc_-file
+
+Output (the order may be different):
+
+ A
+ B
+ C
+
+NUL can be given as B<\0>:
+
+ parallel -d '\0' echo :::: abc0-file
+
+Output: Same as above.
+
+A shorthand for B<-d '\0'> is B<-0> (this will often be used to read files
+from B<find ... -print0>):
+
+ parallel -0 echo :::: abc0-file
+
+Output: Same as above.
+
+=head2 End-of-file value for input source
+
+GNU B<parallel> can stop reading when it encounters a certain value:
+
+ parallel -E stop echo ::: A B stop C D
+
+Output:
+
+ A
+ B
+
+=head2 Skipping empty lines
+
+Using B<--no-run-if-empty> GNU B<parallel> will skip empty lines.
+
+ (echo 1; echo; echo 2) | parallel --no-run-if-empty echo
+
+Output:
+
+ 1
+ 2
+
+
+=head1 Building the command line
+
+=head2 No command means arguments are commands
+
+If no command is given after parallel the arguments themselves are
+treated as commands:
+
+ parallel ::: ls 'echo foo' pwd
+
+Output (the order may be different):
+
+ [list of files in current dir]
+ foo
+ [/path/to/current/working/dir]
+
+The command can be a script, a binary or a Bash function if the function is
+exported using B<export -f>:
+
+ # Only works in Bash
+ my_func() {
+ echo in my_func $1
+ }
+ export -f my_func
+ parallel my_func ::: 1 2 3
+
+Output (the order may be different):
+
+ in my_func 1
+ in my_func 2
+ in my_func 3
+
+=head2 Replacement strings
+
+=head3 The 7 predefined replacement strings
+
+GNU B<parallel> has several replacement strings. If no replacement
+strings are used the default is to append B<{}>:
+
+ parallel echo ::: dir/file.ext dir2/file2.ext2
+
+Output:
+
+ dir/file.ext
+ dir2/file2.ext2
+
+The default replacement string is B<{}>:
+
+ parallel echo {} ::: dir/file.ext dir2/file2.ext2
+
+Output:
+
+ dir/file.ext
+ dir2/file2.ext2
+
+The replacement string B<{.}> removes the extension:
+
+ parallel echo {.} ::: dir/file.ext dir2/file2.ext2
+
+Output:
+
+ dir/file
+ dir2/file2
+
+The replacement string B<{/}> removes the path:
+
+ parallel echo {/} ::: dir/file.ext dir2/file2.ext2
+
+Output:
+
+ file.ext
+ file2.ext2
+
+The replacement string B<{//}> keeps only the path:
+
+ parallel echo {//} ::: dir/file.ext dir2/file2.ext2
+
+Output:
+
+ dir
+ dir2
+
+The replacement string B<{/.}> removes the path and the extension:
+
+ parallel echo {/.} ::: dir/file.ext dir2/file2.ext2
+
+Output:
+
+ file
+ file2
+
+The replacement string B<{#}> gives the job number:
+
+ parallel echo {#} ::: A B C
+
+Output (the order may be different):
+
+ 1
+ 2
+ 3
+
+The replacement string B<{%}> gives the job slot number (between 1
+and the number of jobs to run in parallel):
+
+ parallel -j 2 echo {%} ::: A B C
+
+Output (the order may be different and 1 and 2 may be swapped):
+
+ 1
+ 2
+ 1
+
+=head3 Changing the replacement strings
+
+The replacement string B<{}> can be changed with B<-I>:
+
+ parallel -I ,, echo ,, ::: A/B.C
+
+Output:
+
+ A/B.C
+
+The replacement string B<{.}> can be changed with B<--extensionreplace>:
+
+ parallel --extensionreplace ,, echo ,, ::: A/B.C
+
+Output:
+
+ A/B
+
+The replacement string B<{/}> can be replaced with B<--basenamereplace>:
+
+ parallel --basenamereplace ,, echo ,, ::: A/B.C
+
+Output:
+
+ B.C
+
+The replacement string B<{//}> can be changed with B<--dirnamereplace>:
+
+ parallel --dirnamereplace ,, echo ,, ::: A/B.C
+
+Output:
+
+ A
+
+The replacement string B<{/.}> can be changed with B<--basenameextensionreplace>:
+
+ parallel --basenameextensionreplace ,, echo ,, ::: A/B.C
+
+Output:
+
+ B
+
+The replacement string B<{#}> can be changed with B<--seqreplace>:
+
+ parallel --seqreplace ,, echo ,, ::: A B C
+
+Output (the order may be different):
+
+ 1
+ 2
+ 3
+
+The replacement string B<{%}> can be changed with B<--slotreplace>:
+
+ parallel -j2 --slotreplace ,, echo ,, ::: A B C
+
+Output (the order may be different and 1 and 2 may be swapped):
+
+ 1
+ 2
+ 1
+
+=head3 Perl expression replacement string
+
+When the predefined replacement strings are not flexible enough, a
+perl expression can be used instead. One example is to remove two
+extensions: B<foo.tar.gz> becomes B<foo>:
+
+ parallel echo '{= s:\.[^.]+$::;s:\.[^.]+$::; =}' ::: foo.tar.gz
+
+Output:
+
+ foo
+
+In B<{= =}> you can access all of GNU B<parallel>'s internal functions
+and variables. A few are worth mentioning.
+
+B<total_jobs()> returns the total number of jobs:
+
+ parallel echo Job {#} of {= '$_=total_jobs()' =} ::: {1..5}
+
+Output:
+
+ Job 1 of 5
+ Job 2 of 5
+ Job 3 of 5
+ Job 4 of 5
+ Job 5 of 5
+
+B<Q(...)> shell quotes the string:
+
+ parallel echo {} shell quoted is {= '$_=Q($_)' =} ::: '*/!#$'
+
+Output:
+
+ */!#$ shell quoted is \*/\!\#\$
+
+B<skip()> skips the job:
+
+ parallel echo {= 'if($_==3) { skip() }' =} ::: {1..5}
+
+Output:
+
+ 1
+ 2
+ 4
+ 5
+
+B<@arg> contains the input source variables:
+
+ parallel echo {= 'if($arg[1]==$arg[2]) { skip() }' =} \
+ ::: {1..3} ::: {1..3}
+
+Output:
+
+ 1 2
+ 1 3
+ 2 1
+ 2 3
+ 3 1
+ 3 2
+
+If the strings B<{=> and B<=}> cause problems they can be replaced with B<--parens>:
+
+ parallel --parens ,,,, echo ',, s:\.[^.]+$::;s:\.[^.]+$::; ,,' \
+ ::: foo.tar.gz
+
+Output:
+
+ foo
+
+To define a shorthand replacement string use B<--rpl>:
+
+ parallel --rpl '.. s:\.[^.]+$::;s:\.[^.]+$::;' echo '..' \
+ ::: foo.tar.gz
+
+Output: Same as above.
+
+If the shorthand starts with B<{> it can be used as a positional
+replacement string, too:
+
+ parallel --rpl '{..} s:\.[^.]+$::;s:\.[^.]+$::;' echo '{..}' \
+ ::: foo.tar.gz
+
+Output: Same as above.
+
+If the shorthand contains matching parentheses the replacement string
+becomes a dynamic replacement string and the string in the
+parentheses can be accessed as $$1. If there are multiple sets of
+matching parentheses, the matched strings can be accessed using $$2,
+$$3 and so on.
+
+You can think of this as giving arguments to the replacement
+string. Here we give the argument B<.tar.gz> to the replacement string
+B<{%I<string>}> which removes I<string>:
+
+ parallel --rpl '{%(.+?)} s/$$1$//;' echo {%.tar.gz}.zip ::: foo.tar.gz
+
+Output:
+
+ foo.zip
+
+Here we give the two arguments B<tar.gz> and B<zip> to the replacement
+string B<{/I<string1>/I<string2>}> which replaces I<string1> with
+I<string2>:
+
+ parallel --rpl '{/(.+?)/(.*?)} s/$$1/$$2/;' echo {/tar.gz/zip} \
+ ::: foo.tar.gz
+
+Output:
+
+ foo.zip
+
+
+GNU B<parallel>'s 7 replacement strings are implemented like this:
+
+ --rpl '{} '
+ --rpl '{#} $_=$job->seq()'
+ --rpl '{%} $_=$job->slot()'
+ --rpl '{/} s:.*/::'
+ --rpl '{//} $Global::use{"File::Basename"} ||=
+ eval "use File::Basename; 1;"; $_ = dirname($_);'
+ --rpl '{/.} s:.*/::; s:\.[^/.]+$::;'
+ --rpl '{.} s:\.[^/.]+$::'
+
+=head3 Positional replacement strings
+
+With multiple input sources the argument from the individual input
+sources can be accessed with S<< B<{>numberB<}> >>:
+
+ parallel echo {1} and {2} ::: A B ::: C D
+
+Output (the order may be different):
+
+ A and C
+ A and D
+ B and C
+ B and D
+
+The positional replacement strings can also be modified using B</>, B<//>, B</.>, and B<.>:
+
+ parallel echo /={1/} //={1//} /.={1/.} .={1.} ::: A/B.C D/E.F
+
+Output (the order may be different):
+
+ /=B.C //=A /.=B .=A/B
+ /=E.F //=D /.=E .=D/E
+
+If a position is negative, it will refer to the input source counted
+from the end:
+
+ parallel echo 1={1} 2={2} 3={3} -1={-1} -2={-2} -3={-3} \
+ ::: A B ::: C D ::: E F
+
+Output (the order may be different):
+
+ 1=A 2=C 3=E -1=E -2=C -3=A
+ 1=A 2=C 3=F -1=F -2=C -3=A
+ 1=A 2=D 3=E -1=E -2=D -3=A
+ 1=A 2=D 3=F -1=F -2=D -3=A
+ 1=B 2=C 3=E -1=E -2=C -3=B
+ 1=B 2=C 3=F -1=F -2=C -3=B
+ 1=B 2=D 3=E -1=E -2=D -3=B
+ 1=B 2=D 3=F -1=F -2=D -3=B
+
+
+=head3 Positional perl expression replacement string
+
+To use a perl expression as a positional replacement string simply
+prepend the perl expression with a number and a space:
+
+ parallel echo '{=2 s:\.[^.]+$::;s:\.[^.]+$::; =} {1}' \
+ ::: bar ::: foo.tar.gz
+
+Output:
+
+ foo bar
+
+If a shorthand defined using B<--rpl> starts with B<{> it can be used as
+a positional replacement string, too:
+
+ parallel --rpl '{..} s:\.[^.]+$::;s:\.[^.]+$::;' echo '{2..} {1}' \
+ ::: bar ::: foo.tar.gz
+
+Output: Same as above.
+
+
+=head3 Input from columns
+
+The columns in a file can be bound to positional replacement strings
+using B<--colsep>. Here the columns are separated by TAB (\t):
+
+ parallel --colsep '\t' echo 1={1} 2={2} :::: tsv-file.tsv
+
+Output (the order may be different):
+
+ 1=f1 2=f2
+ 1=A 2=B
+ 1=C 2=D
+
+=head3 Header defined replacement strings
+
+With B<--header> GNU B<parallel> will use the first value of the input
+source as the name of the replacement string. Only the non-modified
+version B<{}> is supported:
+
+ parallel --header : echo f1={f1} f2={f2} ::: f1 A B ::: f2 C D
+
+Output (the order may be different):
+
+ f1=A f2=C
+ f1=A f2=D
+ f1=B f2=C
+ f1=B f2=D
+
+It is useful with B<--colsep> for processing files with TAB separated values:
+
+ parallel --header : --colsep '\t' echo f1={f1} f2={f2} \
+ :::: tsv-file.tsv
+
+Output (the order may be different):
+
+ f1=A f2=B
+ f1=C f2=D
+
+=head3 More pre-defined replacement strings with --plus
+
+B<--plus> adds the replacement strings B<{+/} {+.} {+..} {+...} {..} {...}
+{/..} {/...} {##}>. The idea being that B<{+foo}> matches the opposite of B<{foo}>
+and B<{}> = B<{+/}>/B<{/}> = B<{.}>.B<{+.}> = B<{+/}>/B<{/.}>.B<{+.}> = B<{..}>.B<{+..}> =
+B<{+/}>/B<{/..}>.B<{+..}> = B<{...}>.B<{+...}> = B<{+/}>/B<{/...}>.B<{+...}>.
+
+ parallel --plus echo {} ::: dir/sub/file.ex1.ex2.ex3
+ parallel --plus echo {+/}/{/} ::: dir/sub/file.ex1.ex2.ex3
+ parallel --plus echo {.}.{+.} ::: dir/sub/file.ex1.ex2.ex3
+ parallel --plus echo {+/}/{/.}.{+.} ::: dir/sub/file.ex1.ex2.ex3
+ parallel --plus echo {..}.{+..} ::: dir/sub/file.ex1.ex2.ex3
+ parallel --plus echo {+/}/{/..}.{+..} ::: dir/sub/file.ex1.ex2.ex3
+ parallel --plus echo {...}.{+...} ::: dir/sub/file.ex1.ex2.ex3
+ parallel --plus echo {+/}/{/...}.{+...} ::: dir/sub/file.ex1.ex2.ex3
+
+Output:
+
+ dir/sub/file.ex1.ex2.ex3
+
+B<{##}> is simply the number of jobs:
+
+ parallel --plus echo Job {#} of {##} ::: {1..5}
+
+Output:
+
+ Job 1 of 5
+ Job 2 of 5
+ Job 3 of 5
+ Job 4 of 5
+ Job 5 of 5
+
+=head3 Dynamic replacement strings with --plus
+
+B<--plus> also defines these dynamic replacement strings:
+
+=over 19
+
+=item B<{:-I<string>}>
+
+Default value is I<string> if the argument is empty.
+
+=item B<{:I<number>}>
+
+Substring from I<number> till end of string.
+
+=item B<{:I<number1>:I<number2>}>
+
+Substring from I<number1> to I<number2>.
+
+=item B<{#I<string>}>
+
+If the argument starts with I<string>, remove it.
+
+=item B<{%I<string>}>
+
+If the argument ends with I<string>, remove it.
+
+=item B<{/I<string1>/I<string2>}>
+
+Replace I<string1> with I<string2>.
+
+=item B<{^I<string>}>
+
+If the argument starts with I<string>, upper case it. I<string> must
+be a single letter.
+
+=item B<{^^I<string>}>
+
+If the argument contains I<string>, upper case it. I<string> must be a
+single letter.
+
+=item B<{,I<string>}>
+
+If the argument starts with I<string>, lower case it. I<string> must
+be a single letter.
+
+=item B<{,,I<string>}>
+
+If the argument contains I<string>, lower case it. I<string> must be a
+single letter.
+
+=back
+
+They are inspired by B<Bash>:
+
+ unset myvar
+ echo ${myvar:-myval}
+ parallel --plus echo {:-myval} ::: "$myvar"
+
+ myvar=abcAaAdef
+ echo ${myvar:2}
+ parallel --plus echo {:2} ::: "$myvar"
+
+ echo ${myvar:2:3}
+ parallel --plus echo {:2:3} ::: "$myvar"
+
+ echo ${myvar#bc}
+ parallel --plus echo {#bc} ::: "$myvar"
+ echo ${myvar#abc}
+ parallel --plus echo {#abc} ::: "$myvar"
+
+ echo ${myvar%de}
+ parallel --plus echo {%de} ::: "$myvar"
+ echo ${myvar%def}
+ parallel --plus echo {%def} ::: "$myvar"
+
+ echo ${myvar/def/ghi}
+ parallel --plus echo {/def/ghi} ::: "$myvar"
+
+ echo ${myvar^a}
+ parallel --plus echo {^a} ::: "$myvar"
+ echo ${myvar^^a}
+ parallel --plus echo {^^a} ::: "$myvar"
+
+ myvar=AbcAaAdef
+ echo ${myvar,A}
+ parallel --plus echo '{,A}' ::: "$myvar"
+ echo ${myvar,,A}
+ parallel --plus echo '{,,A}' ::: "$myvar"
+
+Output:
+
+ myval
+ myval
+ cAaAdef
+ cAaAdef
+ cAa
+ cAa
+ abcAaAdef
+ abcAaAdef
+ AaAdef
+ AaAdef
+ abcAaAdef
+ abcAaAdef
+ abcAaA
+ abcAaA
+ abcAaAghi
+ abcAaAghi
+ AbcAaAdef
+ AbcAaAdef
+ AbcAAAdef
+ AbcAAAdef
+ abcAaAdef
+ abcAaAdef
+ abcaaadef
+ abcaaadef
+
+
+=head2 More than one argument
+
+With B<--xargs> GNU B<parallel> will fit as many arguments as possible on a
+single line:
+
+ cat num30000 | parallel --xargs echo | wc -l
+
+Output (if you run this under Bash on GNU/Linux):
+
+ 2
+
+The 30000 arguments fitted on 2 lines.
+
+The maximal length of a single line can be set with B<-s>. With a maximal
+line length of 10000 chars 17 commands will be run:
+
+ cat num30000 | parallel --xargs -s 10000 echo | wc -l
+
+Output:
+
+ 17
+
+For better parallelism GNU B<parallel> can distribute the arguments
+between all the parallel jobs when end of file is met.
+
+Below GNU B<parallel> reads the last argument when generating the second
+job. When GNU B<parallel> reads the last argument, it spreads all the
+arguments for the second job over 4 jobs instead, as 4 parallel jobs
+are requested.
+
+The first job will be the same as the B<--xargs> example above, but the
+second job will be split into 4 evenly sized jobs, resulting in a
+total of 5 jobs:
+
+ cat num30000 | parallel --jobs 4 -m echo | wc -l
+
+Output (if you run this under Bash on GNU/Linux):
+
+ 5
+
+This is even more visible when running 4 jobs with 10 arguments. The
+10 arguments are being spread over 4 jobs:
+
+ parallel --jobs 4 -m echo ::: 1 2 3 4 5 6 7 8 9 10
+
+Output:
+
+ 1 2 3
+ 4 5 6
+ 7 8 9
+ 10
+
+A replacement string can be part of a word. B<-m> will not repeat the context:
+
+ parallel --jobs 4 -m echo pre-{}-post ::: A B C D E F G
+
+Output (the order may be different):
+
+ pre-A B-post
+ pre-C D-post
+ pre-E F-post
+ pre-G-post
+
+To repeat the context use B<-X> which otherwise works like B<-m>:
+
+ parallel --jobs 4 -X echo pre-{}-post ::: A B C D E F G
+
+Output (the order may be different):
+
+ pre-A-post pre-B-post
+ pre-C-post pre-D-post
+ pre-E-post pre-F-post
+ pre-G-post
+
+To limit the number of arguments use B<-N>:
+
+ parallel -N3 echo ::: A B C D E F G H
+
+Output (the order may be different):
+
+ A B C
+ D E F
+ G H
+
+B<-N> also sets the positional replacement strings:
+
+ parallel -N3 echo 1={1} 2={2} 3={3} ::: A B C D E F G H
+
+Output (the order may be different):
+
+ 1=A 2=B 3=C
+ 1=D 2=E 3=F
+ 1=G 2=H 3=
+
+B<-N0> reads 1 argument but inserts none:
+
+ parallel -N0 echo foo ::: 1 2 3
+
+Output:
+
+ foo
+ foo
+ foo
+
+=head2 Quoting
+
+Command lines that contain special characters may need to be protected from the shell.
+
+The B<perl> program B<print "@ARGV\n"> basically works like B<echo>.
+
+ perl -e 'print "@ARGV\n"' A
+
+Output:
+
+ A
+
+To run that in parallel the command needs to be quoted:
+
+ parallel perl -e 'print "@ARGV\n"' ::: This wont work
+
+Output:
+
+ [Nothing]
+
+To quote the command use B<-q>:
+
+ parallel -q perl -e 'print "@ARGV\n"' ::: This works
+
+Output (the order may be different):
+
+ This
+ works
+
+Or you can quote the critical part using B<\'>:
+
+ parallel perl -e \''print "@ARGV\n"'\' ::: This works, too
+
+Output (the order may be different):
+
+ This
+ works,
+ too
+
+GNU B<parallel> can also \-quote full lines. Simply run this:
+
+ parallel --shellquote
+ Warning: Input is read from the terminal. You either know what you
+ Warning: are doing (in which case: YOU ARE AWESOME!) or you forgot
+ Warning: ::: or :::: or to pipe data into parallel. If so
+ Warning: consider going through the tutorial: man parallel_tutorial
+ Warning: Press CTRL-D to exit.
+ perl -e 'print "@ARGV\n"'
+ [CTRL-D]
+
+Output:
+
+ perl\ -e\ \'print\ \"@ARGV\\n\"\'
+
+This can then be used as the command:
+
+ parallel perl\ -e\ \'print\ \"@ARGV\\n\"\' ::: This also works
+
+Output (the order may be different):
+
+ This
+ also
+ works
+
+
+=head2 Trimming space
+
+Space can be trimmed on the arguments using B<--trim>:
+
+ parallel --trim r echo pre-{}-post ::: ' A '
+
+Output:
+
+ pre- A-post
+
+To trim on the left side:
+
+ parallel --trim l echo pre-{}-post ::: ' A '
+
+Output:
+
+ pre-A -post
+
+To trim on the both sides:
+
+ parallel --trim lr echo pre-{}-post ::: ' A '
+
+Output:
+
+ pre-A-post
+
+
+=head2 Respecting the shell
+
+This tutorial uses Bash as the shell. GNU B<parallel> respects which
+shell you are using, so in B<zsh> you can do:
+
+ parallel echo \={} ::: zsh bash ls
+
+Output:
+
+ /usr/bin/zsh
+ /bin/bash
+ /bin/ls
+
+In B<csh> you can do:
+
+ parallel 'set a="{}"; if( { test -d "$a" } ) echo "$a is a dir"' ::: *
+
+Output:
+
+ [somedir] is a dir
+
+This also becomes useful if you use GNU B<parallel> in a shell script:
+GNU B<parallel> will use the same shell as the shell script.
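+
+A minimal sketch: the B<zsh> example from above works unchanged
+inside a B<zsh> script, because the jobs inherit the script's shell:
+
+ #!/usr/bin/zsh
+ # The job line is run by zsh, so =zsh expands to the path of zsh
+ parallel echo \={} ::: zsh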
+
+
+=head1 Controlling the output
+
+The output can be prefixed with the argument:
+
+ parallel --tag echo foo-{} ::: A B C
+
+Output (the order may be different):
+
+ A foo-A
+ B foo-B
+ C foo-C
+
+To prefix it with another string use B<--tagstring>:
+
+ parallel --tagstring {}-bar echo foo-{} ::: A B C
+
+Output (the order may be different):
+
+ A-bar foo-A
+ B-bar foo-B
+ C-bar foo-C
+
+To see what commands will be run without running them use B<--dryrun>:
+
+ parallel --dryrun echo {} ::: A B C
+
+Output (the order may be different):
+
+ echo A
+ echo B
+ echo C
+
+To print the commands before running them use B<--verbose>:
+
+ parallel --verbose echo {} ::: A B C
+
+Output (the order may be different):
+
+ echo A
+ echo B
+ A
+ echo C
+ B
+ C
+
+GNU B<parallel> will postpone the output until the command completes:
+
+ parallel -j2 'printf "%s-start\n%s" {} {};
+ sleep {};printf "%s\n" -middle;echo {}-end' ::: 4 2 1
+
+Output:
+
+ 2-start
+ 2-middle
+ 2-end
+ 1-start
+ 1-middle
+ 1-end
+ 4-start
+ 4-middle
+ 4-end
+
+To get the output immediately use B<--ungroup>:
+
+ parallel -j2 --ungroup 'printf "%s-start\n%s" {} {};
+ sleep {};printf "%s\n" -middle;echo {}-end' ::: 4 2 1
+
+Output:
+
+ 4-start
+ 42-start
+ 2-middle
+ 2-end
+ 1-start
+ 1-middle
+ 1-end
+ -middle
+ 4-end
+
+B<--ungroup> is fast, but can cause half a line from one job to be mixed
+with half a line of another job. That has happened in the second line,
+where the line '4-middle' is mixed with '2-start'.
+
+To avoid this use B<--linebuffer>:
+
+ parallel -j2 --linebuffer 'printf "%s-start\n%s" {} {};
+ sleep {};printf "%s\n" -middle;echo {}-end' ::: 4 2 1
+
+Output:
+
+ 4-start
+ 2-start
+ 2-middle
+ 2-end
+ 1-start
+ 1-middle
+ 1-end
+ 4-middle
+ 4-end
+
+To force the output in the same order as the arguments use B<--keep-order>/B<-k>:
+
+ parallel -j2 -k 'printf "%s-start\n%s" {} {};
+ sleep {};printf "%s\n" -middle;echo {}-end' ::: 4 2 1
+
+Output:
+
+ 4-start
+ 4-middle
+ 4-end
+ 2-start
+ 2-middle
+ 2-end
+ 1-start
+ 1-middle
+ 1-end
+
+
+=head2 Saving output into files
+
+GNU B<parallel> can save the output of each job into files:
+
+ parallel --files echo ::: A B C
+
+Output will be similar to this:
+
+ /tmp/pAh6uWuQCg.par
+ /tmp/opjhZCzAX4.par
+ /tmp/W0AT_Rph2o.par
+
+By default GNU B<parallel> will cache the output in files in B</tmp>. This
+can be changed by setting B<$TMPDIR> or B<--tmpdir>:
+
+ parallel --tmpdir /var/tmp --files echo ::: A B C
+
+Output will be similar to this:
+
+ /var/tmp/N_vk7phQRc.par
+ /var/tmp/7zA4Ccf3wZ.par
+ /var/tmp/LIuKgF_2LP.par
+
+Or:
+
+ TMPDIR=/var/tmp parallel --files echo ::: A B C
+
+Output: Same as above.
+
+The output files can be saved in a structured way using B<--results>:
+
+ parallel --results outdir echo ::: A B C
+
+Output:
+
+ A
+ B
+ C
+
+These files were also generated containing the standard output
+(stdout), standard error (stderr), and the sequence number (seq):
+
+ outdir/1/A/seq
+ outdir/1/A/stderr
+ outdir/1/A/stdout
+ outdir/1/B/seq
+ outdir/1/B/stderr
+ outdir/1/B/stdout
+ outdir/1/C/seq
+ outdir/1/C/stderr
+ outdir/1/C/stdout
+
+B<--header :> will take the first value as name and use that in the
+directory structure. This is useful if you are using multiple input
+sources:
+
+ parallel --header : --results outdir echo ::: f1 A B ::: f2 C D
+
+Generated files:
+
+ outdir/f1/A/f2/C/seq
+ outdir/f1/A/f2/C/stderr
+ outdir/f1/A/f2/C/stdout
+ outdir/f1/A/f2/D/seq
+ outdir/f1/A/f2/D/stderr
+ outdir/f1/A/f2/D/stdout
+ outdir/f1/B/f2/C/seq
+ outdir/f1/B/f2/C/stderr
+ outdir/f1/B/f2/C/stdout
+ outdir/f1/B/f2/D/seq
+ outdir/f1/B/f2/D/stderr
+ outdir/f1/B/f2/D/stdout
+
+The directories are named after the variables and their values.
+
+=head1 Controlling the execution
+
+=head2 Number of simultaneous jobs
+
+The number of concurrent jobs is given with B<--jobs>/B<-j>:
+
+ /usr/bin/time parallel -N0 -j64 sleep 1 :::: num128
+
+With 64 jobs in parallel the 128 B<sleep>s will take 2-8 seconds to run -
+depending on how fast your machine is.
+
+By default B<--jobs> is the same as the number of CPU cores. So this:
+
+ /usr/bin/time parallel -N0 sleep 1 :::: num128
+
+should take twice the time of running 2 jobs per CPU core:
+
+ /usr/bin/time parallel -N0 --jobs 200% sleep 1 :::: num128
+
+B<--jobs 0> will run as many jobs in parallel as possible:
+
+ /usr/bin/time parallel -N0 --jobs 0 sleep 1 :::: num128
+
+which should take 1-7 seconds depending on how fast your machine is.
+
+B<--jobs> can read from a file which is re-read when a job finishes:
+
+ echo 50% > my_jobs
+ /usr/bin/time parallel -N0 --jobs my_jobs sleep 1 :::: num128 &
+ sleep 1
+ echo 0 > my_jobs
+ wait
+
+For the first second only 50% of the CPU cores will run a job. Then
+B<0> is put into B<my_jobs>, and the rest of the jobs will be started
+in parallel.
+
+Instead of basing the percentage on the number of CPU cores
+GNU B<parallel> can base it on the number of CPUs:
+
+ parallel --use-cpus-instead-of-cores -N0 sleep 1 :::: num8
+
+=head2 Shuffle job order
+
+If you have many jobs (e.g. by multiple combinations of input
+sources), it can be handy to shuffle the jobs, so you get different
+values run. Use B<--shuf> for that:
+
+ parallel --shuf echo ::: 1 2 3 ::: a b c ::: A B C
+
+Output:
+
+ All combinations but different order for each run.
+
+=head2 Interactivity
+
+GNU B<parallel> can ask the user if a command should be run using B<--interactive>:
+
+ parallel --interactive echo ::: 1 2 3
+
+Output:
+
+ echo 1 ?...y
+ echo 2 ?...n
+ 1
+ echo 3 ?...y
+ 3
+
+GNU B<parallel> can be used to put arguments on the command line for an
+interactive command such as B<emacs> to edit one file at a time:
+
+ parallel --tty emacs ::: 1 2 3
+
+Or give multiple argument in one go to open multiple files:
+
+ parallel -X --tty vi ::: 1 2 3
+
+=head2 A terminal for every job
+
+Using B<--tmux> GNU B<parallel> can start a terminal for every job run:
+
+ seq 10 20 | parallel --tmux 'echo start {}; sleep {}; echo done {}'
+
+This will tell you to run something similar to:
+
+ tmux -S /tmp/tmsrPrO0 attach
+
+Using normal B<tmux> keystrokes (CTRL-b n or CTRL-b p) you can cycle
+between windows of the running jobs. When a job is finished it will
+pause for 10 seconds before closing the window.
+
+=head2 Timing
+
+Some jobs do heavy I/O when they start. To avoid a thundering herd GNU
+B<parallel> can delay starting new jobs. B<--delay> I<X> will make
+sure there is at least I<X> seconds between each start:
+
+ parallel --delay 2.5 echo Starting {}\;date ::: 1 2 3
+
+Output:
+
+ Starting 1
+ Thu Aug 15 16:24:33 CEST 2013
+ Starting 2
+ Thu Aug 15 16:24:35 CEST 2013
+ Starting 3
+ Thu Aug 15 16:24:38 CEST 2013
+
+
+If jobs taking more than a certain amount of time are known to fail,
+they can be stopped with B<--timeout>. The accuracy of B<--timeout> is
+2 seconds:
+
+ parallel --timeout 4.1 sleep {}\; echo {} ::: 2 4 6 8
+
+Output:
+
+ 2
+ 4
+
+GNU B<parallel> can compute the median runtime for jobs and kill those
+that take more than 200% of the median runtime:
+
+ parallel --timeout 200% sleep {}\; echo {} ::: 2.1 2.2 3 7 2.3
+
+Output:
+
+ 2.1
+ 2.2
+ 3
+ 2.3
+
+=head2 Progress information
+
+Based on the runtime of completed jobs GNU B<parallel> can estimate the
+total runtime:
+
+ parallel --eta sleep ::: 1 3 2 2 1 3 3 2 1
+
+Output:
+
+ Computers / CPU cores / Max jobs to run
+ 1:local / 2 / 2
+
+ Computer:jobs running/jobs completed/%of started jobs/
+ Average seconds to complete
+ ETA: 2s 0left 1.11avg local:0/9/100%/1.1s
+
+GNU B<parallel> can give progress information with B<--progress>:
+
+ parallel --progress sleep ::: 1 3 2 2 1 3 3 2 1
+
+Output:
+
+ Computers / CPU cores / Max jobs to run
+ 1:local / 2 / 2
+
+ Computer:jobs running/jobs completed/%of started jobs/
+ Average seconds to complete
+ local:0/9/100%/1.1s
+
+A progress bar can be shown with B<--bar>:
+
+ parallel --bar sleep ::: 1 3 2 2 1 3 3 2 1
+
+And a graphic bar can be shown with B<--bar> and B<zenity>:
+
+ seq 1000 | parallel -j10 --bar '(echo -n {};sleep 0.1)' \
+ 2> >(perl -pe 'BEGIN{$/="\r";$|=1};s/\r/\n/g' |
+ zenity --progress --auto-kill --auto-close)
+
+A logfile of the jobs completed so far can be generated with B<--joblog>:
+
+ parallel --joblog /tmp/log exit ::: 1 2 3 0
+ cat /tmp/log
+
+Output:
+
+ Seq Host Starttime Runtime Send Receive Exitval Signal Command
+ 1 : 1376577364.974 0.008 0 0 1 0 exit 1
+ 2 : 1376577364.982 0.013 0 0 2 0 exit 2
+ 3 : 1376577364.990 0.013 0 0 3 0 exit 3
+ 4 : 1376577365.003 0.003 0 0 0 0 exit 0
+
+The log contains the job sequence, which host the job was run on, the
+start time and run time, how much data was transferred, the exit
+value, the signal that killed the job, and finally the command being
+run.
+
+With a joblog GNU B<parallel> can be stopped and later pick up where
+it left off. It is important that the input of the completed jobs is
+unchanged.
+
+ parallel --joblog /tmp/log exit ::: 1 2 3 0
+ cat /tmp/log
+ parallel --resume --joblog /tmp/log exit ::: 1 2 3 0 0 0
+ cat /tmp/log
+
+Output:
+
+ Seq Host Starttime Runtime Send Receive Exitval Signal Command
+ 1 : 1376580069.544 0.008 0 0 1 0 exit 1
+ 2 : 1376580069.552 0.009 0 0 2 0 exit 2
+ 3 : 1376580069.560 0.012 0 0 3 0 exit 3
+ 4 : 1376580069.571 0.005 0 0 0 0 exit 0
+
+ Seq Host Starttime Runtime Send Receive Exitval Signal Command
+ 1 : 1376580069.544 0.008 0 0 1 0 exit 1
+ 2 : 1376580069.552 0.009 0 0 2 0 exit 2
+ 3 : 1376580069.560 0.012 0 0 3 0 exit 3
+ 4 : 1376580069.571 0.005 0 0 0 0 exit 0
+ 5 : 1376580070.028 0.009 0 0 0 0 exit 0
+ 6 : 1376580070.038 0.007 0 0 0 0 exit 0
+
+Note how the start time of the last 2 jobs is clearly different,
+because they were run in the second run.
+
+With B<--resume-failed> GNU B<parallel> will re-run the jobs that failed:
+
+ parallel --resume-failed --joblog /tmp/log exit ::: 1 2 3 0 0 0
+ cat /tmp/log
+
+Output:
+
+ Seq Host Starttime Runtime Send Receive Exitval Signal Command
+ 1 : 1376580069.544 0.008 0 0 1 0 exit 1
+ 2 : 1376580069.552 0.009 0 0 2 0 exit 2
+ 3 : 1376580069.560 0.012 0 0 3 0 exit 3
+ 4 : 1376580069.571 0.005 0 0 0 0 exit 0
+ 5 : 1376580070.028 0.009 0 0 0 0 exit 0
+ 6 : 1376580070.038 0.007 0 0 0 0 exit 0
+ 1 : 1376580154.433 0.010 0 0 1 0 exit 1
+ 2 : 1376580154.444 0.022 0 0 2 0 exit 2
+ 3 : 1376580154.466 0.005 0 0 3 0 exit 3
+
+Note how seqs 1, 2, and 3 have been repeated because their exit
+values differed from 0.
+
+B<--retry-failed> does almost the same as B<--resume-failed>. Where
+B<--resume-failed> reads the commands from the command line (and
+ignores the commands in the joblog), B<--retry-failed> ignores the
+command line and reruns the commands mentioned in the joblog.
+
+ parallel --retry-failed --joblog /tmp/log
+ cat /tmp/log
+
+Output:
+
+ Seq Host Starttime Runtime Send Receive Exitval Signal Command
+ 1 : 1376580069.544 0.008 0 0 1 0 exit 1
+ 2 : 1376580069.552 0.009 0 0 2 0 exit 2
+ 3 : 1376580069.560 0.012 0 0 3 0 exit 3
+ 4 : 1376580069.571 0.005 0 0 0 0 exit 0
+ 5 : 1376580070.028 0.009 0 0 0 0 exit 0
+ 6 : 1376580070.038 0.007 0 0 0 0 exit 0
+ 1 : 1376580154.433 0.010 0 0 1 0 exit 1
+ 2 : 1376580154.444 0.022 0 0 2 0 exit 2
+ 3 : 1376580154.466 0.005 0 0 3 0 exit 3
+ 1 : 1376580164.633 0.010 0 0 1 0 exit 1
+ 2 : 1376580164.644 0.022 0 0 2 0 exit 2
+ 3 : 1376580164.666 0.005 0 0 3 0 exit 3
+
+
+=head2 Termination
+
+=head3 Unconditional termination
+
+By default GNU B<parallel> will wait for all jobs to finish before exiting.
+
+If you send GNU B<parallel> the B<TERM> signal, GNU B<parallel> will
+stop spawning new jobs and wait for the remaining jobs to finish. If
+you send GNU B<parallel> the B<TERM> signal again, GNU B<parallel>
+will kill all running jobs and exit.
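+
+A minimal sketch of the two-step termination (the timings are
+illustrative):
+
+ parallel -j2 sleep ::: 10 10 10 10 &
+ sleep 1
+ # First TERM: finish the 2 running jobs, but start no new ones
+ kill -TERM $!
+ # Second TERM: kill the running jobs and exit
+ kill -TERM $!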
+
+=head3 Termination dependent on job status
+
+For certain jobs there is no need to continue if one of the jobs fails
+and has an exit code different from 0. GNU B<parallel> will stop spawning new jobs
+with B<--halt soon,fail=1>:
+
+ parallel -j2 --halt soon,fail=1 echo {}\; exit {} ::: 0 0 1 2 3
+
+Output:
+
+ 0
+ 0
+ 1
+ parallel: This job failed:
+ echo 1; exit 1
+ parallel: Starting no more jobs. Waiting for 1 jobs to finish.
+ 2
+
+With B<--halt now,fail=1> the running jobs will be killed immediately:
+
+ parallel -j2 --halt now,fail=1 echo {}\; exit {} ::: 0 0 1 2 3
+
+Output:
+
+ 0
+ 0
+ 1
+ parallel: This job failed:
+ echo 1; exit 1
+
+If B<--halt> is given a percentage this percentage of the jobs must fail
+before GNU B<parallel> stops spawning more jobs:
+
+ parallel -j2 --halt soon,fail=20% echo {}\; exit {} \
+ ::: 0 1 2 3 4 5 6 7 8 9
+
+Output:
+
+ 0
+ 1
+ parallel: This job failed:
+ echo 1; exit 1
+ 2
+ parallel: This job failed:
+ echo 2; exit 2
+ parallel: Starting no more jobs. Waiting for 1 jobs to finish.
+ 3
+ parallel: This job failed:
+ echo 3; exit 3
+
+If you are looking for success instead of failures, you can use
+B<success>. This will finish as soon as the first job succeeds:
+
+ parallel -j2 --halt now,success=1 echo {}\; exit {} ::: 1 2 3 0 4 5 6
+
+Output:
+
+ 1
+ 2
+ 3
+ 0
+ parallel: This job succeeded:
+ echo 0; exit 0
+
+GNU B<parallel> can retry the command with B<--retries>. This is useful if a
+command fails for unknown reasons now and then.
+
+ parallel -k --retries 3 \
+ 'echo tried {} >>/tmp/runs; echo completed {}; exit {}' ::: 1 2 0
+ cat /tmp/runs
+
+Output:
+
+ completed 1
+ completed 2
+ completed 0
+
+ tried 1
+ tried 2
+ tried 1
+ tried 2
+ tried 1
+ tried 2
+ tried 0
+
+Note how jobs 1 and 2 were tried 3 times, but 0 was not retried
+because it had exit code 0.
+
+=head3 Termination signals (advanced)
+
+Using B<--termseq> you can control which signals are sent when killing
+children. Normally children will be killed by sending them B<SIGTERM>,
+waiting 200 ms, then another B<SIGTERM>, waiting 100 ms, then another
+B<SIGTERM>, waiting 50 ms, then a B<SIGKILL>, finally waiting 25 ms
+before giving up. It looks like this:
+
+ show_signals() {
+ perl -e 'for(keys %SIG) {
+ $SIG{$_} = eval "sub { print \"Got $_\\n\"; }";
+ }
+ while(1){sleep 1}'
+ }
+ export -f show_signals
+ echo | parallel --termseq TERM,200,TERM,100,TERM,50,KILL,25 \
+ -u --timeout 1 show_signals
+
+Output:
+
+ Got TERM
+ Got TERM
+ Got TERM
+
+Or just:
+
+ echo | parallel -u --timeout 1 show_signals
+
+Output: Same as above.
+
+You can change this to B<SIGINT>, B<SIGTERM>, B<SIGKILL>:
+
+ echo | parallel --termseq INT,200,TERM,100,KILL,25 \
+ -u --timeout 1 show_signals
+
+Output:
+
+ Got INT
+ Got TERM
+
+The B<SIGKILL> does not show because it cannot be caught, and thus the
+child dies.
+
+
+=head2 Limiting the resources
+
+To avoid overloading systems GNU B<parallel> can look at the system load
+before starting another job:
+
+ parallel --load 100% echo load is less than {} job per cpu ::: 1
+
+Output:
+
+ [when the load is less than the number of cpu cores]
+ load is less than 1 job per cpu
+
+GNU B<parallel> can also check if the system is swapping.
+
+ parallel --noswap echo the system is not swapping ::: now
+
+Output:
+
+ [when the system is not swapping]
+ the system is not swapping now
+
+Some jobs need a lot of memory, and should only be started when there
+is enough memory free. Using B<--memfree> GNU B<parallel> can check
+if there is enough memory free. Additionally, GNU B<parallel> will
+kill off the youngest job if the free memory falls below 50% of the
+B<--memfree> size. The killed job will be put back on the queue and
+retried later.
+
+ parallel --memfree 1G echo will run if more than 1 GB is ::: free
+
+GNU B<parallel> can run the jobs with a nice value. This will work both
+locally and remotely.
+
+ parallel --nice 17 echo this is being run with nice -n ::: 17
+
+Output:
+
+ this is being run with nice -n 17
+
+=head1 Remote execution
+
+GNU B<parallel> can run jobs on remote servers. It uses B<ssh> to
+communicate with the remote machines.
+
+=head2 Sshlogin
+
+The most basic sshlogin is B<-S> I<host>:
+
+ parallel -S $SERVER1 echo running on ::: $SERVER1
+
+Output:
+
+ running on [$SERVER1]
+
+To use a different username prepend the server with I<username@>:
+
+ parallel -S username@$SERVER1 echo running on ::: username@$SERVER1
+
+Output:
+
+ running on [username@$SERVER1]
+
+The special sshlogin B<:> is the local machine:
+
+ parallel -S : echo running on ::: the_local_machine
+
+Output:
+
+ running on the_local_machine
+
+If B<ssh> is not in $PATH it can be prepended to $SERVER1:
+
+ parallel -S '/usr/bin/ssh '$SERVER1 echo custom ::: ssh
+
+Output:
+
+ custom ssh
+
+The B<ssh> command can also be given using B<--ssh>:
+
+ parallel --ssh /usr/bin/ssh -S $SERVER1 echo custom ::: ssh
+
+or by setting B<$PARALLEL_SSH>:
+
+ export PARALLEL_SSH=/usr/bin/ssh
+ parallel -S $SERVER1 echo custom ::: ssh
+
+Several servers can be given using multiple B<-S>:
+
+ parallel -S $SERVER1 -S $SERVER2 echo ::: running on more hosts
+
+Output (the order may be different):
+
+ running
+ on
+ more
+ hosts
+
+Or they can be separated by B<,>:
+
+ parallel -S $SERVER1,$SERVER2 echo ::: running on more hosts
+
+Output: Same as above.
+
+Or newline:
+
+ # This gives a \n between $SERVER1 and $SERVER2
+ SERVERS="`echo $SERVER1; echo $SERVER2`"
+ parallel -S "$SERVERS" echo ::: running on more hosts
+
+They can also be read from a file (replace I<user@> with the user on B<$SERVER2>):
+
+ echo $SERVER1 > nodefile
+ # Force 4 cores, special ssh-command, username
+ echo 4//usr/bin/ssh user@$SERVER2 >> nodefile
+ parallel --sshloginfile nodefile echo ::: running on more hosts
+
+Output: Same as above.
+
+Every time a job finishes, the B<--sshloginfile> will be re-read, so
+it is possible to both add and remove hosts while running.
+
+The special B<--sshloginfile ..> reads from B<~/.parallel/sshloginfile>.
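+
+A minimal sketch, reusing the I<nodefile> from above:
+
+ cp nodefile ~/.parallel/sshloginfile
+ parallel --sshloginfile .. echo ::: running on more hosts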
+
+To force GNU B<parallel> to treat a server as having a given number
+of CPU cores, prepend the number of cores followed by B</> to the
+sshlogin:
+
+ parallel -S 4/$SERVER1 echo force {} cpus on server ::: 4
+
+Output:
+
+ force 4 cpus on server
+
+Servers can be put into groups by prepending I<@groupname> to the
+server and the group can then be selected by appending I<@groupname> to
+the argument if using B<--hostgroup>:
+
+ parallel --hostgroup -S @grp1/$SERVER1 -S @grp2/$SERVER2 echo {} \
+ ::: run_on_grp1@grp1 run_on_grp2@grp2
+
+Output:
+
+ run_on_grp1
+ run_on_grp2
+
+A host can be in multiple groups by separating the groups with B<+>, and
+you can force GNU B<parallel> to limit the groups on which the command
+can be run with B<-S> I<@groupname>:
+
+ parallel -S @grp1 -S @grp1+grp2/$SERVER1 -S @grp2/$SERVER2 echo {} \
+ ::: run_on_grp1 also_grp1
+
+Output:
+
+ run_on_grp1
+ also_grp1
+
+=head2 Transferring files
+
+GNU B<parallel> can transfer the files to be processed to the remote
+host. It does that using rsync.
+
+ echo This is input_file > input_file
+ parallel -S $SERVER1 --transferfile {} cat ::: input_file
+
+Output:
+
+ This is input_file
+
+If the files are processed into another file, the resulting file can be
+transferred back:
+
+ echo This is input_file > input_file
+ parallel -S $SERVER1 --transferfile {} --return {}.out \
+ cat {} ">"{}.out ::: input_file
+ cat input_file.out
+
+Output: Same as above.
+
+To remove the input and output file on the remote server use B<--cleanup>:
+
+ echo This is input_file > input_file
+ parallel -S $SERVER1 --transferfile {} --return {}.out --cleanup \
+ cat {} ">"{}.out ::: input_file
+ cat input_file.out
+
+Output: Same as above.
+
+There is a shorthand for B<--transferfile {} --return --cleanup> called B<--trc>:
+
+ echo This is input_file > input_file
+ parallel -S $SERVER1 --trc {}.out cat {} ">"{}.out ::: input_file
+ cat input_file.out
+
+Output: Same as above.
+
+Some jobs need a common database for all jobs. GNU B<parallel> can
+transfer that using B<--basefile> which will transfer the file before the
+first job:
+
+ echo common data > common_file
+ parallel --basefile common_file -S $SERVER1 \
+ cat common_file\; echo {} ::: foo
+
+Output:
+
+ common data
+ foo
+
+To remove it from the remote host after the last job use B<--cleanup>.
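+
+A minimal sketch combining B<--basefile> with B<--cleanup>:
+
+ parallel --basefile common_file --cleanup -S $SERVER1 \
+   cat common_file\; echo {} ::: foo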
+
+
+=head2 Working dir
+
+The default working dir on the remote machines is the login dir. This
+can be changed with B<--workdir> I<mydir>.
+
+Files transferred using B<--transferfile> and B<--return> will be relative
+to I<mydir> on remote computers, and the command will be executed in
+the dir I<mydir>.
+
+The special I<mydir> value B<...> will create working dirs under
+B<~/.parallel/tmp> on the remote computers. If B<--cleanup> is given
+these dirs will be removed.
+
+The special I<mydir> value B<.> uses the current working dir. If the
+current working dir is beneath your home dir, the value B<.> is
+treated as the relative path to your home dir. This means that if your
+home dir is different on remote computers (e.g. if your login is
+different) the relative path will still be relative to your home dir.
+
+ parallel -S $SERVER1 pwd ::: ""
+ parallel --workdir . -S $SERVER1 pwd ::: ""
+ parallel --workdir ... -S $SERVER1 pwd ::: ""
+
+Output:
+
+ [the login dir on $SERVER1]
+ [current dir relative on $SERVER1]
+ [a dir in ~/.parallel/tmp/...]
+
+
+=head2 Avoid overloading sshd
+
+If many jobs are started on the same server, B<sshd> can be
+overloaded. GNU B<parallel> can insert a delay between each job run on
+the same server:
+
+ parallel -S $SERVER1 --sshdelay 0.2 echo ::: 1 2 3
+
+Output (the order may be different):
+
+ 1
+ 2
+ 3
+
+B<sshd> will be less overloaded if using B<--controlmaster>, which will
+multiplex ssh connections:
+
+ parallel --controlmaster -S $SERVER1 echo ::: 1 2 3
+
+Output: Same as above.
+
+=head2 Ignore hosts that are down
+
+In clusters with many hosts a few of them are often down. GNU B<parallel>
+can ignore those hosts. In this case the host 173.194.32.46 is down:
+
+ parallel --filter-hosts -S 173.194.32.46,$SERVER1 echo ::: bar
+
+Output:
+
+ bar
+
+=head2 Running the same commands on all hosts
+
+GNU B<parallel> can run the same command on all the hosts:
+
+ parallel --onall -S $SERVER1,$SERVER2 echo ::: foo bar
+
+Output (the order may be different):
+
+ foo
+ bar
+ foo
+ bar
+
+Often you will just want to run a single command on all hosts without
+arguments. B<--nonall> is a no-argument B<--onall>:
+
+ parallel --nonall -S $SERVER1,$SERVER2 echo foo bar
+
+Output:
+
+ foo bar
+ foo bar
+
+When B<--tag> is used with B<--nonall> and B<--onall> the B<--tagstring> is the host:
+
+ parallel --nonall --tag -S $SERVER1,$SERVER2 echo foo bar
+
+Output (the order may be different):
+
+ $SERVER1 foo bar
+ $SERVER2 foo bar
+
+B<--jobs> sets the number of servers to log in to in parallel.
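+
+A minimal sketch: log in to at most 1 server at a time:
+
+ parallel --nonall -j1 -S $SERVER1,$SERVER2 echo foo bar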
+
+=head2 Transferring environment variables and functions
+
+B<env_parallel> is a shell function that transfers all aliases,
+functions, variables, and arrays. You activate it by running:
+
+ source `which env_parallel.bash`
+
+Replace B<bash> with the shell you use.
+
+Now you can use B<env_parallel> instead of B<parallel> and still have
+your environment:
+
+ alias myecho=echo
+ myvar="Joe's var is"
+ env_parallel -S $SERVER1 'myecho $myvar' ::: green
+
+Output:
+
+ Joe's var is green
+
+The disadvantage is that if your environment is huge B<env_parallel>
+will fail.
+
+When B<env_parallel> fails, you can still use B<--env> to tell GNU
+B<parallel> to transfer an environment variable to the remote system.
+
+ MYVAR='foo bar'
+ export MYVAR
+ parallel --env MYVAR -S $SERVER1 echo '$MYVAR' ::: baz
+
+Output:
+
+ foo bar baz
+
+This works for functions, too, if your shell is Bash:
+
+ # This only works in Bash
+ my_func() {
+ echo in my_func $1
+ }
+ export -f my_func
+ parallel --env my_func -S $SERVER1 my_func ::: baz
+
+Output:
+
+ in my_func baz
+
+GNU B<parallel> can copy all user-defined variables and functions to
+the remote system. It just needs to record which ones to ignore in
+B<~/.parallel/ignored_vars>. Do that by running this once:
+
+ parallel --record-env
+ cat ~/.parallel/ignored_vars
+
+Output:
+
+ [list of variables to ignore - including $PATH and $HOME]
+
+Now all other variables and functions defined will be copied when
+using B<--env _>.
+
+ # The function is only copied if using Bash
+ my_func2() {
+ echo in my_func2 $VAR $1
+ }
+ export -f my_func2
+ VAR=foo
+ export VAR
+
+ parallel --env _ -S $SERVER1 'echo $VAR; my_func2' ::: bar
+
+Output:
+
+ foo
+ in my_func2 foo bar
+
+If you use B<env_parallel> the variables, functions, and aliases do
+not even need to be exported to be copied:
+
+ NOT='not exported var'
+ alias myecho=echo
+ not_ex() {
+ myecho in not_exported_func $NOT $1
+ }
+ env_parallel --env _ -S $SERVER1 'echo $NOT; not_ex' ::: bar
+
+Output:
+
+ not exported var
+ in not_exported_func not exported var bar
+
+
+=head2 Showing what is actually run
+
+B<--verbose> will show the command that would be run on the local
+machine.
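+
+A minimal sketch - the command line is printed before the job's
+output:
+
+  parallel --verbose echo ::: foo
+
+Output will be similar to:
+
+  echo foo
+  foo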
+
+When using B<--cat>, B<--pipepart>, or when a job is run on a remote
+machine, the command is wrapped with helper scripts. B<-vv> shows all
+of this.
+
+ parallel -vv --pipepart --block 1M wc :::: num30000
+
+Output:
+
+ <num30000 perl -e 'while(@ARGV) { sysseek(STDIN,shift,0) || die;
+ $left = shift; while($read = sysread(STDIN,$buf, ($left > 131072
+ ? 131072 : $left))){ $left -= $read; syswrite(STDOUT,$buf); } }'
+ 0 0 0 168894 | (wc)
+ 30000 30000 168894
+
+When the command gets more complex, the output is so hard to read
+that it is only useful for debugging:
+
+ my_func3() {
+ echo in my_func $1 > $1.out
+ }
+ export -f my_func3
+ parallel -vv --workdir ... --nice 17 --env _ --trc {}.out \
+ -S $SERVER1 my_func3 {} ::: abc-file
+
+Output will be similar to:
+
+ ( ssh server -- mkdir -p ./.parallel/tmp/aspire-1928520-1;rsync
+ --protocol 30 -rlDzR -essh ./abc-file
+ server:./.parallel/tmp/aspire-1928520-1 );ssh server -- exec perl -e
+ \''@GNU_Parallel=("use","IPC::Open3;","use","MIME::Base64");
+ eval"@GNU_Parallel";my$eval=decode_base64(join"",@ARGV);eval$eval;'\'
+ c3lzdGVtKCJta2RpciIsIi1wIiwiLS0iLCIucGFyYWxsZWwvdG1wL2FzcGlyZS0xOTI4N
+ TsgY2hkaXIgIi5wYXJhbGxlbC90bXAvYXNwaXJlLTE5Mjg1MjAtMSIgfHxwcmludChTVE
+ BhcmFsbGVsOiBDYW5ub3QgY2hkaXIgdG8gLnBhcmFsbGVsL3RtcC9hc3BpcmUtMTkyODU
+ iKSAmJiBleGl0IDI1NTskRU5WeyJPTERQV0QifT0iL2hvbWUvdGFuZ2UvcHJpdmF0L3Bh
+ IjskRU5WeyJQQVJBTExFTF9QSUQifT0iMTkyODUyMCI7JEVOVnsiUEFSQUxMRUxfU0VRI
+ 0BiYXNoX2Z1bmN0aW9ucz1xdyhteV9mdW5jMyk7IGlmKCRFTlZ7IlNIRUxMIn09fi9jc2
+ ByaW50IFNUREVSUiAiQ1NIL1RDU0ggRE8gTk9UIFNVUFBPUlQgbmV3bGluZXMgSU4gVkF
+ TL0ZVTkNUSU9OUy4gVW5zZXQgQGJhc2hfZnVuY3Rpb25zXG4iOyBleGVjICJmYWxzZSI7
+ YXNoZnVuYyA9ICJteV9mdW5jMygpIHsgIGVjaG8gaW4gbXlfZnVuYyBcJDEgPiBcJDEub
+ Xhwb3J0IC1mIG15X2Z1bmMzID4vZGV2L251bGw7IjtAQVJHVj0ibXlfZnVuYzMgYWJjLW
+ RzaGVsbD0iJEVOVntTSEVMTH0iOyR0bXBkaXI9Ii90bXAiOyRuaWNlPTE3O2RveyRFTlZ
+ MRUxfVE1QfT0kdG1wZGlyLiIvcGFyIi5qb2luIiIsbWFweygwLi45LCJhIi4uInoiLCJB
+ KVtyYW5kKDYyKV19KDEuLjUpO313aGlsZSgtZSRFTlZ7UEFSQUxMRUxfVE1QfSk7JFNJ
+ fT1zdWJ7JGRvbmU9MTt9OyRwaWQ9Zm9yazt1bmxlc3MoJHBpZCl7c2V0cGdycDtldmFse
+ W9yaXR5KDAsMCwkbmljZSl9O2V4ZWMkc2hlbGwsIi1jIiwoJGJhc2hmdW5jLiJAQVJHVi
+ JleGVjOiQhXG4iO31kb3skcz0kczwxPzAuMDAxKyRzKjEuMDM6JHM7c2VsZWN0KHVuZGV
+ mLHVuZGVmLCRzKTt9dW50aWwoJGRvbmV8fGdldHBwaWQ9PTEpO2tpbGwoU0lHSFVQLC0k
+ dW5sZXNzJGRvbmU7d2FpdDtleGl0KCQ/JjEyNz8xMjgrKCQ/JjEyNyk6MSskPz4+OCk=;
+ _EXIT_status=$?; mkdir -p ./.; rsync --protocol 30 --rsync-path=cd\
+ ./.parallel/tmp/aspire-1928520-1/./.\;\ rsync -rlDzR -essh
+ server:./abc-file.out ./.;ssh server -- \(rm\ -f\
+ ./.parallel/tmp/aspire-1928520-1/abc-file\;\ sh\ -c\ \'rmdir\
+ ./.parallel/tmp/aspire-1928520-1/\ ./.parallel/tmp/\ ./.parallel/\
+ 2\>/dev/null\'\;rm\ -rf\ ./.parallel/tmp/aspire-1928520-1\;\);ssh
+ server -- \(rm\ -f\ ./.parallel/tmp/aspire-1928520-1/abc-file.out\;\
+ sh\ -c\ \'rmdir\ ./.parallel/tmp/aspire-1928520-1/\ ./.parallel/tmp/\
+ ./.parallel/\ 2\>/dev/null\'\;rm\ -rf\
+ ./.parallel/tmp/aspire-1928520-1\;\);ssh server -- rm -rf
+ .parallel/tmp/aspire-1928520-1; exit $_EXIT_status;
+
+=head1 Saving output to shell variables (advanced)
+
+GNU B<parset> will set shell variables to the output of GNU
+B<parallel>. GNU B<parset> has one important limitation: It cannot be
+part of a pipe. In particular this means it cannot read anything from
+standard input (stdin) or pipe output to another program.
+
+To use GNU B<parset>, prepend the command with the destination
+variables:
+
+ parset myvar1,myvar2 echo ::: a b
+ echo $myvar1
+ echo $myvar2
+
+Output:
+
+ a
+ b
+
+If you only give a single variable, it will be treated as an array:
+
+ parset myarray seq {} 5 ::: 1 2 3
+ echo "${myarray[1]}"
+
+Output:
+
+ 2
+ 3
+ 4
+ 5
+
+The commands to run can be an array:
+
+ cmd=("echo '<<joe \"double space\" cartoon>>'" "pwd")
+ parset data ::: "${cmd[@]}"
+ echo "${data[0]}"
+ echo "${data[1]}"
+
+Output:
+
+ <<joe "double space" cartoon>>
+ [current dir]
+
+
+=head1 Saving to an SQL base (advanced)
+
+GNU B<parallel> can save into an SQL base. Point GNU B<parallel> to a
+table and it will put the joblog there together with the variables and
+the output each in their own column.
+
+=head2 CSV as SQL base
+
+The simplest is to use a CSV file as the storage table:
+
+ parallel --sqlandworker csv:///%2Ftmp/log.csv \
+ seq ::: 10 ::: 12 13 14
+ cat /tmp/log.csv
+
+Note how '/' in the path must be written as %2F.
+
+Output will be similar to:
+
+ Seq,Host,Starttime,JobRuntime,Send,Receive,Exitval,_Signal,
+ Command,V1,V2,Stdout,Stderr
+ 1,:,1458254498.254,0.069,0,9,0,0,"seq 10 12",10,12,"10
+ 11
+ 12
+ ",
+ 2,:,1458254498.278,0.080,0,12,0,0,"seq 10 13",10,13,"10
+ 11
+ 12
+ 13
+ ",
+ 3,:,1458254498.301,0.083,0,15,0,0,"seq 10 14",10,14,"10
+ 11
+ 12
+ 13
+ 14
+ ",
+
+A proper CSV reader (like LibreOffice or R's read.csv) will read this
+format correctly - even with fields containing newlines as above.
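+
+A sketch of reading the log with R (assuming B<Rscript> is
+installed):
+
+  Rscript -e 'read.csv("/tmp/log.csv")'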
+
+If the output is big you may want to put it into files using B<--results>:
+
+ parallel --results outdir --sqlandworker csv:///%2Ftmp/log2.csv \
+ seq ::: 10 ::: 12 13 14
+ cat /tmp/log2.csv
+
+Output will be similar to:
+
+ Seq,Host,Starttime,JobRuntime,Send,Receive,Exitval,_Signal,
+ Command,V1,V2,Stdout,Stderr
+ 1,:,1458824738.287,0.029,0,9,0,0,
+ "seq 10 12",10,12,outdir/1/10/2/12/stdout,outdir/1/10/2/12/stderr
+ 2,:,1458824738.298,0.025,0,12,0,0,
+ "seq 10 13",10,13,outdir/1/10/2/13/stdout,outdir/1/10/2/13/stderr
+ 3,:,1458824738.309,0.026,0,15,0,0,
+ "seq 10 14",10,14,outdir/1/10/2/14/stdout,outdir/1/10/2/14/stderr
+
+
+=head2 DBURL as table
+
+The CSV file is an example of a DBURL.
+
+GNU B<parallel> uses a DBURL to address the table. A DBURL has this format:
+
+  vendor://[[user][:password]@][host][:port]/[database[/table]]
+
+Example:
+
+ mysql://scott:tiger@my.example.com/mydatabase/mytable
+ postgresql://scott:tiger@pg.example.com/mydatabase/mytable
+ sqlite3:///%2Ftmp%2Fmydatabase/mytable
+ csv:///%2Ftmp/log.csv
+
+To refer to B</tmp/mydatabase> with B<sqlite> or B<csv> you need to
+encode the B</> as B<%2F>.
+
+Run a job using B<sqlite> on B<mytable> in B</tmp/mydatabase>:
+
+ DBURL=sqlite3:///%2Ftmp%2Fmydatabase
+ DBURLTABLE=$DBURL/mytable
+ parallel --sqlandworker $DBURLTABLE echo ::: foo bar ::: baz quuz
+
+To see the result:
+
+ sql $DBURL 'SELECT * FROM mytable ORDER BY Seq;'
+
+Output will be similar to:
+
+ Seq|Host|Starttime|JobRuntime|Send|Receive|Exitval|_Signal|
+ Command|V1|V2|Stdout|Stderr
+ 1|:|1451619638.903|0.806||8|0|0|echo foo baz|foo|baz|foo baz
+ |
+ 2|:|1451619639.265|1.54||9|0|0|echo foo quuz|foo|quuz|foo quuz
+ |
+ 3|:|1451619640.378|1.43||8|0|0|echo bar baz|bar|baz|bar baz
+ |
+ 4|:|1451619641.473|0.958||9|0|0|echo bar quuz|bar|quuz|bar quuz
+ |
+
+The first columns are well known from B<--joblog>. B<V1> and B<V2> are
+data from the input sources. B<Stdout> and B<Stderr> are standard
+output and standard error, respectively.
+
+=head2 Using multiple workers
+
+Using an SQL base as storage costs overhead on the order of 1 second
+per job.
+
+One of the situations where it makes sense is if you have multiple
+workers.
+
+You can then have a single master machine that submits jobs to the SQL
+base (but does not do any of the work):
+
+ parallel --sqlmaster $DBURLTABLE echo ::: foo bar ::: baz quuz
+
+On the worker machines you run exactly the same command except you
+replace B<--sqlmaster> with B<--sqlworker>.
+
+ parallel --sqlworker $DBURLTABLE echo ::: foo bar ::: baz quuz
+
+To run a master and a worker on the same machine use B<--sqlandworker>
+as shown earlier.
+
+
+=head1 --pipe
+
+The B<--pipe> functionality puts GNU B<parallel> in a different mode:
+Instead of treating the data on stdin (standard input) as arguments
+for a command to run, the data will be sent to stdin (standard input)
+of the command.
+
+The typical situation is:
+
+ command_A | command_B | command_C
+
+where command_B is slow, and you want to speed up command_B.
+
+=head2 Chunk size
+
+By default GNU B<parallel> will start an instance of command_B, read a
+chunk of 1 MB, and pass that to the instance. Then start another
+instance, read another chunk, and pass that to the second instance.
+
+ cat num1000000 | parallel --pipe wc
+
+Output (the order may be different):
+
+ 165668 165668 1048571
+ 149797 149797 1048579
+ 149796 149796 1048572
+ 149797 149797 1048579
+ 149797 149797 1048579
+ 149796 149796 1048572
+ 85349 85349 597444
+
+The size of the chunk is not exactly 1 MB because GNU B<parallel>
+only passes full lines - never half a line - so the block size is only
+1 MB on average. You can change the block size to 2 MB with B<--block>:
+
+ cat num1000000 | parallel --pipe --block 2M wc
+
+Output (the order may be different):
+
+ 315465 315465 2097150
+ 299593 299593 2097151
+ 299593 299593 2097151
+ 85349 85349 597444
+
+GNU B<parallel> treats each line as a record. If the order of records
+is unimportant (e.g. you need all lines processed, but you do not care
+which is processed first), then you can use B<--roundrobin>. Without
+B<--roundrobin> GNU B<parallel> will start a command per block; with
+B<--roundrobin> only the requested number of jobs will be started
+(B<--jobs>). The records will then be distributed between the running
+jobs:
+
+ cat num1000000 | parallel --pipe -j4 --roundrobin wc
+
+Output will be similar to:
+
+ 149797 149797 1048579
+ 299593 299593 2097151
+ 315465 315465 2097150
+ 235145 235145 1646016
+
+One of the 4 instances got a single block, 2 instances got 2 full
+blocks each, and one instance got 1 full and 1 partial block.
+
+=head2 Records
+
+GNU B<parallel> sees the input as records. The default record is a single
+line.
+
+Using B<-N140000> GNU B<parallel> will read 140000 records at a time:
+
+ cat num1000000 | parallel --pipe -N140000 wc
+
+Output (the order may be different):
+
+ 140000 140000 868895
+ 140000 140000 980000
+ 140000 140000 980000
+ 140000 140000 980000
+ 140000 140000 980000
+ 140000 140000 980000
+ 140000 140000 980000
+ 20000 20000 140001
+
+Note that the last job could not get the full 140000 lines, but only
+20000 lines.
+
+If a record is 75 lines, B<-L> can be used:
+
+ cat num1000000 | parallel --pipe -L75 wc
+
+Output (the order may be different):
+
+ 165600 165600 1048095
+ 149850 149850 1048950
+ 149775 149775 1048425
+ 149775 149775 1048425
+ 149850 149850 1048950
+ 149775 149775 1048425
+ 85350 85350 597450
+ 25 25 176
+
+Note how GNU B<parallel> still reads a block of around 1 MB; but
+instead of passing full lines to B<wc> it passes 75 full lines at a
+time. This of course does not hold for the last job (which in this
+case got 25 lines).
+
+=head2 Fixed length records
+
+Fixed length records can be processed by setting B<--recend ''> and
+B<--block I<recordsize>>. A header of size I<n> can be processed with
+B<--header .{I<n>}>.
+
+Here is how to process a file with a 4-byte header and a 3-byte record
+size:
+
+ cat fixedlen | parallel --pipe --header .{4} --block 3 --recend '' \
+ 'echo start; cat; echo'
+
+Output:
+
+ start
+ HHHHAAA
+ start
+ HHHHCCC
+ start
+ HHHHBBB
+
+It may be more efficient to increase B<--block> to a multiple of the
+record size.
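+
+For example, with the 3-byte records above, B<--block 30> would pass
+10 records to each job:
+
+  cat fixedlen | parallel --pipe --header .{4} --block 30 --recend '' \
+    'echo start; cat; echo'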
+
+=head2 Record separators
+
+GNU B<parallel> uses separators to determine where two records split.
+
+B<--recstart> gives the string that starts a record; B<--recend> gives the
+string that ends a record. The default is B<--recend '\n'> (newline).
+
+If both B<--recend> and B<--recstart> are given, then the record will only
+split if the recend string is immediately followed by the recstart
+string.
+
+Here the B<--recend> is set to B<', '>:
+
+ echo /foo, bar/, /baz, qux/, | \
+ parallel -kN1 --recend ', ' --pipe echo JOB{#}\;cat\;echo END
+
+Output:
+
+ JOB1
+ /foo, END
+ JOB2
+ bar/, END
+ JOB3
+ /baz, END
+ JOB4
+ qux/,
+ END
+
+Here the B<--recstart> is set to B</>:
+
+ echo /foo, bar/, /baz, qux/, | \
+ parallel -kN1 --recstart / --pipe echo JOB{#}\;cat\;echo END
+
+Output:
+
+ JOB1
+ /foo, barEND
+ JOB2
+ /, END
+ JOB3
+ /baz, quxEND
+ JOB4
+ /,
+ END
+
+Here both B<--recend> and B<--recstart> are set:
+
+ echo /foo, bar/, /baz, qux/, | \
+ parallel -kN1 --recend ', ' --recstart / --pipe \
+ echo JOB{#}\;cat\;echo END
+
+Output:
+
+ JOB1
+ /foo, bar/, END
+ JOB2
+ /baz, qux/,
+ END
+
+Note the difference between setting one string and setting both strings.
+
+With B<--regexp> the B<--recend> and B<--recstart> will be treated as
+a regular expression:
+
+ echo foo,bar,_baz,__qux, | \
+ parallel -kN1 --regexp --recend ,_+ --pipe \
+ echo JOB{#}\;cat\;echo END
+
+Output:
+
+ JOB1
+ foo,bar,_END
+ JOB2
+ baz,__END
+ JOB3
+ qux,
+ END
+
+GNU B<parallel> can remove the record separators with
+B<--remove-rec-sep>/B<--rrs>:
+
+ echo foo,bar,_baz,__qux, | \
+ parallel -kN1 --rrs --regexp --recend ,_+ --pipe \
+ echo JOB{#}\;cat\;echo END
+
+Output:
+
+ JOB1
+ foo,barEND
+ JOB2
+ bazEND
+ JOB3
+ qux,
+ END
+
+=head2 Header
+
+If the input data has a header, the header can be repeated for each
+job by matching the header with B<--header>. If headers start with
+B<%> you can do this:
+
+ cat num_%header | \
+ parallel --header '(%.*\n)*' --pipe -N3 echo JOB{#}\;cat
+
+Output (the order may be different):
+
+ JOB1
+ %head1
+ %head2
+ 1
+ 2
+ 3
+ JOB2
+ %head1
+ %head2
+ 4
+ 5
+ 6
+ JOB3
+ %head1
+ %head2
+ 7
+ 8
+ 9
+ JOB4
+ %head1
+ %head2
+ 10
+
+If the header is 2 lines, B<--header 2> will work:
+
+ cat num_%header | parallel --header 2 --pipe -N3 echo JOB{#}\;cat
+
+Output: Same as above.
+
+=head2 --pipepart
+
+B<--pipe> is not very efficient. It maxes out at around 500
+MB/s. B<--pipepart> can easily deliver 5 GB/s. But there are a few
+limitations. The input has to be a normal file (not a pipe) given by
+B<-a> or B<::::> and B<-L>/B<-l>/B<-N> do not work. B<--recend> and
+B<--recstart>, however, I<do> work, and records can often be split on
+that alone.
+
+ parallel --pipepart -a num1000000 --block 3m wc
+
+Output (the order may be different):
+
+ 444443 444444 3000002
+ 428572 428572 3000004
+ 126985 126984 888890
+
+
+=head1 Shebang
+
+=head2 Input data and parallel command in the same file
+
+GNU B<parallel> is often called as this:
+
+ cat input_file | parallel command
+
+With B<--shebang> the I<input_file> and B<parallel> can be combined into the same script.
+
+UNIX shell scripts start with a shebang line like this:
+
+ #!/bin/bash
+
+GNU B<parallel> can do that, too. With B<--shebang> the arguments can be
+listed in the file. The B<parallel> command is the first line of the
+script:
+
+ #!/usr/bin/parallel --shebang -r echo
+
+ foo
+ bar
+ baz
+
+Output (the order may be different):
+
+ foo
+ bar
+ baz
+
+=head2 Parallelizing existing scripts
+
+GNU B<parallel> is often called as this:
+
+ cat input_file | parallel command
+ parallel command ::: foo bar
+
+If B<command> is a script, B<parallel> can be combined with it into a
+single file, so that this will run the script in parallel:
+
+ cat input_file | command
+ command foo bar
+
+This B<perl> script B<perl_echo> works like B<echo>:
+
+ #!/usr/bin/perl
+
+ print "@ARGV\n"
+
+It can be called as this:
+
+ parallel perl_echo ::: foo bar
+
+By changing the B<#!>-line it can be run in parallel:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/perl
+
+ print "@ARGV\n"
+
+Thus this will work:
+
+ perl_echo foo bar
+
+Output (the order may be different):
+
+ foo
+ bar
+
+This technique can be used for:
+
+=over 9
+
+=item Perl:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/perl
+
+ print "Arguments @ARGV\n";
+
+
+=item Python:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/python
+
+ import sys
+ print 'Arguments', str(sys.argv)
+
+
+=item Bash/sh/zsh/Korn shell:
+
+ #!/usr/bin/parallel --shebang-wrap /bin/bash
+
+ echo Arguments "$@"
+
+
+=item csh:
+
+ #!/usr/bin/parallel --shebang-wrap /bin/csh
+
+ echo Arguments "$argv"
+
+
+=item Tcl:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/tclsh
+
+ puts "Arguments $argv"
+
+
+=item R:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/Rscript --vanilla --slave
+
+ args <- commandArgs(trailingOnly = TRUE)
+ print(paste("Arguments ",args))
+
+
+=item GNUplot:
+
+ #!/usr/bin/parallel --shebang-wrap ARG={} /usr/bin/gnuplot
+
+ print "Arguments ", system('echo $ARG')
+
+
+=item Ruby:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/ruby
+
+ print "Arguments "
+ puts ARGV
+
+
+=item Octave:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/octave
+
+ printf ("Arguments");
+ arg_list = argv ();
+ for i = 1:nargin
+ printf (" %s", arg_list{i});
+ endfor
+ printf ("\n");
+
+=item Common LISP:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/clisp
+
+ (format t "~&~S~&" 'Arguments)
+ (format t "~&~S~&" *args*)
+
+=item PHP:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/php
+ <?php
+ echo "Arguments";
+ foreach(array_slice($argv,1) as $v)
+ {
+ echo " $v";
+ }
+ echo "\n";
+ ?>
+
+=item Node.js:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/node
+
+ var myArgs = process.argv.slice(2);
+ console.log('Arguments ', myArgs);
+
+=item LUA:
+
+ #!/usr/bin/parallel --shebang-wrap /usr/bin/lua
+
+ io.write "Arguments"
+ for a = 1, #arg do
+ io.write(" ")
+ io.write(arg[a])
+ end
+ print("")
+
+=item C#:
+
+ #!/usr/bin/parallel --shebang-wrap ARGV={} /usr/bin/csharp
+
+ var argv = Environment.GetEnvironmentVariable("ARGV");
+ print("Arguments "+argv);
+
+=back
+
+=head1 Semaphore
+
+GNU B<parallel> can work as a counting semaphore. This is slower and less
+efficient than its normal mode.
+
+A counting semaphore is like a row of toilets. People needing a toilet
+can use any toilet, but if there are more people than toilets, they
+will have to wait for one of the toilets to become available.
+
+An alias for B<parallel --semaphore> is B<sem>.
+
+B<sem> will follow a person to the toilets, wait until a toilet is
+available, leave the person in the toilet and exit.
+
+B<sem --fg> will follow a person to the toilets, wait until a toilet is
+available, stay with the person in the toilet and exit when the person
+exits.
+
+B<sem --wait> will wait for all persons to leave the toilets.
+
+B<sem> does not have a queue discipline, so the next person is chosen
+randomly.
+
+B<-j> sets the number of toilets.
+
+=head2 Mutex
+
+The default is to have only one toilet (this is called a mutex). The
+program is started in the background and B<sem> exits immediately. Use
+B<--wait> to wait for all B<sem>s to finish:
+
+ sem 'sleep 1; echo The first finished' &&
+ echo The first is now running in the background &&
+ sem 'sleep 1; echo The second finished' &&
+ echo The second is now running in the background
+ sem --wait
+
+Output:
+
+ The first is now running in the background
+ The first finished
+ The second is now running in the background
+ The second finished
+
+The command can be run in the foreground with B<--fg>, which will only
+exit when the command completes:
+
+ sem --fg 'sleep 1; echo The first finished' &&
+ echo The first finished running in the foreground &&
+ sem --fg 'sleep 1; echo The second finished' &&
+ echo The second finished running in the foreground
+ sem --wait
+
+The difference between this and just running the command is that a
+mutex is set, so if other B<sem>s were running in the background only
+one would run at a time.
+
+To control which semaphore is used, use
+B<--semaphorename>/B<--id>. Run this in one terminal:
+
+ sem --id my_id -u 'echo First started; sleep 10; echo First done'
+
+and simultaneously this in another terminal:
+
+ sem --id my_id -u 'echo Second started; sleep 10; echo Second done'
+
+Note how the second will only be started when the first has finished.
+
+=head2 Counting semaphore
+
+A mutex is like having a single toilet: When it is in use everyone
+else will have to wait. A counting semaphore is like having multiple
+toilets: Several people can use the toilets, but when they all are in
+use, everyone else will have to wait.
+
+B<sem> can emulate a counting semaphore. Use B<--jobs> to set the
+number of toilets like this:
+
+ sem --jobs 3 --id my_id -u 'echo Start 1; sleep 5; echo 1 done' &&
+ sem --jobs 3 --id my_id -u 'echo Start 2; sleep 6; echo 2 done' &&
+ sem --jobs 3 --id my_id -u 'echo Start 3; sleep 7; echo 3 done' &&
+ sem --jobs 3 --id my_id -u 'echo Start 4; sleep 8; echo 4 done' &&
+ sem --wait --id my_id
+
+Output:
+
+ Start 1
+ Start 2
+ Start 3
+ 1 done
+ Start 4
+ 2 done
+ 3 done
+ 4 done
+
+=head2 Timeout
+
+With B<--semaphoretimeout> you can force running the command anyway after
+a period (positive number) or give up (negative number):
+
+ sem --id foo -u 'echo Slow started; sleep 5; echo Slow ended' &&
+ sem --id foo --semaphoretimeout 1 'echo Forced running after 1 sec' &&
+ sem --id foo --semaphoretimeout -2 'echo Give up after 2 secs'
+ sem --id foo --wait
+
+Output:
+
+ Slow started
+ parallel: Warning: Semaphore timed out. Stealing the semaphore.
+ Forced running after 1 sec
+ parallel: Warning: Semaphore timed out. Exiting.
+ Slow ended
+
+Note how the 'Give up' was not run.
+
+=head1 Informational
+
+GNU B<parallel> has some options to give short information about the
+configuration.
+
+B<--help> will print a summary of the most important options:
+
+ parallel --help
+
+Output:
+
+ Usage:
+
+ parallel [options] [command [arguments]] < list_of_arguments
+ parallel [options] [command [arguments]] (::: arguments|:::: argfile(s))...
+ cat ... | parallel --pipe [options] [command [arguments]]
+
+ -j n Run n jobs in parallel
+ -k Keep same order
+ -X Multiple arguments with context replace
+ --colsep regexp Split input on regexp for positional replacements
+ {} {.} {/} {/.} {#} {%} {= perl code =} Replacement strings
+ {3} {3.} {3/} {3/.} {=3 perl code =} Positional replacement strings
+ With --plus: {} = {+/}/{/} = {.}.{+.} = {+/}/{/.}.{+.} = {..}.{+..} =
+ {+/}/{/..}.{+..} = {...}.{+...} = {+/}/{/...}.{+...}
+
+ -S sshlogin Example: foo@server.example.com
+ --slf .. Use ~/.parallel/sshloginfile as the list of sshlogins
+ --trc {}.bar Shorthand for --transfer --return {}.bar --cleanup
+ --onall Run the given command with argument on all sshlogins
+ --nonall Run the given command with no arguments on all sshlogins
+
+ --pipe Split stdin (standard input) to multiple jobs.
+ --recend str Record end separator for --pipe.
+ --recstart str Record start separator for --pipe.
+
+ See 'man parallel' for details
+
+ Academic tradition requires you to cite works you base your article on.
+ When using programs that use GNU Parallel to process data for publication
+ please cite:
+
+ O. Tange (2011): GNU Parallel - The Command-Line Power Tool,
+ ;login: The USENIX Magazine, February 2011:42-47.
+
+ This helps funding further development; AND IT WON'T COST YOU A CENT.
+ If you pay 10000 EUR you should feel free to use GNU Parallel without citing.
+
+When asking for help, always report the full output of this:
+
+ parallel --version
+
+Output:
+
+ GNU parallel 20230122
+ Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software
+ Foundation, Inc.
+ License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>
+ This is free software: you are free to change and redistribute it.
+ GNU parallel comes with no warranty.
+
+ Web site: https://www.gnu.org/software/parallel
+
+ When using programs that use GNU Parallel to process data for publication
+ please cite as described in 'parallel --citation'.
+
+In scripts B<--minversion> can be used to ensure the user has at least
+this version:
+
+ parallel --minversion 20130722 && \
+ echo Your version is at least 20130722.
+
+Output:
+
+ 20160322
+ Your version is at least 20130722.
+
+If you are using GNU B<parallel> for research the BibTeX citation can be
+generated using B<--citation>:
+
+ parallel --citation
+
+Output:
+
+ Academic tradition requires you to cite works you base your article on.
+ When using programs that use GNU Parallel to process data for publication
+ please cite:
+
+ @article{Tange2011a,
+ title = {GNU Parallel - The Command-Line Power Tool},
+ author = {O. Tange},
+ address = {Frederiksberg, Denmark},
+ journal = {;login: The USENIX Magazine},
+ month = {Feb},
+ number = {1},
+ volume = {36},
+ url = {https://www.gnu.org/s/parallel},
+ year = {2011},
+ pages = {42-47},
+ doi = {10.5281/zenodo.16303}
+ }
+
+ (Feel free to use \nocite{Tange2011a})
+
+ This helps funding further development; AND IT WON'T COST YOU A CENT.
+ If you pay 10000 EUR you should feel free to use GNU Parallel without citing.
+
+ If you send a copy of your published article to tange@gnu.org, it will be
+ mentioned in the release notes of next version of GNU Parallel.
+
+With B<--max-line-length-allowed> GNU B<parallel> will report the
+maximum size of the command line:
+
+ parallel --max-line-length-allowed
+
+Output (may vary on different systems):
+
+ 131071
+
+B<--number-of-cpus> and B<--number-of-cores> run system specific code to
+determine the number of CPUs and CPU cores on the system. On
+unsupported platforms they will return 1:
+
+ parallel --number-of-cpus
+ parallel --number-of-cores
+
+Output (may vary on different systems):
+
+ 4
+ 64
+
+=head1 Profiles
+
+The defaults for GNU B<parallel> can be changed systemwide by putting the
+command line options in B</etc/parallel/config>. They can be changed for
+a user by putting them in B<~/.parallel/config>.
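+
+A sketch: make B<-j 150%> the default for your user:
+
+  echo '-j 150%' > ~/.parallel/config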
+
+Profiles work the same way, but have to be referred to with B<--profile>:
+
+ echo '--nice 17' > ~/.parallel/nicetimeout
+ echo '--timeout 300%' >> ~/.parallel/nicetimeout
+ parallel --profile nicetimeout echo ::: A B C
+
+Output:
+
+ A
+ B
+ C
+
+Profiles can be combined:
+
+ echo '-vv --dry-run' > ~/.parallel/dryverbose
+ parallel --profile dryverbose --profile nicetimeout echo ::: A B C
+
+Output:
+
+ echo A
+ echo B
+ echo C
+
+
+=head1 Spread the word
+
+I hope you have learned something from this tutorial.
+
+If you like GNU B<parallel>:
+
+=over 2
+
+=item *
+
+(Re-)walk through the tutorial if you have not done so in the past year
+(https://www.gnu.org/software/parallel/parallel_tutorial.html)
+
+=item *
+
+Give a demo at your local user group/your team/your colleagues
+
+=item *
+
+Post the intro videos and the tutorial on Reddit, Mastodon, Diaspora*,
+forums, blogs, Identi.ca, Google+, Twitter, Facebook, Linkedin, and
+mailing lists
+
+=item *
+
+Request or write a review for your favourite blog or magazine
+(especially if you do something cool with GNU B<parallel>)
+
+=item *
+
+Invite me for your next conference
+
+=back
+
+If you use GNU B<parallel> for research:
+
+=over 2
+
+=item *
+
+Please cite GNU B<parallel> in your publications (use B<--citation>)
+
+=back
+
+If GNU B<parallel> saves you money:
+
+=over 2
+
+=item *
+
+(Have your company) donate to FSF or become a member
+https://my.fsf.org/donate/
+
+=back
+
+(C) 2013-2024 Ole Tange, GFDLv1.3+ (See
+LICENSES/GFDL-1.3-or-later.txt)
+
+
+=cut
diff --git a/src/parcat b/src/parcat
new file mode 100755
index 0000000..b4956b0
--- /dev/null
+++ b/src/parcat
@@ -0,0 +1,194 @@
+#!/usr/bin/perl
+
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+use Symbol qw(gensym);
+use IPC::Open3;
+use POSIX qw(:errno_h);
+use IO::Select;
+use strict;
+use threads;
+use threads::shared;
+use Thread::Queue;
+
+
+my $opened :shared;
+my $q = Thread::Queue->new();
+my $okq = Thread::Queue->new();
+my @producers;
+
+if(not @ARGV) {
+ if(-t *STDIN) {
+ print "Usage:\n";
+ print " parcat file(s)\n";
+ print " cat argfile | parcat\n";
+ } else {
+ # Read arguments from stdin
+ chomp(@ARGV = <STDIN>);
+ }
+}
+
+my $files_to_open = 0;
+# Default: fd = stdout
+my $fd = 1;
+for (@ARGV) {
+ # --rm = remove file when opened
+ /^--rm$/ and do { $opt::rm = 1; next; };
+ # -1 = output to fd 1, -2 = output to fd 2
+ /^-(\d+)$/ and do { $fd = $1; next; };
+ push @producers, threads->create('producer', $_, $fd);
+ $files_to_open++;
+}
+
+sub producer {
+ # Open a file/fifo, set non blocking, enqueue fileno of the file handle
+ my $file = shift;
+ my $output_fd = shift;
+ open(my $fh, "<", $file) || do {
+ print STDERR "parcat: Cannot open $file\n";
+ exit(1);
+ };
+ # Remove file when it has been opened
+ if($opt::rm) {
+ unlink $file;
+ }
+ set_fh_non_blocking($fh);
+ $opened++;
+ # Pass the fileno to parent
+ $q->enqueue(fileno($fh),$output_fd);
+ # Get an OK that the $fh is opened and we can release the $fh
+ while(1) {
+ my $ok = $okq->dequeue();
+ if($ok == fileno($fh)) { last; }
+ # Not ours - very unlikely to happen
+ $okq->enqueue($ok);
+ }
+ return;
+}
+
+my $s = IO::Select->new();
+my %buffer;
+
+sub add_file {
+ my $infd = shift;
+ my $outfd = shift;
+ open(my $infh, "<&=", $infd) || die;
+ open(my $outfh, ">&=", $outfd) || die;
+ $s->add($infh);
+ # Tell the producer now opened here and can be released
+ $okq->enqueue($infd);
+ # Initialize the buffer
+ @{$buffer{$infh}{$outfd}} = ();
+ $Global::fh{$outfd} = $outfh;
+}
+
+sub add_files {
+ # Non-blocking dequeue
+ my ($infd,$outfd);
+ do {
+ ($infd,$outfd) = $q->dequeue_nb(2);
+ if(defined($outfd)) {
+ add_file($infd,$outfd);
+ }
+ } while(defined($outfd));
+}
+
+sub add_files_block {
+ # Blocking dequeue
+ my ($infd,$outfd) = $q->dequeue(2);
+ add_file($infd,$outfd);
+}
+
+
+my $fd;
+my (@ready,$infh,$rv,$buf);
+do {
+ # Wait until at least one file is opened
+ add_files_block();
+ while($q->pending or keys %buffer) {
+ add_files();
+ while(keys %buffer) {
+ @ready = $s->can_read(0.01);
+ if(not @ready) {
+ add_files();
+ }
+ for $infh (@ready) {
+ # There is only one key, namely the output file descriptor
+ for my $outfd (keys %{$buffer{$infh}}) {
+ $rv = sysread($infh, $buf, 65536);
+ if (!$rv) {
+ if($! == EAGAIN) {
+ # Would block: Nothing read
+ next;
+ } else {
+ # Nothing read, but would not block:
+ # This file is done
+ $s->remove($infh);
+ for(@{$buffer{$infh}{$outfd}}) {
+ syswrite($Global::fh{$outfd},$_);
+ }
+ delete $buffer{$infh};
+ # Closing the $infh causes it to block
+ # close $infh;
+ add_files();
+ next;
+ }
+ }
+ # Something read.
+ # Find \n or \r for full line
+ my $i = (rindex($buf,"\n")+1);
+ if($i) {
+ # Print full line
+ for(@{$buffer{$infh}{$outfd}}, substr($buf,0,$i)) {
+ syswrite($Global::fh{$outfd},$_);
+ }
+ # @buffer = remaining half line
+ $buffer{$infh}{$outfd} = [substr($buf,$i,$rv-$i)];
+ } else {
+ # Something read, but not a full line
+ push @{$buffer{$infh}{$outfd}}, $buf;
+ }
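+		    # Something was read: redo re-runs the loop body for
+		    # the same filehandle, draining it until it would
+		    # block (EAGAIN) or hits EOF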
+ redo;
+ }
+ }
+ }
+ }
+} while($opened < $files_to_open);
+
+
+for (@producers) {
+ $_->join();
+}
+
+sub set_fh_non_blocking {
+ # Set filehandle as non-blocking
+ # Inputs:
+    #   $fh = filehandle to be made non-blocking
+ # Returns:
+ # N/A
+ my $fh = shift;
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+ my $flags;
+ fcntl($fh, &F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
+ $flags |= &O_NONBLOCK; # Add non-blocking to the flags
+ fcntl($fh, &F_SETFL, $flags) || die $!; # Set the flags on the filehandle
+}
diff --git a/src/parcat.pod b/src/parcat.pod
new file mode 100644
index 0000000..f41e530
--- /dev/null
+++ b/src/parcat.pod
@@ -0,0 +1,191 @@
+#!/usr/bin/perl
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=head1 NAME
+
+parcat - cat files or fifos in parallel
+
+=head1 SYNOPSIS
+
+B<parcat> [--rm] [-#] file(s) [-#] file(s)
+
+=head1 DESCRIPTION
+
+GNU B<parcat> reads files or fifos in parallel. It writes full lines
+so there will be no problem with mixed half-lines, which you risk if
+you use:
+
+ (cat file1 & cat file2 &) | ...
+
+It is faster than doing:
+
+ parallel -j0 --lb cat ::: file*
+
+Arguments can be given on the command line or passed in on stdin
+(standard input).
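+
+A sketch, with placeholder file names:
+
+  printf '%s\n' file1 file2 | parcat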
+
+=head1 OPTIONS
+
+=over 9
+
+=item -B<#>
+
+Arguments following this will be sent to the file descriptor B<#>. E.g.
+
+ parcat -1 stdout1 stdout2 -2 stderr1 stderr2
+
+will send I<stdout1> and I<stdout2> to stdout (standard output = file
+descriptor 1), and send I<stderr1> and I<stderr2> to stderr (standard
+error = file descriptor 2).
+
+=item --rm
+
+Remove files after opening: as soon as the files are opened, unlink
+them.
+
+=back
+
+=head1 EXAMPLES
+
+=head2 Simple line buffered output
+
+B<traceroute> will often print half a line. If run in parallel, two
+instances may mix half-lines of their output. This can be avoided by
+saving the output to a fifo and then using B<parcat> to read the two
+fifos in parallel:
+
+ mkfifo freenetproject.org.fifo tange.dk.fifo
+ traceroute freenetproject.org > freenetproject.org.fifo &
+ traceroute tange.dk > tange.dk.fifo &
+ parcat --rm *fifo
+
+
+=head1 REPORTING BUGS
+
+GNU B<parcat> is part of GNU B<parallel>. Report bugs to
+<bug-parallel@gnu.org>.
+
+
+=head1 AUTHOR
+
+Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+=head1 LICENSE
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+at your option any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this
+documentation under the terms of the GNU Free Documentation License,
+Version 1.3 or any later version published by the Free Software
+Foundation; with no Invariant Sections, with no Front-Cover Texts, and
+with no Back-Cover Texts. A copy of the license is included in the
+file LICENSES/GFDL-1.3-or-later.txt.
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 9
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file as
+LICENCES/CC-BY-SA-4.0.txt
+
+
+=head1 DEPENDENCIES
+
+GNU B<parcat> uses Perl.
+
+
+=head1 SEE ALSO
+
+B<cat>(1), B<parallel>(1)
+
+=cut
diff --git a/src/parset b/src/parset
new file mode 100755
index 0000000..ba865d6
--- /dev/null
+++ b/src/parset
@@ -0,0 +1,138 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2016-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+grepq() {
+ # grep -q for systems without -q
+ grep >/dev/null 2>/dev/null "$@"
+}
+
+installer() {
+ source="$1"
+ script="$2"
+ into="$3"
+ if grepq $script $into; then
+ true already installed
+ else
+ echo $source \`which $script\` >> $into
+ fi
+}
+
+while test $# -gt 0; do
+ key="$1"
+
+ case $key in
+ -i|--install)
+ installer . env_parallel.bash $HOME/.bashrc
+ installer . env_parallel.sh $HOME/.shrc
+ installer . env_parallel.zsh $HOME/.zshenv
+ installer source env_parallel.ksh $HOME/.kshrc
+        installer source env_parallel.mksh $HOME/.mkshrc
+ echo $SHELL | grepq /pdksh &&
+ installer . env_parallel.pdksh $HOME/.profile
+ echo $SHELL | grepq /ash &&
+ installer . env_parallel.ash $HOME/.profile
+ echo $SHELL | grepq /dash &&
+ installer . env_parallel.dash $HOME/.profile
+ installer source env_parallel.csh $HOME/.cshrc
+ installer source env_parallel.tcsh $HOME/.tcshrc
+ mkdir -p $HOME/.config/fish
+ grepq env_parallel.fish $HOME/.config/fish/config.fish ||
+ echo '. (which env_parallel.fish)' >> $HOME/.config/fish/config.fish
+ echo 'Installed env_parallel in:'
+ echo " " $HOME/.bashrc
+ echo " " $HOME/.shrc
+ echo " " $HOME/.zshenv
+ echo " " $HOME/.config/fish/config.fish
+ echo " " $HOME/.kshrc
+ echo " " $HOME/.mkshrc
+ echo " " $HOME/.profile
+ echo " " $HOME/.cshrc
+ echo " " $HOME/.tcshrc
+ exit
+ ;;
+ *)
+ echo "Unknown option: $key"
+ ;;
+ esac
+ shift # past argument or value
+done
+
+
+cat <<'_EOS'
+
+parset only works if it is a function. The function is defined as part
+of env_parallel.
+
+Do the below and restart your shell.
+
+bash: Put this in $HOME/.bashrc: . `which env_parallel.bash`
+ E.g. by doing: echo '. `which env_parallel.bash`' >> $HOME/.bashrc
+ Supports: variables, aliases, functions, arrays
+
+ksh: Put this in $HOME/.kshrc: source `which env_parallel.ksh`
+ E.g. by doing: echo 'source `which env_parallel.ksh`' >> $HOME/.kshrc
+ Supports: variables, aliases, functions, arrays
+
+mksh: Put this in $HOME/.mkshrc: source `which env_parallel.mksh`
+ E.g. by doing: echo 'source `which env_parallel.mksh`' >> $HOME/.mkshrc
+ Supports: variables, aliases, functions, arrays
+
+pdksh: Put this in $HOME/.profile: source `which env_parallel.pdksh`
+ E.g. by doing: echo '. `which env_parallel.pdksh`' >> $HOME/.profile
+ Supports: variables, aliases, functions, arrays
+
+zsh: Put this in $HOME/.zshenv: . `which env_parallel.zsh`
+ E.g. by doing: echo '. `which env_parallel.zsh`' >> $HOME/.zshenv
+ Supports: variables, functions, arrays
+
+ash: Put this in $HOME/.profile: . `which env_parallel.ash`
+ E.g. by doing: echo '. `which env_parallel.ash`' >> $HOME/.profile
+ Supports: variables, aliases
+
+dash: Put this in $HOME/.profile: . `which env_parallel.dash`
+ E.g. by doing: echo '. `which env_parallel.dash`' >> $HOME/.profile
+ Supports: variables, aliases
+
+fish: Unsupported
+
+csh: Unsupported
+
+tcsh: Unsupported
+
+To install in all shells run:
+
+ parset --install
+
+In a script you need to run this before using parset:
+
+bash: . `which env_parallel.bash`
+ksh: source `which env_parallel.ksh`
+mksh: source `which env_parallel.mksh`
+pdksh: source `which env_parallel.pdksh`
+zsh: . `which env_parallel.zsh`
+ash: . `which env_parallel.ash`
+dash: . `which env_parallel.dash`
+
+For details: see man parset
+
+_EOS
diff --git a/src/parset.pod b/src/parset.pod
new file mode 100644
index 0000000..ef381db
--- /dev/null
+++ b/src/parset.pod
@@ -0,0 +1,327 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=encoding utf8
+
+=head1 NAME
+
+parset - set shell variables in parallel
+
+
+=head1 SYNOPSIS
+
+B<parset> I<variablename> [options for GNU Parallel]
+
+B<env_parset> I<variablename> [options for GNU Parallel]
+
+=head1 DESCRIPTION
+
+B<parset> is a shell function that puts the output from GNU
+B<parallel> into shell variables.
+
+B<env_parset> is a shell function that puts the output from
+B<env_parallel> into shell variables.
+
+The B<parset> and B<env_parset> functions are defined as part of
+B<env_parallel>.
+
+If I<variablename> is a single variable name, it will be treated as
+the destination variable. If the variable is defined as an
+associative array (using B<typeset -A myassoc>), that array will be
+used. Otherwise the variable will be made into a normal array.
+
+If I<variablename> contains multiple names separated by ',' or space,
+the names will be the destination variables. The number of names must
+be at least the number of jobs.
+
+
+=head1 OPTIONS
+
+Same as GNU B<parallel>, but they are put I<after> the destination
+variable.
+
+
+=head1 SUPPORTED SHELLS
+
+=head2 Bash/Zsh/Ksh/Mksh
+
+=head3 Examples
+
+Put output into B<myarray>:
+
+ parset myarray seq 3 ::: 4 5 6
+ echo "${myarray[1]}"
+
+Put output into vars B<$seq, $pwd, $ls>:
+
+ parset "seq pwd ls" ::: "seq 10" pwd ls
+ echo "$ls"
+
+Put output into vars B<$seq, $pwd, $ls>:
+
+ into_vars=(seq pwd ls)
+ parset "${into_vars[*]}" ::: "seq 10" pwd ls
+ echo "$ls"
+
+Put output into associative array B<myassoc> (not supported for mksh):
+
+ typeset -A myassoc
+ parset myassoc seq ::: 4 5 ::: 6 7
+ echo "${myassoc[4 7]}"
+
+The commands to run can be an array:
+
+ cmd=("echo first" "echo '<<joe \"double space\" cartoon>>'" "pwd")
+ parset data ::: "${cmd[@]}"
+ echo "${data[1]}"
+ echo "${data[2]}"
+
+B<parset> can read from stdin (standard input) if it is a file:
+
+ parset res echo < parallel_input_file
+
+but B<parset> can I<not> be part of a pipe. In particular this means
+it cannot read from a pipe or write to a pipe:
+
+ seq 10 | parset res echo Does not work
+
+but must instead use a tempfile:
+
+ seq 10 > parallel_input
+ parset res echo :::: parallel_input
+ echo "${res[1]}"
+ echo "${res[9]}"
+
+or a FIFO:
+
+ mkfifo input_fifo
+ seq 30 > input_fifo &
+ parset res echo :::: input_fifo
+ echo "${res[1]}"
+ echo "${res[29]}"
+
+or Bash/Zsh/Ksh process substitution:
+
+ parset res echo :::: <(seq 100)
+ echo "${res[1]}"
+ echo "${res[99]}"
+
+
+=head3 Installation
+
+Put this in the relevant B<$HOME/.bashrc> or B<$HOME/.zshenv> or B<$HOME/.kshrc>:
+
+ . `which env_parallel.bash`
+ . `which env_parallel.zsh`
+ source `which env_parallel.ksh`
+
+E.g. by doing:
+
+ echo '. `which env_parallel.bash`' >> $HOME/.bashrc
+ echo '. `which env_parallel.zsh`' >> $HOME/.zshenv
+ echo 'source `which env_parallel.ksh`' >> $HOME/.kshrc
+
+or by doing:
+
+ env_parallel --install
+
+
+=head2 ash/dash (FreeBSD's /bin/sh)
+
+=head3 Examples
+
+ash does not support arrays.
+
+Put output into vars B<$seq, $pwd, $ls>:
+
+ parset "seq pwd ls" ::: "seq 10" pwd ls
+ echo "$ls"
+
+B<parset> can read from stdin (standard input) if it is a file:
+
+ parset res1,res2,res3 echo < parallel_input_file
+
+but B<parset> can not be part of a pipe. In particular this means it
+cannot read from a pipe or write to a pipe:
+
+ seq 3 | parset res1,res2,res3 echo Does not work
+
+but must instead use a tempfile:
+
+ seq 3 > parallel_input
+ parset res1,res2,res3 echo :::: parallel_input
+ echo "$res1"
+ echo "$res2"
+ echo "$res3"
+
+or a FIFO:
+
+ mkfifo input_fifo
+ seq 3 > input_fifo &
+ parset res1,res2,res3 echo :::: input_fifo
+ echo "$res1"
+ echo "$res2"
+ echo "$res3"
+
+=head3 Installation
+
+Put the relevant one of these into B<$HOME/.profile>:
+
+ . `which env_parallel.sh`
+ . `which env_parallel.ash`
+ . `which env_parallel.dash`
+
+E.g. by doing:
+
+  echo '. `which env_parallel.ash`' >> $HOME/.profile
+
+or by doing:
+
+ env_parallel --install
+
+
+=head1 EXIT STATUS
+
+Same as GNU B<parallel>.
+
+
+=head1 AUTHOR
+
+When using GNU B<parallel> for a publication please cite:
+
+O. Tange (2011): GNU Parallel - The Command-Line Power Tool, ;login:
+The USENIX Magazine, February 2011:42-47.
+
+This helps funding further development; and it won't cost you a cent.
+If you pay 10000 EUR you should feel free to use GNU Parallel without citing.
+
+Copyright (C) 2007-10-18 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2008-2010 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+
+=head1 LICENSE
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+at your option any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this
+documentation under the terms of the GNU Free Documentation License,
+Version 1.3 or any later version published by the Free Software
+Foundation; with no Invariant Sections, with no Front-Cover Texts, and
+with no Back-Cover Texts. A copy of the license is included in the
+file LICENSES/GFDL-1.3-or-later.txt.
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 2
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=back
+
+=over 9
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file as
+LICENCES/CC-BY-SA-4.0.txt
+
+=head1 DEPENDENCIES
+
+B<parset> uses GNU B<parallel>.
+
+
+=head1 SEE ALSO
+
+B<parallel>(1), B<env_parallel>(1), B<bash>(1).
+
+
+=cut
diff --git a/src/parsort b/src/parsort
new file mode 100755
index 0000000..41a75da
--- /dev/null
+++ b/src/parsort
@@ -0,0 +1,459 @@
+#!/usr/bin/perl
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+=pod
+
+=head1 NAME
+
+parsort - Sort (big files) in parallel
+
+
+=head1 SYNOPSIS
+
+B<parsort> I<options for sort>
+
+
+=head1 DESCRIPTION
+
+B<parsort> uses GNU B<sort> to sort in parallel. It works just like
+B<sort> but faster on inputs with more than 1 M lines, if you have a
+multicore machine.
+
+Hopefully these ideas will make it into GNU B<sort> in the future.
+
+
+=head1 OPTIONS
+
+Same as B<sort>. Except:
+
+=over 4
+
+=item B<--parallel=>I<N>
+
+Change the number of sorts run concurrently to I<N>. I<N> will be
+increased to the number of files if B<parsort> is given more than
+I<N> files.
+
+=back
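+
+A sketch: run at most 8 concurrent sorts:
+
+  parsort --parallel=8 *.txt > sorted.txt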
+
+
+=head1 EXAMPLE
+
+Sort files:
+
+ parsort *.txt > sorted.txt
+
+Sort stdin (standard input) numerically:
+
+ cat numbers | parsort -n > sorted.txt
+
+
+=head1 PERFORMANCE
+
+B<parsort> is faster on files than on stdin (standard input), because
+different parts of a file can be read in parallel.
+
+On a 48-core machine you should see a speedup of 3x over B<sort>.
+
+
+=head1 AUTHOR
+
+Copyright (C) 2020-2024 Ole Tange,
+http://ole.tange.dk and Free Software Foundation, Inc.
+
+
+=head1 LICENSE
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+at your option any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+=head1 DEPENDENCIES
+
+B<parsort> uses B<sort>, B<bash>, and B<parallel>.
+
+
+=head1 SEE ALSO
+
+B<sort>
+
+
+=cut
+
+use strict;
+use Getopt::Long;
+use POSIX qw(mkfifo);
+
+Getopt::Long::Configure("bundling","require_order");
+
+my @ARGV_before = @ARGV;
+
+GetOptions(
+ "debug|D" => \$opt::D,
+ "version" => \$opt::version,
+ "verbose|v" => \$opt::verbose,
+ "b|ignore-leading-blanks" => \$opt::ignore_leading_blanks,
+ "d|dictionary-order" => \$opt::dictionary_order,
+ "f|ignore-case" => \$opt::ignore_case,
+ "g|general-numeric-sort" => \$opt::general_numeric_sort,
+ "i|ignore-nonprinting" => \$opt::ignore_nonprinting,
+ "M|month-sort" => \$opt::month_sort,
+ "h|human-numeric-sort" => \$opt::human_numeric_sort,
+ "n|numeric-sort" => \$opt::numeric_sort,
+ "N|numascii" => \$opt::numascii,
+ "r|reverse" => \$opt::reverse,
+ "R|random-sort" => \$opt::random_sort,
+ "sort=s" => \$opt::sort,
+ "V|version-sort" => \$opt::version_sort,
+ "k|key=s" => \@opt::key,
+ "t|field-separator=s" => \$opt::field_separator,
+ "z|zero-terminated" => \$opt::zero_terminated,
+ "files0-from=s" => \$opt::files0_from,
+ "random-source=s" => \$opt::dummy,
+ "batch-size=s" => \$opt::dummy,
+ "check=s" => \$opt::dummy,
+ "c" => \$opt::dummy,
+ "C" => \$opt::dummy,
+ "compress-program=s" => \$opt::dummy,
+ "T|temporary-directory=s" => \$opt::dummy,
+ "parallel=s" => \$opt::parallel,
+ "u|unique" => \$opt::dummy,
+ "S|buffer-size=s" => \$opt::dummy,
+ "s|stable" => \$opt::dummy,
+ "help" => \$opt::dummy,
+ ) || exit(255);
+$Global::progname = ($0 =~ m:(^|/)([^/]+)$:)[1];
+$Global::version = 20240222;
+if($opt::version) { version(); exit 0; }
+# Remove -D and --parallel=N
+my @s = (grep { ! /^-D$|^--parallel=\S+$/ }
+ @ARGV_before[0..($#ARGV_before-$#ARGV-1)]);
+my @sortoptions;
+while(@s) {
+ my $o = shift @s;
+ # Remove '--parallel N'
+ if($o eq "--parallel") {
+ $o = shift @s;
+ } else {
+ push @sortoptions, $o;
+ }
+}
+@Global::sortoptions = shell_quote(@sortoptions);
+$ENV{'TMPDIR'} ||= "/tmp";
+
+sub merge {
+ # Input:
+ # @cmd = commands to 'cat' (part of) a file
+ # 'cat a' 'cat b' 'cat c' =>
+ # sort -m <(sort -m <(cat a) <(cat b)) <(sort -m <(cat c))
+ my @cmd = @_;
+ chomp(@cmd);
+ while($#cmd > 0) {
+ my @tmp;
+ while($#cmd >= 0) {
+ my $a = shift @cmd;
+ my $b = shift @cmd;
+ $a &&= "<($a)";
+ $b &&= "<($b)";
+ # This looks like useless use of 'cat', but contrary to
+ # naive belief it increases performance dramatically.
+ push @tmp, "sort -m @Global::sortoptions $a $b | cat"
+ }
+ @cmd = @tmp;
+ }
+ return @cmd;
+}
+
+sub sort_files {
+ # Input is files
+ my @files = @_;
+ # Let GNU Parallel generate the commands to read parts of files
+ # The commands split at \n (or \0)
+ # and there will be at least one for each CPU thread
+ my @subopt;
+ if($opt::zero_terminated) { push @subopt, qw(--recend "\0"); }
+ if($opt::parallel) { push @subopt, qw(--jobs), $opt::parallel; }
+ # $uniq is needed because @files could contain \n
+ my $uniq = join "", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..20);
+ open(my $par,"-|",qw(parallel), @subopt,
+ qw(--pipepart --block -1 --dryrun -vv sort),
+ @Global::sortoptions, $uniq, '::::', @files) || die;
+ # Generated commands:
+ # <file perl-catter | (sort ... $uniq )
+ # Use $uniq to split into commands
+ # (We cannot use \n because 'file' may contain newline)
+ my @cmd = map { "$_)\n" } split(/$uniq[)]\n/, join("",<$par>));
+ debug(1,@cmd);
+ close $par;
+ @cmd = merge(@cmd);
+ # The command uses <(...) so it is incompatible with /bin/sh
+ open(my $bash,"|-","bash") || die;
+ print $bash @cmd;
+ close $bash;
+}
+
+sub sort_stdin {
+ # Input is stdin
+ # Spread the input between n processes that each sort
+ # n = number of CPU threads
+ my $numthreads;
+ chomp($numthreads = $opt::parallel || `parallel --number-of-threads`);
+ my @fifos = map { tmpfifo() } 1..$numthreads;
+ map { mkfifo($_,0600) } @fifos;
+ # This trick removes the fifo as soon as it is connected in the other end
+ # (rm fifo; ...) < fifo
+ my @cmd = (map { "(rm $_; sort @Global::sortoptions) < $_" }
+ map { Q($_) } @fifos);
+ @cmd = merge(@cmd);
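+    # Fork: the child feeds stdin round robin into the fifos;
+    # the parent falls through and runs the merging sorts below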
+ if(fork) {
+ } else {
+ my @subopt = $opt::zero_terminated ? qw(--recend "\0") : ();
+ exec(qw(parallel -0 -j), $numthreads, @subopt,
+ # 286k is the best mean value after testing 250..350
+ qw(--block 286k --pipe --roundrobin cat > {} :::),@fifos);
+ }
+ # The command uses <(...) so it is incompatible with /bin/sh
+ open(my $bash,"|-","bash") || die;
+ print $bash @cmd;
+ close $bash;
+}
+
+sub tmpname {
+ # Select a name that does not exist
+    # Do not create the file; the caller will mkfifo on the name
+ my $name = shift;
+ my($tmpname);
+ if(not -w $ENV{'TMPDIR'}) {
+ if(not -e $ENV{'TMPDIR'}) {
+ ::error("Tmpdir '$ENV{'TMPDIR'}' does not exist.","Try 'mkdir ".
+ Q($ENV{'TMPDIR'})."'");
+ } else {
+ ::error("Tmpdir '$ENV{'TMPDIR'}' is not writable.","Try 'chmod +w ".
+ Q($ENV{'TMPDIR'})."'");
+ }
+ exit(255);
+ }
+ do {
+ $tmpname = $ENV{'TMPDIR'}."/".$name.
+ join"", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $tmpname);
+ return $tmpname;
+}
+
+sub tmpfifo {
+ # Find an unused name and mkfifo on it
+ my $tmpfifo = tmpname("psort");
+ mkfifo($tmpfifo,0600);
+ return $tmpfifo;
+}
+
+sub debug {
+ # Returns: N/A
+ $opt::D or return;
+ @_ = map { defined $_ ? $_ : "" } @_;
+ print STDERR @_[1..$#_];
+}
+
+sub version() {
+ # Returns: N/A
+ print join
+ ("\n",
+ "GNU $Global::progname $Global::version",
+ "Copyright (C) 2020-2024 Ole Tange, http://ole.tange.dk and Free Software",
+ "Foundation, Inc.",
+ "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>",
+ "This is free software: you are free to change and redistribute it.",
+ "GNU $Global::progname comes with no warranty.",
+ "",
+ "Web site: https://www.gnu.org/software/parallel\n",
+ );
+}
+
+sub shell_quote(@) {
+ # Input:
+ # @strings = strings to be quoted
+ # Returns:
+ # @shell_quoted_strings = string quoted as needed by the shell
+ return wantarray ? (map { Q($_) } @_) : (join" ",map { Q($_) } @_);
+}
+
+sub shell_quote_scalar_rc($) {
+ # Quote for the rc-shell
+ my $a = $_[0];
+ if(defined $a) {
+ if(($a =~ s/'/''/g)
+ +
+ ($a =~ s/[\n\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\'\202-\377]+/'$&'/go)) {
+ # A string was replaced
+ # No need to test for "" or \0
+ } elsif($a eq "") {
+ $a = "''";
+ } elsif($a eq "\0") {
+ $a = "";
+ }
+ }
+ return $a;
+}
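+# E.g. (illustrative): shell_quote_scalar_rc("a b") => a' 'b
+# Only the run of special characters (here: the space) is wrapped in
+# quotes; rc concatenates the adjacent words back into one argument.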
+
+sub shell_quote_scalar_csh($) {
+ # Quote for (t)csh
+ my $a = $_[0];
+ if(defined $a) {
+ # $a =~ s/([\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\>\<\~\|\; \"\!\$\&\'\202-\377])/\\$1/g;
+ # This is 1% faster than the above
+ if(($a =~ s/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\'\202-\377]/\\$&/go)
+ +
+ # quote newline in csh as \\\n
+ ($a =~ s/[\n]/"\\\n"/go)) {
+ # A string was replaced
+ # No need to test for "" or \0
+ } elsif($a eq "") {
+ $a = "''";
+ } elsif($a eq "\0") {
+ $a = "";
+ }
+ }
+ return $a;
+}
+
+sub shell_quote_scalar_default($) {
+ # Quote for other shells (Bourne compatibles)
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $shell_quoted = string quoted as needed by the shell
+ my $s = $_[0];
+ if($s =~ /[^-_.+a-z0-9\/]/i) {
+ $s =~ s/'/'"'"'/g; # "-quote single quotes
+ $s = "'$s'"; # '-quote entire string
+ $s =~ s/^''//; # Remove unneeded '' at ends
+ $s =~ s/''$//; # (faster than s/^''|''$//g)
+ return $s;
+ } elsif ($s eq "") {
+ return "''";
+ } else {
+ # No quoting needed
+ return $s;
+ }
+}
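+# E.g. (illustrative):
+#   shell_quote_scalar_default("a b")   => 'a b'
+#   shell_quote_scalar_default("don't") => 'don'"'"'t'
+# A ' cannot appear inside '...', so it is emitted as '"'"' (close the
+# single quotes, a double-quoted ', then open the single quotes again).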
+
+sub shell_quote_scalar($) {
+ # Quote the string so the shell will not expand any special chars
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $shell_quoted = string quoted as needed by the shell
+
+ # Speed optimization: Choose the correct shell_quote_scalar_*
+ # and call that directly from now on
+ no warnings 'redefine';
+ if($Global::cshell) {
+ # (t)csh
+ *shell_quote_scalar = \&shell_quote_scalar_csh;
+ } elsif($Global::shell =~ m:(^|/)rc$:) {
+ # rc-shell
+ *shell_quote_scalar = \&shell_quote_scalar_rc;
+ } else {
+ # other shells
+ *shell_quote_scalar = \&shell_quote_scalar_default;
+ }
+ # The sub is now redefined. Call it
+ return shell_quote_scalar($_[0]);
+}
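+# The first call replaces shell_quote_scalar() itself with the variant
+# matching the shell, so every later call skips the dispatch. E.g.
+# (illustrative, under a Bourne-compatible shell):
+#   shell_quote_scalar("a b");  # first call: picks _default, returns 'a b'
+#   shell_quote_scalar("c d");  # later calls go straight to _default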
+
+sub Q($) {
+ # Q alias for ::shell_quote_scalar
+ my $ret = shell_quote_scalar($_[0]);
+ no warnings 'redefine';
+ *Q = \&::shell_quote_scalar;
+ return $ret;
+}
+
+
+sub status(@) {
+ my @w = @_;
+ my $fh = $Global::status_fd || *STDERR;
+ print $fh map { ($_, "\n") } @w;
+ flush $fh;
+}
+
+sub status_no_nl(@) {
+ my @w = @_;
+ my $fh = $Global::status_fd || *STDERR;
+ print $fh @w;
+ flush $fh;
+}
+
+sub warning(@) {
+ my @w = @_;
+ my $prog = $Global::progname || "parsort";
+ status_no_nl(map { ($prog, ": Warning: ", $_, "\n"); } @w);
+}
+
+{
+ my %warnings;
+ sub warning_once(@) {
+ my @w = @_;
+ my $prog = $Global::progname || "parsort";
+ $warnings{@w}++ or
+ status_no_nl(map { ($prog, ": Warning: ", $_, "\n"); } @w);
+ }
+}
+
+sub error(@) {
+ my @w = @_;
+ my $prog = $Global::progname || "parsort";
+ status(map { ($prog.": Error: ". $_); } @w);
+}
+
+sub die_bug($) {
+ my $bugid = shift;
+ print STDERR
+ ("$Global::progname: This should not happen. You have found a bug. ",
+ "Please follow\n",
+ "https://www.gnu.org/software/parallel/man.html#REPORTING-BUGS\n",
+ "\n",
+ "Include this in the report:\n",
+ "* The version number: $Global::version\n",
+ "* The bugid: $bugid\n",
+ "* The command line being run\n",
+ "* The files being read (put the files on a webserver if they are big)\n",
+ "\n",
+ "If you get the error on smaller/fewer files, please include those instead.\n");
+ exit(255);
+}
+
+if(@ARGV) {
+ sort_files(@ARGV);
+} elsif(length $opt::files0_from) {
+ $/="\0";
+ open(my $fh,"<",$opt::files0_from) || die;
+ my @files = <$fh>;
+ chomp(@files);
+ sort_files(@files);
+} else {
+ sort_stdin();
+}
+
+# Test
+# -z
+# OK: cat bigfile | parsort
+# OK: parsort -k4n files*.txt
+# OK: parsort files*.txt
+# OK: parsort "file with space"
+
diff --git a/src/pod2graph b/src/pod2graph
new file mode 100755
index 0000000..f3044ef
--- /dev/null
+++ b/src/pod2graph
@@ -0,0 +1,128 @@
+#!/usr/bin/perl
+
+# Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Convert .pod file containing:
+#
+# =item --option
+#
+# See also: --other-option
+#
+# to a graph.pdf with link between --option and --other-option
+
+$pod=join("",<>);
+# Remove stuff before OPTIONS
+$pod=~s/^.*=head1 OPTIONS//s;
+# Remove from EXAMPLES (which is next section) and till end
+$pod=~s/=head1 EXAMPLES.*//s;
+# Remove =over / =back pairs
+$pod=~s/^.*?=over//s;
+$pod=~s/=back\s*$//s;
+$pod=~s/=over.*?=back//sg;
+
+$in_text = 0;
+$in_item = 0;
+$in_see_also = 0;
+
+for(split(/\n\n+/,$pod)) {
+ if(/^See also:\s+(\S.*)/s) {
+ # "See also" paragraph
+ $lex = "seealso";
+ $in_text = 0;
+ $in_item = 0;
+ $in_see_also = 1;
+ } elsif(/^=item\s+(B<[{]=.*?perl expression.*?=[}]>|[IB]<.*?>)(\s|$)/s) {
+ # "=item" paragraph
+ $lex = "item";
+ $in_text = 0;
+ $in_item = 1;
+ $in_see_also = 0;
+ } elsif(/\S/) {
+ # else it is just text
+ $lex = "text";
+ $in_text = 1;
+ $in_item = 0;
+ $in_see_also = 0;
+ }
+
+ if($lex eq "seealso") {
+ # We found "See also": output edge
+ if($lastlex eq "item") {
+ @saveditems = @items;
+ @items = ();
+ }
+ my $to = $1;
+ # Edge from = item/item/item
+ my $from = (join "/",
+ map {
+ s/I<(.*?)>/$1/g;
+ s/B<(.*?)>/$1/g;
+ $_ }
+ @saveditems[0]);
+ my @to;
+ while($to =~ s/(B<[{]=.*?perl expression.*?=[}]>|[BI]<.*?>)(\s|$)//) {
+ my $v = $1;
+ push @to, map {
+ s/I<(.*?)>/$1/g;
+ s/B<(.*?)>/$1/g;
+ $_;
+ } $v;
+ }
+ map {
+ if(not $seen{$from,$_}++
+ and
+ not $seen{$_,$from}++) {
+ push @nodelines, "\"$from\" -- \"$_\"\n"
+ }
+ } @to;
+
+ } elsif($lex eq "text") {
+ if($lastlex eq "item") {
+ @saveditems = @items;
+ @items = ();
+ }
+ } elsif($lex eq "item") {
+ push(@items,$1);
+ }
+ $lastlex=$lex;
+}
+
+
+sub header() {
+ return q[
+ graph test123 {
+ graph [splines=true; overlap=false;];
+ labelloc="t";
+ label="Related map for options for GNU Parallel\nFind the options you use and learn about the options related to it";fontsize=33;
+
+ "{}"[margin=0.3;]
+ "--sshlogin"[margin=0.3;]
+ "--pipe"[margin=0.3;]
+ ":::"[margin=0.3;]
+ "-X"[margin=0.3;]
+ ];
+}
+
+open(GRAPHVIZ,"|-","tee foo.dot |neato -Gepsilon=.000000001 -Tpdf") || die;
+print GRAPHVIZ header(), (sort { rand()*3 -1 } @nodelines), "}";
+close GRAPHVIZ;
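+# Typical use (illustrative; neato is part of Graphviz):
+#   pod2graph src/parallel.pod > option-graph.pdf
+# The intermediate Graphviz source is also kept as foo.dot by the tee
+# in the pipeline above.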
+
diff --git a/src/sem b/src/sem
new file mode 100755
index 0000000..e45fcf0
--- /dev/null
+++ b/src/sem
@@ -0,0 +1,15425 @@
+#!/usr/bin/env perl
+
+# Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free
+# Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2007-2024 Ole Tange, http://ole.tange.dk and Free Software Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# open3 used in Job::start
+use IPC::Open3;
+use POSIX;
+# gensym used in Job::start
+use Symbol qw(gensym);
+# tempfile used in Job::start
+use File::Temp qw(tempfile tempdir);
+# mkpath used in openresultsfile
+use File::Path;
+# GetOptions used in get_options_from_array
+use Getopt::Long;
+# Used to ensure code quality
+use strict;
+use File::Basename;
+
+sub set_input_source_header($$) {
+ my ($command_ref,$input_source_fh_ref) = @_;
+ if(defined $opt::header and not $opt::pipe) {
+ # split with colsep or \t
+ # $header force $colsep = \t if undef?
+ my $delimiter = defined $opt::colsep ? $opt::colsep : "\t";
+ # regexp for {=
+ my $left = "\Q$Global::parensleft\E";
+ my $l = $Global::parensleft;
+ # regexp for =}
+ my $right = "\Q$Global::parensright\E";
+ my $r = $Global::parensright;
+ if($opt::header ne "0") {
+ my $id = 1;
+ for my $fh (@$input_source_fh_ref) {
+ my $line = <$fh>;
+ chomp($line);
+ $line =~ s/\r$//;
+ ::debug("init", "Delimiter: '$delimiter'");
+ for my $s (split /$delimiter/o, $line) {
+ ::debug("init", "Colname: '$s'");
+ # Replace {colname} with {2}
+ for(@$command_ref, @Global::ret_files,
+ @Global::transfer_files, $opt::tagstring,
+ $opt::workdir, $opt::results, $opt::retries,
+ @Global::template_contents, @Global::template_names,
+ @opt::filter) {
+ # Skip if undefined
+ $_ or next;
+ s:\{$s(|/|//|\.|/\.)\}:\{$id$1\}:g;
+ # {=header1 ... =} => {=1 ... =}
+ s:$left $s (.*?) $right:$l$id$1$r:gx;
+ }
+ $Global::input_source_header{$id} = $s;
+ $id++;
+ }
+ }
+ }
+ # Make it possible to do:
+ # parallel --header 0 echo {file2} {file1} :::: file1 file2
+ my $id = 1;
+ for my $s (@opt::a) {
+ # ::: are put into files and given a filehandle
+ # ignore these and only keep the filenames.
+ fileno $s and next;
+ for(@$command_ref, @Global::ret_files,
+ @Global::transfer_files, $opt::tagstring,
+ $opt::workdir, $opt::results, $opt::retries,
+ @Global::template_contents, @Global::template_names,
+ @opt::filter) {
+ # Skip if undefined
+ $_ or next;
+ s:\{\Q$s\E(|/|//|\.|/\.)\}:\{$id$1\}:g;
+ # {=header1 ... =} => {=1 ... =}
+ s:$left \Q$s\E (.*?) $right:$l$id$1$r:gx;
+ }
+ $Global::input_source_header{$id} = $s;
+ $id++;
+ }
+ } else {
+ my $id = 1;
+ for my $fh (@$input_source_fh_ref) {
+ $Global::input_source_header{$id} = $id;
+ $id++;
+ }
+ }
+}
+
+sub max_jobs_running() {
+ # Compute $Global::max_jobs_running as the max number of jobs
+ # running on each sshlogin.
+ # Returns:
+ # $Global::max_jobs_running
+ if(not $Global::max_jobs_running) {
+ for my $sshlogin (values %Global::host) {
+ $sshlogin->max_jobs_running();
+ }
+ }
+ if(not $Global::max_jobs_running) {
+ ::error(255,"Cannot run any jobs.");
+ }
+ return $Global::max_jobs_running;
+}
+
+sub halt() {
+ # Compute exit value,
+ # wait for children to complete
+ # and exit
+ if($opt::halt and $Global::halt_when ne "never") {
+ if(not defined $Global::halt_exitstatus) {
+ if($Global::halt_pct) {
+ $Global::halt_exitstatus =
+ ::ceil($Global::total_failed /
+ ($Global::total_started || 1) * 100);
+ } elsif($Global::halt_count) {
+ $Global::halt_exitstatus =
+ ::min(undef_as_zero($Global::total_failed),101);
+ }
+ }
+ wait_and_exit($Global::halt_exitstatus);
+ } else {
+ if($Global::semaphore) {
+ # --semaphore runs a single job:
+ # Use exit value of that
+ wait_and_exit($Global::halt_exitstatus);
+ } else {
+ # 0 = all jobs succeeded
+ # 1-100 = n jobs failed
+ # 101 = >100 jobs failed
+ wait_and_exit(min(undef_as_zero($Global::exitstatus),101));
+ }
+ }
+}
+
+
+sub __PIPE_MODE__() {}
+
+
+sub pipepart_setup() {
+ # Compute the blocksize
+ # Generate the commands to extract the blocks
+ # Push the commands on queue
+ # Changes:
+ # @Global::cat_prepends
+ # $Global::JobQueue
+ if($opt::tee) {
+ # Prepend each command with
+ # < file
+ my $cat_string = "< ".Q($opt::a[0]);
+ for(1..$Global::JobQueue->total_jobs()) {
+ push @Global::cat_appends, $cat_string;
+ push @Global::cat_prepends, "";
+ }
+ } else {
+ if(not $opt::blocksize) {
+ # --blocksize with 10 jobs per jobslot
+ $opt::blocksize = -10;
+ }
+ if($opt::roundrobin) {
+ # --blocksize with 1 job per jobslot
+ $opt::blocksize = -1;
+ }
+ if($opt::blocksize < 0) {
+ my $size = 0;
+ # Compute size of -a
+ for(@opt::a) {
+ if(-f $_) {
+ $size += -s $_;
+ } elsif(-b $_) {
+ $size += size_of_block_dev($_);
+ } elsif(-e $_) {
+ ::error(255,"$_ is neither a file nor a block device");
+ } else {
+ ::error(255,"File not found: $_");
+ }
+ }
+ # Run in total $job_slots*(- $blocksize) jobs
+ # Set --blocksize = size / no of proc / (- $blocksize)
+ $Global::dummy_jobs = 1;
+ $Global::blocksize = 1 +
+ int($size / max_jobs_running() /
+ -multiply_binary_prefix($opt::blocksize));
+ }
+ @Global::cat_prepends = (map { pipe_part_files($_) }
+ # ::: are put into files and given a filehandle
+ # ignore these and only keep the filenames.
+ grep { ! fileno $_ } @opt::a);
+ # Unget the empty arg as many times as there are parts
+ $Global::JobQueue->{'commandlinequeue'}{'arg_queue'}->unget(
+ map { [Arg->new("\0noarg")] } @Global::cat_prepends
+ );
+ }
+}
+
+sub pipe_tee_setup() {
+ # Create temporary fifos
+ # Run 'tee fifo1 fifo2 fifo3 ... fifoN' in the background
+ # This will spread the input to fifos
+ # Generate commands that reads from fifo1..N:
+ # cat fifo | user_command
+ # Changes:
+ # @Global::cat_prepends
+ my @fifos;
+ for(1..$Global::JobQueue->total_jobs()) {
+ push @fifos, tmpfifo();
+ }
+ # cat foo | tee fifo1 fifo2 fifo3 fifo4 fifo5 > /dev/null
+ if(not fork()){
+ # Test if tee supports --output-error=warn-nopipe
+ `echo | tee --output-error=warn-nopipe /dev/null >/dev/null 2>/dev/null`;
+ my $opt = $? ? "" : "--output-error=warn-nopipe";
+ ::debug("init","tee $opt");
+ if($opt::dryrun) {
+ # This is not exactly what is run, but it gives the basic idea
+ print "mkfifo @fifos\n";
+ print "tee $opt @fifos >/dev/null &\n";
+ } else {
+ # Let tee inherit our stdin
+ # and redirect stdout to null
+ open STDOUT, ">","/dev/null";
+ if($opt) {
+ exec "tee", $opt, @fifos;
+ } else {
+ exec "tee", @fifos;
+ }
+ }
+ exit(0);
+ }
+ # For each fifo
+ # (rm fifo1; grep 1) < fifo1
+ # (rm fifo2; grep 2) < fifo2
+ # (rm fifo3; grep 3) < fifo3
+ # Remove the tmpfifo as soon as it is open
+ @Global::cat_prepends = map { "(rm $_;" } shell_quote(@fifos);
+ @Global::cat_appends = map { ") < $_" } shell_quote(@fifos);
+}
+
+
+sub parcat_script() {
+ # TODO if script fails: Use parallel -j0 --plain --lb cat ::: fifos
+ my $script = q'{
+ use POSIX qw(:errno_h);
+ use IO::Select;
+ use strict;
+ use threads;
+ use Thread::Queue;
+ use Fcntl qw(:DEFAULT :flock);
+
+ my $opened :shared;
+ my $q = Thread::Queue->new();
+ my $okq = Thread::Queue->new();
+ my @producers;
+
+ if(not @ARGV) {
+ if(-t *STDIN) {
+ print "Usage:\n";
+ print " parcat file(s)\n";
+ print " cat argfile | parcat\n";
+ } else {
+ # Read arguments from stdin
+ chomp(@ARGV = <STDIN>);
+ }
+ }
+ my $files_to_open = 0;
+ # Default: fd = stdout
+ my $fd = 1;
+ for (@ARGV) {
+ # --rm = remove file when opened
+ /^--rm$/ and do { $opt::rm = 1; next; };
+ # -1 = output to fd 1, -2 = output to fd 2
+ /^-(\d+)$/ and do { $fd = $1; next; };
+ push @producers, threads->create("producer", $_, $fd);
+ $files_to_open++;
+ }
+
+ sub producer {
+ # Open a file/fifo, set non-blocking, enqueue fileno of the file handle
+ my $file = shift;
+ my $output_fd = shift;
+ open(my $fh, "<", $file) || do {
+ print STDERR "parcat: Cannot open $file: $!\n";
+ exit(1);
+ };
+ # Remove file when it has been opened
+ if($opt::rm) {
+ unlink $file;
+ }
+ set_fh_non_blocking($fh);
+ $opened++;
+ # Pass the fileno to parent
+ $q->enqueue(fileno($fh),$output_fd);
+ # Get an OK that the $fh is opened and we can release the $fh
+ while(1) {
+ my $ok = $okq->dequeue();
+ if($ok == fileno($fh)) { last; }
+ # Not ours - very unlikely to happen
+ $okq->enqueue($ok);
+ }
+ return;
+ }
+
+ my $s = IO::Select->new();
+ my %buffer;
+
+ sub add_file {
+ my $infd = shift;
+ my $outfd = shift;
+ open(my $infh, "<&=", $infd) || die;
+ open(my $outfh, ">&=", $outfd) || die;
+ $s->add($infh);
+ # Tell the producer that the fd is now open here and its handle can be released
+ $okq->enqueue($infd);
+ # Initialize the buffer
+ @{$buffer{$infh}{$outfd}} = ();
+ $Global::fh{$outfd} = $outfh;
+ }
+
+ sub add_files {
+ # Non-blocking dequeue
+ my ($infd,$outfd);
+ do {
+ ($infd,$outfd) = $q->dequeue_nb(2);
+ if(defined($outfd)) {
+ add_file($infd,$outfd);
+ }
+ } while(defined($outfd));
+ }
+
+ sub add_files_block {
+ # Blocking dequeue
+ my ($infd,$outfd) = $q->dequeue(2);
+ add_file($infd,$outfd);
+ }
+
+
+ my $fd;
+ my (@ready,$infh,$rv,$buf);
+ do {
+ # Wait until at least one file is opened
+ add_files_block();
+ while($q->pending or keys %buffer) {
+ add_files();
+ while(keys %buffer) {
+ @ready = $s->can_read(0.01);
+ if(not @ready) {
+ add_files();
+ }
+ for $infh (@ready) {
+ # There is only one key, namely the output file descriptor
+ for my $outfd (keys %{$buffer{$infh}}) {
+ # TODO test if 60800 is optimal (2^17 is used elsewhere)
+ $rv = sysread($infh, $buf, 60800);
+ if (!$rv) {
+ if($! == EAGAIN) {
+ # Would block: Nothing read
+ next;
+ } else {
+ # Nothing read, but would not block:
+ # This file is done
+ $s->remove($infh);
+ for(@{$buffer{$infh}{$outfd}}) {
+ syswrite($Global::fh{$outfd},$_);
+ }
+ delete $buffer{$infh};
+ # Closing the $infh causes it to block
+ # close $infh;
+ add_files();
+ next;
+ }
+ }
+ # Something read.
+ # Find \n or \r for full line
+ my $i = (rindex($buf,"\n")+1);
+ if($i) {
+ # Print full line
+ for(@{$buffer{$infh}{$outfd}}, substr($buf,0,$i)) {
+ syswrite($Global::fh{$outfd},$_);
+ }
+ # @buffer = remaining half line
+ $buffer{$infh}{$outfd} = [substr($buf,$i,$rv-$i)];
+ } else {
+ # Something read, but not a full line
+ push @{$buffer{$infh}{$outfd}}, $buf;
+ }
+ redo;
+ }
+ }
+ }
+ }
+ } while($opened < $files_to_open);
+
+ for (@producers) {
+ $_->join();
+ }
+
+ sub set_fh_non_blocking {
+ # Set filehandle as non-blocking
+ # Inputs:
+ # $fh = filehandle to be made non-blocking
+ # Returns:
+ # N/A
+ my $fh = shift;
+ my $flags;
+ fcntl($fh, &F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
+ $flags |= &O_NONBLOCK; # Add non-blocking to the flags
+ fcntl($fh, &F_SETFL, $flags) || die $!; # Set the flags on the filehandle
+ }
+ }';
+ return ::spacefree(3, $script);
+}
+
+sub sharder_script() {
+ my $script = q{
+ use B;
+ # Column separator
+ my $sep = shift;
+ # Which column to shard on (count from 1)
+ my $col = shift;
+ # Which column to shard on (count from 0)
+ my $col0 = $col - 1;
+ # Perl expression
+ my $perlexpr = shift;
+ my $bins = @ARGV;
+ # Open fifos for writing, fh{0..$bins-1}
+ my $t = 0;
+ my %fh;
+ for(@ARGV) {
+ open $fh{$t++}, ">", $_;
+ # open blocks until it is opened by reader
+ # so unlink only happens when it is ready
+ unlink $_;
+ }
+ if($perlexpr) {
+ my $subref = eval("sub { no strict; no warnings; $perlexpr }");
+ while(<STDIN>) {
+ # Split into $col columns (no need to split into more)
+ @F = split $sep, $_, $col+1;
+ {
+ local $_ = $F[$col0];
+ &$subref();
+ $fh = $fh{ hex(B::hash($_))%$bins };
+ }
+ print $fh $_;
+ }
+ } else {
+ while(<STDIN>) {
+ # Split into $col columns (no need to split into more)
+ @F = split $sep, $_, $col+1;
+ $fh = $fh{ hex(B::hash($F[$col0]))%$bins };
+ print $fh $_;
+ }
+ }
+ # Close all open fifos
+ close $_ for values %fh;
+ };
+ return ::spacefree(1, $script);
+}
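+# Data flow sketch (illustrative; 3 bins, sharding on column 2, TAB
+# separated, run via perl -e $script):
+#   the line "x\tkey1\t..." goes to fifo number hex(B::hash("key1")) % 3
+# Lines with an equal column 2 therefore always land in the same bin;
+# all sharder processes must share PERL_HASH_SEED so B::hash() agrees
+# (set in pipe_shard_setup below).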
+
+sub binner_script() {
+ my $script = q{
+ use B;
+ # Column separator
+ my $sep = shift;
+ # Which column to bin on (count from 1)
+ my $col = shift;
+ # Which column to bin on (count from 0)
+ my $col0 = $col - 1;
+ # Perl expression
+ my $perlexpr = shift;
+ my $bins = @ARGV;
+ # Open fifos for writing, fh{0..$bins-1}
+ my $t = 0;
+ my %fh;
+ # Let the last output fifo be the 0'th
+ open $fh{$t++}, ">", pop @ARGV;
+ for(@ARGV) {
+ open $fh{$t++}, ">", $_;
+ # open blocks until it is opened by reader
+ # so unlink only happens when it is ready
+ unlink $_;
+ }
+ if($perlexpr) {
+ my $subref = eval("sub { no strict; no warnings; $perlexpr }");
+ while(<STDIN>) {
+ # Split into $col columns (no need to split into more)
+ @F = split $sep, $_, $col+1;
+ {
+ local $_ = $F[$col0];
+ &$subref();
+ $fh = $fh{ $_%$bins };
+ }
+ print $fh $_;
+ }
+ } else {
+ while(<STDIN>) {
+ # Split into $col columns (no need to split into more)
+ @F = split $sep, $_, $col+1;
+ $fh = $fh{ $F[$col0]%$bins };
+ print $fh $_;
+ }
+ }
+ # Close all open fifos
+ close $_ for values %fh;
+ };
+ return ::spacefree(1, $script);
+}
+
+sub pipe_shard_setup() {
+ # Create temporary fifos
+ # Run 'shard.pl sep col fifo1 fifo2 fifo3 ... fifoN' in the background
+ # This will spread the input to fifos
+ # Generate commands that reads from fifo1..N:
+ # cat fifo | user_command
+ # Changes:
+ # @Global::cat_prepends
+ my @shardfifos;
+ my @parcatfifos;
+ # TODO $opt::jobs should be evaluated (100%)
+ # TODO $opt::jobs should be number of total_jobs if there are arguments
+ max_jobs_running();
+ my $njobs = $Global::max_jobs_running;
+ for my $m (0..$njobs-1) {
+ for my $n (0..$njobs-1) {
+ # sharding to A B C D
+ # parcatting all As together
+ $parcatfifos[$n][$m] = $shardfifos[$m][$n] = tmpfifo();
+ }
+ }
+ my $shardbin = ($opt::shard || $opt::bin);
+ my $script;
+ if($opt::bin) {
+ $script = binner_script();
+ } else {
+ $script = sharder_script();
+ }
+
+ # cat foo | sharder sep col fifo1 fifo2 fifo3 ... fifoN
+
+ if($shardbin =~ /^[a-z_][a-z_0-9]*(\s|$)/i) {
+ # Group by column name
+ # (Yes, this will also wrongly match a perlexpr like: chop)
+ my($read,$char,@line);
+ # A full line, but nothing more (the rest must be read by the child)
+ # $Global::header used to prepend block to each job
+ do {
+ $read = sysread(STDIN,$char,1);
+ push @line, $char;
+ } while($read and $char ne "\n");
+ $Global::header = join "", @line;
+ }
+ my ($col, $perlexpr, $subref) =
+ column_perlexpr($shardbin, $Global::header, $opt::colsep);
+ if(not fork()) {
+ # Let the sharder inherit our stdin
+ # and redirect stdout to null
+ open STDOUT, ">","/dev/null";
+ # The PERL_HASH_SEED must be the same for all sharders
+ # so B::hash will return the same value for any given input
+ $ENV{'PERL_HASH_SEED'} = $$;
+ exec qw(parallel -0 --block 100k -q --pipe -j), $njobs,
+ qw(--roundrobin -u perl -e), $script, ($opt::colsep || ","),
+ $col, $perlexpr, '{}', (map { (':::+', @{$_}) } @parcatfifos);
+ }
+ # For each fifo
+ # (rm fifo1; grep 1) < fifo1
+ # (rm fifo2; grep 2) < fifo2
+ # (rm fifo3; grep 3) < fifo3
+ my $parcat = Q(parcat_script());
+ if(not $parcat) {
+ ::error(255,"'parcat' must be in path.");
+ }
+ @Global::cat_prepends =
+ map { "perl -e $parcat ".
+ join(" ",shell_quote(@$_))." | "} @parcatfifos;
+}
+
+sub pipe_part_files(@) {
+ # Given the bigfile:
+ # - find header and split positions
+ # - make commands that 'cat's the partial file
+ # Input:
+ # $file = the file to read
+ # Returns:
+ # @commands that will cat_partial each part
+ my ($file) = @_;
+ my $buf = "";
+ if(not -f $file and not -b $file) {
+ ::error(255,"--pipepart only works on seekable files, not streams/pipes.",
+ "$file is not a seekable file.");
+ }
+
+ my $fh = open_or_exit("<",$file);
+ my $firstlinelen = 0;
+ if($opt::skip_first_line) {
+ my $newline;
+ # Read a full line one byte at a time
+ while($firstlinelen += sysread($fh,$newline,1,0)) {
+ $newline eq "\n" and last;
+ }
+ }
+ my $header = find_header(\$buf,$fh);
+ # find positions
+ my @pos = find_split_positions($file,int($Global::blocksize),
+ $header,$firstlinelen);
+ # Make @cat_prepends
+ my @cat_prepends = ();
+ for(my $i=0; $i<$#pos; $i++) {
+ push(@cat_prepends,
+ cat_partial($file, $firstlinelen, $firstlinelen+length($header),
+ $pos[$i], $pos[$i+1]));
+ }
+ return @cat_prepends;
+}
+
+sub find_header($$) {
+ # Compute the header based on $opt::header
+ # Input:
+ # $buf_ref = reference to read-in buffer
+ # $fh = filehandle to read from
+ # Uses:
+ # $opt::header
+ # $Global::blocksize
+ # $Global::header
+ # Returns:
+ # $header string
+ my ($buf_ref, $fh) = @_;
+ my $header = "";
+ # $Global::header may be set in group_by_loop()
+ if($Global::header) { return $Global::header }
+ if($opt::header) {
+ if($opt::header eq ":") { $opt::header = "(.*\n)"; }
+ # Number = number of lines
+ $opt::header =~ s/^(\d+)$/"(.*\n)"x$1/e;
+ while(sysread($fh,$$buf_ref,int($Global::blocksize),length $$buf_ref)) {
+ if($$buf_ref =~ s/^($opt::header)//) {
+ $header = $1;
+ last;
+ }
+ }
+ }
+ return $header;
+}
+
+sub find_split_positions($$$) {
+ # Find positions in bigfile where recend is followed by recstart
+ # Input:
+ # $file = the file to read
+ # $block = (minimal) --block-size of each chunk
+ # $header = header to be skipped
+ # Uses:
+ # $opt::recstart
+ # $opt::recend
+ # Returns:
+ # @positions of block start/end
+ my($file, $block, $header, $firstlinelen) = @_;
+ my $skiplen = $firstlinelen + length $header;
+ my $size = -s $file;
+ if(-b $file) {
+ # $file is a blockdevice
+ $size = size_of_block_dev($file);
+ }
+ $block = int $block;
+ if($opt::groupby) {
+ return split_positions_for_group_by($file,$size,$block,
+ $header,$firstlinelen);
+ }
+ # The optimal dd blocksize for mint, redhat, solaris, openbsd = 2^17..2^20
+ # The optimal dd blocksize for freebsd = 2^15..2^17
+ # The optimal dd blocksize for ubuntu (AMD6376) = 2^16
+ my $dd_block_size = 131072; # 2^17
+ my @pos;
+ my ($recstart,$recend) = recstartrecend();
+ my $recendrecstart = $recend.$recstart;
+ my $fh = ::open_or_exit("<",$file);
+ push(@pos,$skiplen);
+ for(my $pos = $block+$skiplen; $pos < $size; $pos += $block) {
+ my $buf;
+ if($recendrecstart eq "") {
+ # records ends anywhere
+ push(@pos,$pos);
+ } else {
+ # Seek to the block start
+ if(not sysseek($fh, $pos, 0)) {
+ ::error("Cannot seek to $pos in $file");
+ exit(255);
+ }
+ while(sysread($fh,$buf,$dd_block_size,length $buf)) {
+ if($opt::regexp) {
+ # If match /$recend$recstart/ => Record position
+ if($buf =~ m:^(.*$recend)$recstart:os) {
+ # Start looking for next record _after_ this match
+ $pos += length($1);
+ push(@pos,$pos);
+ last;
+ }
+ } else {
+ # If match $recend$recstart => Record position
+ # TODO optimize to only look at the appended
+ # $dd_block_size + len $recendrecstart
+ # TODO increase $dd_block_size to optimize for longer records
+ my $i = index64(\$buf,$recendrecstart);
+ if($i != -1) {
+ # Start looking for next record _after_ this match
+ $pos += $i + length($recend);
+ push(@pos,$pos);
+ last;
+ }
+ }
+ }
+ }
+ }
+ if($pos[$#pos] != $size) {
+ # Last splitpoint was not at end of the file: add $size as the last
+ push @pos, $size;
+ }
+ close $fh;
+ return @pos;
+}
+
+sub split_positions_for_group_by($$$$) {
+ my($fh);
+ my %value;
+ sub value_at($) {
+ my $pos = shift;
+ if(not defined $value{$pos}) {
+ if($pos != 0) {
+ seek($fh, $pos-1, 0) || die;
+ # Read half line
+ <$fh>;
+ }
+ # Read full line
+ my $linepos = tell($fh);
+ if(not defined $value{$linepos}) {
+ $_ = <$fh>;
+ if(defined $_) {
+ # Not end of file
+ my @F;
+ if(defined $group_by::col) {
+ $opt::colsep ||= "\t";
+ @F = split /$opt::colsep/, $_;
+ $_ = $F[$group_by::col];
+ }
+ eval $group_by::perlexpr;
+ }
+ $value{$linepos} = [$_,$linepos];
+ }
+ $value{$pos} = $value{$linepos};
+ }
+ return (@{$value{$pos}});
+ }
+
+ sub binary_search_end($$$) {
+ my ($s,$spos,$epos) = @_;
+ # value_at($spos) == $s
+ # value_at($epos) != $s
+ my $posdif = $epos - $spos;
+ my ($v,$vpos);
+ while($posdif) {
+ ($v,$vpos) = value_at($spos+$posdif);
+ if($v eq $s) {
+ $spos = $vpos;
+ $posdif = $epos - $spos;
+ } else {
+ $epos = $vpos;
+ }
+ $posdif = int($posdif/2);
+ }
+ return($v,$vpos);
+ }
+
+ sub binary_search_start($$$) {
+ my ($s,$spos,$epos) = @_;
+ # value_at($spos) != $s
+ # value_at($epos) == $s
+ my $posdif = $epos - $spos;
+ my ($v,$vpos);
+ while($posdif) {
+ ($v,$vpos) = value_at($spos+$posdif);
+ if($v eq $s) {
+ $epos = $vpos;
+ } else {
+ $spos = $vpos;
+ $posdif = $epos - $spos;
+ }
+ $posdif = int($posdif/2);
+ }
+ return($v,$vpos);
+ }
+
+ my ($file,$size,$block,$header,$firstlinelen) = @_;
+ my @pos;
+ $fh = open_or_exit("<",$file);
+ # Set $Global::group_by_column $Global::group_by_perlexpr
+ group_by_loop($fh,$opt::recsep);
+ if($opt::max_args) {
+ # Split after n values
+ my ($a,$apos);
+ # $xpos = linestart, $x = value at $xpos
+ $apos = $firstlinelen + length $header;
+ for(($a,$apos) = value_at($apos); $apos < $size;) {
+ push @pos, $apos;
+ ($a,$apos) = binary_search_end($a,$apos,$size);
+ if(eof($fh)) {
+ push @pos, $size; last;
+ }
+ }
+ # @pos = start of every value
+ # Merge n values
+ # -nX = keep every X'th position
+ my $i = 0;
+ @pos = grep { not ($i++ % $opt::max_args) } @pos;
+ } else {
+ # Split after any value group
+ # Preferably < $blocksize
+ my ($a,$b,$c,$apos,$bpos,$cpos);
+ # $xpos = linestart, $x = value at $xpos, $apos < $bpos < $cpos
+ $apos = $firstlinelen + length $header;
+ for(($a,$apos) = value_at($apos); $apos < $size;) {
+ push @pos, $apos;
+ $bpos = $apos + $block;
+ ($b,$bpos) = value_at($bpos);
+ if(eof($fh)) {
+ # EOF is less than 1 block away
+ push @pos, $size; last;
+ }
+ $cpos = $bpos + $block;
+ ($c,$cpos) = value_at($cpos);
+ if($a eq $b) {
+ while($b eq $c) {
+ # Move bpos, cpos a block forward until $a == $b != $c
+ $bpos = $cpos;
+ $cpos += $block;
+ ($c,$cpos) = value_at($cpos);
+ if($cpos >= $size) {
+ $cpos = $size;
+ last;
+ }
+ }
+ # $a == $b != $c
+ # Binary search for $b ending between ($bpos,$cpos)
+ ($b,$bpos) = binary_search_end($b,$bpos,$cpos);
+ } else {
+ if($b eq $c) {
+ # $a != $b == $c
+ # Binary search for $b starting between ($apos,$bpos)
+ ($b,$bpos) = binary_search_start($b,$apos,$bpos);
+ } else {
+ # $a != $b != $c
+ # Binary search for $b ending between ($bpos,$cpos)
+ ($b,$bpos) = binary_search_end($b,$bpos,$cpos);
+ }
+ }
+ ($a,$apos) = ($b,$bpos);
+ }
+ }
+ if($pos[$#pos] != $size) {
+ # Last splitpoint was not at end of the file: add it
+ push @pos, $size;
+ }
+ return @pos;
+}
+
+sub cat_partial($@) {
+ # Efficient command to copy from byte X to byte Y
+ # Input:
+ # $file = the file to read
+ # ($start, $end, [$start2, $end2, ...]) = start byte, end byte
+ # Returns:
+ # Efficient command to copy $start..$end, $start2..$end2, ... to stdout
+ my($file, @start_end) = @_;
+ my($start, $i);
+ # Convert (start,end) to (start,len)
+ my @start_len = map {
+ if(++$i % 2) { $start = $_; } else { $_-$start }
+ } @start_end;
+ # The optimal block size differs
+ # It has been measured on:
+ # AMD 6376: n*4k-1; small n
+ # AMD Neo N36L: 44k-200k
+ # Intel i7-3632QM: 55k-
+ # ARM Cortex A53: 4k-28k
+ # Intel i5-2410M: 36k-46k
+ #
+ # I choose 2^15-1 = 32767
+ # q{
+ # expseq() {
+ # perl -E '
+ # $last = pop @ARGV;
+ # $first = shift || 1;
+ # $inc = shift || 1.03;
+ # for($i=$first; $i<=$last;$i*=$inc) { say int $i }
+ # ' "$@"
+ # }
+ #
+ # seq 111111111 > big;
+ # f() { ppar --test $1 -a big --pipepart --block -1 'md5sum > /dev/null'; }
+ # export -f f;
+ # expseq 1000 1.001 300000 | shuf | parallel -j1 --jl jl-md5sum f;
+ # };
+ my $script = spacefree
+ (0,
+ q{
+ while(@ARGV) {
+ sysseek(STDIN,shift,0) || die;
+ $left = shift;
+ while($read =
+ sysread(STDIN,$buf, $left > 32767 ? 32767 : $left)){
+ $left -= $read;
+ syswrite(STDOUT,$buf);
+ }
+ }
+ });
+ return "<". Q($file) .
+ " perl -e '$script' @start_len |";
+}
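+# Worked example (illustrative):
+#   cat_partial("big", 0, 100, 250, 400)
+# returns roughly:
+#   <big perl -e '...' 0 100 250 150 |
+# i.e. the (start,end) byte pairs become (start,length) pairs for the
+# sysseek/sysread loop above.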
+
+sub column_perlexpr($$$) {
+ # Compute the column number (if any), perlexpression from combined
+ # string (such as --shard key, --groupby key, {=n perlexpr=}
+ # Input:
+ # $column_perlexpr = string with column and perl expression
+ # $header = header from input file (if column is column name)
+ # $colsep = column separator regexp
+ # Returns:
+ # $col = column number
+ # $perlexpr = perl expression
+ # $subref = compiled perl expression as sub reference
+ my ($column_perlexpr, $header, $colsep) = @_;
+ my ($col, $perlexpr, $subref);
+ if($column_perlexpr =~ /^[-a-z0-9_]+(\s|$)/i) {
+ # Column name/number (possibly prefix)
+ if($column_perlexpr =~ s/^(-?\d+)(\s|$)//) {
+ # Column number (possibly prefix)
+ $col = $1;
+ } elsif($column_perlexpr =~ s/^([a-z0-9_]+)(\s+|$)//i) {
+ # Column name (possibly prefix)
+ my $colname = $1;
+ # Split on --colsep pattern
+ my @headers = split /$colsep/, $header;
+ my %headers;
+ @headers{@headers} = (1..($#headers+1));
+ $col = $headers{$colname};
+ if(not defined $col) {
+ ::error(255,"Column '$colname' $colsep not found in header",keys %headers);
+ }
+ }
+ }
+ # What is left of $column_perlexpr is $perlexpr (possibly empty)
+ $perlexpr = $column_perlexpr;
+ $subref = eval("sub { no strict; no warnings; $perlexpr }");
+ return($col, $perlexpr, $subref);
+}
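+# Worked examples (illustrative):
+#   column_perlexpr("3 uc", $hdr, "\t")         => (3, "uc", ...)
+#   column_perlexpr("size", "name\tsize", "\t") => (2, "", ...)
+#   column_perlexpr("s/a/b/", $hdr, "\t")       => (undef, "s/a/b/", ...)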
+
+sub group_by_loop($$) {
+ # Generate perl code for group-by loop
+ # Insert a $recsep when the column value changes
+ # The column value can be computed with $perlexpr
+ my($fh,$recsep) = @_;
+ my $groupby = $opt::groupby;
+ if($groupby =~ /^[a-z_][a-z_0-9]*(\s|$)/i) {
+ # Group by column name
+ # (Yes, this will also wrongly match a perlexpr like: chop)
+ my($read,$char,@line);
+ # Read a full line, but nothing more
+ # (the rest must be read by the child)
+ # $Global::header used to prepend block to each job
+ do {
+ $read = sysread($fh,$char,1);
+ push @line, $char;
+ } while($read and $char ne "\n");
+ $Global::header = join "", @line;
+ }
+ $opt::colsep ||= "\t";
+ ($group_by::col, $group_by::perlexpr, $group_by::subref) =
+ column_perlexpr($groupby, $Global::header, $opt::colsep);
+ # Numbered 0..n-1 due to being used by $F[n]
+ if($group_by::col) { $group_by::col--; }
+
+ my $loop = ::spacefree(0,q{
+ BEGIN{ $last = "RECSEP"; }
+ {
+ local $_=COLVALUE;
+ PERLEXPR;
+ if(($last) ne $_) {
+ print "RECSEP";
+ $last = $_;
+ }
+ }
+ });
+ if(defined $group_by::col) {
+ $loop =~ s/COLVALUE/\$F[$group_by::col]/g;
+ } else {
+ $loop =~ s/COLVALUE/\$_/g;
+ }
+ $loop =~ s/PERLEXPR/$group_by::perlexpr/g;
+ $loop =~ s/RECSEP/$recsep/g;
+ return $loop;
+}
+
+sub pipe_group_by_setup() {
+ # Record separator with 119 bit random value
+ $opt::recend = '';
+ $opt::recstart =
+ join "", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..20);
+ $opt::remove_rec_sep = 1;
+ my @filter;
+ push @filter, "perl";
+ if($opt::groupby =~ /^[a-z0-9_]+(\s|$)/i) {
+ # This is column number/name
+ # Use -a (auto-split)
+ push @filter, "-a";
+ $opt::colsep ||= "\t";
+ my $sep = $opt::colsep;
+ $sep =~ s/\t/\\t/g;
+ $sep =~ s/\"/\\"/g;
+ # man perlrun: -Fpattern [...] You can't use literal whitespace
+ $sep =~ s/ /\\040/g;
+ push @filter, "-F$sep";
+ }
+ push @filter, "-pe";
+ push @filter, group_by_loop(*STDIN,$opt::recstart);
+ ::debug("init", "@filter\n");
+ open(STDIN, '-|', @filter) || die ("Cannot start @filter");
+ if(which("mbuffer")) {
+ # You get a speed up of 30% by going through mbuffer
+ open(STDIN, '-|', "mbuffer", "-q","-m6M","-b5") ||
+ die ("Cannot start mbuffer");
+ }
+}
+
+sub spreadstdin() {
+ # read a record
+ # Spawn a job and print the record to it.
+ # Uses:
+ # $Global::blocksize
+ # STDIN
+ # $opt::r
+ # $Global::max_lines
+ # $Global::max_number_of_args
+ # $opt::regexp
+ # $Global::start_no_new_jobs
+ # $opt::roundrobin
+ # %Global::running
+ # Returns: N/A
+
+ my $buf = "";
+ my ($recstart,$recend) = recstartrecend();
+ my $recendrecstart = $recend.$recstart;
+ my $chunk_number = 1;
+ my $one_time_through;
+ my $two_gb = 2**31-1;
+ my $blocksize = int($Global::blocksize);
+ my $in = *STDIN;
+ my $timeout = $Global::blocktimeout;
+
+ if($opt::skip_first_line) {
+ my $newline;
+ # Read a full line one byte at a time
+ while(sysread($in,$newline,1,0)) {
+ $newline eq "\n" and last;
+ }
+ }
+ my $header = find_header(\$buf,$in);
+ my $anything_written;
+ my $eof;
+ my $garbage_read;
+
+ sub read_block() {
+ # Read a --blocksize from STDIN
+ # possibly interrupted by --blocktimeout
+ # Add up to the next full block
+ my $readsize = $blocksize - (length $buf) % $blocksize;
+ my ($nread,$alarm);
+ eval {
+ local $SIG{ALRM} = sub { die "alarm\n" }; # NB: \n required
+ # --blocktimeout (or 0 if not set)
+ alarm $timeout;
+ if($] >= 5.026) {
+ do {
+ $nread = sysread $in, $buf, $readsize, length $buf;
+ $readsize -= $nread;
+ } while($readsize and $nread);
+ } else {
+ # Less efficient reading, but 32-bit sysread compatible
+ do {
+ $nread = sysread($in,substr($buf,length $buf,0),$readsize,0);
+ $readsize -= $nread;
+ } while($readsize and $nread);
+ }
+ alarm 0;
+ };
+ if ($@) {
+ die unless $@ eq "alarm\n"; # propagate unexpected errors
+ $alarm = 1;
+ } else {
+ $alarm = 0;
+ }
+ $eof = not ($nread or $alarm);
+ }
+
+ sub pass_n_line_records() {
+ # Pass records of N lines
+ my $n_lines = $buf =~ tr/\n/\n/;
+ my $last_newline_pos = rindex64(\$buf,"\n");
+ # Go backwards until there are full n-line records
+ while($n_lines % $Global::max_lines) {
+ $n_lines--;
+ $last_newline_pos = rindex64(\$buf,"\n",$last_newline_pos-1);
+ }
+ # Chop at $last_newline_pos as that is where n-line record ends
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$last_newline_pos+1);
+ shorten(\$buf,$last_newline_pos+1);
+ }
+
+ sub pass_n_regexps() {
+ # Pass records of N regexps
+ # -N => (start..*?end){n}
+ # -L -N => (start..*?end){n*l}
+ if(not $garbage_read) {
+ $garbage_read = 1;
+ if($buf !~ /^$recstart/o) {
+ # Buf does not start with $recstart => There is garbage.
+ # Make a single record of the garbage
+ if($buf =~
+ /(?s:^)(
+ (?:(?:(?!$recend$recstart)(?s:.))*?$recend)
+ )
+ # Followed by recstart
+ (?=$recstart)/mox and length $1 > 0) {
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,length $1);
+ shorten(\$buf,length $1);
+ }
+ }
+ }
+
+ my $n_records =
+ $Global::max_number_of_args * ($Global::max_lines || 1);
+ # (?!negative lookahead) is needed to avoid backtracking
+ # See: https://unix.stackexchange.com/questions/439356/
+ # (?s:.) = (.|[\n]) but faster
+ while($buf =~
+ /(?s:^)(
+ # n more times recstart.*recend
+ (?:$recstart(?:(?!$recend$recstart)(?s:.))*?$recend){$n_records}
+ )
+ # Followed by recstart
+ (?=$recstart)/mox and length $1 > 0) {
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,length $1);
+ shorten(\$buf,length $1);
+ }
+ }
+
+ sub pass_regexp() {
+ # Find the last recend-recstart in $buf
+ $eof and return;
+ # (?s:.) = (.|[\n]) but faster
+ if($buf =~ /^((?s:.)*$recend)$recstart(?s:.)*?$/mox) {
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,length $1);
+ shorten(\$buf,length $1);
+ }
+ }
+
+ sub pass_csv_record() {
+ # Pass CSV record
+ # We define a CSV record as an even number of " + end of line
+ # This works if you use " as quoting character
+ my $last_newline_pos = length $buf;
+ # Go backwards from the last \n and search for a position
+ # where there is an even number of "
+ do {
+ # find last EOL
+ $last_newline_pos = rindex64(\$buf,"\n",$last_newline_pos-1);
+ # While uneven "
+ } while((substr($buf,0,$last_newline_pos) =~ y/"/"/)%2
+ and $last_newline_pos >= 0);
+ # Chop at $last_newline_pos as that is where CSV record ends
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$last_newline_pos+1);
+ shorten(\$buf,$last_newline_pos+1);
+ }
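+ # E.g. (illustrative) with the buffer:
+ #   a,"multi
+ #   line",b
+ #   c,d
+ # the first \n sits after an odd number of " (inside a quoted field)
+ # and is skipped; the chunk is cut after line",b\n where the " count
+ # seen so far is even.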
+
+ sub pass_n() {
+ # Pass n records of --recend/--recstart
+ # -N => (start..*?end){n}
+ my $i = 0;
+ my $read_n_lines =
+ $Global::max_number_of_args * ($Global::max_lines || 1);
+ while(($i = nindex(\$buf,$recendrecstart,$read_n_lines)) != -1
+ and
+ length $buf) {
+ $i += length $recend; # find the actual splitting location
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$i);
+ shorten(\$buf,$i);
+ }
+ }
+
+ sub pass() {
+ # Pass records of --recend/--recstart
+ # Split record at fixed string
+ # Find the last recend+recstart in $buf
+ $eof and return;
+ my $i = rindex64(\$buf,$recendrecstart);
+ if($i != -1) {
+ $i += length $recend; # find the actual splitting location
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$i);
+ shorten(\$buf,$i);
+ }
+ }
+
+ sub increase_blocksize_maybe() {
+ if(not $anything_written
+ and not $opt::blocktimeout
+ and not $Global::no_autoexpand_block) {
+ # Nothing was written - maybe the block size < record size?
+ # Increase blocksize exponentially up to 2GB-1 (2GB causes problems)
+ if($blocksize < $two_gb) {
+ my $old_blocksize = $blocksize;
+ $blocksize = ::min(ceil($blocksize * 1.3 + 1), $two_gb);
+ ::warning("A record was longer than $old_blocksize. " .
+ "Increasing to --blocksize $blocksize.");
+ }
+ }
+ }
+
+ while(1) {
+ $anything_written = 0;
+ read_block();
+ if($opt::r) {
+ # Remove empty lines
+ $buf =~ s/^\s*\n//gm;
+ if(length $buf == 0) {
+ if($eof) {
+ last;
+ } else {
+ next;
+ }
+ }
+ }
+ if($Global::max_lines and not $Global::max_number_of_args) {
+ # Pass n-line records
+ pass_n_line_records();
+ } elsif($opt::csv) {
+ # Pass a full CSV record
+ pass_csv_record();
+ } elsif($opt::regexp) {
+ # Split record at regexp
+ if($Global::max_number_of_args) {
+ pass_n_regexps();
+ } else {
+ pass_regexp();
+ }
+ } else {
+ # Pass normal --recend/--recstart record
+ if($Global::max_number_of_args) {
+ pass_n();
+ } else {
+ pass();
+ }
+ }
+ $eof and last;
+ increase_blocksize_maybe();
+ ::debug("init", "Round\n");
+ }
+ ::debug("init", "Done reading input\n");
+
+ # If there is anything left in the buffer write it
+ write_record_to_pipe($chunk_number++, \$header, \$buf, $recstart,
+ $recend, length $buf);
+
+ if($opt::retries) {
+ $Global::no_more_input = 1;
+ # We need to start no more jobs: At most we need to retry some
+ # of the already running.
+ my @running = values %Global::running;
+ # Stop any virgins.
+ for my $job (@running) {
+ if(defined $job and $job->virgin()) {
+ close $job->fh(0,"w");
+ }
+ }
+ # Wait for running jobs to be done
+ my $sleep = 1;
+ while($Global::total_running > 0) {
+ $sleep = ::reap_usleep($sleep);
+ start_more_jobs();
+ }
+ }
+ $Global::start_no_new_jobs ||= 1;
+ if($opt::roundrobin) {
+ # Flush blocks to roundrobin procs
+ my $sleep = 1;
+ while(%Global::running) {
+ my $something_written = 0;
+ for my $job (values %Global::running) {
+ if($job->block_length()) {
+ $something_written += $job->non_blocking_write();
+ } else {
+ close $job->fh(0,"w");
+ }
+ }
+ if($something_written) {
+ $sleep = $sleep/2+0.001;
+ }
+ $sleep = ::reap_usleep($sleep);
+ }
+ }
+}
+
+sub recstartrecend() {
+ # Uses:
+ # $opt::recstart
+ # $opt::recend
+ # Returns:
+ # $recstart,$recend with default values and regexp conversion
+ my($recstart,$recend);
+ if(defined($opt::recstart) and defined($opt::recend)) {
+ # If both --recstart and --recend is given then both must match
+ $recstart = $opt::recstart;
+ $recend = $opt::recend;
+ } elsif(defined($opt::recstart)) {
+ # If --recstart is given it must match start of record
+ $recstart = $opt::recstart;
+ $recend = "";
+ } elsif(defined($opt::recend)) {
+ # If --recend is given then it must match end of record
+ $recstart = "";
+ $recend = $opt::recend;
+ if($opt::regexp and $recend eq '') {
+ # --regexp --recend ''
+ $recend = '(?s:.)';
+ }
+ }
+
+ if($opt::regexp) {
+ # Do not allow /x comments - to avoid having to quote space
+ $recstart = "(?-x:".$recstart.")";
+ $recend = "(?-x:".$recend.")";
+ # If $recstart/$recend contains '|'
+ # the | should only apply to the regexp
+ $recstart = "(?:".$recstart.")";
+ $recend = "(?:".$recend.")";
+ } else {
+ # $recstart/$recend = printf strings (\n)
+ $recstart =~ s/\\([0rnt\'\"\\])/"qq|\\$1|"/gee;
+ $recend =~ s/\\([0rnt\'\"\\])/"qq|\\$1|"/gee;
+ }
+ return ($recstart,$recend);
+}
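+# E.g. (illustrative): --recend '\n\n' (paragraph mode without --regexp)
+# arrives here as the four characters \ n \ n; the s///gee above turns
+# it into two literal newline characters.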
+
+sub nindex($$) {
+ # See if string is in buffer N times
+ # Returns:
+ # the position where the Nth copy is found
+ my ($buf_ref, $str, $n) = @_;
+ my $i = 0;
+ for(1..$n) {
+ $i = index64($buf_ref,$str,$i+1);
+ if($i == -1) { last }
+ }
+ return $i;
+}
+
+{
+ my @robin_queue;
+ my $sleep = 1;
+
+ sub round_robin_write($$$$$) {
+ # Input:
+ # $header_ref = ref to $header string
+ # $block_ref = ref to $block to be written
+ # $recstart = record start string
+ # $recend = record end string
+ # $endpos = end position of $block
+ # Uses:
+ # %Global::running
+ # Returns:
+ # $something_written = amount of bytes written
+ my ($header_ref,$buffer_ref,$recstart,$recend,$endpos) = @_;
+ my $written = 0;
+ my $block_passed = 0;
+ while(not $block_passed) {
+ # Continue flushing existing buffers
+ # until one is empty and a new block is passed
+ if(@robin_queue) {
+ # Rotate queue once so new blocks get a fair chance
+ # to be given to another slot
+ push @robin_queue, shift @robin_queue;
+ } else {
+ # Make a queue to spread the blocks evenly
+ push @robin_queue, (sort { $a->seq() <=> $b->seq() }
+ values %Global::running);
+ }
+ do {
+ $written = 0;
+ for my $job (@robin_queue) {
+ if($job->block_length() > 0) {
+ $written += $job->non_blocking_write();
+ } else {
+ $job->set_block($header_ref, $buffer_ref,
+ $endpos, $recstart, $recend);
+ $block_passed = 1;
+ $written += $job->non_blocking_write();
+ last;
+ }
+ }
+ if($written) {
+ $sleep = $sleep/1.5+0.001;
+ }
+ # Don't sleep if something is written
+ } while($written and not $block_passed);
+ $sleep = ::reap_usleep($sleep);
+ }
+ return $written;
+ }
+}
+
+sub index64($$$) {
+ # Do index on strings > 2GB.
+ # index in Perl < v5.22 does not work for > 2GB
+ # Input:
+ # as index except STR which must be passed as a reference
+ # Output:
+ # as index
+ my $ref = shift;
+ my $match = shift;
+ my $pos = shift || 0;
+ my $max2gb = 2**31-1;
+ my $strlen = length($$ref);
+ # No point in doing extra work if we don't need to.
+ if($strlen < $max2gb or $] > 5.022) {
+ return index($$ref, $match, $pos);
+ }
+
+ my $matchlen = length($match);
+ my $ret;
+ my $offset = $pos;
+ while($offset < $strlen) {
+ $ret = index(
+ substr($$ref, $offset, $max2gb),
+ $match, $pos-$offset);
+ if($ret != -1) {
+ return $ret + $offset;
+ }
+ $offset += ($max2gb - $matchlen - 1);
+ }
+ return -1;
+}
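+# The windows overlap by $matchlen+1 bytes, so a match straddling a
+# window boundary is still found. E.g. (illustrative, pretending the
+# window were 10 bytes): searching for a 3-byte match advances $offset
+# by 10-3-1 = 6 per window, re-scanning the last 4 bytes of each window.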
+
+sub rindex64($@) {
+ # Do rindex on strings > 2GB.
+ # rindex in Perl < v5.22 does not work for > 2GB
+ # Input:
+ # as rindex except STR which must be passed as a reference
+ # Output:
+ # as rindex
+ my $ref = shift;
+ my $match = shift;
+ my $pos = shift;
+ my $block_size = 2**31-1;
+ my $strlen = length($$ref);
+ # Default: search from end
+ $pos = defined $pos ? $pos : $strlen;
+ # No point in doing extra work if we don't need to.
+ if($strlen < $block_size or $] > 5.022) {
+ return rindex($$ref, $match, $pos);
+ }
+
+ my $matchlen = length($match);
+ my $ret;
+ my $offset = $pos - $block_size + $matchlen;
+ if($offset < 0) {
+ # The offset is less than a $block_size
+ # Set the $offset to 0 and
+ # Adjust block_size accordingly
+ $block_size = $block_size + $offset;
+ $offset = 0;
+ }
+ while($offset >= 0) {
+ $ret = rindex(
+ substr($$ref, $offset, $block_size),
+ $match);
+ if($ret != -1) {
+ return $ret + $offset;
+ }
+ $offset -= ($block_size - $matchlen - 1);
+ }
+ return -1;
+}
+
+sub shorten($$) {
+ # Do: substr($buf,0,$i) = "";
+ # Some Perl versions do not support $i > 2GB, so do this in 2GB chunks
+ # Input:
+ # $buf_ref = \$buf
+ # $i = position to shorten to
+ # Returns: N/A
+ my ($buf_ref, $i) = @_;
+ my $two_gb = 2**31-1;
+ while($i > $two_gb) {
+ substr($$buf_ref,0,$two_gb) = "";
+ $i -= $two_gb;
+ }
+ substr($$buf_ref,0,$i) = "";
+}
+
+sub write_record_to_pipe($$$$$$) {
+ # Fork then
+ # Write record from pos 0 .. $endpos to pipe
+ # Input:
+ # $chunk_number = sequence number - to see if already run
+ # $header_ref = reference to header string to prepend
+ # $buffer_ref = reference to record to write
+ # $recstart = start string of record
+ # $recend = end string of record
+ # $endpos = position in $buffer_ref where record ends
+ # Uses:
+ # $Global::job_already_run
+ # $opt::roundrobin
+ # @Global::virgin_jobs
+ # Returns:
+ # Number of chunks written (0 or 1)
+ my ($chunk_number, $header_ref, $buffer_ref,
+ $recstart, $recend, $endpos) = @_;
+ if($endpos == 0) { return 0; }
+ if(vec($Global::job_already_run,$chunk_number,1)) { return 1; }
+ if($opt::roundrobin) {
+ # Write the block to one of the already running jobs
+ return round_robin_write($header_ref, $buffer_ref,
+ $recstart, $recend, $endpos);
+ }
+ # If no virgin found, backoff
+ my $sleep = 0.0001; # 0.01 ms - better performance on highend
+ while(not @Global::virgin_jobs) {
+ ::debug("pipe", "No virgin jobs");
+ $sleep = ::reap_usleep($sleep);
+ # Jobs may not be started because of loadavg
+ # or too little time between each ssh login
+ # or retrying failed jobs.
+ start_more_jobs();
+ }
+ my $job = shift @Global::virgin_jobs;
+ $job->set_block($header_ref, $buffer_ref, $endpos, $recstart, $recend);
+ $job->write_block();
+ return 1;
+}
+
+
+sub __SEM_MODE__() {}
+
+
+sub acquire_semaphore() {
+ # Acquires semaphore. If needed: spawns to the background
+ # Uses:
+ # @Global::host
+ # Returns:
+ # The semaphore to be released when jobs is complete
+ $Global::host{':'} = SSHLogin->new(":");
+ my $sem = Semaphore->new($Semaphore::name,
+ $Global::host{':'}->max_jobs_running());
+ $sem->acquire();
+ if($Semaphore::fg) {
+ # skip
+ } else {
+ if(fork()) {
+ exit(0);
+ } else {
+ # If run in the background, the PID will change
+ $sem->pid_change();
+ }
+ }
+ return $sem;
+}
+
+
+sub __PARSE_OPTIONS__() {}
+
+sub shell_completion() {
+ if($opt::shellcompletion eq "zsh") {
+ # if shell == zsh
+ zsh_completion();
+ } elsif($opt::shellcompletion eq "bash") {
+ # if shell == bash
+ bash_completion();
+ } elsif($opt::shellcompletion eq "auto") {
+ if($Global::shell =~ m:/zsh$|^zsh$:) {
+ # if shell == zsh
+ zsh_completion();
+ } elsif($Global::shell =~ m:/bash$|^bash$:) {
+ # if shell == bash
+ bash_completion();
+ } else {
+ ::error(255,"--shellcompletion is not implemented for ".
+ "'$Global::shell'.");
+ }
+ } else {
+ ::error(255,"--shellcompletion is not implemented for ".
+ "'$opt::shellcompletion'.");
+ }
+}
+
+sub bash_completion() {
+ # Print:
+ # complete -F _comp_parallel parallel;
+ # _comp_parallel() {
+ # COMPREPLY=($(compgen -W "--options" --
+ # "${COMP_WORDS[$COMP_CWORD]}"));
+ # };
+ my @bash_completion =
+ ("complete -F _comp_parallel parallel;",
+ '_comp_parallel() { COMPREPLY=($(compgen -W "');
+ my @och = options_completion_hash();
+ while(@och) {
+ $_ = shift @och;
+ # Split input like:
+ # "joblog|jl=s[Logfile for executed jobs]:logfile:_files"
+ if(/^(.*?)(\[.*?])?(:[^:]*)?(:.*)?$/) {
+ my $opt = $1;
+ my $desc = $2;
+ my $argdesc = $3;
+ my $func = $4;
+ # opt=s => opt
+ $opt =~ s/[:=].$//;
+ if($opt =~ /^_/) {
+ # internal options start with --_
+ # skip
+ } else {
+ push @bash_completion,
+ (map { (length $_ == 1) ? "-$_ " : "--$_ " }
+ split /\|/, $opt);
+ }
+ }
+ shift @och;
+ }
+ push @bash_completion,'" -- "${COMP_WORDS[$COMP_CWORD]}")); };'."\n";
+ print @bash_completion;
+}
+
+sub zsh_completion() {
+ # Print code used for completion in zsh
+ my @zsh_completion =
+ ("compdef _comp_parallel parallel; ",
+ "setopt localoptions extended_glob; ",
+ "_comp_parallel() { ",
+ "_arguments ");
+ my @och = options_completion_hash();
+ while(@och) {
+ $_ = shift @och;
+ # Split input like:
+ # "joblog|jl=s[Logfile for executed jobs]:logfile:_files"
+ if(/^(.*?)(\[.*?])?(:[^:]*)?(:.*)?$/) {
+ my $opt = $1;
+ my $desc = $2;
+ my $argdesc = $3;
+ my $func = $4;
+ # opt=s => opt
+ $opt =~ s/[:=].$//;
+ if($opt =~ /^_/) {
+ # internal options start with --_
+ # skip
+ } else {
+ # {-o,--option}
+ my $zsh_opt = join(",",
+ (map { (length $_ == 1) ? "-$_" : "--$_" }
+ split /\|/, $opt));
+ if($zsh_opt =~ /,/) { $zsh_opt = "{$zsh_opt}"; }
+ $desc =~ s/'/'"'"'/g;
+ $argdesc =~ s/'/'"'"'/g;
+ $func =~ s/'/'"'"'/g;
+ push @zsh_completion, $zsh_opt."'".$desc.$argdesc.$func."' ";
+ }
+ }
+ shift @och;
+ }
+ push @zsh_completion,
+ q{'(-)1:command:{_command_names -e}' },
+ q{'*::arguments:_normal'},
+ "};\n";
+ print @zsh_completion;
+}
+
+sub options_hash() {
+ # Returns:
+ # %hash = for GetOptions
+ my %och = options_completion_hash();
+ my %oh;
+ my ($k,$v);
+ while(($k,$v) = each %och) {
+ # Remove description
+ $k =~ s/\[.*//;
+ $oh{$k} = $v;
+ }
+ return %oh;
+}
+
+sub options_completion_hash() {
+ # Returns:
+ # %hash = for GetOptions and shell completion
+ return
+ ("debug|D=s" => \$opt::D,
+ "xargs[Insert as many arguments as the command line length permits]"
+ => \$opt::xargs,
+ "m[Multiple arguments]" => \$opt::m,
+ ("X[Insert as many arguments with context as the command line ".
+ "length permits]"
+ => \$opt::X),
+ "v[Verbose]" => \@opt::v,
+ "sql=s[Use --sql-master instead (obsolete)]:DBURL" => \$opt::retired,
+ ("sql-master|sqlmaster=s".
+ "[Submit jobs via SQL server. DBURL must point to a table, which ".
+ "will contain --joblog, the values, and output]:DBURL"
+ => \$opt::sqlmaster),
+ ("sql-worker|sqlworker=s".
+ "[Execute jobs via SQL server. Read the input sources variables ".
+ "from the table pointed to by DBURL.]:DBURL"
+ => \$opt::sqlworker),
+ ("sql-and-worker|sqlandworker=s".
+ "[--sql-master DBURL --sql-worker DBURL]:DBURL"
+ => \$opt::sqlandworker),
+ ("joblog|jl=s[Logfile for executed jobs]:logfile:_files"
+ => \$opt::joblog),
+ ("results|result|res=s[Save the output into files]:name:_files"
+ => \$opt::results),
+ "resume[Resumes from the last unfinished job]" => \$opt::resume,
+ ("resume-failed|resumefailed".
+ "[Retry all failed and resume from the last unfinished job]"
+ => \$opt::resume_failed),
+ ("retry-failed|retryfailed[Retry all failed jobs in joblog]"
+ => \$opt::retry_failed),
+ "silent[Silent]" => \$opt::silent,
+ ("keep-order|keeporder|k".
+ "[Keep sequence of output same as the order of input]"
+ => \$opt::keeporder),
+ ("no-keep-order|nokeeporder|nok|no-k".
+ "[Overrides an earlier --keep-order (e.g. if set in ".
+ "~/.parallel/config)]"
+ => \$opt::nokeeporder),
+ "group[Group output]" => \$opt::group,
+ "g" => \$opt::retired,
+ ("ungroup|u".
+ "[Output is printed as soon as possible and bypasses GNU parallel ".
+ "internal processing]"
+ => \$opt::ungroup),
+ ("latest-line|latestline|ll".
+ "[Print latest line of each job]"
+ => \$opt::latestline),
+ ("line-buffer|line-buffered|linebuffer|linebuffered|lb".
+ "[Buffer output on line basis]"
+ => \$opt::linebuffer),
+ ("tmux".
+ "[Use tmux for output. Start a tmux session and run each job in a ".
+ "window in that session. No other output will be produced]"
+ => \$opt::tmux),
+ ("tmux-pane|tmuxpane".
+ "[Use tmux for output but put output into panes in the first ".
+ "window. Useful if you want to monitor the progress of less than ".
+ "100 concurrent jobs]"
+ => \$opt::tmuxpane),
+ "null|0[Use NUL as delimiter]" => \$opt::null,
+ "quote|q[Quote command]" => \$opt::quote,
+ # Replacement strings
+ ("parens=s[Use parensstring instead of {==}]:parensstring"
+ => \$opt::parens),
+ ('rpl=s[Define replacement string]:"tag perl expression"'
+ => \@opt::rpl),
+ "plus[Add more replacement strings]" => \$opt::plus,
+ ("I=s".
+ "[Use the replacement string replace-str instead of {}]:replace-str"
+ => \$opt::I),
+ ("extensionreplace|er=s".
+ "[Use the replacement string replace-str instead of {.} for input ".
+ "line without extension]:replace-str"
+ => \$opt::U),
+ "U=s" => \$opt::retired,
+ ("basenamereplace|bnr=s".
+ "[Use the replacement string replace-str instead of {/} for ".
+ "basename of input line]:replace-str"
+ => \$opt::basenamereplace),
+ ("dirnamereplace|dnr=s".
+ "[Use the replacement string replace-str instead of {//} for ".
+ "dirname of input line]:replace-str"
+ => \$opt::dirnamereplace),
+ ("basenameextensionreplace|bner=s".
+ "[Use the replacement string replace-str instead of {/.} for ".
+ "basename of input line without extension]:replace-str"
+ => \$opt::basenameextensionreplace),
+ ("seqreplace=s".
+ "[Use the replacement string replace-str instead of {#} for job ".
+ "sequence number]:replace-str"
+ => \$opt::seqreplace),
+ ("slotreplace=s".
+ "[Use the replacement string replace-str instead of {%} for job ".
+ "slot number]:replace-str"
+ => \$opt::slotreplace),
+ ("delay=s".
+ "[Delay starting next job by duration]:duration" => \$opt::delay),
+ ("ssh-delay|sshdelay=f".
+ "[Delay starting next ssh by duration]:duration"
+ => \$opt::sshdelay),
+ ("load=s".
+ "[Only start jobs if load is less than max-load]:max-load"
+ => \$opt::load),
+    "noswap[Do not start job if computer is swapping]" => \$opt::noswap,
+ ("max-line-length-allowed|maxlinelengthallowed".
+ "[Print maximal command line length]"
+ => \$opt::max_line_length_allowed),
+ ("number-of-cpus|numberofcpus".
+ "[Print the number of physical CPU cores and exit (obsolete)]"
+ => \$opt::number_of_cpus),
+ ("number-of-sockets|numberofsockets".
+ "[Print the number of CPU sockets and exit]"
+ => \$opt::number_of_sockets),
+ ("number-of-cores|numberofcores".
+ "[Print the number of physical CPU cores and exit]"
+ => \$opt::number_of_cores),
+ ("number-of-threads|numberofthreads".
+ "[Print the number of hyperthreaded CPU cores and exit]"
+ => \$opt::number_of_threads),
+ ("use-sockets-instead-of-threads|usesocketsinsteadofthreads".
+ "[Determine how GNU Parallel counts the number of CPUs]"
+ => \$opt::use_sockets_instead_of_threads),
+ ("use-cores-instead-of-threads|usecoresinsteadofthreads".
+ "[Determine how GNU Parallel counts the number of CPUs]"
+ => \$opt::use_cores_instead_of_threads),
+ ("use-cpus-instead-of-cores|usecpusinsteadofcores".
+ "[Determine how GNU Parallel counts the number of CPUs]"
+ => \$opt::use_cpus_instead_of_cores),
+ ("shell-quote|shellquote|shell_quote".
+ "[Does not run the command but quotes it. Useful for making ".
+ "quoted composed commands for GNU parallel]"
+ => \@opt::shellquote),
+ ('nice=i[Run the command at this niceness]:niceness:($(seq -20 19))'
+ => \$opt::nice),
+ "tag[Tag lines with arguments]" => \$opt::tag,
+ ("tag-string|tagstring=s".
+ "[Tag lines with a string]:str" => \$opt::tagstring),
+ "ctag[Color tag]:str" => \$opt::ctag,
+ "ctag-string|ctagstring=s[Colour tagstring]:str" => \$opt::ctagstring,
+ "color|colour[Colourize output]" => \$opt::color,
+ ("color-failed|colour-failed|colorfailed|colourfailed|".
+ "color-fail|colour-fail|colorfail|colourfail|cf".
+ "[Colour failed jobs red]"
+ => \$opt::colorfailed),
+ ("onall[Run all the jobs on all computers given with --sshlogin]"
+ => \$opt::onall),
+ "nonall[--onall with no arguments]" => \$opt::nonall,
+ ("filter-hosts|filterhosts|filter-host[Remove down hosts]"
+ => \$opt::filter_hosts),
+ ('sshlogin|S=s'.
+ '[Distribute jobs to remote computers]'.
+ ':[@hostgroups/][ncpus/]sshlogin'.
+ '[,[@hostgroups/][ncpus/]sshlogin[,...]] or @hostgroup'.
+ ':_users') => \@opt::sshlogin,
+ ("sshloginfile|slf=s".
+ "[File with sshlogins on separate lines. Lines starting with '#' ".
+ "are ignored.]:filename:_files"
+ => \@opt::sshloginfile),
+ ("controlmaster|M".
+ "[Use ssh's ControlMaster to make ssh connections faster]"
+ => \$opt::controlmaster),
+ ("ssh=s".
+ "[Use this command instead of ssh for remote access]:sshcommand"
+ => \$opt::ssh),
+ ("transfer-file|transferfile|transfer-files|transferfiles|tf=s".
+ "[Transfer filename to remote computers]:filename:_files"
+ => \@opt::transfer_files),
+ ("return=s[Transfer files from remote computers]:filename:_files"
+ => \@opt::return),
+ ("trc=s[--transfer --return filename --cleanup]:filename:_files"
+ => \@opt::trc),
+ "transfer[Transfer files to remote computers]" => \$opt::transfer,
+ "cleanup[Remove transferred files]" => \$opt::cleanup,
+ ("basefile|bf=s".
+ "[Transfer file to each sshlogin before first job is started]".
+ ":file:_files"
+ => \@opt::basefile),
+ ("template|tmpl=s".
+ "[Replace replacement strings in file and save it in repl]".
+ ":file=repl:_files"
+ => \%opt::template),
+ "B=s" => \$opt::retired,
+ "ctrl-c|ctrlc" => \$opt::retired,
+ "no-ctrl-c|no-ctrlc|noctrlc" => \$opt::retired,
+ ("work-dir|workdir|wd=s".
+ "[Jobs will be run in the dir mydir. (default: the current dir ".
+ "for the local machine, the login dir for remote computers)]".
+ ":mydir:_cd"
+ => \$opt::workdir),
+ "W=s" => \$opt::retired,
+ ("rsync-opts|rsyncopts=s[Options to pass on to rsync]:options"
+ => \$opt::rsync_opts),
+ ("tmpdir|tempdir=s[Directory for temporary files]:dirname:_cd"
+ => \$opt::tmpdir),
+ ("use-compress-program|compress-program|".
+ "usecompressprogram|compressprogram=s".
+ "[Use prg for compressing temporary files]:prg:_commands"
+ => \$opt::compress_program),
+ ("use-decompress-program|decompress-program|".
+ "usedecompressprogram|decompressprogram=s".
+ "[Use prg for decompressing temporary files]:prg:_commands"
+ => \$opt::decompress_program),
+ "compress[Compress temporary files]" => \$opt::compress,
+ "open-tty|o[Open terminal tty]" => \$opt::open_tty,
+ "tty[Open terminal tty]" => \$opt::tty,
+ "T" => \$opt::retired,
+ "H=i" => \$opt::retired,
+ ("dry-run|dryrun|dr".
+ "[Print the job to run on stdout (standard output), but do not ".
+ "run the job]"
+ => \$opt::dryrun),
+ "progress[Show progress of computations]" => \$opt::progress,
+ ("eta[Show the estimated number of seconds before finishing]"
+ => \$opt::eta),
+ "bar[Show progress as a progress bar]" => \$opt::bar,
+ ("total-jobs|totaljobs|total=s".
+ "[Set total number of jobs]" => \$opt::totaljobs),
+ "shuf[Shuffle jobs]" => \$opt::shuf,
+ ("arg-sep|argsep=s".
+ "[Use sep-str instead of ::: as separator string]:sep-str"
+ => \$opt::arg_sep),
+ ("arg-file-sep|argfilesep=s".
+ "[Use sep-str instead of :::: as separator string ".
+ "between command and argument files]:sep-str"
+ => \$opt::arg_file_sep),
+ ('trim=s[Trim white space in input]:trim_method:'.
+ '((n\:"No trim" l\:"Left\ trim" r\:"Right trim" '.
+ 'lr\:"Both trim" rl\:"Both trim"))'
+ => \$opt::trim),
+ "env=s[Copy environment variable var]:var:_vars" => \@opt::env,
+ "recordenv|record-env[Record environment]" => \$opt::record_env,
+ ('session'.
+ '[Record names in current environment in $PARALLEL_IGNORED_NAMES '.
+ 'and exit. Only used with env_parallel. '.
+     'Aliases, functions, and variables with names in '.
+     '$PARALLEL_IGNORED_NAMES will not be copied]'
+ => \$opt::session),
+ ('plain[Ignore --profile, $PARALLEL, and ~/.parallel/config]'
+ => \$opt::plain),
+ ("profile|J=s".
+ "[Use profile profilename for options]:profilename:_files"
+ => \@opt::profile),
+ "tollef" => \$opt::tollef,
+ "gnu[Behave like GNU parallel]" => \$opt::gnu,
+ "link|xapply[Link input sources]" => \$opt::link,
+ "linkinputsource|xapplyinputsource=i" => \@opt::linkinputsource,
+ # Before changing these lines, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be put in a public hall-of-shame by removing
+ # these lines
+ ("bibtex|citation".
+ "[Print the citation notice and BibTeX entry for GNU parallel, ".
+ "silence citation notice for all future runs, and exit. ".
+ "It will not run any commands]"
+ => \$opt::citation),
+ "will-cite|willcite|nn|nonotice|no-notice" => \$opt::willcite,
+ # Termination and retries
+ ('halt-on-error|haltonerror|halt=s'.
+ '[When should GNU parallel terminate]'.
+ ':when:((now\:"kill all running jobs and halt immediately" '.
+ 'soon\:"wait for all running jobs to complete, start no new jobs"))'
+ => \$opt::halt),
+ 'limit=s[Dynamic job limit]:"command args"' => \$opt::limit,
+ ("memfree=s".
+ "[Minimum memory free when starting another job]:size"
+ => \$opt::memfree),
+ ("memsuspend=s".
+ "[Suspend jobs when there is less memory available]:size"
+ => \$opt::memsuspend),
+ "retries=s[Try failing jobs n times]:n" => \$opt::retries,
+ ("timeout=s".
+ "[Time out for command. If the command runs for longer than ".
+ "duration seconds it will get killed as per --term-seq]:duration"
+ => \$opt::timeout),
+ ("term-seq|termseq=s".
+ "[Termination sequence]:sequence" => \$opt::termseq),
+ # xargs-compatibility - implemented, man, testsuite
+ ("max-procs|maxprocs|P|jobs|j=s".
+     "[Add N to/subtract N from/multiply N% with the number of CPU ".
+     "threads, or read the parameter from a file]:+N/-N/N%/N/procfile:_files"
+ => \$opt::jobs),
+ ("delimiter|d=s[Input items are terminated by delim]:delim"
+ => \$opt::d),
+ ("max-chars|maxchars|s=s[Limit length of command]:max-chars"
+ => \$opt::max_chars),
+ ("arg-file|argfile|a=s".
+ "[Use input-file as input source]:input-file:_files" => \@opt::a),
+ "no-run-if-empty|norunifempty|r[Do not run empty input]" => \$opt::r,
+ ("replace|i:s".
+ "[This option is deprecated; use -I instead]:replace-str"
+ => \$opt::i),
+ "E=s" => \$opt::eof,
+ ("eof|e:s[Set the end of file string to eof-str]:eof-str"
+ => \$opt::eof),
+ ("process-slot-var|processslotvar=s".
+ "[Set this variable to job slot number]:varname"
+ => \$opt::process_slot_var),
+ ("max-args|maxargs|n=s".
+ "[Use at most max-args arguments per command line]:max-args"
+ => \$opt::max_args),
+ ("max-replace-args|maxreplaceargs|N=s".
+ "[Use at most max-args arguments per command line]:max-args"
+ => \$opt::max_replace_args),
+ "col-sep|colsep|C=s[Column separator]:regexp" => \$opt::colsep,
+ "csv[Treat input as CSV-format]"=> \$opt::csv,
+ ("help|h[Print a summary of the options to GNU parallel and exit]"
+ => \$opt::help),
+ ("L=s[When used with --pipe: Read records of recsize]:recsize"
+ => \$opt::L),
+ ("max-lines|maxlines|l:f".
+ "[When used with --pipe: Read records of recsize lines]:recsize"
+ => \$opt::max_lines),
+ "interactive|p[Ask user before running a job]" => \$opt::interactive,
+ ("verbose|t[Print the job to be run on stderr (standard error)]"
+ => \$opt::verbose),
+    ("version|V[Print the version of GNU parallel and exit]"
+ => \$opt::version),
+ ('min-version|minversion=i'.
+     '[Print the version of GNU parallel; exit 255 if older than version]'.
+ ':version:($(parallel --minversion 0))'
+ => \$opt::minversion),
+ ("show-limits|showlimits".
+ "[Display limits given by the operating system]"
+ => \$opt::show_limits),
+ ("exit|x[Exit if the size (see the -s option) is exceeded]"
+ => \$opt::x),
+ # Semaphore
+ "semaphore[Work as a counting semaphore]" => \$opt::semaphore,
+ ("semaphore-timeout|semaphoretimeout|st=s".
+ "[If secs > 0: If the semaphore is not released within secs ".
+ "seconds, take it anyway]:secs"
+ => \$opt::semaphoretimeout),
+ ("semaphore-name|semaphorename|id=s".
+ "[Use name as the name of the semaphore]:name"
+ => \$opt::semaphorename),
+ "fg[Run command in foreground]" => \$opt::fg,
+ "bg[Run command in background]" => \$opt::bg,
+ "wait[Wait for all commands to complete]" => \$opt::wait,
+ # Shebang #!/usr/bin/parallel --shebang
+ ("shebang|hashbang".
+ "[GNU parallel can be called as a shebang (#!) command as the ".
+ "first line of a script. The content of the file will be treated ".
+     "as the input source]"
+ => \$opt::shebang),
+ ("_pipe-means-argfiles[internal]"
+ => \$opt::_pipe_means_argfiles),
+ "Y" => \$opt::retired,
+ ("skip-first-line|skipfirstline".
+ "[Do not use the first line of input]"
+ => \$opt::skip_first_line),
+ "_bug" => \$opt::_bug,
+ "_unsafe" => \$opt::_unsafe,
+ # --pipe
+ ("pipe|spreadstdin".
+ "[Spread input to jobs on stdin (standard input)]" => \$opt::pipe),
+ ("round-robin|roundrobin|round".
+ "[Distribute chunks of standard input in a round robin fashion]"
+ => \$opt::roundrobin),
+ "recstart=s" => \$opt::recstart,
+ ("recend=s".
+ "[Split record between endstring and startstring]:endstring"
+ => \$opt::recend),
+ ("regexp|regex".
+ "[Interpret --recstart and --recend as regular expressions]"
+ => \$opt::regexp),
+ ("remove-rec-sep|removerecsep|rrs".
+ "[Remove record separator]" => \$opt::remove_rec_sep),
+ ("output-as-files|outputasfiles|files[Save output to files]"
+ => \$opt::files),
+ ("output-as-files0|outputasfiles0|files0".
+ "[Save output to files separated by NUL]"
+ => \$opt::files0),
+ ("block-size|blocksize|block=s".
+ "[Size of block in bytes to read at a time]:size"
+ => \$opt::blocksize),
+ ("block-timeout|blocktimeout|bt=s".
+ "[Timeout for reading block when using --pipe]:duration"
+ => \$opt::blocktimeout),
+ "header=s[Use regexp as header]:regexp" => \$opt::header,
+ "cat[Create a temporary file with content]" => \$opt::cat,
+ "fifo[Create a temporary fifo with content]" => \$opt::fifo,
+ ("pipe-part|pipepart[Pipe parts of a physical file]"
+ => \$opt::pipepart),
+ "tee[Pipe all data to all jobs]" => \$opt::tee,
+ ("shard=s".
+ "[Use shardexpr as shard key and shard input to the jobs]:shardexpr"
+ => \$opt::shard),
+ ("bin=s".
+ "[Use binexpr as binning key and bin input to the jobs]:binexpr"
+ => \$opt::bin),
+ "group-by|groupby=s[Group input by value]:val" => \$opt::groupby,
+ #
+ ("hgrp|hostgrp|hostgroup|hostgroups[Enable hostgroups on arguments]"
+ => \$opt::hostgroups),
+ "embed[Embed GNU parallel in a shell script]" => \$opt::embed,
+ ("filter=s[Only run jobs where filter is true]:filter"
+ => \@opt::filter),
+ "combineexec|combine-exec|combineexecutable|combine-executable=s".
+    "[Pack GNU parallel and the command into a combined executable]"
+    => \$opt::combineexec,
+ "_parset=s[Generate shell code for parset]" => \$opt::_parset,
+ ("shell-completion|shellcompletion=s".
+ "[Generate shell code for shell completion]:shell:(bash zsh)"
+ => \$opt::shellcompletion),
+ # Parameter for testing optimal values
+ "_test=s" => \$opt::_test,
+ );
+}
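+
+# Anatomy of one entry above (sketch):
+#   "joblog|jl=s[Logfile for executed jobs]:logfile:_files"
+# i.e. a Getopt::Long spec ("joblog|jl=s"), a zsh description in [...],
+# and zsh completion actions after the first ':'.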
+
+sub get_options_from_array($@) {
+ # Run GetOptions on @array
+ # Input:
+ # $array_ref = ref to @ARGV to parse
+ # @keep_only = Keep only these options (e.g. --profile)
+ # Uses:
+ # @ARGV
+ # Returns:
+ # true if parsing worked
+ # false if parsing failed
+ # @$array_ref is changed
+ my ($array_ref, @keep_only) = @_;
+ if(not @$array_ref) {
+ # Empty array: No need to look more at that
+ return 1;
+ }
+    # A bit of shuffling of @ARGV is needed as GetOptionsFromArray is not
+ # supported everywhere
+ my @save_argv;
+ my $this_is_ARGV = (\@::ARGV == $array_ref);
+ if(not $this_is_ARGV) {
+ @save_argv = @::ARGV;
+ @::ARGV = @{$array_ref};
+ }
+ # If @keep_only set: Ignore all values except @keep_only
+ my %options = options_hash();
+ if(@keep_only) {
+ my (%keep,@dummy);
+ @keep{@keep_only} = @keep_only;
+ for my $k (grep { not $keep{$_} } keys %options) {
+ # Store the value of the option in @dummy
+ $options{$k} = \@dummy;
+ }
+ }
+ my $retval = GetOptions(%options);
+ if(not $this_is_ARGV) {
+ @{$array_ref} = @::ARGV;
+ @::ARGV = @save_argv;
+ }
+ return $retval;
+}
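+
+# Example from read_options() below:
+#   get_options_from_array(\@ARGV_copy,"profile|J=s","plain")
+# parses only --profile/-J and --plain; the values of all other
+# recognized options are discarded into a dummy array.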
+
+sub parse_parset() {
+ $Global::progname = "parset";
+ @Global::parset_vars = split /[ ,]/, $opt::_parset;
+ my $var_or_assoc = shift @Global::parset_vars;
+ # Legal names: var _v2ar arrayentry[2]
+ my @illegal = (grep { not /^[a-zA-Z_][a-zA-Z_0-9]*(\[\d+\])?$/ }
+ @Global::parset_vars);
+ if(@illegal) {
+ ::error
+ (255,"@illegal is an invalid variable name.",
+     "Variable names must be a letter followed by letters or digits.",
+ "Usage:",
+ " parset varname GNU Parallel options and command");
+ }
+ if($var_or_assoc eq "assoc") {
+ my $var = shift @Global::parset_vars;
+ print "$var=(";
+ $Global::parset = "assoc";
+ $Global::parset_endstring=")\n";
+ } elsif($var_or_assoc eq "var") {
+ if($#Global::parset_vars > 0) {
+ $Global::parset = "var";
+ } else {
+ my $var = shift @Global::parset_vars;
+ print "$var=(";
+ $Global::parset = "array";
+ $Global::parset_endstring=")\n";
+ }
+ } else {
+ ::die_bug("parset: unknown '$opt::_parset'");
+ }
+}
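+
+# Sketch of the shell code generated via --_parset (illustration):
+#   --_parset var,a,b,c  -> one assignment per named variable
+#   --_parset var,arr    -> a single array: prints 'arr=(' ... ')'
+#   --_parset assoc,h    -> an associative array: prints 'h=(' ... ')'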
+
+sub parse_options(@) {
+ # Returns: N/A
+ init_globals();
+ my @argv_before = @ARGV;
+ @ARGV = read_options();
+    # Before changing these lines, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be added to a public hall-of-shame by removing the lines
+ if(defined $opt::citation) {
+ citation(\@argv_before,\@ARGV);
+ wait_and_exit(0);
+ }
+ # no-* overrides *
+ if($opt::nokeeporder) { $opt::keeporder = undef; }
+
+ if(@opt::v) { $Global::verbose = $#opt::v+1; } # Convert -v -v to v=2
+ if($opt::_bug) { ::die_bug("test-bug"); }
+ $Global::debug = $opt::D;
+ #
+ ## Shell
+ #
+ $Global::shell = $ENV{'PARALLEL_SHELL'} || parent_shell($$)
+ || $ENV{'SHELL'} || "/bin/sh";
+ if(not -x $Global::shell and not which($Global::shell)) {
+ ::error(255,"Shell '$Global::shell' not found.");
+ }
+ ::debug("init","Global::shell $Global::shell\n");
+ $Global::cshell = $Global::shell =~ m:(/[-a-z]*)?csh:;
+ $Global::fish = $Global::shell =~ m:(/[-a-z]*)?fish:;
+ if(defined $opt::_parset) { parse_parset(); }
+ if(defined $opt::X) { $Global::ContextReplace = 1; }
+ if(defined $opt::silent) { $Global::verbose = 0; }
+ if(defined $opt::null) { $/ = "\0"; }
+ if(defined $opt::files) { $Global::files = 1; $Global::files_sep = "\n"; }
+ if(defined $opt::files0) { $Global::files = 1; $Global::files_sep = "\0"; }
+ if(defined $opt::d) { $/ = unquote_printf($opt::d) }
+ parse_replacement_string_options();
+ $opt::tag ||= $opt::ctag;
+ $opt::tagstring ||= $opt::ctagstring;
+ if(defined $opt::ctag or defined $opt::ctagstring
+ or defined $opt::color) {
+ $Global::color = 1;
+ }
+ if($opt::linebuffer or $opt::latestline) {
+ $Global::linebuffer = 1;
+ Job::latestline_init();
+ }
+ if(defined $opt::tag and not defined $opt::tagstring) {
+ # Default = {}
+ $opt::tagstring = $Global::parensleft.$Global::parensright;
+ }
+ if(defined $opt::tagstring) {
+ $opt::tagstring = unquote_printf($opt::tagstring);
+ if($opt::tagstring =~
+ /\Q$Global::parensleft\E.*\S+.*\Q$Global::parensright\E/
+ and
+ $Global::linebuffer) {
+ # --tagstring contains {= ... =} and --linebuffer =>
+ # recompute replacement string for each use (do not cache)
+ $Global::cache_replacement_eval = 0;
+ }
+ }
+ if(defined $opt::interactive) { $Global::interactive = $opt::interactive; }
+ if(defined $opt::quote) { $Global::quoting = 1; }
+ if(defined $opt::r) { $Global::ignore_empty = 1; }
+ if(defined $opt::verbose) { $Global::stderr_verbose = 1; }
+ if(defined $opt::eof) { $Global::end_of_file_string = $opt::eof; }
+ if(defined $opt::max_args) {
+ $opt::max_args = multiply_binary_prefix($opt::max_args);
+ $Global::max_number_of_args = $opt::max_args;
+ if($opt::pipepart and $opt::groupby) { $Global::max_number_of_args = 1; }
+ }
+ if(defined $opt::blocktimeout) {
+ $Global::blocktimeout = int(multiply_time_units($opt::blocktimeout));
+ if($Global::blocktimeout < 1) {
+ ::error(255,"--block-timeout must be at least 1");
+ }
+ }
+ if(defined $opt::timeout) {
+ $Global::timeoutq = TimeoutQueue->new($opt::timeout);
+ }
+ if(defined $opt::tmpdir) { $ENV{'TMPDIR'} = $opt::tmpdir; }
+ $ENV{'PARALLEL_RSYNC_OPTS'} = $opt::rsync_opts ||
+ $ENV{'PARALLEL_RSYNC_OPTS'} || '-rlDzR';
+ # Default: Same nice level as GNU Parallel is started at
+ $opt::nice ||= eval { getpriority(0,0) } || 0;
+ if(defined $opt::help) { usage(); exit(0); }
+ if(defined $opt::shellcompletion) { shell_completion(); exit(0); }
+ if(defined $opt::embed) { embed(); exit(0); }
+ if(defined $opt::sqlandworker) {
+ $opt::sqlmaster = $opt::sqlworker = $opt::sqlandworker;
+ }
+ if(defined $opt::tmuxpane) { $opt::tmux = $opt::tmuxpane; }
+ if(defined $opt::colsep) { $Global::trim = 'lr'; }
+ if(defined $opt::csv) {
+ if(not $Global::use{"Text::CSV"} ||= eval "use Text::CSV; 1;") {
+ ::error(255,"The perl module Text::CSV is not installed.",
+ "Try installing libtext-csv-perl or perl-Text-CSV.");
+ }
+ $opt::colsep = defined $opt::colsep ? $opt::colsep : ",";
+ my $csv_setting = { binary => 1, sep_char => $opt::colsep };
+ my $sep = $csv_setting->{sep_char};
+ $Global::csv = Text::CSV->new($csv_setting)
+ or die "Cannot use CSV: ".Text::CSV->error_diag ();
+ }
+ if(defined $opt::header) {
+ $opt::colsep = defined $opt::colsep ? $opt::colsep : "\t";
+ }
+ if(defined $opt::trim) { $Global::trim = $opt::trim; }
+ if(defined $opt::arg_sep) { $Global::arg_sep = $opt::arg_sep; }
+ if(defined $opt::arg_file_sep) {
+ $Global::arg_file_sep = $opt::arg_file_sep;
+ }
+ if(not defined $opt::process_slot_var) {
+ $opt::process_slot_var = 'PARALLEL_JOBSLOT0';
+ }
+ if(defined $opt::number_of_sockets) {
+ print SSHLogin::no_of_sockets(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::number_of_cpus) {
+ print SSHLogin::no_of_cores(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::number_of_cores) {
+ print SSHLogin::no_of_cores(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::number_of_threads) {
+ print SSHLogin::no_of_threads(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::max_line_length_allowed) {
+ print Limits::Command::real_max_length(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::max_chars) {
+ $opt::max_chars = multiply_binary_prefix($opt::max_chars);
+ }
+ if(defined $opt::version) { version(); wait_and_exit(0); }
+ if(defined $opt::record_env) { record_env(); wait_and_exit(0); }
+ if(@opt::sshlogin) { @Global::sshlogin = @opt::sshlogin; }
+ if(@opt::sshloginfile) { read_sshloginfiles(@opt::sshloginfile); }
+ if(@opt::return) { push @Global::ret_files, @opt::return; }
+ if($opt::transfer) {
+ push @Global::transfer_files, $opt::i || $opt::I || "{}";
+ }
+ push @Global::transfer_files, @opt::transfer_files;
+ if(%opt::template) {
+ while (my ($source, $template_name) = each %opt::template) {
+ push @Global::template_names, $template_name;
+ push @Global::template_contents, slurp_or_exit($source);
+ }
+ }
+ if(not defined $opt::recstart and
+ not defined $opt::recend) { $opt::recend = "\n"; }
+ $Global::blocksize = multiply_binary_prefix($opt::blocksize || "1M");
+ if($Global::blocksize > 2**31-1 and not $opt::pipepart) {
+ warning("--blocksize >= 2G causes problems. Using 2G-1.");
+ $Global::blocksize = 2**31-1;
+ }
+ if($^O eq "cygwin" and
+ ($opt::pipe or $opt::pipepart or $opt::roundrobin)
+ and $Global::blocksize > 65535) {
+ warning("--blocksize >= 64K causes problems on Cygwin.");
+ }
+ $opt::memfree = multiply_binary_prefix($opt::memfree);
+ $opt::memsuspend = multiply_binary_prefix($opt::memsuspend);
+ $Global::memlimit = $opt::memsuspend + $opt::memfree;
+ check_invalid_option_combinations();
+ if((defined $opt::fifo or defined $opt::cat) and not $opt::pipepart) {
+ $opt::pipe = 1;
+ }
+ if(defined $opt::minversion) {
+ print $Global::version,"\n";
+ if($Global::version < $opt::minversion) {
+ wait_and_exit(255);
+ } else {
+ wait_and_exit(0);
+ }
+ }
+ if(not defined $opt::delay) {
+ # Set --delay to --sshdelay if not set
+ $opt::delay = $opt::sshdelay;
+ }
+ $Global::sshdelayauto = $opt::sshdelay =~ s/auto$//;
+ $opt::sshdelay = multiply_time_units($opt::sshdelay);
+ $Global::delayauto = $opt::delay =~ s/auto$//;
+ $opt::delay = multiply_time_units($opt::delay);
+ if($opt::compress_program) {
+ $opt::compress = 1;
+ $opt::decompress_program ||= $opt::compress_program." -dc";
+ }
+
+ if(defined $opt::results) {
+ # Is the output a dir or CSV-file?
+ if($opt::results =~ /\.csv$/i) {
+ # CSV with , as separator
+ $Global::csvsep = ",";
+ $Global::membuffer ||= 1;
+ } elsif($opt::results =~ /\.tsv$/i) {
+ # CSV with TAB as separator
+ $Global::csvsep = "\t";
+ $Global::membuffer ||= 1;
+ } elsif($opt::results =~ /\.json$/i) {
+ # JSON output
+ $Global::jsonout ||= 1;
+ $Global::membuffer ||= 1;
+ }
+ }
+ if($opt::compress) {
+ my ($compress, $decompress) = find_compression_program();
+ $opt::compress_program ||= $compress;
+ $opt::decompress_program ||= $decompress;
+ if(($opt::results and not $Global::csvsep) or $Global::files) {
+ # No need for decompressing
+ $opt::decompress_program = "cat >/dev/null";
+ }
+ }
+ if(defined $opt::dryrun) {
+ # Force grouping due to bug #51039: --dry-run --timeout 3600 -u breaks
+ $opt::ungroup = 0;
+ $opt::group = 1;
+ }
+ if(defined $opt::nonall) {
+ # Append a dummy empty argument if there are no arguments
+ # on the command line to avoid reading from STDIN.
+ # arg_sep = random 50 char
+ # \0noarg => nothing (not the empty string)
+ $Global::arg_sep = join "",
+ map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..50);
+ push @ARGV, $Global::arg_sep, "\0noarg";
+ }
+ if(defined $opt::tee) {
+ if(not defined $opt::jobs) {
+ $opt::jobs = 0;
+ }
+ }
+ if(defined $opt::tty) {
+ # Defaults for --tty: -j1 -u
+ # Can be overridden with -jXXX -g
+ if(not defined $opt::jobs) {
+ $opt::jobs = 1;
+ }
+ if(not defined $opt::group) {
+ $opt::ungroup = 1;
+ }
+ }
+ if(@opt::trc) {
+ push @Global::ret_files, @opt::trc;
+ if(not @Global::transfer_files) {
+ # Defaults to --transferfile {}
+ push @Global::transfer_files, $opt::i || $opt::I || "{}";
+ }
+ $opt::cleanup = 1;
+ }
+ if(defined $opt::max_lines) {
+ if($opt::max_lines eq "-0") {
+ # -l -0 (swallowed -0)
+ $opt::max_lines = 1;
+ $opt::null = 1;
+ $/ = "\0";
+ } else {
+ $opt::max_lines = multiply_binary_prefix($opt::max_lines);
+ if ($opt::max_lines == 0) {
+ # If not given (or if 0 is given) => 1
+ $opt::max_lines = 1;
+ }
+ }
+
+ $Global::max_lines = $opt::max_lines;
+ if(not $opt::pipe) {
+ # --pipe -L means length of record - not max_number_of_args
+ $Global::max_number_of_args ||= $Global::max_lines;
+ }
+ }
+
+ # Read more than one arg at a time (-L, -N)
+ if(defined $opt::L) {
+ $opt::L = multiply_binary_prefix($opt::L);
+ $Global::max_lines = $opt::L;
+ if(not $opt::pipe) {
+ # --pipe -L means length of record - not max_number_of_args
+ $Global::max_number_of_args ||= $Global::max_lines;
+ }
+ }
+ if(defined $opt::max_replace_args) {
+ $opt::max_replace_args =
+ multiply_binary_prefix($opt::max_replace_args);
+ $Global::max_number_of_args = $opt::max_replace_args;
+ $Global::ContextReplace = 1;
+ }
+ if((defined $opt::L or defined $opt::max_replace_args)
+ and
+ not ($opt::xargs or $opt::m)) {
+ $Global::ContextReplace = 1;
+ }
+ # Deal with ::: :::+ :::: ::::+ and -a +file
+ my @ARGV_with_argsep = @ARGV;
+ @ARGV = read_args_from_command_line();
+ if(defined $opt::combineexec) {
+ pack_combined_executable(\@argv_before,\@ARGV_with_argsep,\@ARGV);
+ exit(0);
+ }
+ parse_semaphore();
+
+ if(defined $opt::eta) { $opt::progress = $opt::eta; }
+ if(defined $opt::bar) { $opt::progress = $opt::bar; }
+ if(defined $opt::bar or defined $opt::latestline) {
+ my $fh = $Global::status_fd || *STDERR;
+ # Activate decode_utf8
+ eval q{
+ # Enable utf8 if possible
+ use utf8;
+ binmode $fh, "encoding(utf8)";
+ *decode_utf8 = \&Encode::decode_utf8;
+ };
+ if(eval { decode_utf8("x") }) {
+ # Great: decode works
+ } else {
+ # UTF8-decode not supported: Dummy decode
+ eval q{sub decode_utf8($;$) { $_[0]; }};
+ }
+        # Load encode_utf8, mbswidth and NFC/NFD (if the modules are available)
+ eval q{
+ # Enable utf8 if possible
+ use utf8;
+ use Encode qw( encode_utf8 );
+ use Text::CharWidth qw( mbswidth );
+ use Unicode::Normalize qw( NFC NFD );
+ };
+ if(eval { mbswidth("ヌー平行") }) {
+ # Great: mbswidth works
+ } else {
+ # mbswidth not supported: Dummy mbswidth
+            eval q{ sub mbswidth { return length $_[0]; } };
+ }
+ }
+
+ # If you want GNU Parallel to be maintained in the future you
+ # should keep this.
+ # *YOU* will be harming free software by removing the notice.
+ #
+ # Funding a free software project is hard. GNU Parallel is no
+ # exception. On top of that it seems the less visible a project
+ # is, the harder it is to get funding. And the nature of GNU
+ # Parallel is that it will never be seen by "the guy with the
+ # checkbook", but only by the people doing the actual work.
+ #
+ # This problem has been covered by others - though no solution has
+ # been found:
+ # https://www.slideshare.net/NadiaEghbal/consider-the-maintainer
+ # https://www.numfocus.org/blog/why-is-numpy-only-now-getting-funded/
+ #
+ # The FAQ tells you why the citation notice exists:
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ #
+ # If you want GNU Parallel to be maintained in the future, and not
+ # just wither away like so many other free software tools, you
+ # need to help finance the development.
+ #
+ # The citation notice is a simple way of doing so, as citations
+    # make it possible for me to get a job where I can maintain GNU
+ # Parallel as part of the job.
+ #
+    # This means you can help finance development
+ #
+ # WITHOUT PAYING A SINGLE CENT!
+ #
+ # Before implementing the citation notice it was discussed with
+ # the users:
+ # https://lists.gnu.org/archive/html/parallel/2013-11/msg00006.html
+ #
+ # Having to spend 10 seconds on running 'parallel --citation' once
+ # is no doubt not an ideal solution, but no one has so far come up
+ # with an ideal solution - neither for funding GNU Parallel nor
+ # other free software.
+ #
+ # If you believe you have the perfect solution, you should try it
+ # out, and if it works, you should post it on the email
+ # list. Ideas that will cost work and which have not been tested
+ # are, however, unlikely to be prioritized.
+ #
+ # Please note that GPL version 3 gives you the right to fork GNU
+ # Parallel under a new name, but it does not give you the right to
+ # distribute modified copies with the citation notice disabled in
+ # a way where the software can be confused with GNU Parallel. To
+ # do that you need to be the owner of the GNU Parallel
+ # trademark. The xt:Commerce case shows this.
+ #
+ # Description of the xt:Commerce case in OLG Duesseldorf
+ # https://web.archive.org/web/20180715073746/http://www.inta.org/INTABulletin/Pages/GERMANYGeneralPublicLicenseDoesNotPermitUseofThird-PartyTrademarksforAdvertisingModifiedVersionsofOpen-SourceSoftware.aspx
+ #
+ # The verdict in German
+ # https://www.admody.com/urteilsdatenbank/cafe6fdaeed3/OLG-Duesseldorf_Urteil_vom_28-September-2010_Az_I-20-U-41-09
+ # https://web.archive.org/web/20180715073717/https://www.admody.com/urteilsdatenbank/cafe6fdaeed3/OLG-Duesseldorf_Urteil_vom_28-September-2010_Az_I-20-U-41-09
+ #
+ # Other free software limiting derivates by the same name:
+ # https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux_derivatives#Legal_aspects
+ # https://tm.joomla.org/trademark-faq.html
+ # https://www.mozilla.org/en-US/foundation/trademarks/faq/
+ #
+ # Running 'parallel --citation' one single time takes less than 10
+ # seconds, and will silence the citation notice for future
+ # runs. If that is too much trouble for you, why not use one of
+ # the alternatives instead?
+ # See a list in: 'man parallel_alternatives'
+ #
+ # If you want GNU Parallel to be maintained in the future, you
+ # should keep this line:
+ citation_notice();
+ # This is because _YOU_ actively make it harder to justify
+ # spending time developing GNU Parallel by removing it.
+
+ # If you disagree, please read (especially 77-):
+ # https://www.fordfoundation.org/media/2976/roads-and-bridges-the-unseen-labor-behind-our-digital-infrastructure.pdf
+
+ # *YOU* will be harming free software by removing the notice. You
+ # accept to be added to a public hall of shame by removing the
+ # line. That includes you, George and Andreas.
+
+ parse_halt();
+
+ if($ENV{'PARALLEL_ENV'}) {
+ # Read environment and set $Global::parallel_env
+ # Must be done before is_acceptable_command_line_length()
+ my $penv = $ENV{'PARALLEL_ENV'};
+ # unset $PARALLEL_ENV: It should not be given to children
+ # because it takes up a lot of env space
+ delete $ENV{'PARALLEL_ENV'};
+ if(-e $penv) {
+ # This is a file/fifo: Replace envvar with content of file
+ $penv = slurp_or_exit($penv);
+ }
+        # Map \001 to \n to make it easier to quote \n in $PARALLEL_ENV
+ $penv =~ s/\001/\n/g;
+ if($penv =~ /\0/) {
+ ::warning('\0 (NUL) in environment is not supported');
+ }
+ $Global::parallel_env = $penv;
+ }
+
+ parse_sshlogin();
+ if(defined $opt::show_limits) { show_limits(); }
+
+ if(remote_hosts() and
+ (defined $opt::X or defined $opt::m or defined $opt::xargs)) {
+ # As we do not know the max line length on the remote machine
+ # long commands generated by xargs may fail
+ # If $opt::max_replace_args is set, it is probably safe
+ ::warning("Using -X or -m with --sshlogin may fail.");
+ }
+
+ if(not defined $opt::jobs) { $opt::jobs = "100%"; }
+ open_joblog();
+ open_json_csv();
+ if(defined $opt::sqlmaster or defined $opt::sqlworker) {
+ $Global::sql = SQL->new($opt::sqlmaster || $opt::sqlworker);
+ }
+ if(defined $opt::sqlworker) { $Global::membuffer ||= 1; }
+    # The sqlmaster groups the arguments, so the sqlworker should just read one
+ if(defined $opt::sqlworker and not defined $opt::sqlmaster) {
+ $Global::max_number_of_args = 1;
+ }
+ if(defined $Global::color or defined $opt::colorfailed) {
+ Job::init_color();
+ }
+}
+
+sub check_invalid_option_combinations() {
+ if(defined $opt::timeout and
+ $opt::timeout !~ /^\d+(\.\d+)?%?$|^(\d+(\.\d+)?[dhms])+$/i) {
+ ::error(255,"--timeout must be seconds or percentage.");
+ }
+ if(defined $opt::fifo and defined $opt::cat) {
+ ::error(255,"--fifo cannot be combined with --cat.");
+ }
+ if(defined $opt::retries and defined $opt::roundrobin) {
+ ::error(255,"--retries cannot be combined with --roundrobin.");
+ }
+ if(defined $opt::pipepart and
+ (defined $opt::L or defined $opt::max_lines
+ or defined $opt::max_replace_args)) {
+ ::error(255,"--pipepart is incompatible with --max-replace-args, ".
+ "--max-lines, and -L.");
+ }
+ if(defined $opt::group and defined $opt::ungroup) {
+ ::error(255,"--group cannot be combined with --ungroup.");
+ }
+ if(defined $opt::group and defined $opt::linebuffer) {
+ ::error(255,"--group cannot be combined with --line-buffer.");
+ }
+ if(defined $opt::ungroup and defined $opt::linebuffer) {
+ ::error(255,"--ungroup cannot be combined with --line-buffer.");
+ }
+ if(defined $opt::tollef and not defined $opt::gnu) {
+ ::error(255,"--tollef has been retired.",
+ "Remove --tollef or use --gnu to override --tollef.");
+ }
+ if(defined $opt::retired) {
+ ::error(255,
+ "-g has been retired. Use --group.",
+ "-B has been retired. Use --bf.",
+ "-T has been retired. Use --tty.",
+ "-U has been retired. Use --er.",
+ "-W has been retired. Use --wd.",
+ "-Y has been retired. Use --shebang.",
+ "-H has been retired. Use --halt.",
+ "--sql has been retired. Use --sqlmaster.",
+ "--ctrlc has been retired.",
+ "--noctrlc has been retired.");
+ }
+ if(defined $opt::groupby) {
+ if(not defined $opt::pipe and not defined $opt::pipepart) {
+ $opt::pipe = 1;
+ }
+ if(defined $opt::remove_rec_sep) {
+ ::error(255,"--remove-rec-sep is not compatible with --groupby");
+ }
+ if(defined $opt::recstart) {
+ ::error(255,"--recstart is not compatible with --groupby");
+ }
+ if($opt::recend ne "\n") {
+ ::error(255,"--recend is not compatible with --groupby");
+ }
+ }
+ sub unsafe_warn {
+ # use --_unsafe to only generate a warning
+ if($opt::_unsafe) { ::warning(@_); } else { ::error(255,@_); }
+ }
+ if(defined $opt::results) {
+ if($opt::nonall or $opt::onall) {
+ unsafe_warn("--(n)onall + --results not supported (yet).");
+ }
+ }
+ sub test_safe_chars {
+ my $var = shift;
+ if($ENV{$var} =~ m{^[-a-z0-9_+,.%:/= ]*$}i) {
+ # OK
+ } else {
+ unsafe_warn("\$$var can only contain [-a-z0-9_+,.%:/= ].");
+ }
+ }
+ if($ENV{'TMPDIR'} =~ /\n/) {
+ if(defined $opt::files) {
+ ::warning("Use --files0 when \$TMPDIR contains newline.");
+ } elsif($Global::cshell
+ and
+ (defined $opt::cat or defined $opt::fifo)) {
+ ::warning("--cat/--fifo fails under csh ".
+ "if \$TMPDIR contains newline.");
+ }
+ } elsif($ENV{'TMPDIR'} =~ /\257/) {
+ unsafe_warn("\$TMPDIR with \\257 (\257) is not supported.");
+ } else{
+ test_safe_chars('TMPDIR');
+ }
+ map { test_safe_chars($_); } qw(PARALLEL_HOME XDG_CONFIG_DIRS
+ PARALLEL_REMOTE_TMPDIR XDG_CACHE_HOME);
+}
+
+sub init_globals() {
+ # Defaults:
+ $Global::version = 20240222;
+ $Global::progname = 'parallel';
+ $::name = "GNU Parallel";
+ $Global::infinity = 2**31;
+ $Global::debug = 0;
+ $Global::verbose = 0;
+ # Don't quote every part of the command line
+ $Global::quoting = 0;
+ # Quote replacement strings
+ $Global::quote_replace = 1;
+ $Global::total_completed = 0;
+ $Global::cache_replacement_eval = 1;
+    # Read-only table with default --rpl values
+ %Global::replace =
+ (
+ '{}' => '',
+ '{#}' => '1 $_=$job->seq()',
+ '{%}' => '1 $_=$job->slot()',
+ '{/}' => 's:.*/::',
+ '{//}' =>
+ ('$Global::use{"File::Basename"} ||= eval "use File::Basename; 1;"; '.
+ '$_ = dirname($_);'),
+ '{/.}' => 's:.*/::; s:\.[^/.]*$::;',
+ '{.}' => 's:\.[^/.]*$::',
+ );
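+    # E.g. for the argument dir/f.txt (illustration):
+    #   {}  => dir/f.txt    {/} => f.txt    {//} => dir
+    #   {.} => dir/f        {/.} => f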
+ %Global::plus =
+ (
+ # {} = {+/}/{/}
+ # = {.}.{+.} = {+/}/{/.}.{+.}
+ # = {..}.{+..} = {+/}/{/..}.{+..}
+ # = {...}.{+...} = {+/}/{/...}.{+...}
+ '{+/}' => 's:/[^/]*$:: || s:.*$::',
+ # a.b => b; a => ''
+ '{+.}' => 's:.*\.:: || s:.*$::',
+ # a.b.c => b.c; a.b => ''; a => ''
+ '{+..}' => 's:.*\.([^/.]*\.[^/.]*)$:$1: || s:.*$::',
+ '{+...}' => 's:.*\.([^/.]*\.[^/.]*\.[^/.]*)$:$1: || s:.*$::',
+ '{..}' => 's:\.[^/.]*\.[^/.]*$::',
+ '{...}' => 's:\.[^/.]*\.[^/.]*\.[^/.]*$::',
+ '{/..}' => 's:.*/::; s:\.[^/.]*\.[^/.]*$::',
+ '{/...}' => 's:.*/::; s:\.[^/.]*\.[^/.]*\.[^/.]*$::',
+ # n choose k = Binomial coefficient
+ '{choose_k}' => ('for $t (2..$#arg)'.
+ '{ if($arg[$t-1] ge $arg[$t]) { skip() } }'),
+ # unique values: Skip job if any args are the same
+ '{uniq}' => 'if(::uniq(@arg) != @arg) { skip(); }',
+ # {##} = number of jobs
+ '{##}' => '1 $_=total_jobs()',
+ # {0#} = 0-padded seq
+ '{0#}' => ('1 $f=1+int((log(total_jobs())/log(10)));'.
+ '$_=sprintf("%0${f}d",seq())'),
+ # {0%} = 0-padded jobslot
+ '{0%}' => ('1 $f=1+int((log($Global::max_jobs_running||1)/log(10)));'.
+ '$_=sprintf("%0${f}d",slot())'),
+ # {seq-1} = seq-1 = counting from 0
+ '{seq(.*?)}' => '$_=eval q{$job->seq()}.qq{$$1}',
+ # {seq-1} = jobslot-1 = counting from 0
+     # {slot-1} = slot-1 = counting from 0
+
+ ## Bash inspired replacement strings
+ # Bash ${a:-myval}
+ '{:-([^}]+?)}' => '$_ ||= $$1',
+ # Bash ${a:2}
+ '{:(\d+?)}' => 'substr($_,0,$$1) = ""',
+ # Bash ${a:2:3}
+ '{:(\d+?):(\d+?)}' => '$_ = substr($_,$$1,$$2);',
+ # echo {#z.*z.} ::: z.z.z.foo => z.foo
+ # echo {##z.*z.} ::: z.z.z.foo => foo
+ # Bash ${a#bc}
+ '{#([^#}][^}]*?)}' =>
+ '$nongreedy=::make_regexp_ungreedy($$1);s/^$nongreedy(.*)/$1/;',
+ # Bash ${a##bc}
+ '{##([^#}][^}]*?)}' => 's/^$$1//;',
+ # echo {%.z.*z} ::: foo.z.z.z => foo.z
+ # echo {%%.z.*z} ::: foo.z.z.z => foo
+ # Bash ${a%def}
+ '{%([^}]+?)}' =>
+ '$nongreedy=::make_regexp_ungreedy($$1);s/(.*)$nongreedy$/$1/;',
+ # Bash ${a%%def}
+ '{%%([^}]+?)}' => 's/$$1$//;',
+ # Bash ${a/def/ghi} ${a/def/}
+ '{/([^#%}/]+?)/([^}]*?)}' => 's/$$1/$$2/;',
+ # Bash ${a/#def/ghi} ${a/#def/}
+ '{/#([^}]+?)/([^}]*?)}' => 's/^$$1/$$2/g;',
+ # Bash ${a/%def/ghi} ${a/%def/}
+ '{/%([^}]+?)/([^}]*?)}' => 's/$$1$/$$2/g;',
+ # Bash ${a//def/ghi} ${a//def/}
+ '{//([^}]+?)/([^}]*?)}' => 's/$$1/$$2/g;',
+ # Bash ${a^a}
+ '{^([^}]+?)}' => 's/^($$1)/uc($1)/e;',
+ # Bash ${a^^a}
+ '{^^([^}]+?)}' => 's/($$1)/uc($1)/eg;',
+ # Bash ${a,A}
+ '{,([^}]+?)}' => 's/^($$1)/lc($1)/e;',
+ # Bash ${a,,A}
+ '{,,([^}]+?)}' => 's/($$1)/lc($1)/eg;',
+
+ # {slot} = $PARALLEL_JOBSLOT
+ '{slot}' => '1 $_="\${PARALLEL_JOBSLOT}";uq()',
+ # {host} = ssh host
+ '{host}' => '1 $_="\${PARALLEL_SSHHOST}";uq()',
+ # {sshlogin} = sshlogin
+ '{sshlogin}' => '1 $_="\${PARALLEL_SSHLOGIN}";uq()',
+ # {hgrp} = hostgroups of the host
+ '{hgrp}' => '1 $_="\${PARALLEL_HOSTGROUPS}";uq()',
+ # {agrp} = hostgroups of the argument
+ '{agrp}' => '1 $_="\${PARALLEL_ARGHOSTGROUPS}";uq()',
+ );
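+    # --plus usage sketch (shell examples; illustration only):
+    #   parallel --plus echo {+/} ::: /a/b/c.txt   => /a/b
+    #   parallel --plus echo {..} ::: file.tar.gz  => file
+    #   parallel --plus echo '{:-default}' ::: ''  => default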
+ # Modifiable copy of %Global::replace
+ %Global::rpl = %Global::replace;
+ $/ = "\n";
+ $Global::ignore_empty = 0;
+ $Global::interactive = 0;
+ $Global::stderr_verbose = 0;
+ $Global::default_simultaneous_sshlogins = 9;
+ $Global::exitstatus = 0;
+ $Global::arg_sep = ":::";
+ $Global::arg_file_sep = "::::";
+ $Global::trim = 'n';
+ $Global::max_jobs_running = 0;
+ $Global::job_already_run = '';
+ $ENV{'TMPDIR'} ||= "/tmp";
+ $ENV{'PARALLEL_REMOTE_TMPDIR'} ||= "/tmp";
+ # bug #55398: set $OLDPWD when using --wd
+ $ENV{'OLDPWD'} = $ENV{'PWD'};
+ if(not $ENV{HOME}) {
+ # $ENV{HOME} is sometimes not set if called from PHP
+ ::warning("\$HOME not set. Using /tmp.");
+ $ENV{HOME} = "/tmp";
+ }
+ # no warnings to allow for undefined $XDG_*
+ no warnings 'uninitialized';
+ # If $PARALLEL_HOME is set, but does not exist, try making it.
+ if(defined $ENV{'PARALLEL_HOME'}) {
+ eval { File::Path::mkpath($ENV{'PARALLEL_HOME'}); };
+ }
+ # $xdg_config_home is needed to make env_parallel.fish stop complaining
+ my $xdg_config_home = $ENV{'XDG_CONFIG_HOME'};
+ # Use the first config dir that exists from:
+ # $PARALLEL_HOME
+ # $XDG_CONFIG_HOME/parallel
+ # $(each XDG_CONFIG_DIRS)/parallel
+ # $HOME/.parallel
+ #
+ # Keep only dirs that exist
+ @Global::config_dirs =
+ (grep { -d $_ }
+ $ENV{'PARALLEL_HOME'},
+ (map { "$_/parallel" }
+ $xdg_config_home,
+ split /:/, $ENV{'XDG_CONFIG_DIRS'}),
+ $ENV{'HOME'} . "/.parallel");
+ # Use first dir as config dir
+ $Global::config_dir = $Global::config_dirs[0] ||
+ $ENV{'HOME'} . "/.parallel";
+ if($ENV{'PARALLEL_HOME'} =~ /./ and not -d $ENV{'PARALLEL_HOME'}) {
+ ::warning("\$PARALLEL_HOME ($ENV{'PARALLEL_HOME'}) does not exist.");
+ ::warning("Using $Global::config_dir");
+ }
+ # Use the first cache dir that exists from:
+ # $PARALLEL_HOME
+ # $XDG_CACHE_HOME/parallel
+ # Keep only dirs that exist
+ @Global::cache_dirs = (grep { -d $_ }
+ $ENV{'PARALLEL_HOME'},
+ $ENV{'XDG_CACHE_HOME'}."/parallel");
+ $Global::cache_dir = $Global::cache_dirs[0] ||
+ $ENV{'HOME'} . "/.parallel";
+ Job::init_color();
+}
+
+sub parse_halt() {
+ # $opt::halt flavours
+ # Uses:
+ # $opt::halt
+ # $Global::halt_when
+ # $Global::halt_fail
+ # $Global::halt_success
+ # $Global::halt_pct
+ # $Global::halt_count
+ if(defined $opt::halt) {
+ my %halt_expansion = (
+ "0" => "never",
+ "1" => "soon,fail=1",
+ "2" => "now,fail=1",
+ "-1" => "soon,success=1",
+ "-2" => "now,success=1",
+ );
+ # Expand -2,-1,0,1,2 into long form
+ $opt::halt = $halt_expansion{$opt::halt} || $opt::halt;
+ # --halt 5% == --halt soon,fail=5%
+ $opt::halt =~ s/^(\d+)%$/soon,fail=$1%/;
+ # Split: soon,fail=5%
+ my ($when,$fail_success,$pct_count) = split /[,=]/, $opt::halt;
+ if(not grep { $when eq $_ } qw(never soon now)) {
+ ::error(255,"--halt must have 'never', 'soon', or 'now'.");
+ }
+ $Global::halt_when = $when;
+ if($when ne "never") {
+ if($fail_success eq "fail") {
+ $Global::halt_fail = 1;
+ } elsif($fail_success eq "success") {
+ $Global::halt_success = 1;
+ } elsif($fail_success eq "done") {
+ $Global::halt_done = 1;
+ } else {
+ ::error(255,"--halt $when must be followed by ".
+            ",fail, ,success, or ,done.");
+ }
+ if($pct_count =~ /^(\d+)%$/) {
+ $Global::halt_pct = $1/100;
+ } elsif($pct_count =~ /^(\d+)$/) {
+ $Global::halt_count = $1;
+ } else {
+ ::error(255,"--halt $when,$fail_success ".
+ "must be followed by ,number or ,percent%.");
+ }
+ }
+ }
+}
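+
+# Examples accepted by parse_halt() (matching the expansions above):
+#   --halt 2          == --halt now,fail=1
+#   --halt 10%        == --halt soon,fail=10%
+#   --halt now,done=3 == halt when 3 jobs have finished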
+
+sub parse_replacement_string_options() {
+ # Deal with --rpl
+ # Uses:
+ # %Global::rpl
+ # $Global::parensleft
+ # $Global::parensright
+ # $opt::parens
+ # $Global::parensleft
+ # $Global::parensright
+ # $opt::plus
+ # %Global::plus
+ # $opt::I
+ # $opt::U
+ # $opt::i
+ # $opt::basenamereplace
+ # $opt::dirnamereplace
+ # $opt::seqreplace
+ # $opt::slotreplace
+ # $opt::basenameextensionreplace
+
+ sub rpl($$) {
+ # Modify %Global::rpl
+ # Replace $old with $new
+ my ($old,$new) = @_;
+ if($old ne $new) {
+ $Global::rpl{$new} = $Global::rpl{$old};
+ delete $Global::rpl{$old};
+ }
+ }
+ my $parens = "{==}";
+ if(defined $opt::parens) { $parens = $opt::parens; }
+ my $parenslen = 0.5*length $parens;
+ $Global::parensleft = substr($parens,0,$parenslen);
+ $Global::parensright = substr($parens,$parenslen);
+ if(defined $opt::plus) { %Global::rpl = (%Global::plus,%Global::rpl); }
+ if(defined $opt::I) { rpl('{}',$opt::I); }
+ if(defined $opt::i and $opt::i) { rpl('{}',$opt::i); }
+ if(defined $opt::U) { rpl('{.}',$opt::U); }
+ if(defined $opt::basenamereplace) { rpl('{/}',$opt::basenamereplace); }
+ if(defined $opt::dirnamereplace) { rpl('{//}',$opt::dirnamereplace); }
+ if(defined $opt::seqreplace) { rpl('{#}',$opt::seqreplace); }
+ if(defined $opt::slotreplace) { rpl('{%}',$opt::slotreplace); }
+ if(defined $opt::basenameextensionreplace) {
+ rpl('{/.}',$opt::basenameextensionreplace);
+ }
+ for(@opt::rpl) {
+ # Create $Global::rpl entries for --rpl options
+        # E.g: "{..} s:\.[^.]+$::;s:\.[^.]+$::;"
+ my ($shorthand,$long) = split/\s/,$_,2;
+ $Global::rpl{$shorthand} = $long;
+ }
+}
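+
+# --rpl example matching the parsing above:
+#   --rpl '{..} s:\.[^.]+$::;s:\.[^.]+$::;'
+# splits on the first whitespace into the shorthand '{..}' and a perl
+# expression stripping two extensions: {..} of f.tar.gz => f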
+
+sub parse_semaphore() {
+ # Semaphore defaults
+ # Must be done before computing number of processes and max_line_length
+ # because when running as a semaphore GNU Parallel does not read args
+ # Uses:
+ # $opt::semaphore
+ # $Global::semaphore
+ # $opt::semaphoretimeout
+ # $Semaphore::timeout
+ # $opt::semaphorename
+ # $Semaphore::name
+ # $opt::fg
+ # $Semaphore::fg
+ # $opt::wait
+ # $Semaphore::wait
+ # $opt::bg
+ # @opt::a
+ # @Global::unget_argv
+ # $Global::default_simultaneous_sshlogins
+ # $opt::jobs
+ # $Global::interactive
+ $Global::semaphore ||= ($0 =~ m:(^|/)sem$:); # called as 'sem'
+ if(defined $opt::semaphore) { $Global::semaphore = 1; }
+ if(defined $opt::semaphoretimeout) { $Global::semaphore = 1; }
+ if(defined $opt::semaphorename) { $Global::semaphore = 1; }
+ if(defined $opt::fg and not $opt::tmux and not $opt::tmuxpane) {
+ $Global::semaphore = 1;
+ }
+ if(defined $opt::bg) { $Global::semaphore = 1; }
+ if(defined $opt::wait and not $opt::sqlmaster) {
+ $Global::semaphore = 1; @ARGV = "true";
+ }
+ if($Global::semaphore) {
+ if(@opt::a) {
+ # Assign the first -a to STDIN
+ open(STDIN,"<",shift @opt::a);
+ if(@opt::a) {
+ # We currently have no way of dealing with more -a
+ ::error(255,"A semaphore cannot take input from ".
+            "more than one file.\n");
+ }
+ }
+ @opt::a = ("/dev/null");
+ # Append a dummy empty argument
+ # \0 => nothing (not the empty string)
+ push(@Global::unget_argv, [Arg->new("\0noarg")]);
+ $Semaphore::timeout = int(multiply_time_units($opt::semaphoretimeout))
+ || 0;
+ if(defined $opt::semaphorename) {
+ $Semaphore::name = $opt::semaphorename;
+ } else {
+ local $/ = "\n";
+ $Semaphore::name = `tty`;
+ chomp $Semaphore::name;
+ }
+ $Semaphore::fg = $opt::fg;
+ $Semaphore::wait = $opt::wait;
+ $Global::default_simultaneous_sshlogins = 1;
+ if(not defined $opt::jobs) {
+ $opt::jobs = 1;
+ }
+ if($Global::interactive and $opt::bg) {
+ ::error(255,"Jobs running in the ".
+ "background cannot be interactive.");
+ }
+ }
+}
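+
+# Semaphore sketch: 'sem --id mylock cmd' behaves like
+# 'parallel --semaphore --id mylock cmd'; without --id the semaphore
+# name defaults to the output of 'tty' (see above).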
+
+sub record_env() {
+ # Record current %ENV-keys in $PARALLEL_HOME/ignored_vars
+ # Returns: N/A
+ my $ignore_filename = $Global::config_dir . "/ignored_vars";
+ write_or_exit($ignore_filename,map { $_,"\n" } keys %ENV);
+}
+
+sub open_joblog() {
+ # Open joblog as specified by --joblog
+ # Uses:
+ # $opt::resume
+ # $opt::resume_failed
+ # $opt::joblog
+ # $opt::results
+ # $Global::job_already_run
+ # %Global::fh
+ my $append = 0;
+ if(($opt::resume or $opt::resume_failed)
+ and
+ not ($opt::joblog or $opt::results)) {
+ ::error(255,"--resume and --resume-failed require --joblog ".
+ "or --results.");
+ }
+ if(defined $opt::joblog and $opt::joblog =~ s/^\+//) {
+ # --joblog +filename = append to filename
+ $append = 1;
+ }
+ if($opt::joblog
+ and
+ ($opt::sqlmaster
+ or
+ not $opt::sqlworker)) {
+ # Do not log if --sqlworker
+ if($opt::resume || $opt::resume_failed || $opt::retry_failed) {
+ if(open(my $joblog_fh, "<", $opt::joblog)) {
+ # Enable utf8 if possible
+ eval q{ binmode $joblog_fh, "encoding(utf8)"; };
+ # Read the joblog
+ # Override $/ with \n because -d might be set
+ local $/ = "\n";
+ # If there is a header: Open as append later
+ $append = <$joblog_fh>;
+ my $joblog_regexp;
+ if($opt::retry_failed) {
+ # Make a regexp that matches commands with exit+signal=0
+ # 4 host 1360490623.067 3.445 1023 1222 0 0 command
+ $joblog_regexp='^(\d+)(?:\t[^\t]+){5}\t0\t0\t';
+ my @group;
+ while(<$joblog_fh>) {
+ if(/$joblog_regexp/o) {
+ # This is 30% faster than set_job_already_run($1);
+ vec($Global::job_already_run,($1||0),1) = 1;
+ $Global::total_completed++;
+ $group[$1-1] = "true";
+ } elsif(/(\d+)\s+\S+(\s+[-0-9.]+){6}\s+(.*)$/) {
+ # Grab out the command
+ $group[$1-1] = $3;
+ } else {
+ chomp;
+ ::error(255,"Format of '$opt::joblog' is ".
+ "wrong: $_");
+ }
+ }
+ if(@group) {
+ my ($outfh,$name) = ::tmpfile(SUFFIX => ".arg");
+ unlink($name);
+ # Put args into argfile
+ if(grep /\0/, @group) {
+ # force --null to deal with \n in commandlines
+ ::warning("Command lines contain newline. ".
+ "Forcing --null.");
+ $opt::null = 1;
+ $/ = "\0";
+ }
+ # Replace \0 with '\n' as used in print_joblog()
+ print $outfh (map { s/\0/\n/g; $_,$/ }
+ map { $_ } @group);
+ seek $outfh, 0, 0;
+ exit_if_disk_full();
+ # Set filehandle to -a
+ @opt::a = ($outfh);
+ }
+ # Remove $command (so -a is run)
+ @ARGV = ();
+ }
+ if($opt::resume || $opt::resume_failed) {
+ if($opt::resume_failed) {
+ # Make a regexp that matches commands with exit+signal=0
+ # 4 host 1360490623.067 3.445 1023 1222 0 0 command
+ $joblog_regexp='^(\d+)(?:\t[^\t]+){5}\t0\t0\t';
+ } else {
+ # Just match the job number
+ $joblog_regexp='^(\d+)';
+ }
+ while(<$joblog_fh>) {
+ if(/$joblog_regexp/o) {
+ # This is 30% faster than set_job_already_run($1);
+ vec($Global::job_already_run,($1||0),1) = 1;
+ $Global::total_completed++;
+ } elsif(not /\d+\s+[^\s]+\s+([-0-9.]+\s+){6}/) {
+ ::error(255,"Format of '$opt::joblog' ".
+ "is wrong: $_");
+ }
+ }
+ }
+ close $joblog_fh;
+ }
+ # $opt::null may be set if the commands contain \n
+ if($opt::null) { $/ = "\0"; }
+ }
+ if($opt::dryrun) {
+ # Do not write to joblog in a dry-run
+
+ } elsif($append) {
+ # Append to joblog
+ $Global::joblog = open_or_exit(">>", $opt::joblog);
+ } else {
+ if($opt::joblog eq "-") {
+ # Use STDOUT as joblog
+ $Global::joblog = $Global::fh{1};
+ } else {
+ # Overwrite the joblog
+ $Global::joblog = open_or_exit(">", $opt::joblog);
+ }
+ print $Global::joblog
+ join("\t", "Seq", "Host", "Starttime", "JobRuntime",
+ "Send", "Receive", "Exitval", "Signal", "Command"
+ ). "\n";
+ }
+ }
+}
+
+sub open_json_csv() {
+ if($opt::results) {
+ # Output as JSON/CSV/TSV
+ if($opt::results eq "-.csv"
+ or
+ $opt::results eq "-.tsv"
+ or
+ $opt::results eq "-.json") {
+ # Output as JSON/CSV/TSV on stdout
+ open $Global::csv_fh, ">&", "STDOUT" or
+ ::die_bug("Can't dup STDOUT in csv: $!");
+ # Do not print any other output to STDOUT
+ # by forcing all other output to /dev/null
+ open my $fd, ">", "/dev/null" or
+ ::die_bug("Can't >/dev/null in csv: $!");
+ $Global::fh{1} = $fd;
+ $Global::fh{2} = $fd;
+ } elsif($Global::csvsep or $Global::jsonout) {
+ $Global::csv_fh = open_or_exit(">",$opt::results);
+ }
+ }
+}
+
+sub find_compression_program() {
+ # Find a fast compression program
+ # Returns:
+ # $compress_program = compress program with options
+ # $decompress_program = decompress program with options
+
+    # Search for these. Sorted by speed on a 128-core machine
+
+ # seq 120000000|shuf > 1gb &
+ # apt-get update
+ # apt install make g++ htop
+ # wget -O - pi.dk/3 | bash
+ # apt install zstd clzip liblz4-tool lzop pigz pxz gzip plzip pbzip2 lzma xz-utils lzip bzip2 lbzip2 lrzip pixz
+ # git clone https://github.com/facebook/zstd.git
+ # (cd zstd/contrib/pzstd; make -j; cp pzstd /usr/local/bin)
+ # echo 'lrzip -L $((-$1))' >/usr/local/bin/lrz
+ # chmod +x /usr/local/bin/lrz
+ # wait
+ # onethread="zstd clzip lz4 lzop gzip lzma xz bzip2"
+ # multithread="pzstd pigz pxz plzip pbzip2 lzip lbzip2 lrz pixz"
+ # parallel --shuf -j1 --joblog jl-m --arg-sep , parallel --compress-program \'{3}" "-{2}\' cat ::: 1gb '>'/dev/null , 1 2 3 , {1..3} , $multithread
+ # parallel --shuf -j50% --delay 1 --joblog jl-s --arg-sep , parallel --compress-program \'{3}" "-{2}\' cat ::: 1gb '>'/dev/null , 1 2 3 , {1..3} , $onethread
+ # sort -nk4 jl-?
+
+ # 1-core:
+ # 2-cores: pzstd zstd lz4 lzop pigz gzip lbzip2 pbzip2 lrz bzip2 lzma pxz plzip xz lzip clzip
+ # 4-cores:
+ # 8-cores: pzstd lz4 zstd pigz lzop lbzip2 pbzip2 gzip lzip lrz plzip pxz bzip2 lzma xz clzip
+ # 16-cores: pzstd lz4 pigz lzop lbzip2 pbzip2 plzip lzip lrz pxz gzip lzma xz bzip2
+ # 32-cores: pzstd lbzip2 pbzip2 zstd pigz lz4 lzop plzip lzip lrz gzip pxz lzma bzip2 xz clzip
+ # 64-cores: pzstd lbzip2 pbzip2 pigz zstd pixz lz4 plzip lzop lzip lrz gzip pxz lzma bzip2 xz clzip
+ # 128-core: pzstd lbzip2 pbzip2 zstd pixz lz4 pigz lzop plzip lzip gzip lrz pxz bzip2 lzma xz clzip
+
+ my @prg = qw(pzstd lbzip2 pbzip2 zstd pixz lz4 pigz lzop plzip lzip gzip
+ lrz pxz bzip2 lzma xz clzip);
+ for my $p (@prg) {
+ if(which($p)) {
+ return ("$p -c -1","$p -dc");
+ }
+ }
+ # Fall back to cat
+ return ("cat","cat");
+}
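+
+# E.g. if pzstd is in $PATH this returns ("pzstd -c -1","pzstd -dc");
+# if none of the programs are found it falls back to ("cat","cat").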
+
+sub read_options() {
+ # Read options from command line, profile and $PARALLEL
+ # Uses:
+ # $opt::shebang_wrap
+ # $opt::shebang
+ # @ARGV
+ # $opt::plain
+ # @opt::profile
+ # $ENV{'HOME'}
+ # $ENV{'PARALLEL'}
+ # Returns:
+ # @ARGV_no_opt = @ARGV without --options
+
+ # This must be done first as this may exec myself
+ if(defined $ARGV[0] and ($ARGV[0] =~ /^--shebang/ or
+ $ARGV[0] =~ /^--shebang-?wrap/ or
+ $ARGV[0] =~ /^--hashbang/)) {
+ # Program is called from #! line in script
+ # remove --shebang-wrap if it is set
+ $opt::shebang_wrap = ($ARGV[0] =~ s/^--shebang-?wrap *//);
+ # remove --shebang if it is set
+ $opt::shebang = ($ARGV[0] =~ s/^--shebang *//);
+ # remove --hashbang if it is set
+ $opt::shebang .= ($ARGV[0] =~ s/^--hashbang *//);
+ if($opt::shebang) {
+ my $argfile = Q(pop @ARGV);
+ # exec myself to split $ARGV[0] into separate fields
+ exec "$0 --skip-first-line -a $argfile @ARGV";
+ }
+ if($opt::shebang_wrap) {
+ my @options;
+ my @parser;
+ if ($^O eq 'freebsd') {
+            # FreeBSD's #! puts different values in @ARGV than Linux's does
+ my @nooptions = @ARGV;
+ get_options_from_array(\@nooptions);
+ while($#ARGV > $#nooptions) {
+ push @options, shift @ARGV;
+ }
+ while(@ARGV and $ARGV[0] ne ":::") {
+ push @parser, shift @ARGV;
+ }
+ if(@ARGV and $ARGV[0] eq ":::") {
+ shift @ARGV;
+ }
+ } else {
+ @options = shift @ARGV;
+ }
+ my $script = Q(Q(shift @ARGV)); # TODO - test if script = " "
+ my @args = map{ Q($_) } @ARGV;
+ # exec myself to split $ARGV[0] into separate fields
+ exec "$0 --_pipe-means-argfiles @options @parser $script ".
+ "::: @args";
+ }
+ }
+ if($ARGV[0] =~ / --shebang(-?wrap)? /) {
+ ::warning("--shebang and --shebang-wrap must be the first ".
+ "argument.\n");
+ }
+
+ Getopt::Long::Configure("bundling","require_order");
+ my @ARGV_copy = @ARGV;
+ my @ARGV_orig = @ARGV;
+ # Check if there is a --profile to set @opt::profile
+ get_options_from_array(\@ARGV_copy,"profile|J=s","plain") || die_usage();
+ my @ARGV_profile = ();
+ my @ARGV_env = ();
+ if(not $opt::plain) {
+ # Add options from $PARALLEL_HOME/config and other profiles
+ my @config_profiles = (
+ "/etc/parallel/config",
+ (map { "$_/config" } @Global::config_dirs),
+ $ENV{'HOME'}."/.parallelrc");
+ my @profiles = @config_profiles;
+ if(@opt::profile) {
+ # --profile overrides default profiles
+ @profiles = ();
+ for my $profile (@opt::profile) {
+ if($profile =~ m:^\./|^/:) {
+ # Look for ./profile in .
+ # Look for /profile in /
+ push @profiles, grep { -r $_ } $profile;
+ } else {
+ # Look for the $profile in @Global::config_dirs
+ push @profiles, grep { -r $_ }
+ map { "$_/$profile" } @Global::config_dirs;
+ }
+ }
+ }
+ for my $profile (@profiles) {
+ if(-r $profile) {
+ ::debug("init","Read $profile\n");
+ local $/ = "\n";
+ open (my $in_fh, "<", $profile) ||
+ ::die_bug("read-profile: $profile");
+ while(<$in_fh>) {
+ /^\s*\#/ and next;
+ chomp;
+ push @ARGV_profile, shell_words($_);
+ }
+ close $in_fh;
+ } else {
+ if(grep /^\Q$profile\E$/, @config_profiles) {
+ # config file is not required to exist
+ } else {
+ ::error(255,"$profile not readable.");
+ }
+ }
+ }
+ # Add options from shell variable $PARALLEL
+ if($ENV{'PARALLEL'}) {
+ push @ARGV_env, shell_words($ENV{'PARALLEL'});
+ }
+ # Add options from env_parallel.csh via $PARALLEL_CSH
+ if($ENV{'PARALLEL_CSH'}) {
+ push @ARGV_env, shell_words($ENV{'PARALLEL_CSH'});
+ }
+ }
+ Getopt::Long::Configure("bundling","require_order");
+ get_options_from_array(\@ARGV_profile) || die_usage();
+ get_options_from_array(\@ARGV_env) || die_usage();
+ get_options_from_array(\@ARGV) || die_usage();
+ # What were the options given on the command line?
+ # Used to start --sqlworker
+ my $ai = arrayindex(\@ARGV_orig, \@ARGV);
+ @Global::options_in_argv = @ARGV_orig[0..$ai-1];
+ # Prepend non-options to @ARGV (such as commands like 'nice')
+ unshift @ARGV, @ARGV_profile, @ARGV_env;
+ return @ARGV;
+}
+
+sub arrayindex($$) {
+ # Similar to Perl's index function, but for arrays
+ # Input:
+ # $arr_ref1 = ref to @array1 to search in
+ # $arr_ref2 = ref to @array2 to search for
+ # Returns:
+    #   $pos = position of @array2 in @array1, -1 if not found
+ my ($arr_ref1,$arr_ref2) = @_;
+ my $array1_as_string = join "", map { "\0".$_ } @$arr_ref1;
+ my $array2_as_string = join "", map { "\0".$_ } @$arr_ref2;
+ my $i = index($array1_as_string,$array2_as_string,0);
+ if($i == -1) { return -1 }
+ my @before = split /\0/, substr($array1_as_string,0,$i);
+ return $#before;
+}
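+# Illustration (not called code): with
+#   @a = qw(nice -n19 echo); @b = qw(-n19 echo);
+# arrayindex(\@a,\@b) returns 1, because @b starts at index 1 of @a.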
+
+sub read_args_from_command_line() {
+ # Arguments given on the command line after:
+ # ::: ($Global::arg_sep)
+ # :::: ($Global::arg_file_sep)
+ # :::+ ($Global::arg_sep with --link)
+ # ::::+ ($Global::arg_file_sep with --link)
+ # Removes the arguments from @ARGV and:
+ # - puts filenames into -a
+ # - puts arguments into files and add the files to -a
+ # - adds --linkinputsource with 0/1 for each -a depending on :::+/::::+
+ # Input:
+ # @::ARGV = command option ::: arg arg arg :::: argfiles
+ # Uses:
+ # $Global::arg_sep
+ # $Global::arg_file_sep
+ # $opt::_pipe_means_argfiles
+ # $opt::pipe
+ # @opt::a
+ # Returns:
+ # @argv_no_argsep = @::ARGV without ::: and :::: and following args
+ my %group_sep = ($Global::arg_sep => ":::",
+ $Global::arg_sep."+" => ":::+",
+ $Global::arg_file_sep => "::::",
+ $Global::arg_file_sep."+" => "::::+");
+ sub is_linked($) {
+ # file is linked if file starts with +
+ local $_ = shift;
+ if(/^\+(.*)/) {
+ my $noplus = $1;
+ if(-e $_ and -e $noplus) {
+ ::error(255,"It is unclear whether you mean ".
+ "+./$noplus or ./+$noplus");
+ } elsif(-e $_ and not -e $noplus) {
+ # This is ./+file = this is not linked
+ return 0;
+ } elsif(not -e $_ and -e $noplus) {
+ # This is +./file = this is linked
+ return 1;
+ } elsif(not -e $_ and not -e $noplus) {
+ # File does not exist, maybe it is stdin?
+ if($_ eq "-") {
+ # This is - = this is not linked
+ return 0;
+ } elsif($_ eq "+-") {
+ # This is +- = this is linked
+ return 1;
+ } else {
+ ::error(255,"File not found: $_");
+ }
+ } else {
+ ::die_bug("noplus: $noplus $_");
+ }
+ }
+ # not linked
+ return 0;
+ }
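+    # Illustration (assumed filenames): given ':::: +args.txt' where
+    # './args.txt' exists but './+args.txt' does not, the input source
+    # is treated as linked (as if ::::+ was used); '+-' marks stdin
+    # as linked.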
+ sub cmd_template() {
+ # remove command template from @ARGV
+ # keep ::: / :::: in @ARGV if any
+ my @cmd_template;
+ while(@ARGV) {
+ my $arg = shift @ARGV;
+ if($group_sep{$arg}) {
+ # Found separator: push it back and exit loop
+ unshift @ARGV, $arg;
+ last;
+ }
+ push @cmd_template, $arg;
+ }
+ return @cmd_template;
+ }
+ sub divide_into_groups() {
+ # Split arguments from @ARGV into groups:
+ # ::: 1 2 3 :::: a b c ::::+ d e f
+ # =>
+ # [ ::: 1 2 3 ], [ :::: a b c ], [ ::::+ d e f ]
+ my @g;
+ my @grp;
+ while(@ARGV) {
+ my $arg = shift @ARGV;
+ if($group_sep{$arg}) {
+ # start a new group
+ push @grp, [@g];
+ @g = ($group_sep{$arg});
+ } else {
+ push @g, $arg;
+ }
+ }
+ push @grp, [@g];
+ shift @grp; # The first will always be empty
+ return @grp;
+ }
+ sub save_to_file(@) {
+ # Put args into a file, return open file handle of file
+ # Create argfile
+ my ($fh,$name) = ::tmpfile(SUFFIX => ".arg");
+ unlink($name);
+ # Put args into argfile
+ print $fh map { $_,$/ } @_;
+ seek $fh, 0, 0;
+ exit_if_disk_full();
+ return $fh;
+ }
+ my @cmd = cmd_template();
+ # The rest of @ARGV is ::: / :::: args
+ # If there are any -a: Rewrite them to use ::::
+ if(@opt::a) { unshift @ARGV, $Global::arg_file_sep, @opt::a; }
+ @opt::a = ();
+ # Convert ::: and :::: into (linked) files and put those into @opt::a
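+    # Illustration: 'echo ::: A B :::+ C D' roughly becomes
+    #   @opt::a = (tmpfile containing "A\nB", tmpfile containing "C\nD")
+    #   @opt::linkinputsource = (0, 1)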
+ for my $g_ref (divide_into_groups()) {
+ my $group_sep = shift @$g_ref;
+ if($group_sep eq ":::" or $group_sep eq ":::+") {
+ # Group starts with ::: / :::+
+ if($opt::_pipe_means_argfiles and $#$g_ref < 0) {
+ # TODO
+ # Deal with --shebang-wrap and ::: on the shebang line
+ } else {
+ push @opt::a, save_to_file(@$g_ref);
+ # if $group_sep == ":::+": it is linked
+ push @opt::linkinputsource, ($group_sep eq ":::+");
+ }
+ } elsif($group_sep eq "::::" or $group_sep eq "::::+") {
+ # Group starts with :::: / ::::+
+ for my $f (@$g_ref) {
+ if($group_sep eq "::::+") {
+ # Linking forced
+ push @opt::a, $f;
+ push @opt::linkinputsource, 1;
+ } elsif($group_sep eq "::::") {
+ # Auto detect linking
+ if(is_linked($f)) {
+ # +file
+ push @opt::linkinputsource, 1;
+ $f =~ s/^\+//;
+ } else {
+ # file (no plus)
+ push @opt::linkinputsource, 0;
+ }
+ push @opt::a, $f;
+ } else {
+ ::die_bug("arg link error");
+ }
+ }
+ } else {
+ ::die_bug("arg link error");
+ }
+ }
+ # Output: command to run with options
+ return @cmd;
+}
+
+sub cleanup() {
+ # Returns: N/A
+ unlink keys %Global::unlink;
+ map { rmdir $_ } keys %Global::unlink;
+ if(@opt::basefile and $opt::cleanup) { cleanup_basefile(); }
+ for(keys %Global::sshmaster) {
+ # If 'ssh -M's are running: kill them
+ kill "TERM", $_;
+ }
+}
+
+
+sub __QUOTING_ARGUMENTS_FOR_SHELL__() {}
+
+sub shell_quote(@) {
+ # Input:
+ # @strings = strings to be quoted
+ # Returns:
+ # @shell_quoted_strings = string quoted as needed by the shell
+ return wantarray ? (map { Q($_) } @_) : (join" ",map { Q($_) } @_);
+}
+
+sub shell_quote_scalar_rc($) {
+ # Quote for the rc-shell
+ my $a = $_[0];
+ if(defined $a) {
+ if(($a =~ s/'/''/g)
+ +
+ ($a =~ s/[\n\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\'\202-\377]+/'$&'/go)) {
+ # A string was replaced
+ # No need to test for "" or \0
+ } elsif($a eq "") {
+ $a = "''";
+ } elsif($a eq "\0") {
+ $a = "";
+ }
+ }
+ return $a;
+}
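+# Illustration: shell_quote_scalar_rc("a b") returns a' 'b -
+# only the runs of special characters are wrapped in ''-quotes.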
+
+sub shell_quote_scalar_csh($) {
+ # Quote for (t)csh
+ my $a = $_[0];
+ if(defined $a) {
+ # $a =~ s/([\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\>\<\~\|\; \"\!\$\&\'\202-\377])/\\$1/g;
+ # This is 1% faster than the above
+ if(($a =~ s/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\^\*\<\=\>\~\|\; \"\!\$\&\'\202-\377]/\\$&/go)
+ +
+ # quote newline in csh as \\\n
+ ($a =~ s/[\n]/"\\\n"/go)) {
+ # A string was replaced
+ # No need to test for "" or \0
+ } elsif($a eq "") {
+ $a = "''";
+ } elsif($a eq "\0") {
+ $a = "";
+ }
+ }
+ return $a;
+}
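+# Illustration: shell_quote_scalar_csh("a b!") returns a\ b\! -
+# each special character is \-escaped; newlines become \<newline>.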
+
+sub shell_quote_scalar_default($) {
+ # Quote for other shells (Bourne compatibles)
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $shell_quoted = string quoted as needed by the shell
+ local $_ = $_[0];
+ if(/[^-_.+a-z0-9\/]/i) {
+ s/'+/'"$&"'/g; # "-quote '-quotes: ''' => "'''"
+ $_ = "'$_'"; # '-quote entire string
+ s/^''//; # Remove unneeded '' at ends
+ s/''$//; # (faster than s/^''|''$//g)
+ return $_;
+ } elsif ($_ eq "") {
+ return "''";
+ } else {
+ # No quoting needed
+ return $_;
+ }
+}
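+# Illustration: shell_quote_scalar_default("don't") returns 'don'"'"'t'
+# while shell_quote_scalar_default("abc") is returned unchanged.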
+
+sub shell_quote_scalar($) {
+ # Quote the string so the shell will not expand any special chars
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $shell_quoted = string quoted as needed by the shell
+
+ # Speed optimization: Choose the correct shell_quote_scalar_*
+ # and call that directly from now on
+ no warnings 'redefine';
+ if($Global::cshell) {
+ # (t)csh
+ *shell_quote_scalar = \&shell_quote_scalar_csh;
+ } elsif($Global::shell =~ m:(^|/)rc$:) {
+ # rc-shell
+ *shell_quote_scalar = \&shell_quote_scalar_rc;
+ } else {
+ # other shells
+ *shell_quote_scalar = \&shell_quote_scalar_default;
+ }
+ # The sub is now redefined. Call it
+ return shell_quote_scalar($_[0]);
+}
+
+sub Q($) {
+ # Q alias for ::shell_quote_scalar
+ my $ret = shell_quote_scalar($_[0]);
+ no warnings 'redefine';
+ *Q = \&::shell_quote_scalar;
+ return $ret;
+}
+
+sub shell_quote_file($) {
+ # Quote the string so shell will not expand any special chars
+ # and prepend ./ if needed
+ # Input:
+ # $filename = filename to be shell quoted
+ # Returns:
+ # $quoted_filename = filename quoted with \ and ./ if needed
+ my $a = shift;
+ if(defined $a) {
+ if($a =~ m:^/: or $a =~ m:^\./:) {
+ # /abs/path or ./rel/path => skip
+ } else {
+ # rel/path => ./rel/path
+ $a = "./".$a;
+ }
+ }
+ return Q($a);
+}
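+# Illustration: shell_quote_file("my file") returns "'./my file'";
+# the ./ prefix keeps a leading '-' from being taken as an option.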
+
+sub shell_words(@) {
+ # Input:
+ # $string = shell line
+ # Returns:
+ # @shell_words = $string split into words as shell would do
+ $Global::use{"Text::ParseWords"} ||= eval "use Text::ParseWords; 1;";
+ return Text::ParseWords::shellwords(@_);
+}
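+# Illustration: shell_words(q{--tag 'echo {}'}) returns
+# ('--tag', 'echo {}') - quotes group words as a shell would.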
+
+sub perl_quote_scalar($) {
+ # Quote the string so perl's eval will not expand any special chars
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $perl_quoted = string quoted with \ as needed by perl's eval
+ my $a = $_[0];
+ if(defined $a) {
+ $a =~ s/[\\\"\$\@]/\\$&/go;
+ }
+ return $a;
+}
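+# Illustration: perl_quote_scalar('say "$x"') returns 'say \"\$x\"',
+# safe to interpolate inside a double-quoted string given to eval.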
+
+# -w complains about prototype
+sub pQ($) {
+ # pQ alias for ::perl_quote_scalar
+ my $ret = perl_quote_scalar($_[0]);
+ *pQ = \&::perl_quote_scalar;
+ return $ret;
+}
+
+sub unquote_printf() {
+ # Convert \t \n \r \000 \0
+ # Inputs:
+ # $string = string with \t \n \r \num \0
+ # Returns:
+ # $replaced = string with TAB NEWLINE CR <ascii-num> NUL
+ $_ = shift;
+ s/\\t/\t/g;
+ s/\\n/\n/g;
+ s/\\r/\r/g;
+ s/\\(\d\d\d)/eval 'sprintf "\\'.$1.'"'/ge;
+ s/\\(\d)/eval 'sprintf "\\'.$1.'"'/ge;
+ return $_;
+}
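+# Illustration: unquote_printf('a\tb\075') returns "a<TAB>b="
+# (octal \075 = '='), mirroring the escape handling of printf(1).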
+
+
+sub __FILEHANDLES__() {}
+
+
+sub save_stdin_stdout_stderr() {
+ # Remember the original STDIN, STDOUT and STDERR
+ # and file descriptors opened by the shell (e.g. 3>/tmp/foo)
+ # Uses:
+ # %Global::fh
+ # $Global::original_stderr
+ # $Global::original_stdin
+ # Returns: N/A
+
+ # TODO Disabled until we have an open3 that will take n filehandles
+ # for my $fdno (1..61) {
+ # # /dev/fd/62 and above are used by bash for <(cmd)
+ # # Find file descriptors that are already opened (by the shell)
+ # Only focus on stdout+stderr for now
+ for my $fdno (1..2) {
+ my $fh;
+ # 2-argument-open is used to be compatible with old perl 5.8.0
+ # bug #43570: Perl 5.8.0 creates 61 files
+ if(open($fh,">&=$fdno")) {
+ $Global::fh{$fdno}=$fh;
+ }
+ }
+ open $Global::original_stderr, ">&", "STDERR" or
+ ::die_bug("Can't dup STDERR: $!");
+ open $Global::status_fd, ">&", "STDERR" or
+ ::die_bug("Can't dup STDERR: $!");
+ open $Global::original_stdin, "<&", "STDIN" or
+ ::die_bug("Can't dup STDIN: $!");
+}
+
+sub enough_file_handles() {
+ # Check that we have enough filehandles available for starting
+ # another job
+ # Uses:
+ # $opt::ungroup
+ # %Global::fh
+ # Returns:
+ # 1 if ungrouped (thus not needing extra filehandles)
+ # 0 if too few filehandles
+ # 1 if enough filehandles
+ if(not $opt::ungroup) {
+ my %fh;
+ my $enough_filehandles = 1;
+ # perl uses 7 filehandles for something?
+ # open3 uses 2 extra filehandles temporarily
+ # We need a filehandle for each redirected file descriptor
+ # (normally just STDOUT and STDERR)
+ for my $i (1..(7+2+keys %Global::fh)) {
+ $enough_filehandles &&= open($fh{$i}, "<", "/dev/null");
+ }
+ for (values %fh) { close $_; }
+ return $enough_filehandles;
+ } else {
+ # Ungrouped does not need extra file handles
+ return 1;
+ }
+}
+
+sub open_or_exit($$) {
+ # Open a file name or exit if the file cannot be opened
+ # Inputs:
+ # $mode = read:"<" write:">"
+ # $file = filehandle or filename to open
+ # Uses:
+ # $Global::original_stdin
+ # Returns:
+ # $fh = file handle to opened file
+ my $mode = shift;
+ my $file = shift;
+ if($file eq "-") {
+ if($mode eq "<") {
+ return ($Global::original_stdin || *STDIN);
+ } else {
+ return ($Global::original_stderr || *STDERR);
+ }
+ }
+ if(ref $file eq "GLOB") {
+ # This is an open filehandle
+ return $file;
+ }
+ my $fh = gensym;
+ open($fh, $mode, $file) || ::error(255,"Cannot open `$file': $!");
+ return $fh;
+}
+
+sub slurp_or_exit($) {
+ # Read content of a file or exit if the file cannot be opened
+ # Inputs:
+ # $file = filehandle or filename to open
+ # Returns:
+ # $content = content as scalar
+ my $fh = open_or_exit("<",shift);
+ # $/ = undef => slurp whole file
+ local $/;
+ my $content = <$fh>;
+ close $fh;
+ return $content;
+}
+
+sub write_or_exit(@) {
+ # Write content to a file or exit if the file cannot be opened
+ # Inputs:
+ # $file = filehandle or filename to open
+ # @content = content to be written
+ # Returns:
+ # N/A
+ my $file = shift;
+    # Use an anonymous sub so $file stays shared on every call
+    # (a named sub here would remember only the first $file)
+    my $failed = sub { ::error(255,"Cannot write to `$file': $!"); };
+    my $fh = open_or_exit(">",$file);
+    print($fh @_) or $failed->();
+    close($fh) or $failed->();
+}
+
+sub set_fh_blocking($) {
+ # Set filehandle as blocking
+ # Inputs:
+ # $fh = filehandle to be blocking
+ # Returns:
+ # N/A
+ my $fh = shift;
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+ my $flags;
+ # Get the current flags on the filehandle
+ fcntl($fh, &F_GETFL, $flags) || die $!;
+ # Remove non-blocking from the flags
+ $flags &= ~&O_NONBLOCK;
+ # Set the flags on the filehandle
+ fcntl($fh, &F_SETFL, $flags) || die $!;
+}
+
+sub set_fh_non_blocking($) {
+ # Set filehandle as non-blocking
+ # Inputs:
+ # $fh = filehandle to be blocking
+ # Returns:
+ # N/A
+ my $fh = shift;
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+ my $flags;
+ # Get the current flags on the filehandle
+ fcntl($fh, &F_GETFL, $flags) || die $!;
+ # Add non-blocking to the flags
+ $flags |= &O_NONBLOCK;
+ # Set the flags on the filehandle
+ fcntl($fh, &F_SETFL, $flags) || die $!;
+}
+
+
+sub __RUNNING_THE_JOBS_AND_PRINTING_PROGRESS__() {}
+
+
+# Variable structure:
+#
+# $Global::running{$pid} = Pointer to Job-object
+# @Global::virgin_jobs = Pointers to Job-objects that have received no input
+# $Global::host{$sshlogin} = Pointer to SSHLogin-object
+# $Global::total_running = total number of running jobs
+# $Global::total_started = total jobs started
+# $Global::max_procs_file = filename if --jobs is given a filename
+# $Global::JobQueue = JobQueue object for the queue of jobs
+# $Global::timeoutq = queue of times where jobs timeout
+# $Global::newest_job = Job object of the most recent job started
+# $Global::newest_starttime = timestamp of $Global::newest_job
+# @Global::sshlogin
+# $Global::minimal_command_line_length = min len supported by all sshlogins
+# $Global::start_no_new_jobs = should more jobs be started?
+# $Global::original_stderr = file handle for STDERR when the program started
+# $Global::joblog = filehandle of joblog
+# $Global::debug = Is debugging on?
+# $Global::exitstatus = status code of GNU Parallel
+# $Global::quoting = quote the command to run
+
+sub init_run_jobs() {
+ # Set Global variables and progress signal handlers
+ # Do the copying of basefiles
+ # Returns: N/A
+ $Global::total_running = 0;
+ $Global::total_started = 0;
+ $SIG{USR1} = \&list_running_jobs;
+ $SIG{USR2} = \&toggle_progress;
+ if(@opt::basefile) { setup_basefile(); }
+}
+
+{
+ my $last_time;
+ my %last_mtime;
+ my $max_procs_file_last_mod;
+
+ sub changed_procs_file {
+	# If --jobs is a file and it is modified:
+ # Force recomputing of max_jobs_running for each $sshlogin
+ # Uses:
+ # $Global::max_procs_file
+ # %Global::host
+ # Returns: N/A
+ if($Global::max_procs_file) {
+ # --jobs filename
+ my $mtime = (stat($Global::max_procs_file))[9];
+ $max_procs_file_last_mod ||= 0;
+ if($mtime > $max_procs_file_last_mod) {
+ # file changed: Force re-computing max_jobs_running
+ $max_procs_file_last_mod = $mtime;
+ for my $sshlogin (values %Global::host) {
+ $sshlogin->set_max_jobs_running(undef);
+ }
+ }
+ }
+ }
+
+ sub changed_sshloginfile {
+ # If --slf is changed:
+ # reload --slf
+ # filter_hosts
+ # setup_basefile
+ # Uses:
+ # @opt::sshloginfile
+ # @Global::sshlogin
+ # %Global::host
+ # $opt::filter_hosts
+ # Returns: N/A
+ if(@opt::sshloginfile) {
+ # Is --sshloginfile changed?
+ for my $slf (@opt::sshloginfile) {
+ my $actual_file = expand_slf_shorthand($slf);
+ my $mtime = (stat($actual_file))[9];
+ $last_mtime{$actual_file} ||= $mtime;
+ if($mtime - $last_mtime{$actual_file} > 1) {
+ ::debug("run",
+ "--sshloginfile $actual_file changed. reload\n");
+ $last_mtime{$actual_file} = $mtime;
+ # Reload $slf
+ # Empty sshlogins
+ @Global::sshlogin = ();
+ for (values %Global::host) {
+ # Don't start new jobs on any host
+ # except the ones added back later
+ $_->set_max_jobs_running(0);
+ }
+ # This will set max_jobs_running on the SSHlogins
+ read_sshloginfile($actual_file);
+ parse_sshlogin();
+ $opt::filter_hosts and filter_hosts();
+ setup_basefile();
+ }
+ }
+ }
+ }
+
+ sub start_more_jobs {
+ # Run start_another_job() but only if:
+ # * not $Global::start_no_new_jobs set
+ # * not JobQueue is empty
+ # * not load on server is too high
+ # * not server swapping
+ # * not too short time since last remote login
+ # Uses:
+ # %Global::host
+ # $Global::start_no_new_jobs
+ # $Global::JobQueue
+ # $opt::pipe
+ # $opt::load
+ # $opt::noswap
+ # $opt::delay
+ # $Global::newest_starttime
+ # Returns:
+ # $jobs_started = number of jobs started
+ my $jobs_started = 0;
+ if($Global::start_no_new_jobs) {
+ return $jobs_started;
+ }
+ if(time - ($last_time||0) > 1) {
+ # At most do this every second
+ $last_time = time;
+ changed_procs_file();
+ changed_sshloginfile();
+ }
+	# This will start 1 job on each --sshlogin (if possible),
+	# thus distributing the jobs round robin across the --sshlogins
+ for my $sshlogin (values %Global::host) {
+ if($Global::JobQueue->empty() and not $opt::pipe) {
+ # No more jobs in the queue
+ last;
+ }
+ debug("run", "Running jobs before on ", $sshlogin->string(), ": ",
+ $sshlogin->jobs_running(), "\n");
+ if ($sshlogin->jobs_running() < $sshlogin->max_jobs_running()) {
+ if($opt::delay
+ and
+ $opt::delay-0.008 > ::now()-$Global::newest_starttime) {
+ # It has been too short since last start
+ next;
+ }
+ if($opt::load and $sshlogin->loadavg_too_high()) {
+ # The load is too high or unknown
+ next;
+ }
+ if($opt::noswap and $sshlogin->swapping()) {
+ # The server is swapping
+ next;
+ }
+ if($opt::limit and $sshlogin->limit()) {
+ # Over limit
+ next;
+ }
+ if(($opt::memfree or $opt::memsuspend)
+ and
+ $sshlogin->memfree() < $Global::memlimit) {
+		# The server does not have enough free mem
+ ::debug("mem", "Not starting job: not enough mem\n");
+ next;
+ }
+ if($sshlogin->too_fast_remote_login()) {
+ # It has been too short since last login
+ next;
+ }
+ debug("run", $sshlogin->string(),
+ " has ", $sshlogin->jobs_running(),
+ " out of ", $sshlogin->max_jobs_running(),
+ " jobs running. Start another.\n");
+ if(start_another_job($sshlogin) == 0) {
+ # No more jobs to start on this $sshlogin
+ debug("run","No jobs started on ",
+ $sshlogin->string(), "\n");
+ next;
+ }
+ $sshlogin->inc_jobs_running();
+ $sshlogin->set_last_login_at(::now());
+ $jobs_started++;
+ }
+ debug("run","Running jobs after on ", $sshlogin->string(), ": ",
+ $sshlogin->jobs_running(), " of ",
+ $sshlogin->max_jobs_running(), "\n");
+ }
+
+ return $jobs_started;
+ }
+}
+
+{
+ my $no_more_file_handles_warned;
+
+ sub start_another_job() {
+ # If there are enough filehandles
+ # and JobQueue not empty
+    # and the job is not already in the joblog
+ # Then grab a job from Global::JobQueue,
+ # start it at sshlogin
+ # mark it as virgin_job
+ # Inputs:
+ # $sshlogin = the SSHLogin to start the job on
+ # Uses:
+ # $Global::JobQueue
+ # $opt::pipe
+ # $opt::results
+ # $opt::resume
+ # @Global::virgin_jobs
+ # Returns:
+    # 1 if another job was started
+ # 0 otherwise
+ my $sshlogin = shift;
+ # Do we have enough file handles to start another job?
+ if(enough_file_handles()) {
+ if($Global::JobQueue->empty() and not $opt::pipe) {
+ # No more commands to run
+ debug("start", "Not starting: JobQueue empty\n");
+ return 0;
+ } else {
+ my $job;
+ # Skip jobs already in job log
+ # Skip jobs already in results
+ do {
+ $job = get_job_with_sshlogin($sshlogin);
+ if(not defined $job) {
+ # No command available for that sshlogin
+ debug("start", "Not starting: no jobs available for ",
+ $sshlogin->string(), "\n");
+ return 0;
+ }
+ if($job->is_already_in_joblog()) {
+ $job->free_slot();
+ }
+ } while ($job->is_already_in_joblog()
+ or
+ ($opt::results and $opt::resume
+ and $job->is_already_in_results()));
+ debug("start", "Command to run on '",
+ $job->sshlogin()->string(), "': '",
+ $job->replaced(),"'\n");
+ if($job->start()) {
+ if($opt::pipe) {
+ if($job->virgin()) {
+ push(@Global::virgin_jobs,$job);
+ } else {
+ # Block already set: This is a retry
+ $job->write_block();
+ }
+ }
+ debug("start", "Started as seq ", $job->seq(),
+ " pid:", $job->pid(), "\n");
+ return 1;
+ } else {
+ # Not enough processes to run the job.
+ # Put it back on the queue.
+ $Global::JobQueue->unget($job);
+ # Count down the number of jobs to run for this SSHLogin.
+ my $max = $sshlogin->max_jobs_running();
+ if($max > 1) { $max--; } else {
+ my @arg;
+ for my $record (@{$job->{'commandline'}{'arg_list'}}) {
+ push @arg, map { $_->orig() } @$record;
+ }
+ ::error(255,"No more processes: ".
+ "cannot run a single job. ".
+ "Something is wrong at @arg.");
+ }
+ $sshlogin->set_max_jobs_running($max);
+ # Sleep up to 300 ms to give other processes time to die
+ ::usleep(rand()*300);
+ ::warning("No more processes: ".
+ "Decreasing number of running jobs to $max.",
+ "Try increasing 'ulimit -u' (try: ulimit -u `ulimit -Hu`)",
+ "or increasing 'nproc' in /etc/security/limits.conf",
+ "or increasing /proc/sys/kernel/pid_max");
+ return 0;
+ }
+ }
+ } else {
+ # No more file handles
+ $no_more_file_handles_warned++ or
+ ::warning("No more file handles. ",
+ "Try running 'parallel -j0 -N 100 --pipe parallel -j0'",
+ "or increasing 'ulimit -n' (try: ulimit -n `ulimit -Hn`)",
+ "or increasing 'nofile' in /etc/security/limits.conf",
+ "or increasing /proc/sys/fs/file-max");
+ debug("start", "No more file handles. ");
+ return 0;
+ }
+ }
+}
+
+sub init_progress() {
+ # Uses:
+ # $opt::bar
+ # Returns:
+ # list of computers for progress output
+ $|=1;
+ if($opt::bar) {
+ return("","");
+ }
+ my $progress = progress();
+ my $cpu_units = $opt::use_sockets_instead_of_threads ? "CPU sockets" :
+ ($opt::use_cores_instead_of_threads ? "CPU cores" : "CPU threads");
+ return ("\nComputers / $cpu_units / Max jobs to run\n",
+ $progress->{'workerlist'},"\n",$progress->{'header'});
+}
+
+sub drain_job_queue(@) {
+ # Uses:
+ # $opt::progress
+ # $Global::total_running
+ # $Global::max_jobs_running
+ # %Global::running
+ # $Global::JobQueue
+ # %Global::host
+ # $Global::start_no_new_jobs
+ # Returns: N/A
+ my @command = @_;
+ my $sleep = 0.2;
+ my $sleepsum = 0;
+ do {
+ while($Global::total_running > 0) {
+ debug("init",$Global::total_running, "==", scalar
+ keys %Global::running," slots: ", $Global::max_jobs_running);
+ if($opt::pipe) {
+ # When using --pipe sometimes file handles are not
+ # closed properly
+ for my $job (values %Global::running) {
+ close $job->fh(0,"w");
+ }
+ }
+ if($opt::progress) {
+ my $progress = progress();
+ ::status_no_nl("\r",$progress->{'status'});
+ }
+ if($Global::total_running < $Global::max_jobs_running
+ and not $Global::JobQueue->empty()) {
+ # These jobs may not be started because of loadavg
+ # or too little time between each ssh login.
+ if(start_more_jobs() > 0) {
+		    # Exponential back-on: poll faster if jobs were started
+ $sleep = $sleep/2+0.001;
+ }
+ }
+ # Exponential back-off sleeping
+ $sleep = ::reap_usleep($sleep);
+ $sleepsum += $sleep;
+ if($sleepsum >= 1000) {
+ # At most do this every second
+ $sleepsum = 0;
+ changed_procs_file();
+ changed_sshloginfile();
+ start_more_jobs();
+ }
+ }
+ if(not $Global::JobQueue->empty()) {
+ # These jobs may not be started:
+	    # * because --filter-hosts has removed all hosts
+ %Global::host or
+ ::error(255,"There are no hosts left to run on.");
+ # * because of loadavg
+ # * because of too little time between each ssh login.
+ $sleep = ::reap_usleep($sleep);
+ start_more_jobs();
+ if($Global::max_jobs_running == 0) {
+ ::warning("There are no job slots available. Increase --jobs.");
+ }
+ }
+ while($opt::sqlmaster and not $Global::sql->finished()) {
+ # SQL master
+ $sleep = ::reap_usleep($sleep);
+ start_more_jobs();
+ if($Global::start_sqlworker) {
+ # Start an SQL worker as we are now sure there is work to do
+ $Global::start_sqlworker = 0;
+ if(my $pid = fork()) {
+ $Global::unkilled_sqlworker = $pid;
+ } else {
+ # Replace --sql/--sqlandworker with --sqlworker
+ my @ARGV = (map { s/^--sql(andworker)?$/--sqlworker/; $_ }
+ @Global::options_in_argv);
+ # exec the --sqlworker
+ exec($0,@ARGV,@command);
+ }
+ }
+ }
+ } while ($Global::total_running > 0
+ or
+ not $Global::start_no_new_jobs and not $Global::JobQueue->empty()
+ or
+ $opt::sqlmaster and not $Global::sql->finished());
+ $Global::all_jobs_done = 1;
+ if($opt::progress) {
+ my $progress = progress();
+ ::status("\r".$progress->{'status'});
+ }
+}
+
+sub toggle_progress() {
+ # Turn on/off progress view
+ # Uses:
+ # $opt::progress
+ # Returns: N/A
+ $opt::progress = not $opt::progress;
+ if($opt::progress) {
+ ::status_no_nl(init_progress());
+ }
+}
+
+{
+ my $last_header;
+ my $eol;
+
+ sub progress() {
+ # Uses:
+ # $opt::bar
+ # $opt::eta
+ # %Global::host
+ # $Global::total_started
+ # Returns:
+ # $workerlist = list of workers
+    # $header = header that will fit on the screen
+ # $status = message that will fit on the screen
+ if($opt::bar) {
+ return {"workerlist" => "", "header" => "", "status" => bar()};
+ }
+ my $eta = "";
+ my ($status,$header)=("","");
+ if($opt::eta) {
+ my($total, $completed, $left, $pctcomplete, $avgtime, $this_eta) =
+ compute_eta();
+ $eta = sprintf("ETA: %ds Left: %d AVG: %.2fs ",
+ $this_eta, $left, $avgtime);
+ }
+ my $termcols = terminal_columns();
+ my @workers = sort keys %Global::host;
+ my $workerno = 1;
+ my %wrk;
+ for my $w (@workers) {
+ my %i;
+ $i{'sshlogin'} = $w eq ":" ? "local" : $w;
+ $i{'no'} = $workerno++;
+ $i{'ncpu'} = ($Global::host{$w}->ncpus() || "-");
+ $i{'jobslots'} = $Global::host{$w}->max_jobs_running();
+ $i{'completed'} = ($Global::host{$w}->jobs_completed() || 0);
+ $i{'running'} = $Global::host{$w}->jobs_running();
+ $i{'pct'} = $Global::total_started ?
+ (($i{'running'}+$i{'completed'})*100 /
+ $Global::total_started) : 0;
+ $i{'time'} = $i{'completed'} ? (time-$^T)/($i{'completed'}) : 0;
+ $wrk{$w} = \%i;
+ }
+
+ my $workerlist = "";
+ for my $w (@workers) {
+ $workerlist .=
+ $wrk{$w}{'no'}.":".$wrk{$w}{'sshlogin'} ." / ".
+ $wrk{$w}{'ncpu'}." / ".
+ $wrk{$w}{'jobslots'}."\n";
+ }
+ # Force $status to select one of the below formats
+ $status = "c"x($termcols+1);
+ # Select an output format that will fit on a single line
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX%/XX.Xs s2:XX/XX/XX%/XX.Xs s3:XX/XX/XX%/XX.Xs
+ $header = "Computer:jobs running/jobs completed/".
+ "%of started jobs/Average seconds to complete";
+ $status = $eta . join(" ",map {
+ sprintf("%s:%d/%d/%d%%/%.1fs ",
+ @{$wrk{$_}}
+ {'sshlogin','running','completed','pct','time'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX/XX%/X.Xs 2:XX/XX/XX%/X.Xs 3:XX/XX/XX%/X.Xs
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta . join(" ",map {
+ sprintf("%s:%d/%d/%d%%/%.1fs ",
+ @{$wrk{$_}}
+ {'no','running','completed','pct','time'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX% sshlogin2:XX/XX/XX% sshlogin3:XX/XX/XX%
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta . join(" ",map {
+ sprintf("%s:%d/%d/%d%%",
+ @{$wrk{$_}}
+ {'sshlogin','running','completed','pct'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX/XX% 2:XX/XX/XX% 3:XX/XX/XX% 4:XX/XX/XX% 5:XX/XX/XX%
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta . join(" ",map {
+ sprintf("%s:%d/%d/%d%%",
+ @{$wrk{$_}}
+ {'no','running','completed','pct'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX sshlogin2:XX/XX sshlogin3:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta . join(" ", map {
+ sprintf("%s:%d/%d",
+ @{$wrk{$_}}
+ {'sshlogin','running','completed'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX 2:XX/XX 3:XX/XX 4:XX/XX 5:XX/XX 6:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta . join(" ", map {
+ sprintf("%s:%d/%d",
+ @{$wrk{$_}}
+ {'no','running','completed'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX sshlogin2:XX sshlogin3:XX sshlogin4:XX sshlogin5:XX
+ $header = "Computer:jobs completed";
+ $status = $eta . join(" ", map {
+ sprintf("%s:%d",
+ @{$wrk{$_}}
+ {'sshlogin','completed'}
+ ); } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX 2:XX 3:XX 4:XX 5:XX 6:XX
+ $header = "Computer:jobs completed";
+ $status = $eta . join(" ", map {
+ sprintf("%s:%d",
+ @{$wrk{$_}}
+ {'no','completed'}
+ ); } @workers);
+ }
+	if(($last_header||"") ne $header) {
+ $header .= "\n";
+ $last_header = $header;
+ } else {
+ $header = "";
+ }
+ if(not $eol) {
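+	    # $eol = the terminal's clear-to-end-of-line sequence;
+	    # fall back to the ANSI code if tput fails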
+ $eol = `sh -c "tput el </dev/tty" 2>/dev/null`;
+ chomp($eol);
+ if($eol eq "") { $eol = "\033[K"; }
+ }
+
+ return {"workerlist" => $workerlist, "header" => $header,
+ "status" => $status.$eol};
+ }
+}
+
+{
+
+ my ($first_completed, $smoothed_avg_time, $last_eta);
+
+ sub compute_eta {
+ # Calculate important numbers for ETA
+ # Returns:
+ # $total = number of jobs in total
+ # $completed = number of jobs completed
+ # $left = number of jobs left
+    # $pctcomplete = fraction of jobs completed (0..1)
+ # $avgtime = averaged time
+ # $eta = smoothed eta
+ my $completed = $Global::total_completed;
+    # In rare cases with -X, $completed can be > total_jobs()
+ my $total = ::max($Global::JobQueue->total_jobs(),$completed);
+ my $left = $total - $completed;
+ if(not $completed) {
+ return($total, $completed, $left, 0, 0, 0);
+ }
+ my $pctcomplete = ::min($completed / $total,100);
+ $first_completed ||= time;
+ my $timepassed = (time - $first_completed);
+ my $avgtime = $timepassed / $completed;
+ $smoothed_avg_time ||= $avgtime;
+ # Smooth the eta so it does not jump wildly
+ $smoothed_avg_time = (1 - $pctcomplete) * $smoothed_avg_time +
+ $pctcomplete * $avgtime;
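+	# Illustration: at 10% done a new sample only nudges the
+	# estimate (0.9*old + 0.1*new); at 90% done it dominates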
+ my $eta = int($left * $smoothed_avg_time);
+ if($eta*0.90 < $last_eta and $last_eta < $eta) {
+	    # ETA jumped up by less than 10%: keep the last ETA instead
+ $eta = $last_eta;
+ } else {
+ $last_eta = $eta;
+ }
+ return($total, $completed, $left, $pctcomplete, $avgtime, $eta);
+ }
+}
+
+{
+ my ($rev,$reset);
+
+ sub bar() {
+ # Return:
+ # $status = bar with eta, completed jobs, arg and pct
+ $rev ||= "\033[7m";
+ $reset ||= "\033[0m";
+ my($total, $completed, $left, $pctcomplete, $avgtime, $eta) =
+ compute_eta();
+ if($Global::all_jobs_done) { $eta = now()-$Global::start_time; }
+ my $arg = $Global::newest_job ?
+ $Global::newest_job->{'commandline'}->
+ replace_placeholders(["\257<\257>"],0,0) : "";
+ $arg = decode_utf8($arg);
+ my $eta_dhms = ::seconds_to_time_units($eta);
+ my $bar_text =
+ sprintf("%d%% %d:%d=%s %s",
+ $pctcomplete*100, $completed, $left, $eta_dhms, $arg);
+ my $terminal_width = terminal_columns();
+ my $s = sprintf("%-${terminal_width}s",
+ substr($bar_text." "x$terminal_width,
+ 0,$terminal_width));
+ my $width = int($terminal_width * $pctcomplete);
+ substr($s,$width,0) = $reset;
+ my $zenity = sprintf("%-${terminal_width}s",
+ substr("# $eta sec $arg",
+ 0,$terminal_width));
+ # Prefix with zenity header
+ $s = "\r" . $zenity . "\r" . $pctcomplete*100 .
+ "\r" . $rev . $s . $reset;
+ return $s;
+ }
+}
+
+{
+ my ($rows,$columns,$last_update_time);
+
+ sub compute_terminal_size() {
+ # && true is to force spawning a shell and not just exec'ing
+ my @tput = qx{ tput lines cols </dev/tty 2>/dev/null && true };
+ $rows = 0 + $tput[0];
+ $columns = 0 + $tput[1];
+ if(not ($rows && $columns)) {
+ # && true is to force spawning a shell and not just exec'ing
+ my $stty = qx{ stty -a </dev/tty 2>/dev/null && true };
+ # FreeBSD/OpenBSD/NetBSD/Dragonfly/MirOS
+ # MacOSX/IRIX/AIX/Tru64
+ $stty =~ /(\d+) columns/ and do { $columns = $1; };
+ $stty =~ /(\d+) rows/ and do { $rows = $1; };
+ # GNU/Linux/Solaris
+ $stty =~ /columns (\d+)/ and do { $columns = $1; };
+ $stty =~ /rows (\d+)/ and do { $rows = $1; };
+ # Solaris-x86/HPUX/SCOsysV/UnixWare/OpenIndiana
+ $stty =~ /columns = (\d+)/ and do { $columns = $1; };
+ $stty =~ /rows = (\d+)/ and do { $rows = $1; };
+ # QNX
+ $stty =~ /rows=(\d+),(\d+)/ and do { ($rows,$columns) = ($1,$2); };
+ }
+ if(not ($rows && $columns)) {
+ # && true is to force spawning a shell and not just exec'ing
+ my $resize = qx{ resize 2>/dev/null && true };
+ $resize =~ /COLUMNS=(\d+);/ and do { $columns ||= $1; };
+ $resize =~ /LINES=(\d+);/ and do { $rows ||= $1; };
+ }
+ $rows ||= 24;
+ $columns ||= 80;
+ }
+
+ sub update_terminal_size() {
+ # Only update once per second.
+	if(($last_update_time||0) < time) {
+ $last_update_time = time;
+ compute_terminal_size();
+ # Set signal WINdow CHange to force recompute
+ $SIG{WINCH} = \&compute_terminal_size;
+ }
+ }
+
+ sub terminal_rows() {
+ # Get the number of rows of the terminal.
+ # Returns:
+ # number of rows of the screen
+ update_terminal_size();
+ return $rows;
+ }
+
+ sub terminal_columns() {
+ # Get the number of columns of the terminal.
+ # Returns:
+ # number of columns of the screen
+ update_terminal_size();
+ return $columns;
+ }
+}
+
+sub untabify($) {
+ # Convert \t into spaces
+ my @out;
+ my ($src);
+ # Deal with multi-byte characters
+ for my $src (split("\t",$_[0])) {
+ push @out, $src. " "x(8-mbswidth($src)%8);
+ }
+ return join "",@out;
+}
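+# Illustration: untabify("ab\tc") returns "ab" . " "x6 . "c" . " "x7 -
+# each chunk is padded to a display width that is a multiple of 8.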
+
+# Prototype forwarding
+sub get_job_with_sshlogin($);
+sub get_job_with_sshlogin($) {
+ # Input:
+ # $sshlogin = which host should the job be run on?
+ # Uses:
+ # $opt::hostgroups
+ # $Global::JobQueue
+ # Returns:
+ # $job = next job object for $sshlogin if any available
+ my $sshlogin = shift;
+ my $job;
+
+ if ($opt::hostgroups) {
+ my @other_hostgroup_jobs = ();
+
+ while($job = $Global::JobQueue->get()) {
+ if($sshlogin->in_hostgroups($job->hostgroups())) {
+ # Found a job to be run on a hostgroup of this
+ # $sshlogin
+ last;
+ } else {
+ # This job was not in the hostgroups of $sshlogin
+ push @other_hostgroup_jobs, $job;
+ }
+ }
+ $Global::JobQueue->unget(@other_hostgroup_jobs);
+ if(not defined $job) {
+ # No more jobs
+ return undef;
+ }
+ } else {
+ $job = $Global::JobQueue->get();
+ if(not defined $job) {
+ # No more jobs
+ ::debug("start", "No more jobs: JobQueue empty\n");
+ return undef;
+ }
+ }
+ if(not $job->suspended()) {
+ $job->set_sshlogin($sshlogin);
+ }
+ if(defined $opt::retries and $job->failed_here()) {
+ # This command with these args failed for this sshlogin
+ my ($no_of_failed_sshlogins,$min_failures) = $job->min_failed();
+ # Only look at the Global::host that have > 0 jobslots
+ if($no_of_failed_sshlogins ==
+ grep { $_->max_jobs_running() > 0 } values %Global::host
+ and $job->failed_here() == $min_failures) {
+ # It failed the same or more times on another host:
+ # run it on this host
+ } else {
+ # If it failed fewer times on another host:
+ # Find another job to run
+ my $nextjob;
+ if(not $Global::JobQueue->empty()) {
+ # This can potentially recurse for all args
+ no warnings 'recursion';
+ $nextjob = get_job_with_sshlogin($sshlogin);
+ }
+ # Push the command back on the queue
+ $Global::JobQueue->unget($job);
+ return $nextjob;
+ }
+ }
+ return $job;
+}
+
+
+sub __REMOTE_SSH__() {}
+
+
+sub read_sshloginfiles(@) {
+ # Read a list of --slf's
+ # Input:
+ # @files = files or symbolic file names to read
+ # Returns: N/A
+ for my $s (@_) {
+ read_sshloginfile(expand_slf_shorthand($s));
+ }
+}
+
+sub expand_slf_shorthand($) {
+    # Expand --slf shorthand into an actual file name to read
+ # Input:
+ # $file = file or symbolic file name to read
+ # Returns:
+ # $file = actual file name to read
+ my $file = shift;
+ if($file eq "-") {
+ # skip: It is stdin
+ } elsif($file eq "..") {
+ $file = $Global::config_dir."/sshloginfile";
+ } elsif($file eq ".") {
+ $file = "/etc/parallel/sshloginfile";
+ } elsif(not -r $file) {
+ for(@Global::config_dirs) {
+ if(not -r $_."/".$file) {
+ # Try prepending $PARALLEL_HOME
+ ::error(255,"Cannot open $file.");
+ } else {
+ $file = $_."/".$file;
+ last;
+ }
+ }
+ }
+ return $file;
+}
+
+sub read_sshloginfile($) {
+ # Read sshloginfile into @Global::sshlogin
+ # Input:
+ # $file = file to read
+ # Uses:
+ # @Global::sshlogin
+ # Returns: N/A
+ local $/ = "\n";
+ my $file = shift;
+ my $close = 1;
+ my $in_fh;
+ ::debug("init","--slf ",$file);
+ if($file eq "-") {
+ $in_fh = *STDIN;
+ $close = 0;
+ } else {
+ $in_fh = open_or_exit("<", $file);
+ }
+ while(<$in_fh>) {
+ chomp;
+ /^\s*#/ and next;
+ /^\s*$/ and next;
+ push @Global::sshlogin, $_;
+ }
+ if($close) {
+ close $in_fh;
+ }
+}
+
+sub parse_sshlogin() {
+ # Parse @Global::sshlogin into %Global::host.
+ # Keep only hosts that are in one of the given ssh hostgroups.
+ # Uses:
+ # @Global::sshlogin
+ # $Global::minimal_command_line_length
+ # %Global::host
+ # $opt::transfer
+ # @opt::return
+ # $opt::cleanup
+ # @opt::basefile
+ # @opt::trc
+ # Returns: N/A
+ sub expand_range($) {
+ # Expand host[9-11,15]a[09-11]b
+ # [9-11,15] => 9 10 11 15
+ # [09-11] => 09 10 11
+ my ($in) = @_;
+ my ($prefix, $range, $suffix);
+ if(($prefix, $range, $suffix) = $in =~ /^(.*?)\[([-0-9,]*)\](.*)$/) {
+ my @res;
+ while(length $range) {
+ if($range =~ s/^,//) {
+ # skip
+ } elsif($range =~ s/^(\d+)-(\d+)//) {
+ my ($start, $end) = ($1, $2);
+ push @res, map { $prefix . $_ . $suffix } $start..$end;
+ } elsif($range =~ s/^(\d+)//) {
+ push @res, map { $prefix . $_ . $suffix } $1;
+ } else {
+ die "Cannot parse $in (at $range)";
+ }
+ }
+ return map { expand_range($_) } @res;
+ } else {
+ return $in;
+ }
+ }
+ my @login;
+ if(not @Global::sshlogin) { @Global::sshlogin = (":"); }
+ for my $sshlogin (@Global::sshlogin) {
+ # Split up -S sshlogin,sshlogin
+ # Parse ,, and \, as , but do not split on that
+ # -S "ssh -J jump1,,jump2 host1,host2" =>
+ # ssh -J jump1,jump2 host1
+ # host2
+ # Protect \, and ,, as \0
+ $sshlogin =~ s/\\,|,,/\0/g;
+ # Protect , in ranges: [___,___] => [___\0___]
+ while($sshlogin =~ s/(\[[-0-9\0]*),(.*\])/$1\0$2/g) {}
+ for my $s (split /,|\n/, $sshlogin) {
+ # Replace \0 => ,
+ $s =~ s/\0/,/g;
+ if ($s eq ".." or $s eq "-") {
+ # This may add to @Global::sshlogin - possibly bug
+ read_sshloginfile(expand_slf_shorthand($s));
+ } else {
+ $s =~ s/\s*$//;
+ # Expand host[1-12,15]a[01-10]b
+ push @login, expand_range($s);
+ }
+ }
+ }
+ $Global::minimal_command_line_length = 100_000_000;
+ my @allowed_hostgroups;
+ for my $ncpu_sshlogin_string (::uniq(@login)) {
+ my $sshlogin = SSHLogin->new($ncpu_sshlogin_string);
+ my $sshlogin_string = $sshlogin->string();
+ if($sshlogin_string eq "") {
+ # This is an ssh group: -S @webservers
+ push @allowed_hostgroups, $sshlogin->hostgroups();
+ next;
+ }
+ if($Global::host{$sshlogin_string}) {
+ # This sshlogin has already been added:
+ # It is probably a host that has come back
+ # Set the max_jobs_running back to the original
+ debug("run","Already seen $sshlogin_string\n");
+ if($sshlogin->{'ncpus'}) {
+		# If ncpus was set with the 'n/' prefix of the sshlogin, overwrite it:
+ $Global::host{$sshlogin_string}->set_ncpus($sshlogin->ncpus());
+ }
+ $Global::host{$sshlogin_string}->set_max_jobs_running(undef);
+ next;
+ }
+ $sshlogin->set_maxlength(Limits::Command::max_length());
+
+ $Global::minimal_command_line_length =
+ ::min($Global::minimal_command_line_length, $sshlogin->maxlength());
+ $Global::host{$sshlogin_string} = $sshlogin;
+ }
+ $Global::usable_command_line_length =
+ # Usable len = maxlen - 3000 for wrapping, div 2 for hexing
+ int(($Global::minimal_command_line_length - 3000)/2);
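+    # Illustration: with a typical 131071-byte limit this gives
+    # int((131071-3000)/2) = 64035 usable characters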
+ if($opt::max_chars) {
+ if($opt::max_chars <= $Global::usable_command_line_length) {
+ $Global::usable_command_line_length = $opt::max_chars;
+ } else {
+ ::warning("Value for option -s should be < ".
+ $Global::usable_command_line_length.".");
+ }
+ }
+ if(@allowed_hostgroups) {
+ # Remove hosts that are not in these groups
+ while (my ($string, $sshlogin) = each %Global::host) {
+ if(not $sshlogin->in_hostgroups(@allowed_hostgroups)) {
+ delete $Global::host{$string};
+ }
+ }
+ }
+
+ # debug("start", "sshlogin: ", my_dump(%Global::host),"\n");
+ if(@Global::transfer_files or @opt::return
+ or $opt::cleanup or @opt::basefile) {
+ if(not remote_hosts()) {
+ # There are no remote hosts
+ if(@opt::trc) {
+ ::warning("--trc ignored as there are no remote --sshlogin.");
+ } elsif (defined $opt::transfer) {
+ ::warning("--transfer ignored as there are ".
+ "no remote --sshlogin.");
+ } elsif (@opt::transfer_files) {
+ ::warning("--transferfile ignored as there ".
+ "are no remote --sshlogin.");
+ } elsif (@opt::return) {
+ ::warning("--return ignored as there are no remote --sshlogin.");
+ } elsif (defined $opt::cleanup and not %opt::template) {
+ ::warning("--cleanup ignored as there ".
+ "are no remote --sshlogin.");
+ } elsif (@opt::basefile) {
+ ::warning("--basefile ignored as there ".
+ "are no remote --sshlogin.");
+ }
+ }
+ }
+}
+
+sub remote_hosts() {
+ # Return sshlogins that are not ':'
+ # Uses:
+ # %Global::host
+ # Returns:
+ # list of sshlogins with ':' removed
+ return grep !/^:$/, keys %Global::host;
+}
+
+sub setup_basefile() {
+ # Transfer basefiles to each $sshlogin
+ # This needs to be done before first jobs on $sshlogin is run
+ # Uses:
+ # %Global::host
+ # @opt::basefile
+ # Returns: N/A
+ my @cmd;
+ my $rsync_destdir;
+ my $workdir;
+ for my $sshlogin (values %Global::host) {
+ if($sshlogin->local()) { next }
+ for my $file (@opt::basefile) {
+	    if($file !~ m:^/: and ($opt::workdir||"") eq "...") {
+ ::error(255,"Work dir '...' will not work with ".
+ "relative basefiles.");
+ }
+ if(not $workdir) {
+ my $dummycmdline =
+ CommandLine->new(1,["true"],{},0,0,[],[],[],[],{},{});
+ my $dummyjob = Job->new($dummycmdline);
+ $workdir = $dummyjob->workdir();
+ }
+ push @cmd, $sshlogin->rsync_transfer_cmd($file,$workdir);
+ }
+ }
+ debug("init", "basesetup: @cmd\n");
+ my ($exitstatus,$stdout_ref,$stderr_ref) =
+ run_gnu_parallel((join "\n",@cmd),"-j0","--retries",5);
+ if($exitstatus) {
+ my @stdout = @$stdout_ref;
+ my @stderr = @$stderr_ref;
+ ::error(255,"Copying of --basefile failed: @stdout@stderr");
+ }
+}
+
+sub cleanup_basefile() {
+ # Remove the basefiles transferred
+ # Uses:
+ # %Global::host
+ # @opt::basefile
+ # Returns: N/A
+ my @cmd;
+ my $workdir;
+ if(not $workdir) {
+ my $dummycmdline = CommandLine->new(1,["true"],{},0,0,[],[],[],[],{},{});
+ my $dummyjob = Job->new($dummycmdline);
+ $workdir = $dummyjob->workdir();
+ }
+ for my $sshlogin (values %Global::host) {
+ if($sshlogin->local()) { next }
+ for my $file (@opt::basefile) {
+ push @cmd, $sshlogin->cleanup_cmd($file,$workdir);
+ }
+ }
+ debug("init", "basecleanup: @cmd\n");
+ my ($exitstatus,$stdout_ref,$stderr_ref) =
+ run_gnu_parallel(join("\n",@cmd),"-j0","--retries",5);
+ if($exitstatus) {
+ my @stdout = @$stdout_ref;
+ my @stderr = @$stderr_ref;
+ ::error(255,"Cleanup of --basefile failed: @stdout@stderr");
+ }
+}
+
+sub run_gnu_parallel() {
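+    # Run each line of $stdin as a backgrounded shell job and wait
+    # for all of them; a simplified stand-in for _run_gnu_parallel
+    # below. @args is ignored and the exit status is not collected.
+    # Returns:
+    #   0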
+ my ($stdin,@args) = @_;
+ my $cmd = join "",map { " $_ & " } split /\n/, $stdin;
+ print $Global::original_stderr ` $cmd wait` ;
+ return 0
+}
+
+sub _run_gnu_parallel() {
+ # Run GNU Parallel
+ # This should ideally just fork an internal copy
+ # and not start it through a shell
+ # Input:
+ # $stdin = data to provide on stdin for GNU Parallel
+ # @args = command line arguments
+ # Returns:
+ # $exitstatus = exitcode of GNU Parallel run
+ # \@stdout = standard output
+ # \@stderr = standard error
+ my ($stdin,@args) = @_;
+ my ($exitstatus,@stdout,@stderr);
+ my ($stdin_fh,$stdout_fh)=(gensym(),gensym());
+ my ($stderr_fh, $stderrname) = ::tmpfile(SUFFIX => ".par");
+ unlink $stderrname;
+
+ my $pid = ::open3($stdin_fh,$stdout_fh,$stderr_fh,
+ $0,qw(--plain --shell /bin/sh --will-cite), @args);
+ if(my $writerpid = fork()) {
+ close $stdin_fh;
+ @stdout = <$stdout_fh>;
+ # Now stdout is closed:
+ # These pids should be dead or die very soon
+ while(kill 0, $writerpid) { ::usleep(1); }
+ die;
+# reap $writerpid;
+# while(kill 0, $pid) { ::usleep(1); }
+# reap $writerpid;
+ $exitstatus = $?;
+ seek $stderr_fh, 0, 0;
+ @stderr = <$stderr_fh>;
+ close $stdout_fh;
+ close $stderr_fh;
+ } else {
+ close $stdout_fh;
+ close $stderr_fh;
+ print $stdin_fh $stdin;
+ close $stdin_fh;
+ exit(0);
+ }
+ return ($exitstatus,\@stdout,\@stderr);
+}
+
+sub filter_hosts() {
+ # Remove down --sshlogins from active duty.
+ # Find ncpus, ncores, maxlen, time-to-login for each host.
+ # Uses:
+ # %Global::host
+ # $Global::minimal_command_line_length
+ # $opt::use_sockets_instead_of_threads
+ # $opt::use_cores_instead_of_threads
+ # $opt::use_cpus_instead_of_cores
+ # Returns: N/A
+
+ my ($nsockets_ref,$ncores_ref, $nthreads_ref, $time_to_login_ref,
+ $maxlen_ref, $echo_ref, $down_hosts_ref) =
+ parse_host_filtering(parallelized_host_filtering());
+
+ delete @Global::host{@$down_hosts_ref};
+ @$down_hosts_ref and ::warning("Removed @$down_hosts_ref.");
+
+ $Global::minimal_command_line_length = 100_000_000;
+ while (my ($string, $sshlogin) = each %Global::host) {
+ if($sshlogin->local()) { next }
+ my ($nsockets,$ncores,$nthreads,$time_to_login,$maxlen) =
+ ($nsockets_ref->{$string},$ncores_ref->{$string},
+ $nthreads_ref->{$string},$time_to_login_ref->{$string},
+ $maxlen_ref->{$string});
+ defined $nsockets or ::die_bug("nsockets missing: $string");
+ defined $ncores or ::die_bug("ncores missing: $string");
+ defined $nthreads or ::die_bug("nthreads missing: $string");
+ defined $time_to_login or ::die_bug("time_to_login missing: $string");
+ defined $maxlen or ::die_bug("maxlen missing: $string");
+ # ncpus may be set by 4/hostname or may be undefined yet
+ my $ncpus = $sshlogin->{'ncpus'};
+ # $nthreads may be 0 if GNU Parallel is not installed remotely
+ $ncpus = $nthreads || $ncpus || $sshlogin->ncpus();
+ if($opt::use_cpus_instead_of_cores) {
+ $ncpus = $ncores || $ncpus;
+ } elsif($opt::use_sockets_instead_of_threads) {
+ $ncpus = $nsockets || $ncpus;
+ } elsif($opt::use_cores_instead_of_threads) {
+ $ncpus = $ncores || $ncpus;
+ }
+ $sshlogin->set_ncpus($ncpus);
+ $sshlogin->set_time_to_login($time_to_login);
+ $maxlen = $maxlen || Limits::Command::max_length();
+ $sshlogin->set_maxlength($maxlen);
+ ::debug("init", "Timing from -S:$string ",
+ " ncpus:", $ncpus,
+ " nsockets:",$nsockets,
+ " ncores:", $ncores,
+ " nthreads:",$nthreads,
+ " time_to_login:", $time_to_login,
+ " maxlen:", $maxlen,
+ " min_max_len:", $Global::minimal_command_line_length,"\n");
+ }
+}
+
+sub parse_host_filtering() {
+ # Input:
+ # @lines = output from parallelized_host_filtering()
+ # Returns:
+ # \%nsockets = number of sockets of {host}
+ # \%ncores = number of cores of {host}
+ # \%nthreads = number of hyperthreaded cores of {host}
+ # \%time_to_login = time_to_login on {host}
+ # \%maxlen = max command len on {host}
+ # \%echo = echo received from {host}
+ # \@down_hosts = list of hosts with no answer
+ local $/ = "\n";
+ my (%nsockets, %ncores, %nthreads, %time_to_login, %maxlen, %echo,
+ @down_hosts);
+ for (@_) {
+ ::debug("init","Read: ",$_);
+ chomp;
+ my @col = split /\t/, $_;
+ if($col[0] =~ /^parallel: Warning:/) {
+ # Timed out job: Ignore it
+ next;
+ } elsif(defined $col[6]) {
+ # This is a line from --joblog
+ # seq host time spent sent received exit signal command
+ # 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ parallel\ --number-of-cores
+ if($col[0] eq "Seq" and $col[1] eq "Host" and
+ $col[2] eq "Starttime") {
+ # Header => skip
+ next;
+ }
+ # Get server from: eval true server\;
+ $col[8] =~ /eval .?true.?\s([^\;]+);/ or
+ ::die_bug("col8 does not contain host: $col[8] in $_");
+ my $host = $1;
+ $host =~ tr/\\//d;
+ $Global::host{$host} or next;
+ if($col[6] eq "255" or $col[6] eq "-1" or $col[6] eq "1") {
+		# exit == 255 or exit == timeout (-1): ssh failed/timed out
+ # exit == 1: lsh failed
+ # Remove sshlogin
+ ::debug("init", "--filtered $host\n");
+ push(@down_hosts, $host);
+ } elsif($col[6] eq "127") {
+		# exit == 127: parallel not installed remotely
+ # Set nsockets, ncores, nthreads = 1
+ ::warning("Could not figure out ".
+ "number of cpus on $host. Using 1.");
+ $nsockets{$host} = 1;
+ $ncores{$host} = 1;
+ $nthreads{$host} = 1;
+ $maxlen{$host} = Limits::Command::max_length();
+ } elsif($col[0] =~ /^\d+$/ and $Global::host{$host}) {
+		# Remember how long it took to log in
+ # 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ echo
+ $time_to_login{$host} = ::min($time_to_login{$host},$col[3]);
+ } else {
+ ::die_bug("host check unmatched long jobline: $_");
+ }
+ } elsif($Global::host{$col[0]}) {
+	    # This is output from --number-of-sockets, --number-of-cores,
+	    # --number-of-threads, or --max-line-length-allowed
+ # ncores: server 8
+ # ncpus: server 2
+ # maxlen: server 131071
+ if(/parallel: Warning: Cannot figure out number of/) {
+ next;
+ }
+ if(/\t(perl: warning:|LANGUAGE =|LC_ALL =|LANG =|are supported and installed|Disconnected from|Received disconnect from)/
+ or
+ /\tWarning: /
+ or
+ /\t(Host key fingerprint is|\+-.*-\+|\|.*\|)/
+ or
+ /\t\S+: Undefined variable./
+ ) {
+ # Skip these (from perl):
+ # perl: warning: Setting locale failed.
+ # perl: warning: Please check that your locale settings:
+ # LANGUAGE = (unset),
+ # LC_ALL = (unset),
+ # LANG = "en_US.UTF-8"
+ # are supported and installed on your system.
+ # perl: warning: Falling back to the standard locale ("C").
+ # Disconnected from 127.0.0.1 port 22
+ #
+ # Skip these (from ssh):
+ # Warning: Permanently added * to the list of known hosts.
+ # Warning: Identity file * not accessible: *
+ # (VisualHostKey=yes)
+ # Host key fingerprint is SHA256:...
+ # +--[ED25519 256]--+
+ # | o |
+ # +----[SHA256]-----+
+ #
+ # Skip these (from csh):
+ # MANPATH: Undefined variable.
+ } elsif(not defined $nsockets{$col[0]}) {
+ $nsockets{$col[0]} = $col[1];
+ } elsif(not defined $ncores{$col[0]}) {
+ $ncores{$col[0]} = $col[1];
+ } elsif(not defined $nthreads{$col[0]}) {
+ $nthreads{$col[0]} = $col[1];
+ } elsif(not defined $maxlen{$col[0]}) {
+ $maxlen{$col[0]} = $col[1];
+ } elsif(not defined $echo{$col[0]}) {
+ $echo{$col[0]} = $col[1];
+ } else {
+ ::die_bug("host check too many col0: $_");
+ }
+ } else {
+ ::die_bug("host check unmatched short jobline ($col[0]): $_");
+ }
+ }
+ @down_hosts = uniq(@down_hosts);
+ return(\%nsockets, \%ncores, \%nthreads, \%time_to_login,
+ \%maxlen, \%echo, \@down_hosts);
+}
+
+sub parallelized_host_filtering() {
+ # Uses:
+ # %Global::host
+ # Returns:
+ # text entries with:
+ # * joblog line
+ # * hostname \t number of cores
+ # * hostname \t number of cpus
+ # * hostname \t max-line-length-allowed
+ # * hostname \t empty
+
+ sub sshwrapped {
+ # Wrap with ssh and --env
+	# The wrapped command outputs "0\n" if the inner command fails
+ my $sshlogin = shift;
+ my $command = shift;
+ # wrapper that returns output "0\n" if the command fails
+ # E.g. parallel not installed => "0\n"
+ my $wcmd = q(perl -e '$a=`).$command.q(`; print $? ? "0".v010 : $a');
+ my $commandline = CommandLine->new(1,[$wcmd],{},0,0,[],[],[],[],{},{});
+ my $job = Job->new($commandline);
+ $job->set_sshlogin($sshlogin);
+ $job->wrapped();
+ return($job->{'wrapped'});
+ }
+
+ my(@sockets, @cores, @threads, @maxline, @echo);
+ while (my ($host, $sshlogin) = each %Global::host) {
+ if($host eq ":") { next }
+ # The 'true' is used to get the $host out later
+ push(@sockets, $host."\t"."true $host; ".
+ sshwrapped($sshlogin,"parallel --number-of-sockets")."\n\0");
+ push(@cores, $host."\t"."true $host; ".
+ sshwrapped($sshlogin,"parallel --number-of-cores")."\n\0");
+ push(@threads, $host."\t"."true $host; ".
+ sshwrapped($sshlogin,"parallel --number-of-threads")."\n\0");
+ push(@maxline, $host."\t"."true $host; ".
+ sshwrapped($sshlogin,
+ "parallel --max-line-length-allowed")."\n\0");
+ # 'echo' is used to get the fastest possible ssh login time
+ push(@echo, $host."\t"."true $host; ".
+ $sshlogin->wrap("echo $host")."\n\0");
+ }
+ # --timeout 10: Setting up an SSH connection and running a simple
+ # command should never take > 10 sec.
+ # --delay 0.1: If multiple sshlogins use the same proxy the delay
+ # will make it less likely to overload the ssh daemon.
+ # --retries 3: If the ssh daemon is overloaded, try 3 times
+ my $cmd =
+ "$0 -j0 --timeout 10 --joblog - --plain --delay 0.1 --retries 3 ".
+ "--tag --tagstring '{1}' -0 --colsep '\t' -k eval '{2}' && true ";
+ $cmd = $Global::shell." -c ".Q($cmd);
+ ::debug("init", $cmd, "\n");
+ my @out;
+ my $prepend = "";
+
+ my ($host_fh,$in,$err);
+ open3($in, $host_fh, $err, $cmd) || ::die_bug("parallel host check: $cmd");
+ ::debug("init", map { $_,"\n" } @sockets, @cores, @threads, @maxline, @echo);
+
+ if(not fork()) {
+ # Give the commands to run to the $cmd
+ close $host_fh;
+ print $in @sockets, @cores, @threads, @maxline, @echo;
+ close $in;
+ exit();
+ }
+ close $in;
+ # If -0: $/ must be \n
+ local $/ = "\n";
+ for(<$host_fh>) {
+ # TODO incompatible with '-quoting. Needs to be fixed differently
+ #if(/\'$/) {
+ # # if last char = ' then append next line
+ # # This may be due to quoting of \n in environment var
+ # $prepend .= $_;
+ # next;
+ #}
+ $_ = $prepend . $_;
+ $prepend = "";
+ push @out, $_;
+ }
+ close $host_fh;
+ return @out;
+}
+
+sub onall($@) {
+ # Runs @command on all hosts.
+ # Uses parallel to run @command on each host.
+ # --jobs = number of hosts to run on simultaneously.
+    # For each host, a parallel command with the args will be run.
+ # Uses:
+ # $Global::debug
+ # $Global::exitstatus
+ # $Global::joblog
+ # $Global::quoting
+ # $opt::D
+ # $opt::arg_file_sep
+ # $opt::arg_sep
+ # $opt::colsep
+ # $opt::files
+ # $opt::files0
+ # $opt::group
+ # $opt::joblog
+ # $opt::jobs
+ # $opt::keeporder
+ # $opt::linebuffer
+ # $opt::max_chars
+ # $opt::plain
+ # $opt::retries
+ # $opt::tag
+ # $opt::tee
+ # $opt::timeout
+ # $opt::ungroup
+ # %Global::host
+ # @opt::basefile
+ # @opt::env
+ # @opt::v
+ # Input:
+ # @command = command to run on all hosts
+ # Returns: N/A
+ sub tmp_joblog {
+ # Input:
+ # $joblog = filename of joblog - undef if none
+ # Returns:
+ # $tmpfile = temp file for joblog - undef if none
+ my $joblog = shift;
+ if(not defined $joblog) {
+ return undef;
+ }
+ my ($fh, $tmpfile) = ::tmpfile(SUFFIX => ".log");
+ close $fh;
+ return $tmpfile;
+ }
+ my ($input_source_fh_ref,@command) = @_;
+ if($Global::quoting) {
+ @command = shell_quote(@command);
+ }
+
+ # Copy all @input_source_fh (-a and :::) into tempfiles
+ my @argfiles = ();
+ for my $fh (@$input_source_fh_ref) {
+ my ($outfh, $name) = ::tmpfile(SUFFIX => ".all", UNLINK => not $opt::D);
+ print $outfh (<$fh>);
+ close $outfh;
+ push @argfiles, $name;
+ }
+ if(@opt::basefile) { setup_basefile(); }
+ # for each sshlogin do:
+ # parallel -S $sshlogin $command :::: @argfiles
+ #
+ # Pass some of the options to the sub-parallels, not all of them as
+ # -P should only go to the first, and -S should not be copied at all.
+ my $options =
+ join(" ",
+ ((defined $opt::sshdelay) ? "--delay ".$opt::sshdelay : ""),
+ ((defined $opt::memfree) ? "--memfree ".$opt::memfree : ""),
+ ((defined $opt::memsuspend) ? "--memfree ".$opt::memsuspend : ""),
+ ((defined $opt::D) ? "-D $opt::D" : ""),
+ ((defined $opt::group) ? "--group" : ""),
+ ((defined $opt::jobs) ? "-P $opt::jobs" : ""),
+ ((defined $opt::keeporder) ? "--keeporder" : ""),
+ ((defined $opt::linebuffer) ? "--linebuffer" : ""),
+ ((defined $opt::max_chars) ? "--max-chars ".$opt::max_chars : ""),
+ ((defined $opt::plain) ? "--plain" : ""),
+ (($opt::ungroup == 1) ? "-u" : ""),
+ ((defined $opt::tee) ? "--tee" : ""),
+ );
+ my $suboptions =
+ join(" ",
+ ((defined $opt::sshdelay) ? "--delay ".$opt::sshdelay : ""),
+ ((defined $opt::D) ? "-D $opt::D" : ""),
+ ((defined $opt::arg_file_sep) ? "--arg-file-sep ".$opt::arg_file_sep : ""),
+ ((defined $opt::arg_sep) ? "--arg-sep ".$opt::arg_sep : ""),
+ ((defined $opt::colsep) ? "--colsep ".shell_quote($opt::colsep) : ""),
+ ((defined $opt::files) ? "--files" : ""),
+ ((defined $opt::files0) ? "--files0" : ""),
+ ((defined $opt::group) ? "--group" : ""),
+ ((defined $opt::cleanup) ? "--cleanup" : ""),
+ ((defined $opt::keeporder) ? "--keeporder" : ""),
+ ((defined $opt::linebuffer) ? "--linebuffer" : ""),
+ ((defined $opt::max_chars) ? "--max-chars ".$opt::max_chars : ""),
+ ((defined $opt::plain) ? "--plain" : ""),
+ ((defined $opt::plus) ? "--plus" : ""),
+ ((defined $opt::retries) ? "--retries ".$opt::retries : ""),
+ ((defined $opt::timeout) ? "--timeout ".$opt::timeout : ""),
+ (($opt::ungroup == 1) ? "-u" : ""),
+ ((defined $opt::ssh) ? "--ssh '".$opt::ssh."'" : ""),
+ ((defined $opt::tee) ? "--tee" : ""),
+ ((defined $opt::workdir) ? "--wd ".Q($opt::workdir) : ""),
+ (@Global::transfer_files ? map { "--tf ".Q($_) }
+ @Global::transfer_files : ""),
+ (@Global::ret_files ? map { "--return ".Q($_) }
+ @Global::ret_files : ""),
+ (@opt::env ? map { "--env ".Q($_) } @opt::env : ""),
+ (map { "-v" } @opt::v),
+ );
+ ::debug("init", "| $0 $options\n");
+ open(my $parallel_fh, "|-", "$0 -0 --will-cite -j0 $options") ||
+ ::die_bug("This does not run GNU Parallel: $0 $options");
+ my @joblogs;
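+    # For each host: build one
+    #   parallel $suboptions -j1 -S host command :::: @argfiles
+    # line and feed it NUL-terminated to the outer parallel (-0) above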
+ for my $host (sort keys %Global::host) {
+ my $sshlogin = $Global::host{$host};
+ my $qsshlogin = Q($sshlogin->string());
+ my $joblog = tmp_joblog($opt::joblog);
+ if($joblog) {
+ push @joblogs, $joblog;
+ $joblog = "--joblog ".::Q($joblog);
+ }
+ my $quad = $opt::arg_file_sep || "::::";
+ # If PARALLEL_ENV is set: Pass it on
+ my $penv=$Global::parallel_env ?
+ "PARALLEL_ENV=".Q($Global::parallel_env) :
+ '';
+ my $results;
+ if(defined $opt::results) {
+ $results = Q($opt::results) . $qsshlogin;
+ }
+ ::debug("init", "$penv $0 $suboptions -j1 $joblog ",
+ ((defined $opt::tag) ? "--tagstring ".$qsshlogin : ""),
+ ((defined $opt::results) ? "--results ".$results : ""),
+ " -S $qsshlogin ",
+ join(" ",shell_quote(@command,$quad,@argfiles)),"\n");
+ print $parallel_fh "$penv $0 $suboptions -j1 $joblog ",
+ ((defined $opt::tag) ? "--tagstring ".$qsshlogin : ""),
+ ((defined $opt::results) ? "--results ".$results : ""),
+ " -S $qsshlogin ",
+ join(" ",shell_quote(@command,$quad,@argfiles)),"\0";
+ }
+ close $parallel_fh;
+ $Global::exitstatus = $? >> 8;
+ debug("init", "--onall exitvalue ", $?);
+ if(@opt::basefile and $opt::cleanup) { cleanup_basefile(); }
+ $Global::debug or unlink(@argfiles);
+    for my $joblog (@joblogs) {
+	# Append the content of $joblog to $Global::joblog
+ my $fh = open_or_exit("<", $joblog);
+ # Skip first line (header);
+ <$fh>;
+ print $Global::joblog (<$fh>);
+ close $fh;
+ unlink($joblog);
+ }
+}
+
+
+sub __SIGNAL_HANDLING__() {}
+
+
+sub sigtstp() {
+    # Send TSTP signal (Ctrl-Z) to all children's process groups
+ # Uses:
+ # %SIG
+ # Returns: N/A
+ signal_children("TSTP");
+}
+
+sub sigpipe() {
+    # Send SIGPIPE signal to all children's process groups
+ # Uses:
+ # %SIG
+ # Returns: N/A
+ signal_children("PIPE");
+}
+
+sub signal_children() {
+    # Send signal to all children's process groups
+ # and GNU Parallel itself
+ # Uses:
+ # %SIG
+ # Returns: N/A
+ my $signal = shift;
+ debug("run", "Sending $signal ");
+ kill $signal, map { -$_ } keys %Global::running;
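+    # (kill with a negative pid signals the whole process group)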
+ # Use default signal handler for GNU Parallel itself
+ $SIG{$signal} = undef;
+ kill $signal, $$;
+}
+
+sub save_original_signal_handler() {
+ # Remember the original signal handler
+ # Uses:
+ # %Global::original_sig
+ # Returns: N/A
+ $SIG{INT} = sub {
+ if($opt::tmux) { ::qqx("tmux kill-session -t p$$"); }
+ wait_and_exit(255);
+ };
+ $SIG{TERM} = sub {
+ if($opt::tmux) { ::qqx("tmux kill-session -t p$$"); }
+ wait_and_exit(255);
+ };
+ %Global::original_sig = %SIG;
+ $SIG{TERM} = sub {}; # Dummy until jobs really start
+ $SIG{ALRM} = 'IGNORE';
+ # Allow Ctrl-Z to suspend and `fg` to continue
+ $SIG{TSTP} = \&sigtstp;
+ $SIG{PIPE} = \&sigpipe;
+ $SIG{CONT} = sub {
+	# Set $SIG{TSTP} again (it is undef'ed by signal_children())
+ $SIG{TSTP} = \&sigtstp;
+ for my $job (values %Global::running) {
+ if($job->suspended()) {
+ # Force jobs to suspend, if they are marked as suspended.
+	    # --memsuspend can suspend a job that will be resumed
+ # if the user presses CTRL-Z followed by `fg`.
+ $job->suspend();
+ } else {
+ # Resume the rest of the jobs
+ $job->resume();
+ }
+ }
+ };
+}
+
+sub list_running_jobs() {
+ # Print running jobs on tty
+ # Uses:
+ # %Global::running
+ # Returns: N/A
+ for my $job (values %Global::running) {
+ ::status("$Global::progname: ".$job->replaced());
+ }
+}
+
+sub start_no_new_jobs() {
+ # Start no more jobs
+ # Uses:
+ # %Global::original_sig
+ # %Global::unlink
+ # $Global::start_no_new_jobs
+ # Returns: N/A
+ unlink keys %Global::unlink;
+ ::status
+ ("$Global::progname: SIGHUP received. No new jobs will be started.",
+ "$Global::progname: Waiting for these ".(keys %Global::running).
+ " jobs to finish. Send SIGTERM to stop now.");
+ list_running_jobs();
+ $Global::start_no_new_jobs ||= 1;
+}
+
+sub reapers() {
+ # Run reaper until there are no more left
+ # Returns:
+ # @pids_reaped = pids of reaped processes
+ my @pids_reaped;
+ my $pid;
+ while($pid = reaper()) {
+ push @pids_reaped, $pid;
+ }
+ return @pids_reaped;
+}
+
+sub reaper() {
+ # A job finished:
+ # * Set exitstatus, exitsignal, endtime.
+    # * Free resources for a new job
+ # * Update median runtime
+ # * Print output
+ # * If --halt = now: Kill children
+ # * Print progress
+ # Uses:
+ # %Global::running
+ # $opt::timeout
+ # $Global::timeoutq
+ # $opt::keeporder
+ # $Global::total_running
+ # Returns:
+ # $stiff = PID of child finished
+ my $stiff;
+ debug("run", "Reaper ");
+ if(($stiff = waitpid(-1, &WNOHANG)) <= 0) {
+ # No jobs waiting to be reaped
+ return 0;
+ }
+
+ # $stiff = pid of dead process
+ my $job = $Global::running{$stiff};
+
+ # '-a <(seq 10)' will give us a pid not in %Global::running
+    # The same can happen with one of the 'ssh -M' masters: ignore it
+ $job or return 0;
+ delete $Global::running{$stiff};
+ $Global::total_running--;
+ if($job->{'commandline'}{'skip'}) {
+ # $job->skip() was called
+ $job->set_exitstatus(-2);
+ $job->set_exitsignal(0);
+ } else {
+ $job->set_exitsignal($? & 127);
+ if($job->exitstatus()) {
+ # Exit status already set - probably by --timeout
+ } elsif($? & 127) {
+ # Killed by signal. Many shells return: 128 | $signal
+ $job->set_exitstatus(128 | $?);
+ } else {
+ # Normal exit
+ $job->set_exitstatus($? >> 8);
+ }
+ }
+
+ debug("run", "\nseq ",$job->seq()," died (", $job->exitstatus(), ")");
+ if($Global::delayauto or $Global::sshdelayauto) {
+ if($job->exitstatus()) {
+ # Job failed: Increase delay (if $opt::(ssh)delay set)
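+	    # (e.g. --delay 1 grows to 1.3, 1.69, ... on repeated failures)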
+ $opt::delay &&= $opt::delay * 1.3;
+ $opt::sshdelay &&= $opt::sshdelay * 1.3;
+ } else {
+ # Job succeeded: Decrease delay (if $opt::(ssh)delay set)
+ $opt::delay &&= $opt::delay * 0.9;
+ $opt::sshdelay &&= $opt::sshdelay * 0.9;
+ }
+ debug("run", "delay:$opt::delay ssh:$opt::sshdelay ");
+ }
+ $job->set_endtime(::now());
+ my $sshlogin = $job->sshlogin();
+ $sshlogin->dec_jobs_running();
+ if($job->should_be_retried()) {
+ # Free up file handles
+ $job->free_ressources();
+ } else {
+ # The job is done
+ $sshlogin->inc_jobs_completed();
+ # Free the jobslot
+ $job->free_slot();
+ if($opt::timeout and not $job->exitstatus()) {
+ # Update average runtime for timeout only for successful jobs
+ $Global::timeoutq->update_median_runtime($job->runtime());
+ }
+ if($opt::keeporder and not $opt::latestline) {
+ # --latestline fixes --keeporder in Job::row()
+ $job->print_earlier_jobs();
+ } else {
+ $job->print();
+ }
+ if($job->should_we_halt() eq "now") {
+ # Kill children
+ ::kill_sleep_seq($job->pid());
+ ::killall();
+ ::wait_and_exit($Global::halt_exitstatus);
+ }
+ }
+ $job->cleanup();
+
+ if($opt::progress) {
+ my $progress = progress();
+ ::status_no_nl("\r",$progress->{'status'});
+ }
+
+ debug("run", "jobdone \n");
+ return $stiff;
+}
+
+
+sub __USAGE__() {}
+
+
+sub killall() {
+ # Kill all jobs by killing their process groups
+ # Uses:
+ # $Global::start_no_new_jobs = we are stopping
+ # $Global::killall = Flag to not run reaper
+ $Global::start_no_new_jobs ||= 1;
+ # Do not reap killed children: Ignore them instead
+ $Global::killall ||= 1;
+ kill_sleep_seq(keys %Global::running);
+}
+
+sub kill_sleep_seq(@) {
+ # Send jobs TERM,TERM,KILL to processgroups
+ # Input:
+ # @pids = list of pids that are also processgroups
+ # Convert pids to process groups ($processgroup = -$pid)
+ my @pgrps = map { -$_ } @_;
+ my @term_seq = split/,/,$opt::termseq;
+ if(not @term_seq) {
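+	# Default: TERM, wait up to 200 ms, TERM, wait 100 ms,
+	# TERM, wait 50 ms, then KILL and wait 25 ms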
+ @term_seq = ("TERM",200,"TERM",100,"TERM",50,"KILL",25);
+ }
+ # for each signal+waittime: kill process groups still not dead
+ while(@term_seq) {
+ @pgrps = kill_sleep(shift @term_seq, shift @term_seq, @pgrps);
+ }
+}
+
+sub kill_sleep() {
+ # Kill pids with a signal and wait a while for them to die
+ # Input:
+ # $signal = signal to send to @pids
+ # $sleep_max = number of ms to sleep at most before returning
+ # @pids = pids to kill (actually process groups)
+ # Uses:
+ # $Global::killall = set by killall() to avoid calling reaper
+ # Returns:
+ # @pids = pids still alive
+ my ($signal, $sleep_max, @pids) = @_;
+ ::debug("kill","kill_sleep $signal ",(join " ",sort @pids),"\n");
+ kill $signal, @pids;
+ my $sleepsum = 0;
+ my $sleep = 0.001;
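+    # Back off exponentially: each round sleeps 10% longer, and each
+    # reaped child roughly halves the sleep to stay responsive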
+
+ while(@pids and $sleepsum < $sleep_max) {
+ if($Global::killall) {
+ # Killall => don't run reaper
+ while(waitpid(-1, &WNOHANG) > 0) {
+ $sleep = $sleep/2+0.001;
+ }
+ } elsif(reapers()) {
+ $sleep = $sleep/2+0.001;
+ }
+ $sleep *= 1.1;
+ ::usleep($sleep);
+ $sleepsum += $sleep;
+ # Keep only living children
+ @pids = grep { kill(0, $_) } @pids;
+ }
+ return @pids;
+}
+
+sub wait_and_exit($) {
+    # If we do not wait, we sometimes get a segfault
+ # Returns: N/A
+ my $error = shift;
+ unlink keys %Global::unlink;
+ if($error) {
+ # Kill all jobs without printing
+ killall();
+ }
+ for (keys %Global::unkilled_children) {
+ # Kill any (non-jobs) children (e.g. reserved processes)
+ kill 9, $_;
+ waitpid($_,0);
+ delete $Global::unkilled_children{$_};
+ }
+ if($Global::unkilled_sqlworker) {
+ waitpid($Global::unkilled_sqlworker,0);
+ }
+ # Avoid: Warning: unable to close filehandle properly: No space
+ # left on device during global destruction.
+ $SIG{__WARN__} = sub {};
+ if($opt::_parset) {
+ # Make the shell script return $error
+ print "$Global::parset_endstring\nreturn $error";
+ }
+ exit($error);
+}
+
+sub die_usage() {
+ # Returns: N/A
+ usage();
+ wait_and_exit(255);
+}
+
+sub usage() {
+ # Returns: N/A
+ print join
+ ("\n",
+ "Usage:",
+ "",
+ "$Global::progname [options] [command [arguments]] < list_of_arguments",
+ "$Global::progname [options] [command [arguments]] (::: arguments|:::: argfile(s))...",
+ "cat ... | $Global::progname --pipe [options] [command [arguments]]",
+ "",
+ "-j n Run n jobs in parallel",
+ "-k Keep same order",
+ "-X Multiple arguments with context replace",
+ "--colsep regexp Split input on regexp for positional replacements",
+ "{} {.} {/} {/.} {#} {%} {= perl code =} Replacement strings",
+ "{3} {3.} {3/} {3/.} {=3 perl code =} Positional replacement strings",
+ "With --plus: {} = {+/}/{/} = {.}.{+.} = {+/}/{/.}.{+.} = {..}.{+..} =",
+ " {+/}/{/..}.{+..} = {...}.{+...} = {+/}/{/...}.{+...}",
+ "",
+ "-S sshlogin Example: foo\@server.example.com",
+ "--slf .. Use ~/.parallel/sshloginfile as the list of sshlogins",
+ "--trc {}.bar Shorthand for --transfer --return {}.bar --cleanup",
+ "--onall Run the given command with argument on all sshlogins",
+ "--nonall Run the given command with no arguments on all sshlogins",
+ "",
+ "--pipe Split stdin (standard input) to multiple jobs.",
+ "--recend str Record end separator for --pipe.",
+ "--recstart str Record start separator for --pipe.",
+ "",
+ "GNU Parallel can do much more. See 'man $Global::progname' for details",
+ "",
+ "Academic tradition requires you to cite works you base your article on.",
+ "If you use programs that use GNU Parallel to process data for an article in a",
+ "scientific publication, please cite:",
+ "",
+ " Tange, O. (2024, February 22). GNU Parallel 20240222 ('Навальный').",
+ " Zenodo. https://doi.org/10.5281/zenodo.10719803",
+ "",
+ # Before changing these lines, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be put in a public hall of shame by removing
+ # these lines
+ "This helps funding further development; AND IT WON'T COST YOU A CENT.",
+ "If you pay 10000 EUR you should feel free to use GNU Parallel without citing.",
+ "",
+ "",);
+}
+
+sub citation_notice() {
+ # if --will-cite or --plain: do nothing
+ # if stderr redirected: do nothing
+ # if $PARALLEL_HOME/will-cite: do nothing
+ # else: print citation notice to stderr
+ if($opt::willcite
+ or
+ $opt::plain
+ or
+ not -t $Global::original_stderr
+ or
+ grep { -e "$_/will-cite" } @Global::config_dirs) {
+ # skip
+ } else {
+ ::status
+ ("Academic tradition requires you to cite works you base your article on.",
+ "If you use programs that use GNU Parallel to process data for an article in a",
+ "scientific publication, please cite:",
+ "",
+ " Tange, O. (2024, February 22). GNU Parallel 20240222 ('Навальный').",
+ " Zenodo. https://doi.org/10.5281/zenodo.10719803",
+ "",
+	 # Before changing these lines, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice and
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be put in a public hall of shame by
+ # removing these lines
+ "This helps funding further development; AND IT WON'T COST YOU A CENT.",
+ "If you pay 10000 EUR you should feel free to use GNU Parallel without citing.",
+ "",
+ "More about funding GNU Parallel and the citation notice:",
+ "https://www.gnu.org/software/parallel/parallel_design.html#citation-notice",
+ "",
+ "To silence this citation notice: run 'parallel --citation' once.",
+ ""
+ );
+ mkdir $Global::config_dir;
+ # Number of times the user has run GNU Parallel without showing
+ # willingness to cite
+ my $runs = 0;
+ if(open (my $fh, "<", $Global::config_dir.
+ "/runs-without-willing-to-cite")) {
+ $runs = <$fh>;
+ close $fh;
+ }
+ $runs++;
+ if(open (my $fh, ">", $Global::config_dir.
+ "/runs-without-willing-to-cite")) {
+ print $fh $runs;
+ close $fh;
+ if($runs >= 10) {
+ ::status("Come on: You have run parallel $runs times. ".
+ "Isn't it about time ",
+ "you run 'parallel --citation' once to silence ".
+ "the citation notice?",
+ "");
+ }
+ }
+ }
+}
+
+sub status(@) {
+ my @w = @_;
+ my $fh = $Global::status_fd || *STDERR;
+ print $fh map { ($_, "\n") } @w;
+ flush $fh;
+}
+
+sub status_no_nl(@) {
+ my @w = @_;
+ my $fh = $Global::status_fd || *STDERR;
+ print $fh @w;
+ flush $fh;
+}
+
+sub warning(@) {
+ my @w = @_;
+ my $prog = $Global::progname || "parallel";
+ status_no_nl(map { ($prog, ": Warning: ", $_, "\n"); } @w);
+}
+
+{
+ my %warnings;
+ sub warning_once(@) {
+ my @w = @_;
+ my $prog = $Global::progname || "parallel";
+	# Key on the joined message; a bare @w would use the number of
+	# elements as the key and wrongly suppress different warnings
+	$warnings{"@w"}++ or
+ status_no_nl(map { ($prog, ": Warning: ", $_, "\n"); } @w);
+ }
+}
+
+sub error(@) {
+ my ($exitcode,@w) = @_;
+ my $prog = $Global::progname || "parallel";
+ if($exitcode =~ /^\d+$/) {
+ # If $exitcode is set, exit the program
+ status(map { ($prog.": Error: ". $_); } @w);
+ wait_and_exit($exitcode);
+ } else {
+ # Otherwise $exitcode is just a string
+ status(map { ($prog.": Error: ". $_); } $exitcode, @w);
+ }
+}
+
+sub die_bug($) {
+ my $bugid = shift;
+ print STDERR
+ ("$Global::progname: This should not happen. You have found a bug. ",
+ "Please follow\n",
+ "https://www.gnu.org/software/parallel/man.html#reporting-bugs\n",
+ "\n",
+ "Include this in the report:\n",
+ "* The version number: $Global::version\n",
+ "* The bugid: $bugid\n",
+ "* The command line being run\n",
+ "* The files being read (put the files on a webserver if they are big)\n",
+ "\n",
+ "If you get the error on smaller/fewer files, please include those instead.\n");
+ ::wait_and_exit(255);
+}
+
+sub version() {
+ # Returns: N/A
+ print join
+ ("\n",
+ "GNU $Global::progname $Global::version",
+ "Copyright (C) 2007-2024 Ole Tange, http://ole.tange.dk and Free Software",
+ "Foundation, Inc.",
+ "License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>",
+ "This is free software: you are free to change and redistribute it.",
+ "GNU $Global::progname comes with no warranty.",
+ "",
+ "Web site: https://www.gnu.org/software/${Global::progname}\n",
+ "When using programs that use GNU Parallel to process data for publication",
+ "please cite as described in 'parallel --citation'.\n",
+ );
+}
+
+sub citation() {
+ # Returns: N/A
+ my ($all_argv_ref,$argv_options_removed_ref) = @_;
+ my $all_argv = "@$all_argv_ref";
+ my $no_opts = "@$argv_options_removed_ref";
+ $all_argv=~s/--citation//;
+ if($all_argv ne $no_opts) {
+ ::warning("--citation ignores all other options and arguments.");
+ ::status("");
+ }
+
+ ::status(
+ "Academic tradition requires you to cite works you base your article on.",
+ "If you use programs that use GNU Parallel to process data for an article in a",
+ "scientific publication, please cite:",
+ "",
+ "\@software{tange_2024_10719803,",
+ " author = {Tange, Ole},",
+ " title = {GNU Parallel 20240222 ('Навальный')},",
+ " month = Feb,",
+ " year = 2024,",
+ " note = {{GNU Parallel is a general parallelizer to run",
+ " multiple serial command line programs in parallel",
+ " without changing them.}},",
+ " publisher = {Zenodo},",
+ " doi = {10.5281/zenodo.10719803},",
+ " url = {https://doi.org/10.5281/zenodo.10719803}",
+ "}",
+ "",
+ "(Feel free to use \\nocite{tange_2024_10719803})",
+ "",
+ # Before changing these lines, please read
+ # https://www.gnu.org/software/parallel/parallel_design.html#citation-notice and
+ # https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt
+ # You accept to be put in a public hall of shame by removing
+ # these lines
+ "This helps funding further development; AND IT WON'T COST YOU A CENT.",
+ "If you pay 10000 EUR you should feel free to use GNU Parallel without citing.",
+ "",
+ "More about funding GNU Parallel and the citation notice:",
+ "https://lists.gnu.org/archive/html/parallel/2013-11/msg00006.html",
+ "https://www.gnu.org/software/parallel/parallel_design.html#citation-notice",
+ "https://git.savannah.gnu.org/cgit/parallel.git/tree/doc/citation-notice-faq.txt",
+ ""
+ );
+ while(not grep { -e "$_/will-cite" } @Global::config_dirs) {
+ print "\nType: 'will cite' and press enter.\n> ";
+ my $input = <STDIN>;
+ if(not defined $input) {
+ exit(255);
+ }
+ if($input =~ /will cite/i) {
+ if(mkdir $Global::config_dir) {
+ # Recompute @Global::config_dirs so we can break out of the loop.
+ init_globals();
+ }
+ if(open (my $fh, ">", $Global::config_dir."/will-cite")) {
+ close $fh;
+ ::status(
+ "",
+ "Thank you for your support: You are the reason why there is funding to",
+ "continue maintaining GNU Parallel. On behalf of future versions of",
+ "GNU Parallel, which would not exist without your support:",
+ "",
+ " THANK YOU SO MUCH",
+ "",
+ "It is really appreciated. The citation notice is now silenced.",
+ "");
+ } else {
+ ::status(
+ "",
+ "Thank you for your support. It is much appreciated. The citation",
+ "cannot permanently be silenced. Use '--will-cite' instead.",
+ "",
+ "If you use '--will-cite' in scripts to be run by others you are making",
+ "it harder for others to see the citation notice. The development of",
+ "GNU Parallel is indirectly financed through citations, so if users",
+ "do not know they should cite then you are making it harder to finance",
+ "development. However, if you pay 10000 EUR, you should feel free to",
+ "use '--will-cite' in scripts.",
+ "");
+ last;
+ }
+ }
+ }
+}
+
+sub show_limits() {
+ # Returns: N/A
+ print("Maximal size of command: ",Limits::Command::real_max_length(),"\n",
+ "Maximal usable size of command: ",
+ $Global::usable_command_line_length,"\n",
+ "\n",
+ "Execution will continue now, ",
+ "and it will try to read its input\n",
+ "and run commands; if this is not ",
+ "what you wanted to happen, please\n",
+ "press CTRL-D or CTRL-C\n");
+}
+
+sub embed() {
+ # Give an embeddable version of GNU Parallel
+ # Tested with: bash, zsh, ksh, ash, dash, sh
+ my $randomstring = "cut-here-".join"",
+ map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..20);
+ if(not -f $0 or not -r $0) {
+ ::error(255,"--embed only works if parallel is a readable file");
+ }
+ # Read the source from $0
+ my $source = slurp_or_exit($0);
+ my $user = $ENV{LOGNAME} || $ENV{USERNAME} || $ENV{USER};
+ my $env_parallel_source;
+ my $shell = $Global::shell;
+ $shell =~ s:.*/::;
+ for(which("env_parallel.$shell")) {
+ -r $_ or next;
+ # Read the source of env_parallel.shellname
+ $env_parallel_source .= slurp_or_exit($_);
+ last;
+ }
+ print "#!$Global::shell
+
+# Copyright (C) 2007-2024 $user, Ole Tange, http://ole.tange.dk
+# and Free Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+";
+
+ print q!
+# Embedded GNU Parallel created with --embed
+parallel() {
+ # Start GNU Parallel without leaving temporary files
+ #
+ # Not all shells support 'perl <(cat ...)'
+ # This is a complex way of doing:
+ # perl <(cat <<'cut-here'
+ # [...]
+ # ) "$@"
+ # and also avoiding:
+ # [1]+ Done cat
+
+ # Make a temporary fifo that perl can read from
+ _fifo_with_GNU_Parallel_source=`perl -e 'use POSIX qw(mkfifo);
+ do {
+ $f = "/tmp/parallel-".join"",
+ map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $f);
+ mkfifo($f,0600);
+ print $f;'`
+ # Put source code into temporary file
+ # so it is easy to copy to the fifo
+ _file_with_GNU_Parallel_source=`mktemp`;
+!,
+ "cat <<'$randomstring' > \$_file_with_GNU_Parallel_source\n",
+ $source,
+ $randomstring,"\n",
+ q!
+ # Copy the source code from the file to the fifo
+ # and remove the file and fifo ASAP
+ # 'sh -c' is needed to avoid
+ # [1]+ Done cat
+ sh -c "(rm $_file_with_GNU_Parallel_source; cat >$_fifo_with_GNU_Parallel_source; rm $_fifo_with_GNU_Parallel_source) < $_file_with_GNU_Parallel_source &"
+
+ # Read the source from the fifo
+ perl $_fifo_with_GNU_Parallel_source "$@"
+}
+!,
+ $env_parallel_source,
+ q!
+
+# This will call the functions above
+parallel -k echo ::: Put your code here
+env_parallel --session
+env_parallel -k echo ::: Put your code here
+parset p,y,c,h -k echo ::: Put your code here
+echo $p $y $c $h
+echo You can also activate GNU Parallel for interactive use by:
+echo . "$0"
+!;
+ ::status("Redirect the output to a file and add your changes at the end:",
+ " $0 --embed > new_script");
+}
+
+sub pack_combined_executable {
+ my ($before_ref,$with_argsep_ref,$argv_ref) = @_;
+ my @parallelopts;
+ my $skip_next;
+ # Remove '--combine-exec file' from options
+ for(@{$before_ref}[0..(arrayindex($before_ref,$with_argsep_ref))-1]) {
+ if (/^--combine-?exec(utable)?$/ || $skip_next) {
+ # Also skip the filename given to --combine-exec
+ $skip_next = !$skip_next;
+ next;
+ }
+ push @parallelopts, $_;
+ }
+ # From ::: and to end
+ my @argsep = @{$with_argsep_ref}[($#ARGV+1)..$#$with_argsep_ref];
+ # The executable is now the first in @ARGV
+ my $execname = shift @ARGV;
+ # The rest of @ARGV are options for $execname
+ my @execopts = @ARGV;
+ debug("combine",
+ "Parallel opts: @parallelopts ",
+ "Executable: $execname ",
+ "Execopts: @execopts ",
+ "Argsep: @argsep\n");
+  # Read the executable
+ my $exec = slurp_or_exit(which($execname));
+ # Read the source of GNU Parallel and the executable
+ my $parallel = slurp_or_exit($0);
+  # Remove a possible __END__ section from GNU Parallel
+ $parallel =~ s/^__END__.*//s;
+ if(-t $Global::original_stderr) {
+ ::status(
+ "Please be aware that combining GNU Parallel and '$execname'",
+ "into a combined executable will make the whole executable",
+ "licensed under GPLv3 (section 5.c).",
+ "",
+ "If the license of '$execname' is incompatible with GPLv3,",
+ "you cannot legally convey copies of the combined executable",
+ "to others. You can, however, still run them yourself.",
+ "",
+ "The combined executable will not have a citation notice,",
+	    "so it is your responsibility to advise that academic tradition",
+ "requires the users to cite GNU Parallel.",
+ ""
+ );
+ my $input;
+ do {
+ ::status_no_nl("\nType: 'I agree' and press enter.\n> ");
+ $input = <STDIN>;
+ if(not defined $input) {
+ exit(255);
+ }
+ } until($input =~ /I agree/i);
+ }
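+  # The layout written below mirrors what
+  # unpack_combined_executable() parses: GNU Parallel source,
+  # __END__, \0\n-separated sections terminated by \0\0\n,
+  # and finally the raw bytes of the executable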
+ write_or_exit($opt::combineexec,
+ $parallel,
+ "\n__END__\n",
+ (map { "$_\0\n" } @parallelopts), "\0\0\n",
+ $execname, "\0\0\n",
+ (map { "$_\0\n" } @execopts), "\0\0\n",
+ (map { "$_\0\n" } @argsep), "\0\0\n",
+ $exec);
+ # Set +x permission
+ chmod 0700, $opt::combineexec;
+ exit(0);
+}
+
+sub unpack_combined_executable {
+ # If the script is a combined executable,
+ # it will have stuff in <DATA> (I.e. after __END__)
+ my $combine_exec = join("",<DATA>);
+ if(length $combine_exec) {
+ # Parse the <DATA>
+ #
+ # __END__
+ # Option for GNU Parallel\0\n
+ # Option for GNU Parallel\0\n
+ # \0\0\n
+ # Name of executable\0\0\n
+ # Option for executable\0\n
+ # Option for executable\0\n
+ # \0\0\n
+ # argsep + args if any\0\n
+ # argsep + args if any\0\n
+ # \0\0\n
+ # <<binary of exec>>
+ #
+ # parallel --combine --pipe -j10% --recend '' myscript --myopt myval
+ # __END__
+ # --pipe\0\n --pipe
+ # -j10%\0\n -j10%
+ # --recend\0\n --recend
+ # \0\n ''
+ # \0\0\n end-of-parallel-options
+ # myscript\0\0\n myscript
+ # --myopt\0\n --myopt
+ # myval\0\n myval
+ # \0\0\n end-of-myscript-options
+ # \0\0\n no argsep
+ # <<binary of myscript>>
+ #
+ # parallel --combine -j10% myscript :::
+ # __END__
+ # -j10%\0\n
+ # \0\0\n end-of-parallel-options
+ # myscript\0\0\n
+ # \0\0\n end-of-myscript-options
+ # :::\0\n
+ # \0\0\n
+ # <<binary of myscript>>
+
+ my ($opts,$execname,$execopts,$argsep,$exec) =
+ split /\0\0\n/,$combine_exec,5;
+ # Make a tmpdir with a file called $execname
+ local %ENV;
+ $ENV{TMPDIR} ||= "/tmp";
+ my $dir = File::Temp::tempdir($ENV{'TMPDIR'} . "/parXXXXX", CLEANUP => 1);
+ my $script = $dir."/".$execname;
+ write_or_exit($script,$exec);
+ # Set +x permission
+ chmod 0700, $script;
+ # Mark it for unlinking later
+ $Global::unlink{$script}++;
+ $Global::unlink{$dir}++;
+ # pass the options for GNU Parallel
+ my @opts = split /\0\n/, $opts;
+ my @execopts = split /\0\n/, $execopts;
+ if(length $argsep) {
+ # Only add argsep if set
+ unshift(@ARGV, split(/\0\n/,$argsep));
+ }
+ unshift(@ARGV,@opts,$script,@execopts);
+ }
+}
+
+
+sub __GENERIC_COMMON_FUNCTION__() {}
+
+
+sub mkdir_or_die($) {
+ # If dir is not executable: die
+ my $dir = shift;
+ # The eval is needed to catch exception from mkdir
+ eval { File::Path::mkpath($dir); };
+ if(not -x $dir) {
+ ::error(255,"Cannot change into non-executable dir $dir: $!");
+ }
+}
+
+sub tmpfile(@) {
+ # Create tempfile as $TMPDIR/parXXXXX
+ # Returns:
+ # $filehandle = opened file handle
+ # $filename = file name created
+ my($filehandle,$filename) =
+ ::tempfile(DIR=>$ENV{'TMPDIR'}, TEMPLATE => 'parXXXXX', @_);
+ if(wantarray) {
+ return($filehandle,$filename);
+ } else {
+ # Separate unlink due to NFS dealing badly with File::Temp
+ unlink $filename;
+ return $filehandle;
+ }
+}
+
+sub tmpname($) {
+ # Select a name that does not exist
+ # Do not create the file as it may be used for creating a socket (by tmux)
+ # Remember the name in $Global::unlink to avoid hitting the same name twice
+ my $name = shift;
+ my($tmpname);
+ if(not -w $ENV{'TMPDIR'}) {
+ my $qtmp = ::Q($ENV{'TMPDIR'});
+ if(not -e $ENV{'TMPDIR'}) {
+ ::error(255,"Tmpdir $qtmp does not exist.",
+ "Try: mkdir -p $qtmp");
+ } else {
+ ::error(255,"Tmpdir $qtmp is not writable.",
+ "Try: chmod +w $qtmp");
+ }
+ }
+ do {
+ $tmpname = $ENV{'TMPDIR'}."/".$name.
+ join"", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $tmpname or $Global::unlink{$tmpname}++);
+ return $tmpname;
+}
+
+sub tmpfifo() {
+ # Find an unused name and mkfifo on it
+ my $tmpfifo = tmpname("fif");
+ mkfifo($tmpfifo,0600);
+ return $tmpfifo;
+}
+
+sub rm(@) {
+ # Remove file and remove it from %Global::unlink
+ # Uses:
+ # %Global::unlink
+ delete @Global::unlink{@_};
+ unlink @_;
+}
+
+sub size_of_block_dev() {
+ # Like -s but for block devices
+ # Input:
+ # $blockdev = file name of block device
+ # Returns:
+ # $size = in bytes, undef if error
+ my $blockdev = shift;
+ my $fh = open_or_exit("<", $blockdev);
+ seek($fh,0,2) || ::die_bug("cannot seek $blockdev");
+ my $size = tell($fh);
+ close $fh;
+ return $size;
+}
+
+sub qqx(@) {
+ # Like qx but with clean environment (except for @keep)
+ # and STDERR ignored
+ # This is needed if the environment contains functions
+ # that /bin/sh does not understand
+ my %env;
+ # ssh with ssh-agent needs PATH SSH_AUTH_SOCK SSH_AGENT_PID
+ # ssh with Kerberos needs KRB5CCNAME
+ # sshpass needs SSHPASS
+ # tmux needs LC_CTYPE
+ # lsh needs HOME LOGNAME
+ my @keep = qw(PATH SSH_AUTH_SOCK SSH_AGENT_PID KRB5CCNAME LC_CTYPE
+ HOME LOGNAME SSHPASS);
+ @env{@keep} = @ENV{@keep};
+ local %ENV;
+ %ENV = %env;
+ if($Global::debug) {
+ # && true is to force spawning a shell and not just exec'ing
+ return qx{ @_ && true };
+ } else {
+ # CygWin does not respect 2>/dev/null
+ # so we do that by hand
+ # This trick does not work:
+ # https://stackoverflow.com/q/13833088/363028
+ # local *STDERR;
+ # open(STDERR, ">", "/dev/null");
+ open(local *CHILD_STDIN, '<', '/dev/null') or die $!;
+ open(local *CHILD_STDERR, '>', '/dev/null') or die $!;
+ my $out;
+ # eval is needed if open3 fails (e.g. command line too long)
+ eval {
+ my $pid = open3(
+ '<&CHILD_STDIN',
+ $out,
+ '>&CHILD_STDERR',
+ # && true is to force spawning a shell and not just exec'ing
+ "@_ && true");
+ my @arr = <$out>;
+ close $out;
+ # Make sure $? is set
+ waitpid($pid, 0);
+ return wantarray ? @arr : join "",@arr;
+ } or do {
+ # If eval fails, force $?=false
+ `false`;
+ };
+ }
+}
+
+sub uniq(@) {
+ # Remove duplicates and return unique values
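+    # Note: the result comes from hash keys, so its order is arbitrary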
+ return keys %{{ map { $_ => 1 } @_ }};
+}
+
+sub min(@) {
+ # Returns:
+ # Minimum value of array
+ my $min;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+	defined $min or do { $min = $_; next; }; # Set $min to the first non-undef
+ $min = ($min < $_) ? $min : $_;
+ }
+ return $min;
+}
+
+sub max(@) {
+ # Returns:
+ # Maximum value of array
+ my $max;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+	defined $max or do { $max = $_; next; }; # Set $max to the first non-undef
+ $max = ($max > $_) ? $max : $_;
+ }
+ return $max;
+}
+
+sub sum(@) {
+ # Returns:
+ # Sum of values of array
+ my @args = @_;
+ my $sum = 0;
+ for (@args) {
+	# Skip undefs and zeros (neither changes the sum)
+	$_ and do { $sum += $_; }
+ }
+ return $sum;
+}
+
+sub undef_as_zero($) {
+ my $a = shift;
+ return $a ? $a : 0;
+}
+
+sub undef_as_empty($) {
+ my $a = shift;
+ return $a ? $a : "";
+}
+
+sub undef_if_empty($) {
+ if(defined($_[0]) and $_[0] eq "") {
+ return undef;
+ }
+ return $_[0];
+}
+
+sub multiply_binary_prefix(@) {
+    # Evaluate numbers with binary prefix
+    # Ki=2^10, Mi=2^20, Gi=2^30, Ti=2^40, Pi=2^50,
+    # Ei=2^60, Zi=2^70, Yi=2^80, Xi=2^90 (same for ki..xi and K..X)
+    # k =10^3, m =10^6, g =10^9, t=10^12, p=10^15,
+    # e=10^18, z=10^21, y=10^24, x=10^27
+ # 13G = 13*1024*1024*1024 = 13958643712
+ # Input:
+ # $s = string with prefixes
+ # Returns:
+ # $value = int with prefixes multiplied
+ my @v = @_;
+ for(@v) {
+ defined $_ or next;
+ s/ki/*1024/gi;
+ s/mi/*1024*1024/gi;
+ s/gi/*1024*1024*1024/gi;
+ s/ti/*1024*1024*1024*1024/gi;
+ s/pi/*1024*1024*1024*1024*1024/gi;
+ s/ei/*1024*1024*1024*1024*1024*1024/gi;
+ s/zi/*1024*1024*1024*1024*1024*1024*1024/gi;
+ s/yi/*1024*1024*1024*1024*1024*1024*1024*1024/gi;
+ s/xi/*1024*1024*1024*1024*1024*1024*1024*1024*1024/gi;
+
+ s/K/*1024/g;
+ s/M/*1024*1024/g;
+ s/G/*1024*1024*1024/g;
+ s/T/*1024*1024*1024*1024/g;
+ s/P/*1024*1024*1024*1024*1024/g;
+ s/E/*1024*1024*1024*1024*1024*1024/g;
+ s/Z/*1024*1024*1024*1024*1024*1024*1024/g;
+ s/Y/*1024*1024*1024*1024*1024*1024*1024*1024/g;
+ s/X/*1024*1024*1024*1024*1024*1024*1024*1024*1024/g;
+
+ s/k/*1000/g;
+ s/m/*1000*1000/g;
+ s/g/*1000*1000*1000/g;
+ s/t/*1000*1000*1000*1000/g;
+ s/p/*1000*1000*1000*1000*1000/g;
+ s/e/*1000*1000*1000*1000*1000*1000/g;
+ s/z/*1000*1000*1000*1000*1000*1000*1000/g;
+ s/y/*1000*1000*1000*1000*1000*1000*1000*1000/g;
+ s/x/*1000*1000*1000*1000*1000*1000*1000*1000*1000/g;
+
+ $_ = eval $_;
+ }
+ return wantarray ? @v : $v[0];
+}
+
+sub multiply_time_units($) {
+    # Evaluate numbers with time units
+ # s=1, m=60, h=3600, d=86400
+ # Input:
+ # $s = string time units
+ # Returns:
+ # $value = int in seconds
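+    #   Example: "1h30m" => "1*3600+30*60+" => eval("1*3600+30*60+-0") = 5400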
+ my @v = @_;
+ for(@v) {
+ defined $_ or next;
+ if(/[dhms]/i) {
+ s/s/*1+/gi;
+ s/m/*60+/gi;
+ s/h/*3600+/gi;
+ s/d/*86400+/gi;
+ # 1m/3 => 1*60+/3 => 1*60/3
+ s/\+(\D)/$1/gi;
+ }
+ $_ = eval $_."-0";
+ }
+ return wantarray ? @v : $v[0];
+}
+
+sub seconds_to_time_units() {
+ # Convert seconds into ??d??h??m??s
+ # s=1, m=60, h=3600, d=86400
+ # Input:
+ # $s = int in seconds
+ # Returns:
+ # $str = string time units
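+    #   Example: 90061 => "1d01h01m01s"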
+ my $s = shift;
+ my $str;
+ my $d = int($s/86400);
+ $s -= $d * 86400;
+ my $h = int($s/3600);
+ $s -= $h * 3600;
+ my $m = int($s/60);
+ $s -= $m * 60;
+ if($d) {
+ $str = sprintf("%dd%02dh%02dm%02ds",$d,$h,$m,$s);
+ } elsif($h) {
+ $str = sprintf("%dh%02dm%02ds",$h,$m,$s);
+ } elsif($m) {
+ $str = sprintf("%dm%02ds",$m,$s);
+ } else {
+ $str = sprintf("%ds",$s);
+ }
+ return $str;
+}
+
+{
+ my ($disk_full_fh, $b8193);
+ sub exit_if_disk_full() {
+ # Checks if $TMPDIR is full by writing 8kb to a tmpfile
+ # If the disk is full: Exit immediately.
+ # Returns:
+ # N/A
+ if(not $disk_full_fh) {
+ $disk_full_fh = ::tmpfile(SUFFIX => ".df");
+ $b8193 = "b"x8193;
+ }
+	# Linux does not discover that a disk is full when writing <= 8192 bytes
+ # Tested on:
+ # bfs btrfs cramfs ext2 ext3 ext4 ext4dev jffs2 jfs minix msdos
+ # ntfs reiserfs tmpfs ubifs vfat xfs
+	# TODO this should be tested on different OSes, similar to this:
+ #
+ # doit() {
+ # sudo mount /dev/ram0 /mnt/loop; sudo chmod 1777 /mnt/loop
+ # seq 100000 | parallel --tmpdir /mnt/loop/ true &
+ # seq 6900000 > /mnt/loop/i && echo seq OK
+ # seq 6980868 > /mnt/loop/i
+ # seq 10000 > /mnt/loop/ii
+ # sleep 3
+ # sudo umount /mnt/loop/ || sudo umount -l /mnt/loop/
+ # echo >&2
+ # }
+ print $disk_full_fh $b8193;
+ if(not $disk_full_fh
+ or
+ tell $disk_full_fh != 8193) {
+ # On raspbian the disk can be full except for 10 chars.
+ ::error(255,"Output is incomplete.",
+ "Cannot append to buffer file in $ENV{'TMPDIR'}.",
+ "Is the disk full?",
+ "Change \$TMPDIR with --tmpdir or use --compress.");
+ }
+ truncate $disk_full_fh, 0;
+ seek($disk_full_fh, 0, 0) || die;
+ }
+}
+
+sub spacefree($$) {
+ # Remove comments and spaces
+ # Inputs:
+ # $spaces = keep 1 space?
+    #   $spaces = 0: remove all whitespace
+    #             1: replace runs of whitespace with a single space
+    #             2: keep newlines, collapse other whitespace
+    #             3: keep only whitespace that perl code requires
+ # Returns:
+ # $s = with spaces removed
+ my $spaces = shift;
+ my $s = shift;
+ $s =~ s/#.*//mg;
+ if(1 == $spaces) {
+ $s =~ s/\s+/ /mg;
+ } elsif(2 == $spaces) {
+ # Keep newlines
+ $s =~ s/\n\n+/\n/sg;
+ $s =~ s/[ \t]+/ /mg;
+ } elsif(3 == $spaces) {
+ # Keep perl code required space
+ $s =~ s{([^a-zA-Z0-9/])\s+}{$1}sg;
+ $s =~ s{([a-zA-Z0-9/])\s+([^:a-zA-Z0-9/])}{$1$2}sg;
+ } else {
+ $s =~ s/\s//mg;
+ }
+ return $s;
+}
+
+{
+ my $hostname;
+ sub hostname() {
+ local $/ = "\n";
+ if(not $hostname) {
+ $hostname = `hostname`;
+ chomp($hostname);
+ $hostname ||= "nohostname";
+ }
+ return $hostname;
+ }
+}
+
+sub which(@) {
+ # Input:
+ # @programs = programs to find the path to
+ # Returns:
+ # @full_path = full paths to @programs. Nothing if not found
+ my @which;
+ for my $prg (@_) {
+ push(@which, grep { not -d $_ and -x $_ }
+ map { $_."/".$prg } split(":",$ENV{'PATH'}));
+ if($prg =~ m:/:) {
+ # Test if program with full path exists
+ push(@which, grep { not -d $_ and -x $_ } $prg);
+ }
+ }
+ ::debug("which", "$which[0] in $ENV{'PATH'}\n");
+ return wantarray ? @which : $which[0];
+}
+
+{
+ my ($regexp,$shell,%fakename);
+
+ sub parent_shell {
+ # Input:
+ # $pid = pid to see if (grand)*parent is a shell
+ # Returns:
+ # $shellpath = path to shell - undef if no shell found
+ my $pid = shift;
+ ::debug("init","Parent of $pid\n");
+ if(not $regexp) {
+ # All shells known to mankind
+ #
+ # ash bash csh dash fdsh fish fizsh ksh ksh93 mksh pdksh
+ # posh rbash rc rush rzsh sash sh static-sh tcsh yash zsh
+
+ my @shells = (qw(ash bash bsd-csh csh dash fdsh fish fizsh ksh
+ ksh93 lksh mksh pdksh posh rbash rc rush rzsh sash sh
+ static-sh tcsh yash zsh -sh -csh -bash),
+ '-sh (sh)' # sh on FreeBSD
+ );
+ # Can be formatted as:
+ # [sh] -sh sh busybox sh -sh (sh)
+ # /bin/sh /sbin/sh /opt/csw/sh
+ # But not: foo.sh sshd crash flush pdflush scosh fsflush ssh
+ $shell = "(?:".join("|",map { "\Q$_\E" } @shells).")";
+ $regexp = '^((\[)(-?)('. $shell. ')(\])|(|\S+/|busybox )'.
+ '(-?)('. $shell. '))( *$| [^(])';
+ %fakename = (
+ # sh disguises itself as -sh (sh) on FreeBSD
+ "-sh (sh)" => ["sh"],
+ # csh and tcsh disguise themselves as -sh/-csh
+ # E.g.: ssh -tt csh@lo 'ps aux;true' |egrep ^csh
+ # but sh also disguises itself as -sh
+ # (TODO When does that happen?)
+ "-sh" => ["sh"],
+ "-csh" => ["tcsh", "csh"],
+ # ash disguises itself as -ash
+ "-ash" => ["ash", "dash", "sh"],
+ # dash disguises itself as -dash
+ "-dash" => ["dash", "ash", "sh"],
+ # bash disguises itself as -bash
+ "-bash" => ["bash", "sh"],
+ # ksh disguises itself as -ksh
+ "-ksh" => ["ksh", "sh"],
+ # zsh disguises itself as -zsh
+ "-zsh" => ["zsh", "sh"],
+ );
+ }
+ if($^O eq "linux") {
+ # Optimized for GNU/Linux
+ my $testpid = $pid;
+ my $shellpath;
+ my $shellline;
+ while($testpid) {
+ if(open(my $fd, "<", "/proc/$testpid/cmdline")) {
+ local $/="\0";
+ chomp($shellline = <$fd>);
+ if($shellline =~ /$regexp/o) {
+ my $shellname = $4 || $8;
+ my $dash = $3 || $7;
+ if($shellname eq "sh" and $dash) {
+ # -sh => csh or sh
+ if($shellpath = readlink "/proc/$testpid/exe") {
+ ::debug("init","procpath $shellpath\n");
+ if($shellpath =~ m:/$shell$:o) {
+ ::debug("init",
+ "proc which ".$shellpath." => ");
+ return $shellpath;
+ }
+ }
+ }
+ ::debug("init", "which ".$shellname." => ");
+ $shellpath = (which($shellname,
+ @{$fakename{$shellname}}))[0];
+ ::debug("init", "shell path $shellpath\n");
+ return $shellpath;
+ }
+ }
+ # Get parent pid
+ if(open(my $fd, "<", "/proc/$testpid/stat")) {
+ my $line = <$fd>;
+ close $fd;
+ # Parent pid is field 4
+ $testpid = (split /\s+/, $line)[3];
+ } else {
+ # Something is wrong: fall back to old method
+ last;
+ }
+ }
+ }
+ # if -sh or -csh try readlink /proc/$$/exe
+ my ($children_of_ref, $parent_of_ref, $name_of_ref) = pid_table();
+ my $shellpath;
+ my $testpid = $pid;
+ while($testpid) {
+ if($name_of_ref->{$testpid} =~ /$regexp/o) {
+ my $shellname = $4 || $8;
+ my $dash = $3 || $7;
+ if($shellname eq "sh" and $dash) {
+ # -sh => csh or sh
+ if($shellpath = readlink "/proc/$testpid/exe") {
+ ::debug("init","procpath $shellpath\n");
+ if($shellpath =~ m:/$shell$:o) {
+ ::debug("init", "proc which ".$shellpath." => ");
+ return $shellpath;
+ }
+ }
+ }
+ ::debug("init", "which ".$shellname." => ");
+ $shellpath = (which($shellname,@{$fakename{$shellname}}))[0];
+ ::debug("init", "shell path $shellpath\n");
+ $shellpath and last;
+ }
+ if($testpid == $parent_of_ref->{$testpid}) {
+ # In Solaris zones, the PPID of the zsched process is itself
+ last;
+ }
+ $testpid = $parent_of_ref->{$testpid};
+ }
+ return $shellpath;
+ }
+}
+
+{
+ my %pid_parentpid_cmd;
+
+ sub pid_table() {
+ # Returns:
+ # %children_of = { pid -> children of pid }
+ # %parent_of = { pid -> pid of parent }
+ # %name_of = { pid -> commandname }
+
+ if(not %pid_parentpid_cmd) {
+ # Filter for SysV-style `ps`
+ my $sysv = q( ps -ef |).
+ q(perl -ane '1..1 and /^(.*)CO?MM?A?N?D/ and $s=length $1;).
+ q(s/^.{$s}//; print "@F[1,2] $_"' );
+ # Minix uses cols 2,3 and can have newlines in the command
+ # so lines not having numbers in cols 2,3 must be ignored
+ my $minix = q( ps -ef |).
+ q(perl -ane '1..1 and /^(.*)CO?MM?A?N?D/ and $s=length $1;).
+ q(s/^.{$s}// and $F[2]>0 and $F[3]>0 and print "@F[2,3] $_"' );
+ # BSD-style `ps`
+ my $bsd = q(ps -o pid,ppid,command -ax);
+ %pid_parentpid_cmd =
+ (
+ 'aix' => $sysv,
+ 'android' => $sysv,
+ 'cygwin' => $sysv,
+ 'darwin' => $bsd,
+ 'dec_osf' => $sysv,
+ 'dragonfly' => $bsd,
+ 'freebsd' => $bsd,
+ 'gnu' => $sysv,
+ 'hpux' => $sysv,
+ 'linux' => $sysv,
+ 'mirbsd' => $bsd,
+ 'minix' => $minix,
+ 'msys' => $sysv,
+ 'MSWin32' => $sysv,
+ 'netbsd' => $bsd,
+ 'nto' => $sysv,
+ 'openbsd' => $bsd,
+ 'solaris' => $sysv,
+ 'svr5' => $sysv,
+ 'syllable' => "echo ps not supported",
+ );
+ }
+ $pid_parentpid_cmd{$^O} or
+ ::die_bug("pid_parentpid_cmd for $^O missing");
+
+ my (@pidtable,%parent_of,%children_of,%name_of);
+ # Table with pid -> children of pid
+ @pidtable = `$pid_parentpid_cmd{$^O}`;
+ my $p=$$;
+ for (@pidtable) {
+ # must match: 24436 21224 busybox ash
+ # must match: 24436 21224 <<empty on MacOSX running cubase>>
+ # must match: 24436 21224 <<empty on system running Viber>>
+ # or: perl -e 'while($0=" "){}'
+ if(/^\s*(\S+)\s+(\S+)\s+(\S+.*)/
+ or
+ /^\s*(\S+)\s+(\S+)\s+()$/) {
+ $parent_of{$1} = $2;
+ push @{$children_of{$2}}, $1;
+ $name_of{$1} = $3;
+ } else {
+ ::die_bug("pidtable format: $_");
+ }
+ }
+ return(\%children_of, \%parent_of, \%name_of);
+ }
+}
+
+sub now() {
+    # Returns time since epoch in seconds with 3 decimals
+ # Uses:
+ # @Global::use
+ # Returns:
+ # $time = time now with millisecond accuracy
+ if(not $Global::use{"Time::HiRes"}) {
+ if(eval "use Time::HiRes qw ( time );") {
+ eval "sub TimeHiRestime { return Time::HiRes::time };";
+ } else {
+ eval "sub TimeHiRestime { return time() };";
+ }
+ $Global::use{"Time::HiRes"} = 1;
+ }
+
+ return (int(TimeHiRestime()*1000))/1000;
+}
+
+sub usleep($) {
+ # Sleep this many milliseconds.
+ # Input:
+ # $ms = milliseconds to sleep
+ my $ms = shift;
+ ::debug("timing",int($ms),"ms ");
+ select(undef, undef, undef, $ms/1000);
+}
+
+sub make_regexp_ungreedy {
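+    # Append '?' to each greedy quantifier (* + {n,m}) that lacks one,
+    # skipping quantifiers inside [...] classes and escaped characters
+    # Example: 'a+b*' => 'a+?b*?' (an existing 'a+?' is left alone)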
+ my $regexp = shift;
+ my $class_state = 0;
+ my $escape_state = 0;
+ my $found = 0;
+ my $ungreedy = "";
+ my $c;
+
+ for $c (split (//, $regexp)) {
+ if ($found) {
+ if($c ne "?") { $ungreedy .= "?"; }
+ $found = 0;
+ }
+ $ungreedy .= $c;
+
+ if ($escape_state) { $escape_state = 0; next; }
+ if ($c eq "\\") { $escape_state = 1; next; }
+ if ($c eq '[') { $class_state = 1; next; }
+ if ($class_state) {
+ if($c eq ']') { $class_state = 0; }
+ next;
+ }
+ # Quantifiers: + * {...}
+ if ($c =~ /[*}+]/) { $found = 1; }
+ }
+ if($found) { $ungreedy .= '?'; }
+ return $ungreedy;
+}
+
+
+sub __KILLER_REAPER__() {}
+
+sub reap_usleep() {
+ # Reap dead children.
+ # If no dead children: Sleep specified amount with exponential backoff
+ # Input:
+ # $ms = milliseconds to sleep
+ # Returns:
+ # $ms/2+0.001 if children reaped
+ # $ms*1.1 if no children reaped
+ my $ms = shift;
+ if(reapers()) {
+ if(not $Global::total_completed % 100) {
+ if($opt::timeout) {
+ # Force cleaning the timeout queue for every 100 jobs
+ # Fixes potential memleak
+ $Global::timeoutq->process_timeouts();
+ }
+ }
+ # Sleep exponentially shorter (1/2^n) if a job finished
+ return $ms/2+0.001;
+ } else {
+ if($opt::timeout) {
+ $Global::timeoutq->process_timeouts();
+ }
+ if($opt::memfree) {
+ kill_youngster_if_not_enough_mem($opt::memfree*0.5);
+ }
+ if($opt::memsuspend) {
+ suspend_young_if_not_enough_mem($opt::memsuspend);
+ }
+ if($opt::limit) {
+ kill_youngest_if_over_limit();
+ }
+ exit_if_disk_full();
+ if($Global::linebuffer) {
+ my $something_printed = 0;
+ if($opt::keeporder and not $opt::latestline) {
+ for my $job (values %Global::running) {
+ $something_printed += $job->print_earlier_jobs();
+ }
+ } else {
+ for my $job (values %Global::running) {
+ $something_printed += $job->print();
+ }
+ }
+ if($something_printed) { $ms = $ms/2+0.001; }
+ }
+ if($ms > 0.002) {
+ # When a child dies, wake up from sleep (or select(,,,))
+ $SIG{CHLD} = sub { kill "ALRM", $$ };
+ if($opt::delay and not $Global::linebuffer) {
+ # The 0.004s is approximately the time it takes for one round
+ my $next_earliest_start =
+ $Global::newest_starttime + $opt::delay - 0.004;
+ my $remaining_ms = 1000 * ($next_earliest_start - ::now());
+ # The next job can only start at $next_earliest_start
+ # so sleep until then (but sleep at least $ms)
+ usleep(::max($ms,$remaining_ms));
+ } else {
+ usleep($ms);
+ }
+ # --compress needs $SIG{CHLD} unset
+ $SIG{CHLD} = 'DEFAULT';
+ }
+ # Sleep exponentially longer (1.1^n) if a job did not finish,
+ # though at most 1000 ms.
+ return (($ms < 1000) ? ($ms * 1.1) : ($ms));
+ }
+}
+
+sub kill_youngest_if_over_limit() {
+    # For each $sshlogin: check if it is over limit
+ # If over limit: kill off the youngest child
+ # Put the child back in the queue.
+ # Uses:
+ # %Global::running
+ my %jobs_of;
+ my @sshlogins;
+
+ for my $job (values %Global::running) {
+ if(not $jobs_of{$job->sshlogin()}) {
+ push @sshlogins, $job->sshlogin();
+ }
+ push @{$jobs_of{$job->sshlogin()}}, $job;
+ }
+ for my $sshlogin (@sshlogins) {
+ for my $job (sort { $b->seq() <=> $a->seq() }
+ @{$jobs_of{$sshlogin}}) {
+ if($sshlogin->limit() == 2) {
+ $job->kill();
+ last;
+ }
+ }
+ }
+}
+
+    # Check for each $sshlogin whether there is enough mem.
+    # If free mem < 2*$limit: suspend the youngest jobs, then resume
+    #   the oldest until a sustainable number is running
+    # Else: resume one suspended job per call
+ # Else: Resume all jobs
+ # Uses:
+ # %Global::running
+ my $limit = shift;
+ my %jobs_of;
+ my @sshlogins;
+
+ for my $job (values %Global::running) {
+ if(not $jobs_of{$job->sshlogin()}) {
+ push @sshlogins, $job->sshlogin();
+ }
+ push @{$jobs_of{$job->sshlogin()}}, $job;
+ }
+ for my $sshlogin (@sshlogins) {
+ my $free = $sshlogin->memfree();
+ if($free < 2*$limit) {
+ # Suspend all jobs (resume some of them later)
+ map { $_->suspended() or $_->suspend(); } @{$jobs_of{$sshlogin}};
+ my @jobs = (sort { $b->seq() <=> $a->seq() }
+ @{$jobs_of{$sshlogin}});
+ # how many should be running?
+ # limit*1 => 1;
+ # limit*1.5 => 2;
+ # limit*1.75 => 4;
+ # free < limit*(2-1/2^n);
+ # =>
+ # 1/(2-free/limit) < 2^n;
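+	    # Example: free = 1.75*limit => run = int(1/0.25) = 4,
+	    # so the 5 oldest jobs ([0..4]) are resumed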
+ my $run = int(1/(2-$free/$limit));
+ $run = ::min($run,$#jobs);
+ # Resume the oldest running
+ for my $job ((sort { $a->seq() <=> $b->seq() } @jobs)[0..$run]) {
+ ::debug("mem","\nResume ",$run+1, " jobs. Seq ",
+ $job->seq(), " resumed ",
+ $sshlogin->memfree()," < ",2*$limit);
+ $job->resume();
+ }
+ } else {
+ for my $job (@{$jobs_of{$sshlogin}}) {
+ if($job->suspended()) {
+ $job->resume();
+ ::debug("mem","\nResume ",$#{$jobs_of{$sshlogin}}+1,
+ " jobs. Seq ", $job->seq(), " resumed ",
+ $sshlogin->memfree()," > ",2*$limit);
+ last;
+ }
+ }
+ }
+ }
+}
+
+    # Check for each $sshlogin whether there is enough mem.
+    # If free mem < $limit (the caller passes 50% of --memfree):
+    # kill off the youngest child
+ # If less than 50% enough free mem: kill off the youngest child
+ # Put the child back in the queue.
+ # Uses:
+ # %Global::running
+ my $limit = shift;
+ my %jobs_of;
+ my @sshlogins;
+
+ for my $job (values %Global::running) {
+ if(not $jobs_of{$job->sshlogin()}) {
+ push @sshlogins, $job->sshlogin();
+ }
+ push @{$jobs_of{$job->sshlogin()}}, $job;
+ }
+ for my $sshlogin (@sshlogins) {
+ for my $job (sort { $b->seq() <=> $a->seq() }
+ @{$jobs_of{$sshlogin}}) {
+ if($sshlogin->memfree() < $limit) {
+ ::debug("mem","\n",map { $_->seq()." " }
+ (sort { $b->seq() <=> $a->seq() }
+ @{$jobs_of{$sshlogin}}));
+ ::debug("mem","\n", $job->seq(), "killed ",
+ $sshlogin->memfree()," < ",$limit);
+ $job->kill();
+ $sshlogin->memfree_recompute();
+ } else {
+ last;
+ }
+ }
+ ::debug("mem","Free mem OK? ",
+ $sshlogin->memfree()," > ",$limit);
+ }
+}
+
+
+sub __DEBUGGING__() {}
+
+
+sub debug(@) {
+ # Uses:
+ # $Global::debug
+ # %Global::fh
+ # Returns: N/A
+ $Global::debug or return;
+    # map (not grep): replace undefs with "" instead of dropping args
+    @_ = map { defined $_ ? $_ : "" } @_;
+ if($Global::debug eq "all" or $Global::debug eq $_[0]) {
+ if($Global::fh{2}) {
+ # Original stderr was saved
+ my $stderr = $Global::fh{2};
+ print $stderr @_[1..$#_];
+ } else {
+ print STDERR @_[1..$#_];
+ }
+ }
+}
+
+sub my_memory_usage() {
+ # Returns:
+ # memory usage if found
+ # 0 otherwise
+ use strict;
+ use FileHandle;
+
+ local $/ = "\n";
+ my $pid = $$;
+ if(-e "/proc/$pid/stat") {
+ my $fh = FileHandle->new("</proc/$pid/stat");
+
+ my $data = <$fh>;
+ chomp $data;
+ $fh->close;
+
+ my @procinfo = split(/\s+/,$data);
+
+ return undef_as_zero($procinfo[22]);
+ } else {
+ return 0;
+ }
+}
+
+sub my_size() {
+ # Returns:
+ # $size = size of object if Devel::Size is installed
+ # -1 otherwise
+ my @size_this = (@_);
+ eval "use Devel::Size qw(size total_size)";
+ if ($@) {
+ return -1;
+ } else {
+ return total_size(@_);
+ }
+}
+
+sub my_dump(@) {
+ # Returns:
+ # ascii expression of object if Data::Dump(er) is installed
+ # error code otherwise
+ my @dump_this = (@_);
+ eval "use Data::Dump qw(dump);";
+ if ($@) {
+ # Data::Dump not installed
+ eval "use Data::Dumper;";
+ if ($@) {
+ my $err = "Neither Data::Dump nor Data::Dumper is installed\n".
+ "Not dumping output\n";
+ ::status($err);
+ return $err;
+ } else {
+ return Dumper(@dump_this);
+ }
+ } else {
+ # Create a dummy Data::Dump:dump as Hans Schou sometimes has
+ # it undefined
+ eval "sub Data::Dump:dump {}";
+ eval "use Data::Dump qw(dump);";
+ return (Data::Dump::dump(@dump_this));
+ }
+}
+
+sub my_croak(@) {
+ eval "use Carp; 1";
+ $Carp::Verbose = 1;
+ croak(@_);
+}
+
+sub my_carp() {
+ eval "use Carp; 1";
+ $Carp::Verbose = 1;
+ carp(@_);
+}
+
+
+sub __OBJECT_ORIENTED_PARTS__() {}
+
+
+package SSHLogin;
+
+sub new($$) {
+ my $class = shift;
+ my $s = shift;
+ my $origs = $s;
+ my %hostgroups;
+ my $ncpus;
+ my $sshcommand;
+ my $user;
+ my $password;
+ my $host;
+ my $port;
+ my $local;
+ my $string;
+ # SSHLogins can have these formats:
+ # @grp+grp/ncpu//usr/bin/ssh user@server
+ # ncpu//usr/bin/ssh user@server
+ # /usr/bin/ssh user@server
+ # user@server
+ # ncpu/user@server
+ # @grp+grp/user@server
+ # above with: user:password@server
+ # above with: user@server:port
+ # So:
+ # [@grp+grp][ncpu/][ssh command ][[user][:password]@][server[:port]]
+
+ # [@grp+grp]/ncpu//usr/bin/ssh user:pass@server:port
+ if($s =~ s:^\@([^/]+)/?::) {
+ # Look for SSHLogin hostgroups
+ %hostgroups = map { $_ => 1 } split(/\+|,/, $1);
+ }
+ # An SSHLogin is always in the hostgroup of its "numcpu/host"
+ $hostgroups{$s} = 1;
+
+ # [ncpu/]/usr/bin/ssh user:pass@server:port
+ if ($s =~ s:^(\d+)/::) { $ncpus = $1; }
+
+ # [/usr/bin/ssh ]user:pass@server:port
+ if($s =~ s/^(.*) //) { $sshcommand = $1; }
+
+ # [user:pass@]server:port
+ if($s =~ s/^([^@]+)@//) {
+ my $userpw = $1;
+ # user[:pass]
+ if($userpw =~ s/:(.*)//) {
+ $password = $1;
+ if($password eq "") { $password = $ENV{'SSHPASS'} }
+ if(not ::which("sshpass")) {
+ ::error(255,"--sshlogin with password requires ".
+ "sshpass installed");
+ }
+ }
+ $user = $userpw;
+ }
+ # [server]:port
+ if(not $s =~ /:.*:/
+ and
+ $s =~ s/^([-a-z0-9._]+)//i) {
+ # Not IPv6 (IPv6 has 2 or more ':')
+ $host = $1;
+ } elsif($s =~ s/^(\\[\[\]box0-9a-f.]+)//i) {
+ # RFC2673 allows for:
+ # \[b11010000011101] \[o64072/14] \[xd074/14] \[208.116.0.0/14]
+ $host = $1;
+ } elsif($s =~ s/^\[([0-9a-f:]+)\]//i
+ or
+ $s =~ s/^([0-9a-f:]+)//i) {
+ # RFC5952
+ # [2001:db8::1]:80
+ # 2001:db8::1.80
+ # 2001:db8::1p80
+ # 2001:db8::1#80
+ # 2001:db8::1:80 - not supported
+ # 2001:db8::1 port 80 - not supported
+ $host = $1;
+ }
+
+ # [:port]
+ if($s =~ s/^:(\w+)//i) {
+ $port = $1;
+ } elsif($s =~ s/^[p\.\#](\w+)//i) {
+ # RFC5952
+ # 2001:db8::1.80
+ # 2001:db8::1p80
+ # 2001:db8::1#80
+ $port = $1;
+ }
+
+ if($s and $s ne ':') {
+ ::die_bug("SSHLogin parser failed on '$origs' => '$s'");
+ }
+
+ $string =
+ # Only include the sshcommand in $string if it is set by user
+ ($sshcommand && $sshcommand." ").
+ ($user && $user."@").
+ ($host && $host).
+ ($port && ":$port");
+ if($host eq ':') {
+ $local = 1;
+ $string = ":";
+ } else {
+ $sshcommand ||= $opt::ssh || $ENV{'PARALLEL_SSH'} || "ssh";
+ }
+ # An SSHLogin is always in the hostgroup of its $string-name
+ $hostgroups{$string} = 1;
+ @Global::hostgroups{keys %hostgroups} = values %hostgroups;
+ # Used for file names for loadavg
+ my $no_slash_string = $string;
+ $no_slash_string =~ s/[^-a-z0-9:]/_/gi;
+ return bless {
+ 'string' => $string,
+ 'jobs_running' => 0,
+ 'jobs_completed' => 0,
+ 'maxlength' => undef,
+ 'max_jobs_running' => undef,
+ 'orig_max_jobs_running' => undef,
+ 'ncpus' => $ncpus,
+ 'sshcommand' => $sshcommand,
+ 'user' => $user,
+ 'password' => $password,
+ 'host' => $host,
+ 'port' => $port,
+ 'hostgroups' => \%hostgroups,
+ 'local' => $local,
+ 'control_path_dir' => undef,
+ 'control_path' => undef,
+ 'time_to_login' => undef,
+ 'last_login_at' => undef,
+ 'loadavg_file' => $Global::cache_dir . "/tmp/sshlogin/" .
+ $no_slash_string . "/loadavg",
+ 'loadavg' => undef,
+ 'last_loadavg_update' => 0,
+ 'swap_activity_file' => $Global::cache_dir . "/tmp/sshlogin/" .
+ $no_slash_string . "/swap_activity",
+ 'swap_activity' => undef,
+ }, ref($class) || $class;
+}
+
+sub DESTROY($) {
+ my $self = shift;
+ # Remove temporary files if they are created.
+ ::rm($self->{'loadavg_file'});
+ ::rm($self->{'swap_activity_file'});
+}
+
+sub string($) {
+ my $self = shift;
+ return $self->{'string'};
+}
+
+sub host($) {
+ my $self = shift;
+ return $self->{'host'};
+}
+
+sub sshcmd($) {
+ # Give the ssh command without hostname
+ # Returns:
+ # "sshpass -e ssh -p port -l user"
+ my $self = shift;
+ my @local;
+ # [sshpass -e] ssh -p port -l user
+ if($self->{'password'}) { push @local, "sshpass -e"; }
+ # [ssh] -p port -l user
+ # TODO sshpass + space
+ push @local, $self->{'sshcommand'};
+ # [-p port] -l user
+ if($self->{'port'}) { push @local, '-p',$self->{'port'}; }
+ # [-l user]
+ if($self->{'user'}) { push @local, '-l',$self->{'user'}; }
+ if($opt::controlmaster) {
+ # Use control_path to make ssh faster
+ my $control_path = $self->control_path_dir()."/ssh-%r@%h:%p";
+
+ if(not $self->{'control_path'}{$control_path}++) {
+ # Master is not running for this control_path
+ # Start it
+ my $pid = fork();
+ if($pid) {
+ $Global::sshmaster{$pid} ||= 1;
+ } else {
+ push @local, "-S", $control_path;
+ $SIG{'TERM'} = undef;
+ # Run a sleep that outputs data, so it will discover
+ # if the ssh connection closes.
+ my $sleep = ::Q('$|=1;while(1){sleep 1;print "foo\n"}');
+ # Ignore the 'foo' being printed
+ open(STDOUT,">","/dev/null");
+ # STDERR >/dev/null to ignore
+ open(STDERR,">","/dev/null");
+ open(STDIN,"<","/dev/null");
+ exec(@local, "-MT", $self->{'host'}, "--",
+ "perl", "-e", $sleep);
+ }
+ }
+ push @local, "-S", ::Q($control_path);
+ }
+
+ return "@local";
+}
+
+sub wrap($@) {
+ # Input:
+ # @cmd = shell command to run on remote
+ # Returns:
+ # $sshwrapped = ssh remote @cmd
+ my $self = shift;
+ my @remote = @_;
+ return(join " ",
+ $self->sshcmd(), $self->{'host'}, "--", "exec", @remote);
+}
+
+sub hexwrap($@) {
+ # Input:
+    #   @cmd = perl expression to eval
+ # Returns:
+ # $hexencoded = perl command that decodes hex and evals @cmd
+ my $self = shift;
+ my $cmd = join("",@_);
+
+ # "#" is needed because Perl on MacOS X adds NULs
+ # when running pack q/H10000000/
+ my $hex = unpack "H*", $cmd."#";
+ # csh does not deal well with > 1000 chars in one word
+ # Insert space every 1000 char
+ $hex =~ s/\G.{1000}\K/ /sg;
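+    # Illustration: hexwrap('print 1') returns
+    #   perl -X -e GNU_Parallel_worker,eval+pack+q/H10000000/,join+q//,@ARGV 7072696e74203123
+    # where 7072696e74203123 is hex for "print 1#"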
+ # Explanation:
+ # Write this without special chars: eval pack 'H*', join '',@ARGV
+ # GNU_Parallel_worker = String so people can see this is from GNU Parallel
+ # eval+ = way to write 'eval ' without space (gives warning)
+ # pack+ = way to write 'pack ' without space
+ # q/H10000000/, = almost the same as "H*" but does not use *
+ # join+q//, = join '',
+ return('perl -X -e '.
+ 'GNU_Parallel_worker,eval+pack+q/H10000000/,join+q//,@ARGV '.
+ $hex);
+}
+
+sub jobs_running($) {
+ my $self = shift;
+ return ($self->{'jobs_running'} || "0");
+}
+
+sub inc_jobs_running($) {
+ my $self = shift;
+ $self->{'jobs_running'}++;
+}
+
+sub dec_jobs_running($) {
+ my $self = shift;
+ $self->{'jobs_running'}--;
+}
+
+sub set_maxlength($$) {
+ my $self = shift;
+ $self->{'maxlength'} = shift;
+}
+
+sub maxlength($) {
+ my $self = shift;
+ return $self->{'maxlength'};
+}
+
+sub jobs_completed() {
+ my $self = shift;
+ return $self->{'jobs_completed'};
+}
+
+sub in_hostgroups() {
+ # Input:
+ # @hostgroups = the hostgroups to look for
+ # Returns:
+ # true if intersection of @hostgroups and the hostgroups of this
+ # SSHLogin is non-empty
+ my $self = shift;
+ return grep { defined $self->{'hostgroups'}{$_} } @_;
+}
+
+sub hostgroups() {
+ my $self = shift;
+ return keys %{$self->{'hostgroups'}};
+}
+
+sub inc_jobs_completed($) {
+ my $self = shift;
+ $self->{'jobs_completed'}++;
+ $Global::total_completed++;
+}
+
+sub set_max_jobs_running($$) {
+ my $self = shift;
+ if(defined $self->{'max_jobs_running'}) {
+ $Global::max_jobs_running -= $self->{'max_jobs_running'};
+ }
+ $self->{'max_jobs_running'} = shift;
+
+ if(defined $self->{'max_jobs_running'}) {
+	# max_jobs_running could be reset if -j is a changed file
+ $Global::max_jobs_running += $self->{'max_jobs_running'};
+ }
+ # Initialize orig to the first non-zero value that comes around
+ $self->{'orig_max_jobs_running'} ||= $self->{'max_jobs_running'};
+}
+
+sub memfree() {
+ # Returns:
+ # $memfree in bytes
+ my $self = shift;
+ $self->memfree_recompute();
+ # Return 1 if not defined.
+ return (not defined $self->{'memfree'} or $self->{'memfree'})
+}
+
+sub memfree_recompute() {
+ my $self = shift;
+ my $script = memfreescript();
+
+ # TODO add sshlogin and backgrounding
+ # Run the script twice if it gives 0 (typically intermittent error)
+ $self->{'memfree'} = ::qqx($script) || ::qqx($script);
+ if(not $self->{'memfree'}) {
+ ::die_bug("Less than 1 byte memory free");
+ }
+ #::debug("mem","New free:",$self->{'memfree'}," ");
+}
+
+{
+ my $script;
+
+ sub memfreescript() {
+ # Returns:
+	#   shell script that prints available memory in bytes
+ if(not $script) {
+ my %script_of = (
+ # /proc/meminfo
+ # MemFree: 7012 kB
+ # Buffers: 19876 kB
+ # Cached: 431192 kB
+ # SwapCached: 0 kB
+ "linux" => (
+ q{
+ print 1024 * qx{
+ awk '/^((Swap)?Cached|MemFree|Buffers):/
+ { sum += \$2} END { print sum }'
+ /proc/meminfo }
+ }),
+ # Android uses same code as GNU/Linux
+ "android" => (
+ q{
+ print 1024 * qx{
+ awk '/^((Swap)?Cached|MemFree|Buffers):/
+ { sum += \$2} END { print sum }'
+ /proc/meminfo }
+ }),
+ # $ vmstat 1 1
+ # procs memory page faults cpu
+ # r b w avm free re at pi po fr de sr in sy cs us sy id
+ # 1 0 0 242793 389737 5 1 0 0 0 0 0 107 978 60 1 1 99
+ "hpux" => (
+ q{
+ print (((reverse `vmstat 1 1`)[0]
+ =~ /(?:\d+\D+){4}(\d+)/)[0]*1024)
+ }),
+ # $ vmstat 1 2
+ # kthr memory page disk faults cpu
+ # r b w swap free re mf pi po fr de sr s3 s4 -- -- in sy cs us sy id
+ # 0 0 0 6496720 5170320 68 260 8 2 1 0 0 -0 3 0 0 309 1371 255 1 2 97
+ # 0 0 0 6434088 5072656 7 15 8 0 0 0 0 0 261 0 0 1889 1899 3222 0 8 92
+ #
+ # The second free value is correct
+ "solaris" => (
+ q{
+ print (((reverse `vmstat 1 2`)[0]
+ =~ /(?:\d+\D+){4}(\d+)/)[0]*1024)
+ }),
+ # hw.pagesize: 4096
+ # vm.stats.vm.v_cache_count: 0
+ # vm.stats.vm.v_inactive_count: 79574
+ # vm.stats.vm.v_free_count: 4507
+ "freebsd" => (
+ q{
+ for(qx{/sbin/sysctl -a}) {
+ if (/^([^:]+):\s+(.+)\s*$/s) {
+ $sysctl->{$1} = $2;
+ }
+ }
+ print $sysctl->{"hw.pagesize"} *
+ ($sysctl->{"vm.stats.vm.v_cache_count"}
+ + $sysctl->{"vm.stats.vm.v_inactive_count"}
+ + $sysctl->{"vm.stats.vm.v_free_count"});
+ }),
+ # Mach Virtual Memory Statistics: (page size of 4096 bytes)
+ # Pages free: 198061.
+ # Pages active: 159701.
+ # Pages inactive: 47378.
+ # Pages speculative: 29707.
+ # Pages wired down: 89231.
+ # "Translation faults": 928901425.
+ # Pages copy-on-write: 156988239.
+ # Pages zero filled: 271267894.
+ # Pages reactivated: 48895.
+ # Pageins: 1798068.
+ # Pageouts: 257.
+ # Object cache: 6603 hits of 1713223 lookups (0% hit rate)
+ 'darwin' => (
+ q{
+ $vm = `vm_stat`;
+ print (($vm =~ /page size of (\d+)/)[0] *
+ (($vm =~ /Pages free:\s+(\d+)/)[0] +
+ ($vm =~ /Pages inactive:\s+(\d+)/)[0]));
+ }),
+ );
+ my $perlscript = "";
+ # Make a perl script that detects the OS ($^O) and runs
+ # the appropriate command
+ for my $os (keys %script_of) {
+ $perlscript .= 'if($^O eq "'.$os.'") { '.$script_of{$os}.'}';
+ }
+ $script = "perl -e " . ::Q(::spacefree(1,$perlscript));
+ }
+ return $script;
+ }
+}
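+
+# Worked example (illustrative): with the /proc/meminfo sample above
+# (MemFree 7012 kB + Buffers 19876 kB + Cached 431192 kB + SwapCached 0 kB)
+# the GNU/Linux branch prints (7012+19876+431192+0)*1024 = 469073920
+# bytes (roughly 447 MiB) as available memory.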
+
+sub limit($) {
+ # Returns:
+ # 0 = Below limit. Start another job.
+ # 1 = Over limit. Start no jobs.
+ # 2 = Kill youngest job
+ my $self = shift;
+
+ if(not defined $self->{'limitscript'}) {
+ my %limitscripts =
+ ("io" => q!
+ io() {
+ limit=$1;
+ io_file=$2;
+ # Do the measurement in the background
+ ((tmp=$(tempfile);
+ LANG=C iostat -x 1 2 > $tmp;
+ mv $tmp $io_file) </dev/null >/dev/null & );
+ perl -e '-e $ARGV[0] or exit(1);
+ for(reverse <>) {
+ /Device/ and last;
+ /(\S+)$/ and $max = $max > $1 ? $max : $1; }
+ exit ('$limit' < $max)' $io_file;
+ };
+ io %s %s
+ !,
+ "mem" => q!
+ mem() {
+ limit=$1;
+ awk '/^((Swap)?Cached|MemFree|Buffers):/{ sum += $2}
+ END {
+ if (sum*1024 < '$limit'/2) { exit 2; }
+ else { exit (sum*1024 < '$limit') }
+ }' /proc/meminfo;
+ };
+ mem %s;
+ !,
+ "load" => q!
+ load() {
+ limit=$1;
+ ps ax -o state,command |
+ grep -E '^[DOR].[^[]' |
+ wc -l |
+ perl -ne 'exit ('$limit' < $_)';
+ };
+ load %s
+ !,
+ );
+ my ($cmd,@args) = split /\s+/,$opt::limit;
+ if($limitscripts{$cmd}) {
+ my $tmpfile = ::tmpname("parlmt");
+ ++$Global::unlink{$tmpfile};
+ $self->{'limitscript'} =
+ ::spacefree(1, sprintf($limitscripts{$cmd},
+ ::multiply_binary_prefix(@args),$tmpfile));
+ } else {
+ $self->{'limitscript'} = $opt::limit;
+ }
+ }
+
+ my %env = %ENV;
+ local %ENV = %env;
+ $ENV{'SSHLOGIN'} = $self->string();
+ system($Global::shell,"-c",$self->{'limitscript'});
+ #::qqx($self->{'limitscript'});
+ ::debug("limit","limit `".$self->{'limitscript'}."` result ".($?>>8)."\n");
+ return $?>>8;
+}
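+
+# Worked example (illustrative) for the "mem" limitscript above:
+# --limit "mem 1G" gives limit = 1073741824 bytes. With 458080 kB
+# summed from /proc/meminfo (= 469073920 bytes) we have
+# 469073920 < 1073741824/2, so the script exits 2: kill the youngest
+# job. Between limit/2 and limit it exits 1 (start no new jobs); with
+# at least 1G free it exits 0 (another job may start).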
+
+
+sub swapping($) {
+ my $self = shift;
+ my $swapping = $self->swap_activity();
+ return (not defined $swapping or $swapping)
+}
+
+sub swap_activity($) {
+ # If the currently known swap activity is too old:
+ # Recompute a new one in the background
+ # Returns:
+ # last swap activity computed
+ my $self = shift;
+ # Should we update the swap_activity file?
+ my $update_swap_activity_file = 0;
+ # Test with (on 64 core machine):
+ # seq 100 | parallel --lb -j100 'seq 1000 | parallel --noswap -j 1 true'
+ if(open(my $swap_fh, "<", $self->{'swap_activity_file'})) {
+ my $swap_out = <$swap_fh>;
+ close $swap_fh;
+ if($swap_out =~ /^(\d+)$/) {
+ $self->{'swap_activity'} = $1;
+ ::debug("swap", "New swap_activity: ", $self->{'swap_activity'});
+ }
+ ::debug("swap", "Last update: ", $self->{'last_swap_activity_update'});
+ if(time - $self->{'last_swap_activity_update'} > 10) {
+	    # Last swap activity update was started more than 10 seconds ago
+ ::debug("swap", "Older than 10 sec: ", $self->{'swap_activity_file'});
+ $update_swap_activity_file = 1;
+ }
+ } else {
+ ::debug("swap", "No swap_activity file: ", $self->{'swap_activity_file'});
+ $self->{'swap_activity'} = undef;
+ $update_swap_activity_file = 1;
+ }
+ if($update_swap_activity_file) {
+ ::debug("swap", "Updating swap_activity file ", $self->{'swap_activity_file'});
+ $self->{'last_swap_activity_update'} = time;
+ my $dir = ::dirname($self->{'swap_activity_file'});
+ -d $dir or eval { File::Path::mkpath($dir); };
+ my $swap_activity;
+ $swap_activity = swapactivityscript();
+ if(not $self->local()) {
+ $swap_activity = $self->wrap($swap_activity);
+ }
+	# Run the swap activity measurement.
+	# As the command can take a long time when run remotely,
+	# save the output to a tmp file before moving it to the final file
+ my $file = $self->{'swap_activity_file'};
+ my ($dummy_fh, $tmpfile) = ::tmpfile(SUFFIX => ".swp");
+ ::debug("swap", "\n", $swap_activity, "\n");
+ my $qtmp = ::Q($tmpfile);
+ my $qfile = ::Q($file);
+ ::qqx("($swap_activity > $qtmp && mv $qtmp $qfile || rm $qtmp &)");
+ }
+ return $self->{'swap_activity'};
+}
+
+{
+ my $script;
+
+ sub swapactivityscript() {
+ # Returns:
+ # shellscript for detecting swap activity
+ #
+    # arguments for vmstat are OS dependent
+ # swap_in and swap_out are in different columns depending on OS
+ #
+ if(not $script) {
+ my %vmstat = (
+ # linux: $7*$8
+ # $ vmstat 1 2
+ # procs -----------memory---------- ---swap-- -----io---- -system-- ----cpu----
+ # r b swpd free buff cache si so bi bo in cs us sy id wa
+ # 5 0 51208 1701096 198012 18857888 0 0 37 153 28 19 56 11 33 1
+ # 3 0 51208 1701288 198012 18857972 0 0 0 0 3638 10412 15 3 82 0
+ 'linux' => ['vmstat 1 2 | tail -n1', '$7*$8'],
+
+ # solaris: $6*$7
+ # $ vmstat -S 1 2
+ # kthr memory page disk faults cpu
+ # r b w swap free si so pi po fr de sr s3 s4 -- -- in sy cs us sy id
+ # 0 0 0 4628952 3208408 0 0 3 1 1 0 0 -0 2 0 0 263 613 246 1 2 97
+ # 0 0 0 4552504 3166360 0 0 0 0 0 0 0 0 0 0 0 246 213 240 1 1 98
+ 'solaris' => ['vmstat -S 1 2 | tail -1', '$6*$7'],
+
+ # darwin (macosx): $21*$22
+ # $ vm_stat -c 2 1
+ # Mach Virtual Memory Statistics: (page size of 4096 bytes)
+ # free active specul inactive throttle wired prgable faults copy 0fill reactive purged file-backed anonymous cmprssed cmprssor dcomprs comprs pageins pageout swapins swapouts
+ # 346306 829050 74871 606027 0 240231 90367 544858K 62343596 270837K 14178 415070 570102 939846 356 370 116 922 4019813 4 0 0
+ # 345740 830383 74875 606031 0 239234 90369 2696 359 553 0 0 570110 941179 356 370 0 0 0 0 0 0
+ 'darwin' => ['vm_stat -c 2 1 | tail -n1', '$21*$22'],
+
+ # ultrix: $12*$13
+ # $ vmstat -S 1 2
+ # procs faults cpu memory page disk
+ # r b w in sy cs us sy id avm fre si so pi po fr de sr s0
+ # 1 0 0 4 23 2 3 0 97 7743 217k 0 0 0 0 0 0 0 0
+ # 1 0 0 6 40 8 0 1 99 7743 217k 0 0 3 0 0 0 0 0
+ 'ultrix' => ['vmstat -S 1 2 | tail -1', '$12*$13'],
+
+ # aix: $6*$7
+ # $ vmstat 1 2
+ # System configuration: lcpu=1 mem=2048MB
+ #
+ # kthr memory page faults cpu
+ # ----- ----------- ------------------------ ------------ -----------
+ # r b avm fre re pi po fr sr cy in sy cs us sy id wa
+ # 0 0 333933 241803 0 0 0 0 0 0 10 143 90 0 0 99 0
+ # 0 0 334125 241569 0 0 0 0 0 0 37 5368 184 0 9 86 5
+ 'aix' => ['vmstat 1 2 | tail -n1', '$6*$7'],
+
+ # freebsd: $8*$9
+ # $ vmstat -H 1 2
+ # procs memory page disks faults cpu
+ # r b w avm fre flt re pi po fr sr ad0 ad1 in sy cs us sy id
+ # 1 0 0 596716 19560 32 0 0 0 33 8 0 0 11 220 277 0 0 99
+ # 0 0 0 596716 19560 2 0 0 0 0 0 0 0 11 144 263 0 1 99
+ 'freebsd' => ['vmstat -H 1 2 | tail -n1', '$8*$9'],
+
+ # mirbsd: $8*$9
+ # $ vmstat 1 2
+ # procs memory page disks traps cpu
+ # r b w avm fre flt re pi po fr sr wd0 cd0 int sys cs us sy id
+ # 0 0 0 25776 164968 34 0 0 0 0 0 0 0 230 259 38 4 0 96
+ # 0 0 0 25776 164968 24 0 0 0 0 0 0 0 237 275 37 0 0 100
+ 'mirbsd' => ['vmstat 1 2 | tail -n1', '$8*$9'],
+
+ # netbsd: $7*$8
+ # $ vmstat 1 2
+ # procs memory page disks faults cpu
+ # r b avm fre flt re pi po fr sr w0 w1 in sy cs us sy id
+ # 0 0 138452 6012 54 0 0 0 1 2 3 0 4 100 23 0 0 100
+ # 0 0 138456 6008 1 0 0 0 0 0 0 0 7 26 19 0 0 100
+ 'netbsd' => ['vmstat 1 2 | tail -n1', '$7*$8'],
+
+ # openbsd: $8*$9
+ # $ vmstat 1 2
+ # procs memory page disks traps cpu
+ # r b w avm fre flt re pi po fr sr wd0 wd1 int sys cs us sy id
+ # 0 0 0 76596 109944 73 0 0 0 0 0 0 1 5 259 22 0 1 99
+ # 0 0 0 76604 109936 24 0 0 0 0 0 0 0 7 114 20 0 1 99
+ 'openbsd' => ['vmstat 1 2 | tail -n1', '$8*$9'],
+
+ # hpux: $8*$9
+ # $ vmstat 1 2
+ # procs memory page faults cpu
+ # r b w avm free re at pi po fr de sr in sy cs us sy id
+ # 1 0 0 247211 216476 4 1 0 0 0 0 0 102 73005 54 6 11 83
+ # 1 0 0 247211 216421 43 9 0 0 0 0 0 144 1675 96 25269512791222387000 25269512791222387000 105
+ 'hpux' => ['vmstat 1 2 | tail -n1', '$8*$9'],
+
+ # dec_osf (tru64): $11*$12
+ # $ vmstat 1 2
+ # Virtual Memory Statistics: (pagesize = 8192)
+ # procs memory pages intr cpu
+ # r w u act free wire fault cow zero react pin pout in sy cs us sy id
+ # 3 181 36 51K 1895 8696 348M 59M 122M 259 79M 0 5 218 302 4 1 94
+ # 3 181 36 51K 1893 8696 3 15 21 0 28 0 4 81 321 1 1 98
+ 'dec_osf' => ['vmstat 1 2 | tail -n1', '$11*$12'],
+
+ # gnu (hurd): $7*$8
+ # $ vmstat -k 1 2
+ # (pagesize: 4, size: 512288, swap size: 894972)
+ # free actv inact wired zeroed react pgins pgouts pfaults cowpfs hrat caobj cache swfree
+ # 371940 30844 89228 20276 298348 0 48192 19016 756105 99808 98% 876 20628 894972
+ # 371940 30844 89228 20276 +0 +0 +0 +0 +42 +2 98% 876 20628 894972
+ 'gnu' => ['vmstat -k 1 2 | tail -n1', '$7*$8'],
+
+ # -nto (qnx has no swap)
+ #-irix
+ #-svr5 (scosysv)
+ );
+ my $perlscript = "";
+ # Make a perl script that detects the OS ($^O) and runs
+ # the appropriate vmstat command
+ for my $os (keys %vmstat) {
+ $vmstat{$os}[1] =~ s/\$/\\\\\\\$/g; # $ => \\\$
+ $perlscript .= 'if($^O eq "'.$os.'") { print `'.$vmstat{$os}[0].' | awk "{print ' .
+ $vmstat{$os}[1] . '}"` }';
+ }
+ $script = "perl -e " . ::Q($perlscript);
+ }
+ return $script;
+ }
+}
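+
+# Worked example (illustrative): on GNU/Linux the script runs
+# 'vmstat 1 2 | tail -n1' and multiplies the si and so columns
+# ($7*$8). For the sample line above both are 0, so it prints 0:
+# no swap activity seen in the last second.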
+
+sub too_fast_remote_login($) {
+ my $self = shift;
+ if($self->{'last_login_at'} and $self->{'time_to_login'}) {
+ # sshd normally allows 10 simultaneous logins
+ # A login takes time_to_login
+ # So time_to_login/5 should be safe
+ # If now <= last_login + time_to_login/5: Then it is too soon.
+ my $too_fast = (::now() <= $self->{'last_login_at'}
+ + $self->{'time_to_login'}/5);
+ ::debug("run", "Too fast? $too_fast ");
+ return $too_fast;
+ } else {
+ # No logins so far (or time_to_login not computed): it is not too fast
+ return 0;
+ }
+}
+
+sub last_login_at($) {
+ my $self = shift;
+ return $self->{'last_login_at'};
+}
+
+sub set_last_login_at($$) {
+ my $self = shift;
+ $self->{'last_login_at'} = shift;
+}
+
+sub loadavg_too_high($) {
+ my $self = shift;
+ my $loadavg = $self->loadavg();
+ if(defined $loadavg) {
+ ::debug("load", "Load $loadavg > ",$self->max_loadavg());
+ return $loadavg >= $self->max_loadavg();
+ } else {
+ # Unknown load: Assume load is too high
+ return 1;
+ }
+}
+
+
+
+sub loadavg($) {
+    # If the currently known loadavg is too old:
+ # Recompute a new one in the background
+ # The load average is computed as the number of processes waiting
+ # for disk or CPU right now. So it is the server load this instant
+ # and not averaged over several minutes. This is needed so GNU
+ # Parallel will at most start one job that will push the load over
+ # the limit.
+ #
+ # Returns:
+ # $last_loadavg = last load average computed (undef if none)
+
+ my $self = shift;
+ sub loadavg_cmd() {
+ if(not $Global::loadavg_cmd) {
+ # aix => "ps -ae -o state,command" # state wrong
+ # bsd => "ps ax -o state,command"
+ # sysv => "ps -ef -o s -o comm"
+ # cygwin => perl -ne 'close STDERR; /Name/ and print"\n"; \
+ # /(Name|Pid|Ppid|State):\s+(\S+)/ and print "$2\t";' /proc/*/status |
+ # awk '{print $2,$1}'
+ # dec_osf => bsd
+ # dragonfly => bsd
+ # freebsd => bsd
+ # gnu => bsd
+ # hpux => ps -el|awk '{print $2,$14,$15}'
+ # irix => ps -ef -o state -o comm
+ # linux => bsd
+ # minix => ps el|awk '{print \$1,\$11}'
+ # mirbsd => bsd
+ # netbsd => bsd
+ # openbsd => bsd
+ # solaris => sysv
+ # svr5 => sysv
+ # ultrix => ps -ax | awk '{print $3,$5}'
+ # unixware => ps -el|awk '{print $2,$14,$15}'
+ my $ps = ::spacefree(1,q{
+ $sysv="ps -ef -o s -o comm";
+ $sysv2="ps -ef -o state -o comm";
+ $bsd="ps ax -o state,command";
+ # Treat threads as processes
+ $bsd2="ps axH -o state,command";
+ $psel="ps -el|awk '{ print \$2,\$14,\$15 }'";
+ $cygwin=q{ perl -ne 'close STDERR; /Name/ and print"\n";
+ /(Name|Pid|Ppid|State):\s+(\S+)/ and print "$2\t";' /proc/*/status |
+ awk '{print $2,$1}' };
+ $dummy="echo S COMMAND;echo R dummy";
+ %ps=(
+ # TODO Find better code for AIX/Android
+ 'aix' => "uptime",
+ 'android' => "uptime",
+ 'cygwin' => $cygwin,
+ 'darwin' => $bsd,
+ 'dec_osf' => $sysv2,
+ 'dragonfly' => $bsd,
+ 'freebsd' => $bsd2,
+ 'gnu' => $bsd,
+ 'hpux' => $psel,
+ 'irix' => $sysv2,
+ 'linux' => $bsd2,
+ 'minix' => "ps el|awk '{print \$1,\$11}'",
+ 'mirbsd' => $bsd,
+ 'msys' => $cygwin,
+ 'netbsd' => $bsd,
+ 'nto' => $dummy,
+ 'openbsd' => $bsd,
+ 'solaris' => $sysv,
+ 'svr5' => $psel,
+ 'ultrix' => "ps -ax | awk '{print \$3,\$5}'",
+ 'MSWin32' => $sysv,
+ );
+ print `$ps{$^O}`;
+ });
+	    # The command is too long for csh, so hexwrap the command
+ $Global::loadavg_cmd = $self->hexwrap($ps);
+ }
+ return $Global::loadavg_cmd;
+ }
+ # Should we update the loadavg file?
+ my $update_loadavg_file = 0;
+ if(open(my $load_fh, "<", $self->{'loadavg_file'})) {
+ local $/; # $/ = undef => slurp whole file
+ my $load_out = <$load_fh>;
+ close $load_fh;
+ if($load_out =~ /\S/) {
+ # Content can be empty if ~/ is on NFS
+ # due to reading being non-atomic.
+ #
+	    # Count lines starting with D,O,R where the command does not start with [
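+	    # (Examples, illustrative: "R  bash" is counted;
+	    #  "S  sleep" and "R  [kworker/0]" are not.)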
+ my $load =()= ($load_out=~/(^\s?[DOR]\S* +(?=[^\[])\S)/gm);
+ if($load > 0) {
+ # load is overestimated by 1
+ $self->{'loadavg'} = $load - 1;
+ ::debug("load", "New loadavg: ", $self->{'loadavg'},"\n");
+ } elsif ($load_out=~/average: (\d+.\d+)/) {
+ # AIX does not support instant load average
+ # 04:11AM up 21 days, 12:55, 1 user, load average: 1.85, 1.57, 1.55
+ $self->{'loadavg'} = $1;
+ } else {
+ ::die_bug("loadavg_invalid_content: " .
+ $self->{'loadavg_file'} . "\n$load_out");
+ }
+ }
+ $update_loadavg_file = 1;
+ } else {
+ ::debug("load", "No loadavg file: ", $self->{'loadavg_file'});
+ $self->{'loadavg'} = undef;
+ $update_loadavg_file = 1;
+ }
+ if($update_loadavg_file) {
+ ::debug("load", "Updating loadavg file", $self->{'loadavg_file'}, "\n");
+ $self->{'last_loadavg_update'} = time;
+ my $dir = ::dirname($self->{'swap_activity_file'});
+ -d $dir or eval { File::Path::mkpath($dir); };
+ -w $dir or ::die_bug("Cannot write to $dir");
+ my $cmd = "";
+ if($self->{'string'} ne ":") {
+ $cmd = $self->wrap(loadavg_cmd());
+ } else {
+ $cmd .= loadavg_cmd();
+ }
+	# As the command can take a long time when run remotely,
+	# save the output to a tmp file before moving it to the final file
+ ::debug("load", "Update load\n");
+ my $file = ::Q($self->{'loadavg_file'});
+ # tmpfile on same filesystem as $file
+ my $tmpfile = $file.$$;
+ $ENV{'SSHPASS'} = $self->{'password'};
+ ::qqx("($cmd > $tmpfile && mv $tmpfile $file || rm $tmpfile & )");
+ }
+ return $self->{'loadavg'};
+}
+
+sub max_loadavg($) {
+ my $self = shift;
+ # If --load is a file it might be changed
+ if($Global::max_load_file) {
+ my $mtime = (stat($Global::max_load_file))[9];
+ if($mtime > $Global::max_load_file_last_mod) {
+ $Global::max_load_file_last_mod = $mtime;
+ for my $sshlogin (values %Global::host) {
+ $sshlogin->set_max_loadavg(undef);
+ }
+ }
+ }
+ if(not defined $self->{'max_loadavg'}) {
+ $self->{'max_loadavg'} =
+ $self->compute_max_loadavg($opt::load);
+ }
+ ::debug("load", "max_loadavg: ", $self->string(), " ", $self->{'max_loadavg'});
+ return $self->{'max_loadavg'};
+}
+
+sub set_max_loadavg($$) {
+ my $self = shift;
+ $self->{'max_loadavg'} = shift;
+}
+
+sub compute_max_loadavg($) {
+ # Parse the max loadaverage that the user asked for using --load
+ # Returns:
+ # max loadaverage
+ my $self = shift;
+ my $loadspec = shift;
+ my $load;
+ if(defined $loadspec) {
+ if($loadspec =~ /^\+(\d+)$/) {
+ # E.g. --load +2
+ my $j = $1;
+ $load =
+ $self->ncpus() + $j;
+ } elsif ($loadspec =~ /^-(\d+)$/) {
+ # E.g. --load -2
+ my $j = $1;
+ $load =
+ $self->ncpus() - $j;
+ } elsif ($loadspec =~ /^(\d+)\%$/) {
+ my $j = $1;
+ $load =
+ $self->ncpus() * $j / 100;
+ } elsif ($loadspec =~ /^(\d+(\.\d+)?)$/) {
+ $load = $1;
+ } elsif (-f $loadspec) {
+ $Global::max_load_file = $loadspec;
+ $Global::max_load_file_last_mod = (stat($Global::max_load_file))[9];
+ $load = $self->compute_max_loadavg(
+ ::slurp_or_exit($Global::max_load_file)
+ );
+ } else {
+ ::error("Parsing of --load failed.");
+ ::die_usage();
+ }
+ if($load < 0.01) {
+ $load = 0.01;
+ }
+ }
+ return $load;
+}
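+
+# Worked examples (illustrative), assuming ncpus() returns 8:
+#   --load +2   => 8 + 2      = 10
+#   --load -2   => 8 - 2      = 6
+#   --load 75%  => 8 * 75/100 = 6
+#   --load 3.5  => 3.5
+#   --load 0    => raised to the minimum of 0.01
+# A --load argument naming an existing file is slurped and parsed the
+# same way, and re-read whenever the file changes.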
+
+sub time_to_login($) {
+ my $self = shift;
+ return $self->{'time_to_login'};
+}
+
+sub set_time_to_login($$) {
+ my $self = shift;
+ $self->{'time_to_login'} = shift;
+}
+
+sub max_jobs_running($) {
+ my $self = shift;
+ if(not defined $self->{'max_jobs_running'}) {
+ my $nproc = $self->compute_number_of_processes($opt::jobs);
+ $self->set_max_jobs_running($nproc);
+ }
+ return $self->{'max_jobs_running'};
+}
+
+sub orig_max_jobs_running($) {
+ my $self = shift;
+ return $self->{'orig_max_jobs_running'};
+}
+
+sub compute_number_of_processes($) {
+ # Number of processes wanted and limited by system resources
+ # Returns:
+ # Number of processes
+ my $self = shift;
+ my $opt_P = shift;
+ my $wanted_processes = $self->user_requested_processes($opt_P);
+ if(not defined $wanted_processes) {
+ $wanted_processes = $Global::default_simultaneous_sshlogins;
+ }
+ ::debug("load", "Wanted procs: $wanted_processes\n");
+ my $system_limit =
+ $self->processes_available_by_system_limit($wanted_processes);
+ ::debug("load", "Limited to procs: $system_limit\n");
+ return $system_limit;
+}
+
+{
+ my @children;
+ my $max_system_proc_reached;
+ my $more_filehandles;
+ my %fh;
+ my $tmpfhname;
+ my $count_jobs_already_read;
+ my @jobs;
+ my $job;
+ my @args;
+ my $arg;
+
+ sub reserve_filehandles($) {
+	# Reserve $n filehandles
+ my $n = shift;
+ for (1..$n) {
+ $more_filehandles &&= open($fh{$tmpfhname++}, "<", "/dev/null");
+ }
+ }
+
+ sub reserve_process() {
+ # Spawn a dummy process
+ my $child;
+ if($child = fork()) {
+ push @children, $child;
+ $Global::unkilled_children{$child} = 1;
+ } elsif(defined $child) {
+ # This is the child
+ # The child takes one process slot
+ # It will be killed later
+ $SIG{'TERM'} = $Global::original_sig{'TERM'};
+ if($^O eq "cygwin" or $^O eq "msys" or $^O eq "nto") {
+ # The exec does not work on Cygwin and QNX
+ sleep 10101010;
+ } else {
+ # 'exec sleep' takes less RAM than sleeping in perl
+ exec 'sleep', 10101;
+ }
+ exit(0);
+ } else {
+ # Failed to spawn
+ $max_system_proc_reached = 1;
+ }
+ }
+
+ sub get_args_or_jobs() {
+ # Get an arg or a job (depending on mode)
+ if($Global::semaphore or ($opt::pipe and not $opt::tee)) {
+ # Skip: No need to get args
+ return 1;
+ } elsif(defined $opt::retries and $count_jobs_already_read) {
+ # For retries we may need to run all jobs on this sshlogin
+ # so include the already read jobs for this sshlogin
+ $count_jobs_already_read--;
+ return 1;
+ } else {
+ if($opt::X or $opt::m) {
+ # The arguments may have to be re-spread over several jobslots
+ # So pessimistically only read one arg per jobslot
+ # instead of a full commandline
+ if($Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->empty()) {
+ if($Global::JobQueue->empty()) {
+ return 0;
+ } else {
+ $job = $Global::JobQueue->get();
+ push(@jobs, $job);
+ return 1;
+ }
+ } else {
+ $arg = $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->get();
+ push(@args, $arg);
+ return 1;
+ }
+ } else {
+ # If there are no more command lines, then we have a process
+ # per command line, so no need to go further
+ if($Global::JobQueue->empty()) {
+ return 0;
+ } else {
+ $job = $Global::JobQueue->get();
+ # Replacement must happen here due to seq()
+ $job and $job->replaced();
+ push(@jobs, $job);
+ return 1;
+ }
+ }
+ }
+ }
+
+ sub cleanup() {
+ # Cleanup: Close the files
+ for (values %fh) { close $_ }
+ # Cleanup: Kill the children
+ for my $pid (@children) {
+ kill 9, $pid;
+ waitpid($pid,0);
+ delete $Global::unkilled_children{$pid};
+ }
+ # Cleanup: Unget the command_lines or the @args
+ $Global::JobQueue->{'commandlinequeue'}{'arg_queue'}->unget(@args);
+ @args = ();
+ $Global::JobQueue->unget(@jobs);
+ @jobs = ();
+ }
+
+ sub processes_available_by_system_limit($) {
+ # If the wanted number of processes is bigger than the system limits:
+ # Limit them to the system limits
+ # Limits are: File handles, number of input lines, processes,
+ # and taking > 1 second to spawn 10 extra processes
+ # Returns:
+ # Number of processes
+ my $self = shift;
+ my $wanted_processes = shift;
+ my $system_limit = 0;
+ my $slow_spawning_warning_printed = 0;
+ my $time = time;
+ $more_filehandles = 1;
+ $tmpfhname = "TmpFhNamE";
+
+ # perl uses 7 filehandles for something?
+ # parallel uses 1 for memory_usage
+ # parallel uses 4 for ?
+ reserve_filehandles(12);
+ # Two processes for load avg and ?
+ reserve_process();
+ reserve_process();
+
+ # For --retries count also jobs already run
+ $count_jobs_already_read = $Global::JobQueue->next_seq();
+ my $wait_time_for_getting_args = 0;
+ my $start_time = time;
+ if($wanted_processes < $Global::infinity) {
+ $Global::dummy_jobs = 1;
+ }
+ while(1) {
+ $system_limit >= $wanted_processes and last;
+ not $more_filehandles and last;
+ $max_system_proc_reached and last;
+
+ my $before_getting_arg = time;
+ if(!$Global::dummy_jobs) {
+ get_args_or_jobs() or last;
+ }
+ $wait_time_for_getting_args += time - $before_getting_arg;
+ $system_limit++;
+
+ # Every simultaneous process uses 2 filehandles to write to
+ # and 2 filehandles to read from
+ reserve_filehandles(4);
+
+ # System process limit
+ reserve_process();
+
+ my $forktime = time - $time - $wait_time_for_getting_args;
+ ::debug("run", "Time to fork $system_limit procs: ".
+ $wait_time_for_getting_args, " ", $forktime,
+ " (processes so far: ", $system_limit,")\n");
+ if($system_limit > 10 and
+ $forktime > 1 and
+ $forktime > $system_limit * 0.01) {
+		# It took more than 0.01 second on average to fork a process.
+		# Give the user a warning; they can press Ctrl-C if this
+		# is too slow.
+ ::warning_once(
+ "Starting $system_limit processes took > $forktime sec.",
+ "Consider adjusting -j. Press CTRL-C to stop.");
+ }
+ }
+ cleanup();
+
+ if($system_limit < $wanted_processes) {
+ # The system_limit is less than the wanted_processes
+ if($system_limit < 1 and not $Global::JobQueue->empty()) {
+ ::error(255,"Cannot spawn any jobs.",
+ "Try increasing 'ulimit -u' (try: ulimit -u `ulimit -Hu`)",
+ "or increasing 'nproc' in /etc/security/limits.conf",
+ "or increasing /proc/sys/kernel/pid_max");
+ }
+ if(not $more_filehandles) {
+ ::warning("Only enough file handles to run ".
+ $system_limit. " jobs in parallel.",
+ "Try running 'parallel -j0 -N $system_limit --pipe parallel -j0'",
+ "or increasing 'ulimit -n' (try: ulimit -n `ulimit -Hn`)",
+ "or increasing 'nofile' in /etc/security/limits.conf",
+ "or increasing /proc/sys/fs/file-max");
+ }
+ if($max_system_proc_reached) {
+ ::warning("Only enough available processes to run ".
+ $system_limit. " jobs in parallel.",
+ "Try increasing 'ulimit -u' (try: ulimit -u `ulimit -Hu`)",
+ "or increasing 'nproc' in /etc/security/limits.conf",
+ "or increasing /proc/sys/kernel/pid_max");
+ }
+ }
+ if($] == 5.008008 and $system_limit > 1000) {
+ # https://savannah.gnu.org/bugs/?36942
+ $system_limit = 1000;
+ }
+ if($Global::JobQueue->empty()) {
+ $system_limit ||= 1;
+ }
+ if($self->string() ne ":" and
+ $system_limit > $Global::default_simultaneous_sshlogins) {
+ $system_limit =
+ $self->simultaneous_sshlogin_limit($system_limit);
+ }
+ return $system_limit;
+ }
+}
+
+sub simultaneous_sshlogin_limit($) {
+ # Test by logging in wanted number of times simultaneously
+ # Returns:
+ # min($wanted_processes,$working_simultaneous_ssh_logins-1)
+ my $self = shift;
+ my $wanted_processes = shift;
+ if($self->{'time_to_login'}) {
+ return $wanted_processes;
+ }
+
+ # Try twice because it guesses wrong sometimes
+    # Choose the minimum
+ my $ssh_limit =
+ ::min($self->simultaneous_sshlogin($wanted_processes),
+ $self->simultaneous_sshlogin($wanted_processes));
+ if($ssh_limit < $wanted_processes) {
+ my $serverlogin = $self->string();
+ ::warning("ssh to $serverlogin only allows ".
+ "for $ssh_limit simultaneous logins.",
+ "You may raise this by changing",
+ "/etc/ssh/sshd_config:MaxStartups and MaxSessions on $serverlogin.",
+ "You can also try --sshdelay 0.1",
+ "Using only ".($ssh_limit-1)." connections ".
+ "to avoid race conditions.");
+ # Race condition can cause problem if using all sshs.
+ if($ssh_limit > 1) { $ssh_limit -= 1; }
+ }
+ return $ssh_limit;
+}
+
+sub simultaneous_sshlogin($) {
+ # Using $sshlogin try to see if we can do $wanted_processes
+ # simultaneous logins
+ # (ssh host echo simul-login & ssh host echo simul-login & ...) |
+ # grep simul|wc -l
+ # Input:
+ # $wanted_processes = Try for this many logins in parallel
+ # Returns:
+    #	  $ssh_limit = Number of successful parallel logins
+ local $/ = "\n";
+ my $self = shift;
+ my $wanted_processes = shift;
+ my $sshdelay = $opt::sshdelay ? "sleep $opt::sshdelay;" : "";
+ # TODO sh -c wrapper to work for csh
+ my $cmd = ($sshdelay.$self->wrap("echo simultaneouslogin").
+ "</dev/null 2>&1 &")x$wanted_processes;
+ ::debug("init","Trying $wanted_processes logins at ".$self->string()."\n");
+ open (my $simul_fh, "-|", "($cmd)|grep simultaneouslogin | wc -l") or
+ ::die_bug("simultaneouslogin");
+ my $ssh_limit = <$simul_fh>;
+ close $simul_fh;
+ chomp $ssh_limit;
+ return $ssh_limit;
+}
+
+sub set_ncpus($$) {
+ my $self = shift;
+ $self->{'ncpus'} = shift;
+}
+
+sub user_requested_processes($) {
+ # Parse the number of processes that the user asked for using -j
+ # Input:
+ # $opt_P = string formatted as for -P
+ # Returns:
+ # $processes = the number of processes to run on this sshlogin
+ my $self = shift;
+ my $opt_P = shift;
+ my $processes;
+ if(defined $opt_P) {
+ if (-f $opt_P) {
+ $Global::max_procs_file = $opt_P;
+ my $opt_P_file = ::slurp_or_exit($Global::max_procs_file);
+ if($opt_P_file !~ /\S/) {
+ ::warning_once("$Global::max_procs_file is empty. ".
+ "Treated as 100%");
+ $opt_P_file = "100%";
+ }
+ $processes = $self->user_requested_processes($opt_P_file);
+ } else {
+ if($opt_P eq "0") {
+ # -P 0 = infinity (or at least close)
+ $processes = $Global::infinity;
+ } else {
+ # -P +3 and -P -1
+ $opt_P =~ s/^([-+])/\$self->ncpus()$1/;
+ # -P 40%
+ $opt_P =~ s:%$:*\$self->ncpus()/100:;
+ $processes = eval $opt_P;
+ if($processes <= 0) {
+ # Do not go below 1
+ $processes = 1;
+ }
+ }
+ }
+ $processes = ::ceil($processes);
+ }
+ return $processes;
+}
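+
+# Worked examples (illustrative), assuming ncpus() returns 8:
+#   -P 0    => $Global::infinity
+#   -P +3   => eval "$self->ncpus()+3"       = 11
+#   -P -1   => eval "$self->ncpus()-1"       = 7
+#   -P 40%  => eval "40*$self->ncpus()/100"  = 3.2, ceil()'ed to 4
+# Results <= 0 are raised to 1; a file argument is re-parsed as above.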
+
+sub ncpus($) {
+ # Number of CPU threads
+    # --use_sockets_instead_of_threads = count sockets instead
+ # --use_cores_instead_of_threads = count physical cores instead
+ # Returns:
+ # $ncpus = number of cpu (threads) on this sshlogin
+ local $/ = "\n";
+ my $self = shift;
+ if(not defined $self->{'ncpus'}) {
+ if($self->local()) {
+ if($opt::use_sockets_instead_of_threads) {
+ $self->{'ncpus'} = socket_core_thread()->{'sockets'};
+ } elsif($opt::use_cores_instead_of_threads) {
+ $self->{'ncpus'} = socket_core_thread()->{'cores'};
+ } else {
+ $self->{'ncpus'} = socket_core_thread()->{'threads'};
+ }
+ } else {
+ my $ncpu;
+ $ENV{'SSHPASS'} = $self->{'password'};
+ ::debug("init",("echo | ".$self->wrap("parallel --number-of-sockets")));
+ if($opt::use_sockets_instead_of_threads
+ or
+ $opt::use_cpus_instead_of_cores) {
+ $ncpu = ::qqx("echo | ".$self->wrap("parallel --number-of-sockets"));
+ } elsif($opt::use_cores_instead_of_threads) {
+ $ncpu = ::qqx("echo | ".$self->wrap("parallel --number-of-cores"));
+ } else {
+ $ncpu = ::qqx("echo | ".$self->wrap("parallel --number-of-threads"));
+ }
+ chomp $ncpu;
+ if($ncpu =~ /^\s*[0-9]+\s*$/s) {
+ $self->{'ncpus'} = $ncpu;
+ } else {
+ ::warning("Could not figure out ".
+ "number of cpus on ".$self->string." ($ncpu). Using 1.");
+ $self->{'ncpus'} = 1;
+ }
+ }
+ }
+ return $self->{'ncpus'};
+}
+
+
+sub nproc() {
+ # Returns:
+ # Number of threads using `nproc`
+ my $no_of_threads = ::qqx("nproc");
+ chomp $no_of_threads;
+ return $no_of_threads;
+}
+
+sub no_of_sockets() {
+ return socket_core_thread()->{'sockets'};
+}
+
+sub no_of_cores() {
+ return socket_core_thread()->{'cores'};
+}
+
+sub no_of_threads() {
+ return socket_core_thread()->{'threads'};
+}
+
+sub socket_core_thread() {
+ # Returns:
+ # {
+    #	 'sockets' => #sockets = number of sockets with a CPU present
+    #	 'cores' => #cores = number of physical cores
+    #	 'threads' => #threads = number of hardware threads (hyperthreading)
+    #	 'active' => #taskset_threads = number of threads allowed by taskset
+ # }
+ my $cpu;
+ if ($^O eq 'linux') {
+ $cpu = sct_gnu_linux($cpu);
+ } elsif ($^O eq 'android') {
+ $cpu = sct_android($cpu);
+ } elsif ($^O eq 'freebsd') {
+ $cpu = sct_freebsd($cpu);
+ } elsif ($^O eq 'netbsd') {
+ $cpu = sct_netbsd($cpu);
+ } elsif ($^O eq 'openbsd') {
+ $cpu = sct_openbsd($cpu);
+ } elsif ($^O eq 'gnu') {
+ $cpu = sct_hurd($cpu);
+ } elsif ($^O eq 'darwin') {
+ $cpu = sct_darwin($cpu);
+ } elsif ($^O eq 'solaris') {
+ $cpu = sct_solaris($cpu);
+ } elsif ($^O eq 'aix') {
+ $cpu = sct_aix($cpu);
+ } elsif ($^O eq 'hpux') {
+ $cpu = sct_hpux($cpu);
+ } elsif ($^O eq 'nto') {
+ $cpu = sct_qnx($cpu);
+ } elsif ($^O eq 'svr5') {
+ $cpu = sct_openserver($cpu);
+ } elsif ($^O eq 'irix') {
+ $cpu = sct_irix($cpu);
+ } elsif ($^O eq 'dec_osf') {
+ $cpu = sct_tru64($cpu);
+ } else {
+ # Try all methods until we find something that works
+ $cpu = (sct_gnu_linux($cpu)
+ || sct_android($cpu)
+ || sct_freebsd($cpu)
+ || sct_netbsd($cpu)
+ || sct_openbsd($cpu)
+ || sct_hurd($cpu)
+ || sct_darwin($cpu)
+ || sct_solaris($cpu)
+ || sct_aix($cpu)
+ || sct_hpux($cpu)
+ || sct_qnx($cpu)
+ || sct_openserver($cpu)
+ || sct_irix($cpu)
+ || sct_tru64($cpu)
+ );
+ }
+ if(not $cpu) {
+ # Fall back: Set all to nproc
+ my $nproc = nproc();
+ if($nproc) {
+ $cpu->{'sockets'} =
+ $cpu->{'cores'} =
+ $cpu->{'threads'} =
+ $cpu->{'active'} =
+ $nproc;
+ }
+ }
+ if(not $cpu) {
+ ::warning("Cannot figure out number of cpus. Using 1.");
+ $cpu->{'sockets'} =
+ $cpu->{'cores'} =
+ $cpu->{'threads'} =
+ $cpu->{'active'} =
+ 1
+ }
+ $cpu->{'sockets'} ||= 1;
+ $cpu->{'threads'} ||= $cpu->{'cores'};
+ $cpu->{'active'} ||= $cpu->{'threads'};
+ chomp($cpu->{'sockets'},
+ $cpu->{'cores'},
+ $cpu->{'threads'},
+ $cpu->{'active'});
+ # Choose minimum of active and actual
+ my $mincpu;
+ $mincpu->{'sockets'} = ::min($cpu->{'sockets'},$cpu->{'active'});
+ $mincpu->{'cores'} = ::min($cpu->{'cores'},$cpu->{'active'});
+ $mincpu->{'threads'} = ::min($cpu->{'threads'},$cpu->{'active'});
+ return $mincpu;
+}
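+
+# Worked example (illustrative): on a host with 2 sockets, 8 physical
+# cores and 16 threads, where taskset limits the process to 4 CPUs,
+# 'active' is 4 and the returned minima are:
+#   sockets = min(2,4) = 2, cores = min(8,4) = 4, threads = min(16,4) = 4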
+
+sub sct_gnu_linux($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ my $cpu = shift;
+
+ sub read_topology($) {
+ my $prefix = shift;
+	my %sibling;
+ my %socket;
+ my $thread;
+ for($thread = 0;
+ -r "$prefix/cpu$thread/topology/physical_package_id";
+ $thread++) {
+ $socket{::slurp_or_exit(
+ "$prefix/cpu$thread/topology/physical_package_id")}++;
+ }
+ for($thread = 0;
+ -r "$prefix/cpu$thread/topology/thread_siblings";
+ $thread++) {
+	    $sibling{::slurp_or_exit(
+ "$prefix/cpu$thread/topology/thread_siblings")}++;
+ }
+ $cpu->{'sockets'} = keys %socket;
+	$cpu->{'cores'} = keys %sibling;
+ $cpu->{'threads'} = $thread;
+ }
+
+ sub read_cpuinfo(@) {
+ my @cpuinfo = @_;
+ $cpu->{'sockets'} = 0;
+ $cpu->{'cores'} = 0;
+ $cpu->{'threads'} = 0;
+ my %seen;
+ my %phy_seen;
+ my $physicalid;
+ for(@cpuinfo) {
+ # physical id : 0
+ if(/^physical id.*[:](.*)/) {
+ $physicalid = $1;
+ if(not $phy_seen{$1}++) {
+ $cpu->{'sockets'}++;
+ }
+ }
+ # core id : 3
+ if(/^core id.*[:](.*)/ and not $seen{$physicalid,$1}++) {
+ $cpu->{'cores'}++;
+ }
+ # processor : 2
+ /^processor.*[:]\s*\d/i and $cpu->{'threads'}++;
+ }
+ $cpu->{'cores'} ||= $cpu->{'threads'};
+ $cpu->{'cpus'} ||= $cpu->{'threads'};
+ $cpu->{'sockets'} ||= 1;
+ }
+
+ sub read_lscpu(@) {
+ my @lscpu = @_;
+ my $threads_per_core;
+ my $cores_per_socket;
+ for(@lscpu) {
+ /^CPU.s.:\s*(\d+)/ and $cpu->{'threads'} = $1;
+ /^Thread.s. per core:\s*(\d+)/ and $threads_per_core = $1;
+ /^Core.s. per socket:\s*(\d+)/ and $cores_per_socket = $1;
+ /^(CPU )?Socket.s.:\s*(\d+)/i and $cpu->{'sockets'} = $2;
+ }
+ if($cores_per_socket and $cpu->{'sockets'}) {
+ $cpu->{'cores'} = $cores_per_socket * $cpu->{'sockets'};
+ }
+ if($threads_per_core and $cpu->{'cores'}) {
+ $cpu->{'threads'} = $threads_per_core * $cpu->{'cores'};
+ }
+ if($threads_per_core and $cpu->{'threads'}) {
+ $cpu->{'cores'} = $cpu->{'threads'} / $threads_per_core;
+ }
+ $cpu->{'cpus'} ||= $cpu->{'threads'};
+ }
+
+ local $/ = "\n"; # If delimiter is set, then $/ will be wrong
+ my @cpuinfo;
+ my @lscpu;
+ if($ENV{'PARALLEL_CPUINFO'}) {
+ # Use CPUINFO from environment - used for testing only
+ read_cpuinfo(split/(?<=\n)/,$ENV{'PARALLEL_CPUINFO'});
+ } elsif($ENV{'PARALLEL_LSCPU'}) {
+ # Use LSCPU from environment - used for testing only
+ read_lscpu(split/(?<=\n)/,$ENV{'PARALLEL_LSCPU'});
+ } elsif(-r "$ENV{'PARALLEL_CPUPREFIX'}/cpu0/topology/thread_siblings") {
+ # Use CPUPREFIX from environment - used for testing only
+ read_topology($ENV{'PARALLEL_CPUPREFIX'});
+ } elsif($cpu->{'sockets'} and $cpu->{'cores'} and $cpu->{'threads'}) {
+ # Skip /proc/cpuinfo - already set
+ } else {
+ # Not debugging: Look at this computer
+ if(!($cpu->{'sockets'} and $cpu->{'cores'} and $cpu->{'threads'})
+ and
+ open(my $in_fh, "-|", "lscpu")) {
+ # Parse output from lscpu
+ read_lscpu(<$in_fh>);
+ close $in_fh;
+ }
+ if(!($cpu->{'sockets'} and $cpu->{'cores'} and $cpu->{'threads'})
+ and
+ -r "/sys/devices/system/cpu/cpu0/topology/thread_siblings") {
+ read_topology("/sys/devices/system/cpu");
+ }
+ if(!($cpu->{'sockets'} and $cpu->{'cores'} and $cpu->{'threads'})
+ and
+ open(my $in_fh, "<", "/proc/cpuinfo")) {
+ # Read /proc/cpuinfo
+ read_cpuinfo(<$in_fh>);
+ close $in_fh;
+ }
+ }
+ if(-e "/proc/self/status"
+ and not $ENV{'PARALLEL_CPUINFO'}
+ and not $ENV{'PARALLEL_LSCPU'}) {
+ # if 'taskset' is used to limit number of threads
+ if(open(my $in_fh, "<", "/proc/self/status")) {
+ while(<$in_fh>) {
+ if(/^Cpus_allowed:\s*(\S+)/) {
+ my $a = $1;
+ $a =~ tr/,//d;
+ $cpu->{'active'} = unpack ("%32b*", pack ("H*",$a));
+ }
+ }
+ close $in_fh;
+ }
+ }
+ return $cpu;
+}
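+
+# Worked example (illustrative) for the taskset detection above:
+#   Cpus_allowed: ff          => 8 set bits  => active = 8
+#   Cpus_allowed: f,ffffffff  => 36 set bits => active = 36
+# (commas are stripped before the bits are counted)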
+
+sub sct_android($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ # Use GNU/Linux
+ return sct_gnu_linux($_[0]);
+}
+
+sub sct_freebsd($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||=
+ (::qqx(qq{ sysctl -a dev.cpu | grep \%parent | awk '{ print \$2 }' | uniq | wc -l | awk '{ print \$1 }' })
+ or
+ ::qqx(qq{ sysctl hw.ncpu | awk '{ print \$2 }' }));
+ $cpu->{'threads'} ||=
+ (::qqx(qq{ sysctl hw.ncpu | awk '{ print \$2 }' })
+ or
+ ::qqx(qq{ sysctl -a dev.cpu | grep \%parent | awk '{ print \$2 }' | uniq | wc -l | awk '{ print \$1 }' }));
+ return $cpu;
+}
+
+sub sct_netbsd($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||= ::qqx("sysctl -n hw.ncpu");
+ return $cpu;
+}
+
+sub sct_openbsd($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||= ::qqx('sysctl -n hw.ncpu');
+ return $cpu;
+}
+
+sub sct_hurd($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||= ::qqx("nproc");
+ return $cpu;
+}
+
+sub sct_darwin($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||=
+ (::qqx('sysctl -n hw.physicalcpu')
+ or
+ ::qqx(qq{ sysctl -a hw | grep [^a-z]physicalcpu[^a-z] | awk '{ print \$2 }' }));
+ $cpu->{'threads'} ||=
+ (::qqx('sysctl -n hw.logicalcpu')
+ or
+ ::qqx(qq{ sysctl -a hw | grep [^a-z]logicalcpu[^a-z] | awk '{ print \$2 }' }));
+ return $cpu;
+}
+
+sub sct_solaris($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ if(not $cpu->{'cores'}) {
+ if(-x "/usr/bin/kstat") {
+ my @chip_id = ::qqx("/usr/bin/kstat cpu_info|grep chip_id");
+ if($#chip_id >= 0) {
+ $cpu->{'sockets'} ||= $#chip_id +1;
+ }
+ my @core_id = ::qqx("/usr/bin/kstat -m cpu_info|grep -w core_id|uniq");
+ if($#core_id >= 0) {
+ $cpu->{'cores'} ||= $#core_id +1;
+ }
+ }
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = ::qqx("/usr/sbin/psrinfo -p");
+ if($#psrinfo >= 0) {
+ $cpu->{'sockets'} ||= $psrinfo[0];
+ }
+ }
+ if(-x "/usr/sbin/prtconf") {
+ my @prtconf = ::qqx("/usr/sbin/prtconf | grep cpu..instance");
+ if($#prtconf >= 0) {
+ $cpu->{'cores'} ||= $#prtconf +1;
+ }
+ }
+ }
+ return $cpu;
+}
+
+sub sct_aix($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ if(not $cpu->{'cores'}) {
+ if(-x "/usr/sbin/lscfg") {
+ if(open(my $in_fh, "-|",
+ "/usr/sbin/lscfg -vs |grep proc | wc -l|tr -d ' '")) {
+ $cpu->{'cores'} = <$in_fh>;
+ close $in_fh;
+ }
+ }
+ }
+ if(not $cpu->{'threads'}) {
+ if(-x "/usr/bin/vmstat") {
+ if(open(my $in_fh, "-|", "/usr/bin/vmstat 1 1")) {
+ while(<$in_fh>) {
+ /lcpu=([0-9]*) / and $cpu->{'threads'} = $1;
+ }
+ close $in_fh;
+ }
+ }
+ }
+ return $cpu;
+}
+
+sub sct_hpux($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||=
+ ::qqx(qq{ /usr/bin/mpsched -s 2>&1 | grep 'Locality Domain Count' | awk '{ print \$4 }'});
+ $cpu->{'threads'} ||=
+ ::qqx(qq{ /usr/bin/mpsched -s 2>&1 | perl -ne '/Processor Count\\D+(\\d+)/ and print "\$1"'});
+ return $cpu;
+}
+
+sub sct_qnx($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ # BUG: It is not known how to calculate this.
+
+ return $cpu;
+}
+
+sub sct_openserver($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ if(not $cpu->{'cores'}) {
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = ::qqx("/usr/sbin/psrinfo");
+ if($#psrinfo >= 0) {
+ $cpu->{'cores'} = $#psrinfo +1;
+ }
+ }
+ }
+ $cpu->{'sockets'} ||= $cpu->{'cores'};
+ return $cpu;
+}
+
+sub sct_irix($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||=
+ ::qqx(qq{ hinv | grep HZ | grep Processor | awk '{print \$1}' });
+ return $cpu;
+}
+
+sub sct_tru64($) {
+ # Returns:
+ # { 'sockets' => #sockets
+ # 'cores' => #cores
+ # 'threads' => #threads
+ # 'active' => #taskset_threads }
+ local $/ = "\n";
+ my $cpu = shift;
+ $cpu->{'cores'} ||= ::qqx("sizer -pr");
+ $cpu->{'sockets'} ||= $cpu->{'cores'};
+ $cpu->{'threads'} ||= $cpu->{'cores'};
+
+ return $cpu;
+}
+
+sub sshcommand($) {
+ # Returns:
+ # $sshcommand = the command (incl options) to run when using ssh
+ my $self = shift;
+ if (not defined $self->{'sshcommand'}) {
+ ::die_bug("sshcommand not set");
+ }
+ return $self->{'sshcommand'};
+}
+
+sub local($) {
+ my $self = shift;
+ return $self->{'local'};
+}
+
+sub control_path_dir($) {
+ # Returns:
+ # $control_path_dir = dir of control path (for -M)
+ my $self = shift;
+ if(not defined $self->{'control_path_dir'}) {
+ $self->{'control_path_dir'} =
+ # Use $ENV{'TMPDIR'} as that is typically not
+ # NFS mounted.
+ # The file system must support UNIX domain sockets
+ File::Temp::tempdir($ENV{'TMPDIR'}
+ . "/ctrlpath-XXXX",
+ CLEANUP => 1);
+ }
+ return $self->{'control_path_dir'};
+}
+
+sub rsync_transfer_cmd($) {
+ # Command to run to transfer a file
+ # Input:
+ # $file = filename of file to transfer
+ # $workdir = destination dir
+ # Returns:
+ # $cmd = rsync command to run to transfer $file ("" if unreadable)
+ my $self = shift;
+ my $file = shift;
+ my $workdir = shift;
+ if(not -r $file) {
+ ::warning($file. " is not readable and will not be transferred.");
+ return "true";
+ }
+ my $rsync_destdir;
+ my $relpath = ($file !~ m:^/:) || ($file =~ m:/\./:); # Is the path relative or /./?
+ if($relpath) {
+ $rsync_destdir = ::shell_quote_file($workdir);
+ } else {
+ # rsync /foo/bar /
+ $rsync_destdir = "/";
+ }
+ $file = ::shell_quote_file($file);
+ # Make dir if it does not exist
+ return($self->wrap("mkdir -p $rsync_destdir") . " && " .
+ $self->rsync()." $file ".$self->{'host'}.":$rsync_destdir");
+}
+
+{
+ my $rsync_fix;
+ my $rsync_version;
+
+ sub rsync($) {
+ # rsync 3.1.x uses protocol 31 which is unsupported by 2.5.7.
+ # If the version >= 3.1.0: downgrade to protocol 30
+ # rsync 3.2.4 introduces a quoting bug: Add --old-args for that
+ # Returns:
+ # $rsync = "rsync" or "rsync --protocol 30 --old-args"
+ sub rsync_version {
+ if(not $rsync_version) {
+ my @out = `rsync --version`;
+ if(not @out) {
+ if(::which("rsync")) {
+ ::die_bug("'rsync --version' gave no output.");
+ } else {
+ ::error(255,"'rsync' is not in \$PATH.");
+ }
+ }
+ for (@out) {
+ # rsync version 3.1.3 protocol version 31
+ # rsync version v3.2.3 protocol version 31
+ if(/version v?(\d+)\.(\d+)(\.(\d+))?/) {
+ # 3.2.27 => 03.0227
+ $rsync_version = sprintf "%02d.%02d%02d",$1,$2,$4;
+ }
+ }
+ $rsync_version or
+ ::die_bug("Cannot figure out version of rsync: @out");
+ }
+ }
+
+ sub rsync_fixup {
+ # rsync 3.1.x uses protocol 31 which is unsupported by 2.5.7.
+ # If the version >= 3.1.0: downgrade to protocol 30
+ # Returns:
+ # $rsync = "rsync" or "rsync --protocol 30"
+ if(not $rsync_fix) {
+ rsync_version();
+ if($rsync_version >= 3.01) {
+ # Version 3.1.0 or later: Downgrade to protocol 30
+ $rsync_fix .= " --protocol 30";
+ }
+ if($rsync_version >= 3.0204) {
+ # Version 3.2.4 .. 3.2.8: --old-args
+ $rsync_fix .= " --old-args";
+ }
+ }
+ return $rsync_fix;
+ }
+ my $self = shift;
+
+ return "rsync".rsync_fixup()." ".$ENV{'PARALLEL_RSYNC_OPTS'}.
+ " -e".::Q($self->sshcmd());
+ }
+}
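+
+# Worked example (illustrative): "rsync version 3.2.3" is normalized
+# to "03.0203"; numerically 3.0203 >= 3.01, so '--protocol 30' is
+# added, but 3.0203 < 3.0204, so '--old-args' is not. For 3.2.4
+# ("03.0204") both options are added.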
+
+sub cleanup_cmd($$$) {
+ # Command to run to remove the remote file
+ # Input:
+ # $file = filename to remove
+ # $workdir = destination dir
+ # Returns:
+ # $cmd = ssh command to run to remove $file and empty parent dirs
+ my $self = shift;
+ my $file = shift;
+ my $workdir = shift;
+ my $f = $file;
+ if($f =~ m:/\./:) {
+ # foo/bar/./baz/quux => workdir/baz/quux
+ # /foo/bar/./baz/quux => workdir/baz/quux
+ $f =~ s:.*/\./:$workdir/:;
+ } elsif($f =~ m:^[^/]:) {
+ # foo/bar => workdir/foo/bar
+ $f = $workdir."/".$f;
+ }
+ my @subdirs = split m:/:, ::dirname($f);
+ my @rmdir;
+ my $dir = "";
+ for(@subdirs) {
+ $dir .= $_."/";
+ unshift @rmdir, ::shell_quote_file($dir);
+ }
+ my $rmdir = @rmdir ? "rmdir @rmdir 2>/dev/null;" : "";
+ if(defined $opt::workdir and $opt::workdir eq "...") {
+ $rmdir .= "rm -rf " . ::shell_quote_file($workdir).';';
+ }
+ my $rmf = "sh -c ".
+ ::Q("rm -f ".::shell_quote_file($f)." 2>/dev/null;".$rmdir);
+ return $self->wrap(::Q($rmf));
+}
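+
+# Worked example (illustrative): with $file = "foo/bar" and
+# $workdir = "/wd" the generated command is roughly:
+#   <ssh wrap> sh -c 'rm -f /wd/foo/bar 2>/dev/null; rmdir /wd/foo/ /wd/ / 2>/dev/null;'
+# removing empty parent dirs innermost first. With '--workdir ...'
+# an 'rm -rf' of the whole workdir is appended.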
+
+package JobQueue;
+
+sub new($) {
+ my $class = shift;
+ my $commandref = shift;
+ my $read_from = shift;
+ my $context_replace = shift;
+ my $max_number_of_args = shift;
+ my $transfer_files = shift;
+ my $return_files = shift;
+ my $template_names = shift;
+ my $template_contents = shift;
+ my $commandlinequeue = CommandLineQueue->new
+ ($commandref, $read_from, $context_replace, $max_number_of_args,
+ $transfer_files, $return_files, $template_names, $template_contents);
+ my @unget = ();
+ return bless {
+ 'unget' => \@unget,
+ 'commandlinequeue' => $commandlinequeue,
+ 'this_job_no' => 0,
+ 'total_jobs' => undef,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ my $self = shift;
+
+ $self->{'this_job_no'}++;
+ if(@{$self->{'unget'}}) {
+ my $job = shift @{$self->{'unget'}};
+ # {%} may have changed, so flush computed values
+ $job && $job->flush_cache();
+ return $job;
+ } else {
+ my $commandline = $self->{'commandlinequeue'}->get();
+ if(defined $commandline) {
+ return Job->new($commandline);
+ } else {
+ $self->{'this_job_no'}--;
+ return undef;
+ }
+ }
+}
+
+sub unget($) {
+ my $self = shift;
+ unshift @{$self->{'unget'}}, @_;
+ $self->{'this_job_no'} -= @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}}) &&
+ $self->{'commandlinequeue'}->empty();
+ ::debug("run", "JobQueue->empty $empty ");
+ return $empty;
+}
+
+sub total_jobs($) {
+ my $self = shift;
+ if(not defined $self->{'total_jobs'}) {
+ if($opt::pipe and not $opt::tee) {
+ ::error(255,"--pipe is incompatible with --eta/--bar/--shuf");
+ }
+ if($opt::totaljobs) {
+ $self->{'total_jobs'} = $opt::totaljobs;
+ } elsif($opt::sqlworker) {
+ $self->{'total_jobs'} = $Global::sql->total_jobs();
+ } else {
+ my $record;
+ my @arg_records;
+ my $record_queue = $self->{'commandlinequeue'}{'arg_queue'};
+ my $start = time;
+ while($record = $record_queue->get()) {
+ push @arg_records, $record;
+ if(time - $start > 10) {
+ ::warning("Reading ".scalar(@arg_records).
+ " arguments took longer than 10 seconds.");
+ $opt::eta && ::warning("Consider removing --eta.");
+ $opt::bar && ::warning("Consider removing --bar.");
+ $opt::shuf && ::warning("Consider removing --shuf.");
+ last;
+ }
+ }
+ while($record = $record_queue->get()) {
+ push @arg_records, $record;
+ }
+ if($opt::shuf and @arg_records) {
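+		# Fisher-Yates shuffle of @arg_records in place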
+ my $i = @arg_records;
+ while (--$i) {
+ my $j = int rand($i+1);
+ @arg_records[$i,$j] = @arg_records[$j,$i];
+ }
+ }
+ $record_queue->unget(@arg_records);
+ # $#arg_records = number of args - 1
+	    # One arg record was already read for this job (so add 1 more)
+ my $num_args = $#arg_records + 2;
+	    # This job is not started yet, so subtract 1
+ my $started_jobs = $self->{'this_job_no'} - 1;
+ my $max_args = ::max($Global::max_number_of_args,1);
+ $self->{'total_jobs'} = ::ceil($num_args / $max_args)
+ + $started_jobs;
+ ::debug("init","Total jobs: ".$self->{'total_jobs'}.
+ " ($num_args/$max_args + $started_jobs)\n");
+ }
+ }
+ return $self->{'total_jobs'};
+}
+
+sub flush_total_jobs($) {
+ # Unset total_jobs to force recomputing
+ my $self = shift;
+ ::debug("init","flush Total jobs: ");
+ $self->{'total_jobs'} = undef;
+}
+
+sub next_seq($) {
+ my $self = shift;
+
+ return $self->{'commandlinequeue'}->seq();
+}
+
+sub quote_args($) {
+ my $self = shift;
+ return $self->{'commandlinequeue'}->quote_args();
+}
+
+
+package Job;
+
+sub new($) {
+ my $class = shift;
+ my $commandlineref = shift;
+ return bless {
+ 'commandline' => $commandlineref, # CommandLine object
+ 'workdir' => undef, # --workdir
+ # filehandle for stdin (used for --pipe)
+ # filename for writing stdout to (used for --files)
+ # remaining data not sent to stdin (used for --pipe)
+ # tmpfiles to cleanup when job is done
+ 'unlink' => [],
+ # amount of data sent via stdin (used for --pipe)
+ 'transfersize' => 0, # size of files using --transfer
+ 'returnsize' => 0, # size of files using --return
+ 'pid' => undef,
+ # hash of { SSHLogins => number of times the command failed there }
+ 'failed' => undef,
+ 'sshlogin' => undef,
+ # The commandline wrapped with rsync and ssh
+ 'sshlogin_wrap' => undef,
+ 'exitstatus' => undef,
+ 'exitsignal' => undef,
+ # Timestamp for timeout if any
+ 'timeout' => undef,
+ 'virgin' => 1,
+ # Output used for SQL and CSV-output
+ 'output' => { 1 => [], 2 => [] },
+ 'halfline' => { 1 => [], 2 => [] },
+ }, ref($class) || $class;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ $self->{'commandline'}->flush_cache();
+}
+
+sub replaced($) {
+ my $self = shift;
+ $self->{'commandline'} or ::die_bug("commandline empty");
+ return $self->{'commandline'}->replaced();
+}
+
+{
+ my $next_available_row;
+
+ sub row($) {
+ my $self = shift;
+ if(not defined $self->{'row'}) {
+ if($opt::keeporder) {
+ $self->{'row'} = $self->seq();
+ } else {
+ $self->{'row'} = ++$next_available_row;
+ }
+ }
+ return $self->{'row'};
+ }
+}
+
+sub seq($) {
+ my $self = shift;
+ return $self->{'commandline'}->seq();
+}
+
+sub set_seq($$) {
+ my $self = shift;
+ return $self->{'commandline'}->set_seq(shift);
+}
+
+sub slot($) {
+ my $self = shift;
+ return $self->{'commandline'}->slot();
+}
+
+sub free_slot($) {
+ my $self = shift;
+ push @Global::slots, $self->slot();
+}
+
+{
+ my($cattail);
+
+ sub cattail() {
+ # Returns:
+ # $cattail = perl program for:
+	#     cattail comfile "decomp-prg" wpid [file_stdin] [file_to_unlink]
+	#     comfile = communication file; non-empty once the writer has started
+	#     decomp-prg = decompress program
+	#     wpid = pid of writer program
+	#     file_stdin = file to decompress
+	#     file_to_unlink = unlink this file
+ if(not $cattail) {
+ $cattail = q{
+	# cat followed by tail (possibly with rm as soon as the file is opened)
+ # If $writerpid dead: finish after this round
+ use Fcntl;
+ $|=1;
+
+ my ($comfile, $cmd, $writerpid, $read_file, $unlink_file) = @ARGV;
+ if($read_file) {
+ open(IN,"<",$read_file) || die("cattail: Cannot open $read_file");
+ } else {
+ *IN = *STDIN;
+ }
+ while(! -s $comfile) {
+ # Writer has not opened the buffer file, so we cannot remove it yet
+ $sleep = ($sleep < 30) ? ($sleep * 1.001 + 0.01) : ($sleep);
+ usleep($sleep);
+ }
+ # The writer and we have both opened the file, so it is safe to unlink it
+ unlink $unlink_file;
+ unlink $comfile;
+
+ my $first_round = 1;
+ my $flags;
+ fcntl(IN, F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
+ $flags |= O_NONBLOCK; # Add non-blocking to the flags
+ fcntl(IN, F_SETFL, $flags) || die $!; # Set the flags on the filehandle
+
+ while(1) {
+ # clear EOF
+ seek(IN,0,1);
+ my $writer_running = kill 0, $writerpid;
+ $read = sysread(IN,$buf,131072);
+ if($read) {
+ if($first_round) {
+		    # Only start the command if there is any input to process
+ $first_round = 0;
+ open(OUT,"|-",$cmd) || die("cattail: Cannot run $cmd");
+ }
+
+ # Blocking print
+ while($buf) {
+ my $bytes_written = syswrite(OUT,$buf);
+ # syswrite may be interrupted by SIGHUP
+ substr($buf,0,$bytes_written) = "";
+ }
+ # Something printed: Wait less next time
+ $sleep /= 2;
+ } else {
+ if(eof(IN) and not $writer_running) {
+			# Writer dead: The decompressor will never get more input
+ close OUT;
+ exit;
+ }
+ # TODO This could probably be done more efficiently using select(2)
+ # Nothing read: Wait longer before next read
+ # Up to 100 milliseconds
+ $sleep = ($sleep < 100) ? ($sleep * 1.001 + 0.01) : ($sleep);
+ usleep($sleep);
+ }
+ }
+
+ sub usleep {
+ # Sleep this many milliseconds.
+		  my $ms = shift;
+		  select(undef, undef, undef, $ms/1000);
+ }
+ };
+ $cattail =~ s/#.*//mg;
+ $cattail =~ s/\s+/ /g;
+ }
+ return $cattail;
+ }
+}
+
+sub openoutputfiles($) {
+ # Open files for STDOUT and STDERR
+ # Set file handles in $self->fh
+ my $self = shift;
+ my ($outfhw, $errfhw, $outname, $errname);
+
+ if($opt::latestline) {
+ # Do not save to files: Use non-blocking pipe
+ my ($outfhr, $errfhr);
+ pipe($outfhr, $outfhw) || die;
+ $self->set_fh(1,'w',$outfhw);
+ $self->set_fh(2,'w',$outfhw);
+ $self->set_fh(1,'r',$outfhr);
+ $self->set_fh(2,'r',$outfhr);
+ # Make it possible to read non-blocking from the pipe
+ for my $fdno (1,2) {
+ ::set_fh_non_blocking($self->fh($fdno,'r'));
+ }
+ # Return immediately because we do not need setting filenames
+ return;
+ } elsif($Global::linebuffer and not
+ ($opt::keeporder or $Global::files or $opt::results or
+ $opt::compress or $opt::compress_program or
+ $opt::decompress_program)) {
+ # Do not save to files: Use non-blocking pipe
+ my ($outfhr, $errfhr);
+ pipe($outfhr, $outfhw) || die;
+ pipe($errfhr, $errfhw) || die;
+ $self->set_fh(1,'w',$outfhw);
+ $self->set_fh(2,'w',$errfhw);
+ $self->set_fh(1,'r',$outfhr);
+ $self->set_fh(2,'r',$errfhr);
+ # Make it possible to read non-blocking from the pipe
+ for my $fdno (1,2) {
+ ::set_fh_non_blocking($self->fh($fdno,'r'));
+ }
+ # Return immediately because we do not need setting filenames
+ return;
+ } elsif($opt::results and not $Global::csvsep and not $Global::jsonout) {
+ # If --results, but not --results *.csv/*.tsv
+ my $out = $self->{'commandline'}->results_out();
+ my $seqname;
+ if($out eq $opt::results or $out =~ m:/$:) {
+ # $opt::results = simple string or ending in /
+ # => $out is a dir/
+ # prefix/name1/val1/name2/val2/seq
+ $seqname = $out."seq";
+ # prefix/name1/val1/name2/val2/stdout
+ $outname = $out."stdout";
+ # prefix/name1/val1/name2/val2/stderr
+ $errname = $out."stderr";
+ } else {
+ # $opt::results = replacement string not ending in /
+ # => $out is a file
+ $outname = $out;
+ $errname = "$out.err";
+ $seqname = "$out.seq";
+ }
+ ::write_or_exit($seqname, $self->seq());
+ $outfhw = ::open_or_exit("+>", $outname);
+ $errfhw = ::open_or_exit("+>", $errname);
+ $self->set_fh(1,"unlink","");
+ $self->set_fh(2,"unlink","");
+ if($opt::sqlworker) {
+ # Save the filenames in SQL table
+ $Global::sql->update("SET Stdout = ?, Stderr = ? ".
+ "WHERE Seq = ". $self->seq(),
+ $outname, $errname);
+ }
+ } elsif(not $opt::ungroup) {
+ # To group we create temporary files for STDOUT and STDERR
+	# To avoid cleanup later, unlink the files immediately (but keep them open)
+ if($Global::files) {
+ ($outfhw, $outname) = ::tmpfile(SUFFIX => ".par");
+ ($errfhw, $errname) = ::tmpfile(SUFFIX => ".par");
+ # --files => only remove stderr
+ $self->set_fh(1,"unlink","");
+ $self->set_fh(2,"unlink",$errname);
+ } else {
+ ($outfhw, $outname) = ::tmpfile(SUFFIX => ".par");
+ ($errfhw, $errname) = ::tmpfile(SUFFIX => ".par");
+ $self->set_fh(1,"unlink",$outname);
+ $self->set_fh(2,"unlink",$errname);
+ }
+ } else {
+ # --ungroup
+ open($outfhw,">&",$Global::fh{1}) || die;
+ open($errfhw,">&",$Global::fh{2}) || die;
+ # File name must be empty as it will otherwise be printed
+ $outname = "";
+ $errname = "";
+ $self->set_fh(1,"unlink",$outname);
+ $self->set_fh(2,"unlink",$errname);
+ }
+ # Set writing FD
+ $self->set_fh(1,'w',$outfhw);
+ $self->set_fh(2,'w',$errfhw);
+ $self->set_fh(1,'name',$outname);
+ $self->set_fh(2,'name',$errname);
+ if($opt::compress) {
+ $self->filter_through_compress();
+ } elsif(not $opt::ungroup) {
+ $self->grouped();
+ }
+ if($Global::linebuffer) {
+ # Make it possible to read non-blocking from
+ # the buffer files
+ # Used for --linebuffer with -k, --files, --res, --compress*
+ for my $fdno (1,2) {
+ ::set_fh_non_blocking($self->fh($fdno,'r'));
+ }
+ }
+}
+
+sub print_verbose_dryrun($) {
+ # If -v set: print command to stdout (possibly buffered)
+ # This must be done before starting the command
+ my $self = shift;
+ if($Global::verbose or $opt::dryrun) {
+ my $fh = $self->fh(1,"w");
+ if($Global::verbose <= 1) {
+ print $fh $self->replaced(),"\n";
+ } else {
+ # Verbose level > 1: Print the rsync and stuff
+ print $fh $self->wrapped(),"\n";
+ }
+ }
+ if($opt::sqlworker) {
+ $Global::sql->update("SET Command = ? WHERE Seq = ".$self->seq(),
+ $self->replaced());
+ }
+}
+
+sub add_rm($) {
+ # Files to remove when job is done
+ my $self = shift;
+ push @{$self->{'unlink'}}, @_;
+}
+
+sub get_rm($) {
+ # Files to remove when job is done
+ my $self = shift;
+ return @{$self->{'unlink'}};
+}
+
+sub cleanup($) {
+ # Remove files when job is done
+ my $self = shift;
+ unlink $self->get_rm();
+ delete @Global::unlink{$self->get_rm()};
+}
+
+sub grouped($) {
+ my $self = shift;
+ # Set reading FD if using --group (--ungroup does not need it)
+ for my $fdno (1,2) {
+ # Re-open the file for reading
+ # so fdw can be closed separately
+ # and fdr can be seeked separately (for --line-buffer)
+ my $fdr = ::open_or_exit("<", $self->fh($fdno,'name'));
+ $self->set_fh($fdno,'r',$fdr);
+ # Unlink if not debugging
+ $Global::debug or ::rm($self->fh($fdno,"unlink"));
+ }
+}
+
+sub empty_input_wrapper($) {
+ # If no input: exit(0)
+ # If some input: Pass input as input to command on STDIN
+ # This avoids starting the command if there is no input.
+ # Input:
+ # $command = command to pipe data to
+ # Returns:
+ # $wrapped_command = the wrapped command
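+ # Call shape (illustrative):
+ #   empty_input_wrapper("wc -l") =>
+ #     perl -e '<reader script>' $Global::shell -c 'wc -l'
+ # The perl script only spawns the command if at least one byte
+ # can be read from STDIN; otherwise it exits 0 without starting it.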
+ my $command = shift;
+ # The optimal block size differs
+ # It has been measured on:
+ # AMD 6376: 59000
+ # <big ppar --pipe --block 100M --test $1 -j1 'cat >/dev/null';
+ my $script =
+ ::spacefree(0,q{
+ if(sysread(STDIN, $buf, 1)) {
+ open($fh, "|-", @ARGV) || die;
+ syswrite($fh, $buf);
+ while($read = sysread(STDIN, $buf, 59000)) {
+ syswrite($fh, $buf);
+ }
+ close $fh;
+ exit ($?&127 ? 128+($?&127) : 1+$?>>8)
+ }
+ });
+ ::debug("run",'Empty wrap: perl -e '.::Q($script)."\n");
+ if($Global::cshell
+ and
+ length $command > 499) {
+ # csh does not like words longer than 1000 (499 quoted)
+ # $command = "perl -e '".base64_zip_eval()."' ".
+ # join" ",string_zip_base64(
+ # 'exec "'.::perl_quote_scalar($command).'"');
+ return 'perl -e '.::Q($script)." ".
+ base64_wrap("exec \"$Global::shell\",'-c',\"".
+ ::perl_quote_scalar($command).'"');
+ } else {
+ return 'perl -e '.::Q($script)." ".
+ $Global::shell." -c ".::Q($command);
+ }
+}
+
+sub filter_through_compress($) {
+ my $self = shift;
+ # Send stdout to stdin for $opt::compress_program(1)
+ # Send stderr to stdin for $opt::compress_program(2)
+ # cattail get pid: $pid = $self->fh($fdno,'rpid');
+ my $cattail = cattail();
+
+ for my $fdno (1,2) {
+ # Make a communication file.
+ my ($fh, $comfile) = ::tmpfile(SUFFIX => ".pac");
+ close $fh;
+ # Compressor: (echo > $comfile; compress pipe) > output
+ # When the echo is written to $comfile,
+ # it is known that output file is opened,
+ # thus output file can then be removed by the decompressor.
+ # empty_input_wrapper is needed for plzip
+ my $qcom = ::Q($comfile);
+ my $wpid = open(my $fdw,"|-", "(echo > $qcom; ".
+ empty_input_wrapper($opt::compress_program).") >".
+ ::Q($self->fh($fdno,'name'))) || die $?;
+ $self->set_fh($fdno,'w',$fdw);
+ $self->set_fh($fdno,'wpid',$wpid);
+ # Decompressor: open output; -s $comfile > 0: rm $comfile output;
+ # decompress output > stdout
+ my $rpid = open(my $fdr, "-|", "perl", "-e", $cattail, $comfile,
+ $opt::decompress_program, $wpid,
+ $self->fh($fdno,'name'),$self->fh($fdno,'unlink'))
+ || die $?;
+ $self->set_fh($fdno,'r',$fdr);
+ $self->set_fh($fdno,'rpid',$rpid);
+ }
+}
+
+sub set_fh($$$$) {
+ # Set file handle
+ my ($self, $fd_no, $key, $fh) = @_;
+ $self->{'fd'}{$fd_no,$key} = $fh;
+}
+
+sub fh($) {
+ # Get file handle
+ my ($self, $fd_no, $key) = @_;
+ return $self->{'fd'}{$fd_no,$key};
+}
+
+sub write_block($) {
+ my $self = shift;
+ my $stdin_fh = $self->fh(0,"w");
+ if(fork()) {
+ # Close in parent
+ close $stdin_fh;
+ } else {
+ # If writing to a closed pipe:
+ # Do not call the signal handler; just let nothing be written
+ local $SIG{PIPE} = undef;
+
+ for my $part (
+ grep { defined $_ }
+ $self->{'header'},$self->{'block'}) {
+ # syswrite may not write all in one go,
+ # so make sure everything is written.
+ my $written;
+ while($written = syswrite($stdin_fh,$$part)) {
+ substr($$part,0,$written) = "";
+ }
+ }
+ close $stdin_fh;
+ exit(0);
+ }
+}
+
+sub write($) {
+ my $self = shift;
+ my $remaining_ref = shift;
+ my $stdin_fh = $self->fh(0,"w");
+
+ my $len = length $$remaining_ref;
+ # syswrite may not write all in one go,
+ # so make sure everything is written.
+ my $written;
+
+ # If writing to a closed pipe:
+ # Do not call the signal handler; just let nothing be written
+ local $SIG{PIPE} = undef;
+ while($written = syswrite($stdin_fh,$$remaining_ref)){
+ substr($$remaining_ref,0,$written) = "";
+ }
+}
+
+sub set_block($$$$$$) {
+ # Copy stdin buffer from $buffer_ref up to $endpos
+ # Prepend with $header_ref if virgin (i.e. not --roundrobin)
+ # Remove $recstart and $recend if needed
+ # Input:
+ # $header_ref = ref to $header to prepend
+ # $buffer_ref = ref to $buffer containing the block
+ # $endpos = length of $block to pass on
+ # $recstart = --recstart regexp
+ # $recend = --recend regexp
+ # Returns:
+ # N/A
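+ # Example (illustrative): with --roundrobin a virgin job gets
+ #   block = $header . substr($buffer,0,$endpos)
+ # on later blocks for the same job the header is not prepended again.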
+ my $self = shift;
+ my ($header_ref,$buffer_ref,$endpos,$recstart,$recend) = @_;
+ $self->{'header'} = $header_ref;
+ if($opt::roundrobin or $opt::remove_rec_sep or defined $opt::retries) {
+ my $a = "";
+ if(($opt::roundrobin or defined $opt::retries) and $self->virgin()) {
+ $a .= $$header_ref;
+ }
+ # Job is no longer virgin
+ $self->set_virgin(0);
+ # Make a full copy because $buffer will change
+ $a .= substr($$buffer_ref,0,$endpos);
+ $self->{'block'} = \$a;
+ if($opt::remove_rec_sep) {
+ remove_rec_sep($self->{'block'},$recstart,$recend);
+ }
+ $self->{'block_length'} = length ${$self->{'block'}};
+ } else {
+ $self->set_virgin(0);
+ for(substr($$buffer_ref,0,$endpos)) {
+ $self->{'block'} = \$_;
+ }
+ $self->{'block_length'} = $endpos + length ${$self->{'header'}};
+ }
+ $self->{'block_pos'} = 0;
+ $self->add_transfersize($self->{'block_length'});
+}
+
+sub block_ref($) {
+ my $self = shift;
+ return $self->{'block'};
+}
+
+sub block_length($) {
+ my $self = shift;
+ return $self->{'block_length'};
+}
+
+sub remove_rec_sep($) {
+ # Remove --recstart and --recend from $block
+ # Input:
+ # $block_ref = reference to $block to be modified
+ # $recstart = --recstart
+ # $recend = --recend
+ # Uses:
+ # $opt::regexp = Are --recstart/--recend regexp?
+ # Returns:
+ # N/A
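+ # Example (illustrative, --recstart BEGIN --recend END, non-regexp):
+ #   "BEGINfooENDBEGINbarEND" => "foobar"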
+ my ($block_ref,$recstart,$recend) = @_;
+ # Remove record separator
+ if($opt::regexp) {
+ $$block_ref =~ s/$recend$recstart//gom;
+ $$block_ref =~ s/^$recstart//os;
+ $$block_ref =~ s/$recend$//os;
+ } else {
+ $$block_ref =~ s/\Q$recend$recstart\E//gom;
+ $$block_ref =~ s/^\Q$recstart\E//os;
+ $$block_ref =~ s/\Q$recend\E$//os;
+ }
+}
+
+sub non_blocking_write($) {
+ my $self = shift;
+ my $something_written = 0;
+
+ my $in = $self->fh(0,"w");
+ my $rv = syswrite($in,
+ substr(${$self->{'block'}},$self->{'block_pos'}));
+ if (!defined($rv) && $! == ::EAGAIN()) {
+ # Write would block (EAGAIN) - nothing was written
+ $something_written = 0;
+ # avoid triggering auto expanding block size
+ $Global::no_autoexpand_block ||= 1;
+ } elsif ($self->{'block_pos'}+$rv != $self->{'block_length'}) {
+ # incomplete write
+ # Remove the written part
+ $self->{'block_pos'} += $rv;
+ $something_written = $rv;
+ } else {
+ # successfully wrote everything
+ # Empty block to free memory
+ my $a = "";
+ $self->set_block(\$a,\$a,0,"","");
+ $something_written = $rv;
+ }
+ ::debug("pipe", "Non-block: ", $something_written);
+ return $something_written;
+}
+
+
+sub virgin($) {
+ my $self = shift;
+ return $self->{'virgin'};
+}
+
+sub set_virgin($$) {
+ my $self = shift;
+ $self->{'virgin'} = shift;
+}
+
+sub pid($) {
+ my $self = shift;
+ return $self->{'pid'};
+}
+
+sub set_pid($$) {
+ my $self = shift;
+ $self->{'pid'} = shift;
+}
+
+sub starttime($) {
+ # Returns:
+ # UNIX-timestamp this job started
+ my $self = shift;
+ return sprintf("%.3f",$self->{'starttime'});
+}
+
+sub set_starttime($@) {
+ my $self = shift;
+ my $starttime = shift || ::now();
+ $self->{'starttime'} = $starttime;
+ $opt::sqlworker and
+ $Global::sql->update("SET Starttime = ? WHERE Seq = ".$self->seq(),
+ $starttime);
+}
+
+sub runtime($) {
+ # Returns:
+ # Run time in seconds with 3 decimals
+ my $self = shift;
+ return sprintf("%.3f",
+ int(($self->endtime() - $self->starttime())*1000)/1000);
+}
+
+sub endtime($) {
+ # Returns:
+ # UNIX-timestamp this job ended
+ # 0 if not ended yet
+ my $self = shift;
+ return ($self->{'endtime'} || 0);
+}
+
+sub set_endtime($$) {
+ my $self = shift;
+ my $endtime = shift;
+ $self->{'endtime'} = $endtime;
+ $opt::sqlworker and
+ $Global::sql->update("SET JobRuntime = ? WHERE Seq = ".$self->seq(),
+ $self->runtime());
+}
+
+sub is_timedout($) {
+ # Is the job timedout?
+ # Input:
+ # $delta_time = time that the job may run
+ # Returns:
+ # True or false
+ my $self = shift;
+ my $delta_time = shift;
+ return time > $self->{'starttime'} + $delta_time;
+}
+
+sub kill($) {
+ my $self = shift;
+ $self->set_exitstatus(-1);
+ ::kill_sleep_seq($self->pid());
+}
+
+sub suspend($) {
+ my $self = shift;
+ my @pgrps = map { -$_ } $self->pid();
+ kill "STOP", @pgrps;
+ $self->set_suspended(1);
+}
+
+sub set_suspended($$) {
+ my $self = shift;
+ $self->{'suspended'} = shift;
+}
+
+sub suspended($) {
+ my $self = shift;
+ return $self->{'suspended'};
+}
+
+sub resume($) {
+ my $self = shift;
+ my @pgrps = map { -$_ } $self->pid();
+ kill "CONT", @pgrps;
+ $self->set_suspended(0);
+}
+
+sub failed($) {
+ # return number of times failed for this $sshlogin
+ # Input:
+ # $sshlogin
+ # Returns:
+ # Number of times failed for $sshlogin
+ my $self = shift;
+ my $sshlogin = shift;
+ return $self->{'failed'}{$sshlogin};
+}
+
+sub failed_here($) {
+ # return number of times failed for the current $sshlogin
+ # Returns:
+ # Number of times failed for this sshlogin
+ my $self = shift;
+ return $self->{'failed'}{$self->sshlogin()};
+}
+
+sub add_failed($) {
+ # increase the number of times failed for this $sshlogin
+ my $self = shift;
+ my $sshlogin = shift;
+ $self->{'failed'}{$sshlogin}++;
+}
+
+sub add_failed_here($) {
+ # increase the number of times failed for the current $sshlogin
+ my $self = shift;
+ $self->{'failed'}{$self->sshlogin()}++;
+}
+
+sub reset_failed($) {
+ # Reset the number of times failed for this $sshlogin
+ my $self = shift;
+ my $sshlogin = shift;
+ delete $self->{'failed'}{$sshlogin};
+}
+
+sub reset_failed_here($) {
+ # Reset the number of times failed for the current $sshlogin
+ my $self = shift;
+ delete $self->{'failed'}{$self->sshlogin()};
+}
+
+sub min_failed($) {
+ # Returns:
+ # the number of sshlogins this command has failed on
+ # the minimal number of times this command has failed
+ my $self = shift;
+ my $min_failures =
+ ::min(map { $self->{'failed'}{$_} } keys %{$self->{'failed'}});
+ my $number_of_sshlogins_failed_on = scalar keys %{$self->{'failed'}};
+ return ($number_of_sshlogins_failed_on,$min_failures);
+}
+
+sub total_failed($) {
+ # Returns:
+ # $total_failures = the number of times this command has failed
+ my $self = shift;
+ my $total_failures = 0;
+ for (values %{$self->{'failed'}}) {
+ $total_failures += $_;
+ }
+ return $total_failures;
+}
+
+{
+ my $script;
+
+ sub postpone_exit_and_cleanup {
+ # Command to remove files and dirs (given as args) without
+ # affecting the exit value in $?/$status.
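+ # Usage shape (illustrative, bash):
+ #   $command; perl -e '<script>' "`echo $?h`" "$status" file1 dir1
+ # removes file1/dir1 but exits with the exit value of $command.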
+ if(not $script) {
+ $script = "perl -e '".
+ ::spacefree(0,q{
+ $bash=shift;
+ $csh=shift;
+ for(@ARGV){
+ unlink;
+ rmdir;
+ }
+ if($bash=~s/(\d+)h/$1/) {
+ exit $bash;
+ }
+ exit $csh;
+ }).
+ # `echo \$?h` is needed to make fish not complain
+ "' ".'"`echo \\\\\\\\\$?h`" "$status" ';
+ }
+ return $script
+ }
+}
+
+{
+ my $script;
+
+ sub fifo_wrap() {
+ # Script to create a fifo, run a command on the fifo
+ # while copying STDIN to the fifo, and finally
+ # remove the fifo and return the exit code of the command.
+ if(not $script) {
+ # {} == $PARALLEL_TMP for --fifo
+ # To make it csh compatible a wrapper needs to:
+ # * mkfifo
+ # * spawn $command &
+ # * cat > fifo
+ # * waitpid to get the exit code from $command
+ # * be less than 1000 chars long
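+ # Resulting call shape (illustrative):
+ #   perl -e '<script>' $shell '$command' $PARALLEL_TMP
+ # (the script itself runs: $shell -c '$command')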
+
+ # The optimal block size differs
+ # It has been measured on:
+ # AMD 6376: 4095
+ # ppar -a big --pipepart --block -1 --test $1 --fifo 'cat {} >/dev/null';
+ $script = "perl -e '".
+ (::spacefree
+ (0, q{
+ ($s,$c,$f) = @ARGV;
+ # mkfifo $PARALLEL_TMP
+ system "mkfifo", $f;
+ # spawn $shell -c $command &
+ $pid = fork || exec $s, "-c", $c;
+ open($o,">",$f) || die $!;
+ # cat > $PARALLEL_TMP
+ while(sysread(STDIN,$buf,4095)){
+ syswrite $o, $buf;
+ }
+ close $o;
+ # waitpid to get the exit code from $command
+ waitpid $pid,0;
+ # Cleanup
+ unlink $f;
+ exit $?/256;
+ }))."'";
+ }
+ return $script;
+ }
+}
+
+sub wrapped($) {
+ # Wrap command with:
+ # * --shellquote
+ # * --nice
+ # * --cat
+ # * --fifo
+ # * --sshlogin
+ # * --pipepart (@Global::cat_prepends)
+ # * --tee (@Global::cat_prepends)
+ # * --pipe
+ # * --tmux
+ # The ordering of the wrapping is important:
+ # * --nice/--cat/--fifo should be done on the remote machine
+ # * --pipepart/--pipe should be done on the local machine inside --tmux
+ # Uses:
+ # @opt::shellquote
+ # $opt::nice
+ # $Global::shell
+ # $opt::cat
+ # $opt::fifo
+ # @Global::cat_prepends
+ # $opt::pipe
+ # $opt::tmux
+ # Returns:
+ # $self->{'wrapped'} = the command wrapped with the above
+ my $self = shift;
+ if(not defined $self->{'wrapped'}) {
+ my $command = $self->replaced();
+ # Bug in Bash and Ksh when running multiline aliases
+ # This will force them to run correctly, but will fail in
+ # tcsh so we do not do it.
+ # $command .= "\n\n";
+ if(@opt::shellquote) {
+ # Quote one time for each --shellquote
+ my $c = $command;
+ for(@opt::shellquote) {
+ $c = ::Q($c);
+ }
+ # Prepend "echo" (it is written in perl because
+ # quoting '-e' causes problems in some versions and
+ # csh's version does something wrong)
+ $command = q(perl -e '$,=" "; print "@ARGV\n";' -- ) . ::Q($c);
+ }
+ if($Global::parallel_env) {
+ # If $PARALLEL_ENV set, put that in front of the command
+ # Used for env_parallel.*
+ if($Global::shell =~ /zsh/) {
+ # The extra 'eval' will make aliases work, too
+ $command = $Global::parallel_env."\n".
+ "eval ".::Q($command);
+ } else {
+ $command = $Global::parallel_env."\n".$command;
+ }
+ }
+ if($opt::cat) {
+ # In '--cat' and '--fifo' {} == $PARALLEL_TMP.
+ # This is to make it possible to compute $PARALLEL_TMP on
+ # the fly when running remotely.
+ # $ENV{PARALLEL_TMP} is set in the remote wrapper before
+ # the command is run.
+ #
+ # Prepend 'cat > $PARALLEL_TMP;'
+ # Append 'unlink $PARALLEL_TMP without affecting $?'
+ $command =
+ 'cat > "$PARALLEL_TMP";'.
+ $command.";". postpone_exit_and_cleanup().
+ '"$PARALLEL_TMP"';
+ } elsif($opt::fifo) {
+ # Prepend fifo-wrapper. In essence:
+ # mkfifo {}
+ # ( $command ) &
+ # # $command must read {}, otherwise this 'cat' will block
+ # cat > {};
+ # wait; rm {}
+ # without affecting $?
+ $command = fifo_wrap(). " ".
+ $Global::shell. " ". ::Q($command). ' "$PARALLEL_TMP"'. ';';
+ }
+ # Wrap with ssh + transferring of files
+ $command = $self->sshlogin_wrap($command);
+ if(@Global::cat_prepends) {
+ # --pipepart: prepend:
+ # < /tmp/foo perl -e 'while(@ARGV) {
+ # sysseek(STDIN,shift,0) || die; $left = shift;
+ # while($read = sysread(STDIN,$buf, ($left > 60800 ? 60800 : $left))){
+ # $left -= $read; syswrite(STDOUT,$buf);
+ # }
+ # }' 0 0 0 11 |
+ #
+ # --pipepart --tee: prepend:
+ # < dash-a-file
+ #
+ # --pipe --tee: wrap:
+ # (rm fifo; ... ) < fifo
+ #
+ # --pipe --shard X:
+ # (rm fifo; ... ) < fifo
+ $command = (shift @Global::cat_prepends). "($command)".
+ (shift @Global::cat_appends);
+ } elsif($opt::pipe and not $opt::roundrobin) {
+ # Wrap with EOF-detector to avoid starting $command if EOF.
+ $command = empty_input_wrapper($command);
+ }
+ if($opt::tmux) {
+ # Wrap command with 'tmux'
+ $command = $self->tmux_wrap($command);
+ }
+ if($Global::cshell
+ and
+ length $command > 499) {
+ # csh does not like words longer than 1000 (499 quoted)
+ # $command = "perl -e '".base64_zip_eval()."' ".
+ # join" ",string_zip_base64(
+ # 'exec "'.::perl_quote_scalar($command).'"');
+ $command = base64_wrap("exec \"$Global::shell\",'-c',\"".
+ ::perl_quote_scalar($command).'"');
+ }
+ $self->{'wrapped'} = $command;
+ }
+ return $self->{'wrapped'};
+}
+
+sub set_sshlogin($$) {
+ my $self = shift;
+ my $sshlogin = shift;
+ $self->{'sshlogin'} = $sshlogin;
+ delete $self->{'sshlogin_wrap'}; # If sshlogin is changed the wrap is wrong
+ delete $self->{'wrapped'};
+
+ if($opt::sqlworker) {
+ # Identify worker as --sqlworker often runs on different machines
+ # If local: Use hostname
+ my $host = $sshlogin->local() ? ::hostname() : $sshlogin->host();
+ $Global::sql->update("SET Host = ? WHERE Seq = ".$self->seq(), $host);
+ }
+}
+
+sub sshlogin($) {
+ my $self = shift;
+ return $self->{'sshlogin'};
+}
+
+sub string_base64($) {
+ # Base64 encode strings into 1000 byte blocks.
+ # 1000 bytes is the largest word size csh supports
+ # Input:
+ # @strings = to be encoded
+ # Returns:
+ # @base64 = list of 1000 byte blocks
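+ # Example (illustrative): 2500 input bytes base64-encode to 3336
+ # chars, which unpack("(A1000)*",...) splits into 4 blocks
+ # (1000+1000+1000+336)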
+ $Global::use{"MIME::Base64"} ||= eval "use MIME::Base64; 1;";
+ my @base64 = unpack("(A1000)*",encode_base64((join"",@_),""));
+ return @base64;
+}
+
+sub string_zip_base64($) {
+ # Pipe string through 'bzip2 -9' and base64 encode it into 1000
+ # byte blocks.
+ # 1000 bytes is the largest word size csh supports
+ # Zipping will make exporting big environments work, too
+ # Input:
+ # @strings = to be encoded
+ # Returns:
+ # @base64 = list of 1000 byte blocks
+ my($zipin_fh, $zipout_fh,@base64);
+ ::open3($zipin_fh,$zipout_fh,">&STDERR","bzip2 -9");
+ if(fork) {
+ close $zipin_fh;
+ $Global::use{"MIME::Base64"} ||= eval "use MIME::Base64; 1;";
+ # Split base64 encoded into 1000 byte blocks
+ @base64 = unpack("(A1000)*",encode_base64((join"",<$zipout_fh>),""));
+ close $zipout_fh;
+ } else {
+ close $zipout_fh;
+ print $zipin_fh @_;
+ close $zipin_fh;
+ exit;
+ }
+ ::debug("base64","Orig:@_\nAs bzip2 base64:@base64\n");
+ return @base64;
+}
+
+sub base64_zip_eval() {
+ # Script that:
+ # * reads base64 strings from @ARGV
+ # * decodes them
+ # * pipes through 'bzip2 -dc'
+ # * evals the result
+ # Reverse of string_zip_base64 + eval
+ # Will be wrapped in ' so single quote is forbidden
+ # Returns:
+ # $script = 1-liner for perl -e
+ my $script = ::spacefree(0,q{
+ @GNU_Parallel = split /_/, "use_IPC::Open3;_use_MIME::Base64";
+ eval"@GNU_Parallel";
+ $chld = $SIG{CHLD};
+ $SIG{CHLD} = "IGNORE";
+ # Search for bzip2. Not found => use default path
+ my $zip = (grep { -x $_ } "/usr/local/bin/bzip2")[0] || "bzip2";
+ # $in = stdin on $zip, $out = stdout from $zip
+ # Forget my() to save chars for csh
+ # my($in, $out,$eval);
+ open3($in,$out,">&STDERR",$zip,"-dc");
+ if(my $perlpid = fork) {
+ close $in;
+ $eval = join "", <$out>;
+ close $out;
+ } else {
+ close $out;
+ # Pipe decoded base64 into 'bzip2 -dc'
+ print $in (decode_base64(join"",@ARGV));
+ close $in;
+ exit;
+ }
+ wait;
+ $SIG{CHLD} = $chld;
+ eval $eval;
+ });
+ ::debug("base64",$script,"\n");
+ return $script;
+}
+
+sub base64_wrap($) {
+ # base64 encode Perl code
+ # Split it into chunks of < 1000 bytes
+ # Prepend it with a decoder that eval's it
+ # Input:
+ # $eval_string = Perl code to run
+ # Returns:
+ # $shell_command = shell command that runs $eval_string
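+ # Example shape (illustrative):
+ #   base64_wrap('print "hi"') =>
+ #     perl -e '<decoder>' <base64-chunk> <base64-chunk> ...
+ # where each chunk is at most 1000 chars (bzip2-compressed, base64)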
+ my $eval_string = shift;
+ return
+ "perl -e ".
+ ::Q(base64_zip_eval())." ".
+ join" ",::shell_quote(string_zip_base64($eval_string));
+}
+
+sub base64_eval($) {
+ # Script that:
+ # * reads base64 strings from @ARGV
+ # * decodes them
+ # * evals the result
+ # Reverse of string_base64 + eval
+ # Will be wrapped in ' so single quote is forbidden.
+ # Spaces are stripped so spaces cannot be significant.
+ # The funny 'use IPC::Open3'-syntax is to avoid spaces and
+ # to make it clear that this is a GNU Parallel command
+ # when looking at the process table.
+ # Returns:
+ # $script = 1-liner for perl -e
+ my $script = ::spacefree(0,q{
+ @GNU_Parallel=("use","IPC::Open3;","use","MIME::Base64");
+ eval "@GNU_Parallel";
+ my $eval = decode_base64(join"",@ARGV);
+ eval $eval;
+ });
+ ::debug("base64",$script,"\n");
+ return $script;
+}
+
+sub sshlogin_wrap($) {
+ # Wrap the command with the commands needed to run remotely
+ # Input:
+ # $command = command to run
+ # Returns:
+ # $self->{'sshlogin_wrap'} = command wrapped with ssh+transfer commands
+ sub monitor_parent_sshd_script {
+ # This script is to solve the problem of
+ # * not mixing STDERR and STDOUT
+ # * terminating with ctrl-c
+ # If its parent is ssh: all good
+ # If its parent is init(1): ssh died, so kill children
+ my $monitor_parent_sshd_script;
+
+ if(not $monitor_parent_sshd_script) {
+ $monitor_parent_sshd_script =
+ # This will be packed in ', so only use "
+ ::spacefree
+ (0,'$shell = "'.($ENV{'PARALLEL_SHELL'} || '$ENV{SHELL}').'";'.
+ '$tmpdir = $ENV{"TMPDIR"} || "'.
+ ::perl_quote_scalar($ENV{'PARALLEL_REMOTE_TMPDIR'}).'";'.
+ '$nice = '.$opt::nice.';'.
+ '$termseq = "'.$opt::termseq.'";'.
+ # }
+ q{
+ # Check that $tmpdir is writable
+ -w $tmpdir ||
+ die("$tmpdir\040is\040not\040writable.".
+ "\040Set\040PARALLEL_REMOTE_TMPDIR");
+ # Set $PARALLEL_TMP to a non-existent file name in $TMPDIR
+ do {
+ $ENV{PARALLEL_TMP} = $tmpdir."/par".
+ join"", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $ENV{PARALLEL_TMP});
+ # Set $script to a non-existent file name in $TMPDIR
+ do {
+ $script = $tmpdir."/parallel\043$ENV{PARALLEL_SEQ}_".
+ join"", map { (0..9,"a".."z","A".."Z")[rand(62)] } (1..5);
+ } while(-e $script);
+ # Create a script from the hex code
+ # that removes itself and runs the commands
+ open($fh,">",$script) || die;
+ # \040 = space - but we remove spaces in the script
+ # ' needed due to rc-shell
+ print($fh("rm\040\'$script\'\n",$bashfunc.$cmd));
+ close $fh;
+ my $parent = getppid;
+ my $done = 0;
+ $SIG{CHLD} = sub { $done = 1; };
+ $pid = fork;
+ unless($pid) {
+ # Make own process group to be able to kill/HUP it later
+ eval { setpgrp };
+ # Set nice value
+ eval { setpriority(0,0,$nice) };
+ # Run the script
+ exec($shell,$script);
+ die("exec\040failed: $!");
+ }
+ while((not $done) and (getppid == $parent)) {
+ # Parent pid is not changed, so sshd is alive
+ # Exponential sleep up to 1 sec
+ $s = $s < 1 ? 0.001 + $s * 1.03 : $s;
+ select(undef, undef, undef, $s);
+ }
+ if(not $done) {
+ # sshd is dead: User pressed Ctrl-C
+ # Kill as per --termseq
+ my @term_seq = split/,/,$termseq;
+ if(not @term_seq) {
+ @term_seq = ("TERM",200,"TERM",100,"TERM",50,"KILL",25);
+ }
+ while(@term_seq && kill(0,-$pid)) {
+ kill(shift @term_seq, -$pid);
+ select(undef, undef, undef, (shift @term_seq)/1000);
+ }
+ }
+ wait;
+ exit ($?&127 ? 128+($?&127) : 1+$?>>8)
+ });
+ }
+ return $monitor_parent_sshd_script;
+ }
+
+ sub vars_to_export {
+ # Uses:
+ # @opt::env
+ my @vars = ("parallel_bash_environment");
+ for my $varstring (@opt::env) {
+ # Split up --env VAR1,VAR2
+ push @vars, split /,/, $varstring;
+ }
+ for (@vars) {
+ if(-r $_ and not -d) {
+ # Read as environment definition bug #44041
+ # TODO parse this
+ $Global::envdef = ::slurp_or_exit($_);
+ }
+ }
+ if(grep { /^_$/ } @vars) {
+ local $/ = "\n";
+ # --env _
+ # Include all vars that are not in a clean environment
+ if(open(my $vars_fh, "<", $Global::config_dir . "/ignored_vars")) {
+ my @ignore = <$vars_fh>;
+ chomp @ignore;
+ my %ignore;
+ @ignore{@ignore} = @ignore;
+ close $vars_fh;
+ push @vars, grep { not defined $ignore{$_} } keys %ENV;
+ @vars = grep { not /^_$/ } @vars;
+ } else {
+ ::error(255,"Run '$Global::progname --record-env' ".
+ "in a clean environment first.");
+ }
+ }
+ # Duplicate vars as BASH functions to include post-shellshock functions (v1+v2)
+ # So --env myfunc should look for BASH_FUNC_myfunc() and BASH_FUNC_myfunc%%
+
+ push(@vars, "PARALLEL_PID", "PARALLEL_SEQ",
+ "PARALLEL_SSHLOGIN", "PARALLEL_SSHHOST",
+ "PARALLEL_HOSTGROUPS", "PARALLEL_ARGHOSTGROUPS",
+ "PARALLEL_JOBSLOT", $opt::process_slot_var,
+ map { ("BASH_FUNC_$_()", "BASH_FUNC_$_%%") } @vars);
+ # Keep only defined variables
+ return grep { defined($ENV{$_}) } @vars;
+ }
+
+ sub env_as_eval {
+ # Returns:
+ #   ($csh_friendly, $envset, $bashfuncset)
+ #   $envset = '$ENV{"..."}="...";...' - sets %ENV when eval'ed
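+ # Example (illustrative): for FOO=bar, $envset contains
+ #   $ENV{"FOO"}="bar";
+ # which recreates the variable when eval'ed by the remote perl wrapper.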
+ my @vars = vars_to_export();
+ my $csh_friendly = not grep { /\n/ } @ENV{@vars};
+ my @bash_functions = grep { substr($ENV{$_},0,4) eq "() {" } @vars;
+ my @non_functions = (grep { !/PARALLEL_ENV/ }
+ grep { substr($ENV{$_},0,4) ne "() {" } @vars);
+
+ # eval of @envset will set %ENV
+ my $envset = join"", map {
+ '$ENV{"'.::perl_quote_scalar($_).'"}="'.
+ ::perl_quote_scalar($ENV{$_}).'";'; } @non_functions;
+
+ # Running @bashfunc on the command line will set the functions
+ my @bashfunc = map {
+ my $v=$_;
+ s/BASH_FUNC_(.*)(\(\)|%%)/$1/;
+ "$_$ENV{$v};\nexport -f $_ 2> /dev/null;\n" } @bash_functions;
+ # eval $bashfuncset will set $bashfunc
+ my $bashfuncset;
+ if(@bashfunc) {
+ # Functions are not supported for all shells
+ if($Global::shell !~ m:(^|/)(ash|bash|rbash|zsh|rzsh|dash|ksh):) {
+ ::warning("Shell functions may not be supported in $Global::shell.");
+ }
+ $bashfuncset =
+ '@bash_functions=qw('."@bash_functions".");".
+ ::spacefree(1,'$shell="'.($ENV{'PARALLEL_SHELL'} || '$ENV{SHELL}').'";'.q{
+ if($shell=~/csh/) {
+ print STDERR "CSH/TCSH DO NOT SUPPORT newlines IN VARIABLES/FUNCTIONS. Unset @bash_functions\n";
+ exec "false";
+ }
+ }).
+ "\n".'$bashfunc = "'.::perl_quote_scalar("@bashfunc").'";';
+ } else {
+ $bashfuncset = '$bashfunc = "";'
+ }
+ if($ENV{'parallel_bash_environment'}) {
+ $bashfuncset .= '$bashfunc .= "eval\ \"\$parallel_bash_environment\"\;";';
+ }
+ ::debug("base64",$envset,$bashfuncset,"\n");
+ return $csh_friendly,$envset,$bashfuncset;
+ }
+
+ my $self = shift;
+ my $command = shift;
+ # TODO test that *sh -c 'parallel --env' uses *sh
+ if(not defined $self->{'sshlogin_wrap'}{$command}) {
+ my $sshlogin = $self->sshlogin();
+ $ENV{'PARALLEL_SEQ'} = $self->seq();
+ $ENV{$opt::process_slot_var} = -1 +
+ ($ENV{'PARALLEL_JOBSLOT'} = $self->slot());
+ $ENV{'PARALLEL_SSHLOGIN'} = $sshlogin->string();
+ $ENV{'PARALLEL_SSHHOST'} = $sshlogin->host();
+ if ($opt::hostgroups) {
+ $ENV{'PARALLEL_HOSTGROUPS'} = join '+', $sshlogin->hostgroups();
+ $ENV{'PARALLEL_ARGHOSTGROUPS'} = join '+', $self->hostgroups();
+ }
+ $ENV{'PARALLEL_PID'} = $$;
+ if($sshlogin->local()) {
+ if($opt::workdir) {
+ # Create workdir if needed. Then cd to it.
+ my $wd = $self->workdir();
+ if($opt::workdir eq "." or $opt::workdir eq "...") {
+ # If $wd does not start with '/': Prepend $HOME
+ $wd =~ s:^([^/]):$ENV{'HOME'}/$1:;
+ }
+ ::mkdir_or_die($wd);
+ my $post = "";
+ if($opt::workdir eq "...") {
+ $post = ";".exitstatuswrapper("rm -rf ".::Q($wd).";");
+ }
+ $command = "cd ".::Q($wd)." || exit 255; " .
+ $command . $post;
+ }
+ if(@opt::env) {
+ # Prepend with environment setter, which sets functions in zsh
+ my ($csh_friendly,$envset,$bashfuncset) = env_as_eval();
+ my $perl_code = $envset.$bashfuncset.
+ '@ARGV="'.::perl_quote_scalar($command).'";'.
+ "exec\"$Global::shell\",\"-c\",\(\$bashfunc.\"\@ARGV\"\)\;die\"exec:\$\!\\n\"\;";
+ if(length $perl_code > 999
+ or
+ not $csh_friendly
+ or
+ $command =~ /\n/) {
+ # csh does not deal well with > 1000 chars in one word
+ # csh does not deal well with $ENV with \n
+ $self->{'sshlogin_wrap'}{$command} = base64_wrap($perl_code);
+ } else {
+ $self->{'sshlogin_wrap'}{$command} = "perl -e ".::Q($perl_code);
+ }
+ } else {
+ $self->{'sshlogin_wrap'}{$command} = $command;
+ }
+ } else {
+ my $pwd = "";
+ if($opt::workdir) {
+ # Create remote workdir if needed. Then cd to it.
+ my $wd = ::pQ($self->workdir());
+ $pwd = qq{system("mkdir","-p","--","$wd"); chdir "$wd" ||}.
+ qq{print(STDERR "parallel: Cannot chdir to $wd\\n") &&}.
+ qq{exit 255;};
+ }
+ my ($csh_friendly,$envset,$bashfuncset) = env_as_eval();
+ my $cmd = $command;
+ # q// does not quote \, so we must do that
+ $cmd =~ s/\\/\\\\/g;
+
+ my $remote_command = $sshlogin->hexwrap
+ ($pwd.$envset.$bashfuncset.'$cmd='."q\0".$cmd."\0;".
+ monitor_parent_sshd_script());
+ my ($pre,$post,$cleanup)=("","","");
+ # --transfer
+ $pre .= $self->sshtransfer();
+ # --return
+ $post .= $self->sshreturn();
+ # --cleanup
+ $post .= $self->sshcleanup();
+ if($post) {
+ # We need to save the exit status of the job
+ $post = exitstatuswrapper($post);
+ }
+ $self->{'sshlogin_wrap'}{$command} =
+ ($pre
+ . $sshlogin->wrap($remote_command)
+ . ";"
+ . $post);
+ }
+ }
+ return $self->{'sshlogin_wrap'}{$command};
+}
+
+sub fill_templates($) {
+ # Replace replacement strings in template(s)
+ # Returns:
+ # @templates - File names of replaced templates
+ my $self = shift;
+
+ if(%opt::template) {
+ my @template_name =
+ map { $self->{'commandline'}->replace_placeholders([$_],0,0) }
+ @{$self->{'commandline'}{'template_names'}};
+ ::debug("tmpl","Names: @template_name\n");
+ for(my $i = 0; $i <= $#template_name; $i++) {
+ ::write_or_exit
+ ($template_name[$i],
+ $self->{'commandline'}->
+ replace_placeholders([$self->{'commandline'}
+ {'template_contents'}[$i]],0,0));
+ }
+ if($opt::cleanup) {
+ $self->add_rm(@template_name);
+ }
+ }
+}
+
+sub filter($) {
+ # Replace replacement strings in filter(s) and evaluate them
+ # Returns:
+ # $run - 1=yes, undef=no
+ my $self = shift;
+ my $run = 1;
+ if(@opt::filter) {
+ for my $eval ($self->{'commandline'}->
+ replace_placeholders(\@opt::filter,0,0)) {
+ $run &&= eval $eval;
+ }
+ $self->{'commandline'}{'skip'} ||= not $run;
+ }
+ return $run;
+}
+
+sub transfer($) {
+ # Files to transfer
+ # Non-quoted and with {...} substituted
+ # Returns:
+ # @transfer - File names of files to transfer
+ my $self = shift;
+
+ my $transfersize = 0;
+ my @transfer = $self->{'commandline'}->
+ replace_placeholders($self->{'commandline'}{'transfer_files'},0,0);
+ for(@transfer) {
+ # filesize
+ if(-e $_) {
+ $transfersize += (stat($_))[7];
+ }
+ }
+ $self->add_transfersize($transfersize);
+ return @transfer;
+}
+
+sub transfersize($) {
+ my $self = shift;
+ return $self->{'transfersize'};
+}
+
+sub add_transfersize($) {
+ my $self = shift;
+ my $transfersize = shift;
+ $self->{'transfersize'} += $transfersize;
+ $opt::sqlworker and
+ $Global::sql->update("SET Send = ? WHERE Seq = ".$self->seq(),
+ $self->{'transfersize'});
+}
+
+sub sshtransfer($) {
+ # Returns for each transfer file:
+ # rsync $file remote:$workdir
+ my $self = shift;
+ my @pre;
+ my $sshlogin = $self->sshlogin();
+ my $workdir = $self->workdir();
+ for my $file ($self->transfer()) {
+ push @pre, $sshlogin->rsync_transfer_cmd($file,$workdir).";";
+ }
+ return join("",@pre);
+}
+
+sub return($) {
+ # Files to return
+ # Non-quoted and with {...} substituted
+ # Returns:
+ # @non_quoted_filenames
+ my $self = shift;
+ return $self->{'commandline'}->
+ replace_placeholders($self->{'commandline'}{'return_files'},0,0);
+}
+
+sub returnsize($) {
+ # This is called after the job has finished
+ # Returns:
+ # $number_of_bytes transferred in return
+ my $self = shift;
+ for my $file ($self->return()) {
+ if(-e $file) {
+ $self->{'returnsize'} += (stat($file))[7];
+ }
+ }
+ return $self->{'returnsize'};
+}
+
+sub add_returnsize($) {
+ my $self = shift;
+ my $returnsize = shift;
+ $self->{'returnsize'} += $returnsize;
+ $opt::sqlworker and
+ $Global::sql->update("SET Receive = ? WHERE Seq = ".$self->seq(),
+ $self->{'returnsize'});
+}
+
+sub sshreturn($) {
+ # Returns for each return-file:
+ # rsync remote:$workdir/$file .
+ my $self = shift;
+ my $sshlogin = $self->sshlogin();
+ my $pre = "";
+ for my $file ($self->return()) {
+ $file =~ s:^\./::g; # Remove ./ if any
+ my $relpath = ($file !~ m:^/:) ||
+ ($file =~ m:/\./:); # Is the path relative or /./?
+ my $cd = "";
+ my $wd = "";
+ if($relpath) {
+ # rsync -avR /foo/./bar/baz.c remote:/tmp/
+ # == (on old systems)
+ # rsync -avR --rsync-path="cd /foo; rsync" remote:bar/baz.c /tmp/
+ $wd = ::shell_quote_file($self->workdir()."/");
+ }
+ # Only load File::Basename if actually needed
+ $Global::use{"File::Basename"} ||= eval "use File::Basename; 1;";
+ # dir/./file means relative to dir, so remove dir on remote
+ $file =~ m:(.*)/\./:;
+ my $basedir = $1 ? ::shell_quote_file($1."/") : "";
+ my $nobasedir = $file;
+ $nobasedir =~ s:.*/\./::;
+ $cd = ::shell_quote_file(::dirname($nobasedir));
+ my $rsync_cd = '--rsync-path='.::Q("cd $wd$cd; rsync");
+ my $basename = ::Q(::shell_quote_file(::basename($file)));
+ # --return
+ # mkdir -p /home/tange/dir/subdir/;
+ # rsync (--protocol 30) -rlDzR
+ # --rsync-path="cd /home/tange/dir/subdir/; rsync"
+ # server:file.gz /home/tange/dir/subdir/
+ $pre .= "mkdir -p $basedir$cd" . " && " .
+ $sshlogin->rsync(). " $rsync_cd -- ".$sshlogin->host().':'.
+ $basename . " ".$basedir.$cd.";";
+ }
+ return $pre;
+}
+
+sub sshcleanup($) {
+ # Return the sshcommand needed to remove the file
+ # Returns:
+ # ssh command needed to remove files from sshlogin
+ my $self = shift;
+ my $sshlogin = $self->sshlogin();
+ my $workdir = $self->workdir();
+ my $cleancmd = "";
+
+ for my $file ($self->remote_cleanup()) {
+ my @subworkdirs = parentdirs_of($file);
+ $cleancmd .= $sshlogin->cleanup_cmd($file,$workdir).";";
+ }
+ if(defined $opt::workdir and $opt::workdir eq "...") {
+ $cleancmd .= $sshlogin->wrap("rm -rf " . ::Q($workdir).';');
+ }
+ return $cleancmd;
+}
+
+sub remote_cleanup($) {
+ # Returns:
+ # Files to remove at cleanup
+ my $self = shift;
+ if($opt::cleanup) {
+ my @transfer = $self->transfer();
+ my @return = $self->return();
+ return (@transfer,@return);
+ } else {
+ return ();
+ }
+}
+
+sub exitstatuswrapper(@) {
+ # Input:
+ # @shellcode = shell code to execute
+ # Returns:
+ # shell script that saves the current status, executes @shellcode,
+ # and exits with the saved status
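+ # Example (illustrative, POSIX shell):
+ #   exitstatuswrapper("rm -f tmp;") =>
+ #     _EXIT_status=$?; rm -f tmp;exit $_EXIT_status;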
+ if($Global::cshell) {
+ return ('set _EXIT_status=$status; ' .
+ join(" ",@_).
+ 'exit $_EXIT_status;');
+ } elsif($Global::fish) {
+ return ('export _EXIT_status=$status; ' .
+ join(" ",@_).
+ 'exit $_EXIT_status;');
+ } else {
+ return ('_EXIT_status=$?; ' .
+ join(" ",@_).
+ 'exit $_EXIT_status;');
+ }
+}
+
+sub workdir($) {
+ # Returns:
+ # the workdir on a remote machine
+ my $self = shift;
+ if(not defined $self->{'workdir'}) {
+ my $workdir;
+ if(defined $opt::workdir) {
+ if($opt::workdir eq ".") {
+ # . means current dir
+ my $home = $ENV{'HOME'};
+ eval 'use Cwd';
+ my $cwd = cwd();
+ $workdir = $cwd;
+ if($home) {
+ # If homedir exists: remove the homedir from
+ # workdir if cwd starts with homedir
+ # E.g. /home/foo/my/dir => my/dir
+ # E.g. /tmp/my/dir => /tmp/my/dir
+ my ($home_dev, $home_ino) = (stat($home))[0,1];
+ my $parent = "";
+ my @dir_parts = split(m:/:,$cwd);
+ my $part;
+ while(defined ($part = shift @dir_parts)) {
+ $part eq "" and next;
+ $parent .= "/".$part;
+ my ($parent_dev, $parent_ino) = (stat($parent))[0,1];
+ if($parent_dev == $home_dev and $parent_ino == $home_ino) {
+ # dev and ino is the same: We found the homedir.
+ $workdir = join("/",@dir_parts);
+ last;
+ }
+ }
+ }
+ if($workdir eq "") {
+ $workdir = ".";
+ }
+ } elsif($opt::workdir eq "...") {
+ $workdir = ".parallel/tmp/" . ::hostname() . "-" . $$
+ . "-" . $self->seq();
+ } else {
+ $workdir = $self->{'commandline'}->
+ replace_placeholders([$opt::workdir],0,0);
+ #$workdir = $opt::workdir;
+ # Rsync treats /./ specially. We do not want that
+ $workdir =~ s:/\./:/:g; # Remove /./
+ $workdir =~ s:(.)/+$:$1:; # Remove ending / if any
+ $workdir =~ s:^\./::g; # Remove starting ./ if any
+ }
+ } else {
+ $workdir = ".";
+ }
+ $self->{'workdir'} = $workdir;
+ }
+ return $self->{'workdir'};
+}
+
+sub parentdirs_of($) {
+ # Return:
+ # all parentdirs except . of this dir or file - sorted desc by length
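+ # Example (illustrative):
+ #   parentdirs_of("a/b/c/file") => ("a/b/c", "a/b", "a")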
+ my $d = shift;
+ my @parents = ();
+ while($d =~ s:/[^/]+$::) {
+ if($d ne ".") {
+ push @parents, $d;
+ }
+ }
+ return @parents;
+}
+
+sub start($) {
+ # Setup STDOUT and STDERR for a job and start it.
+ # Returns:
+ # job-object or undef if job not to run
+
+ sub open3_setpgrp_internal {
+ # Run open3+setpgrp followed by the command
+ # Input:
+ # $stdin_fh = Filehandle to use as STDIN
+ # $stdout_fh = Filehandle to use as STDOUT
+ # $stderr_fh = Filehandle to use as STDERR
+ # $command = Command to run
+ # Returns:
+ # $pid = Process group of job started
+ my ($stdin_fh,$stdout_fh,$stderr_fh,$command) = @_;
+ my $pid;
+ local (*OUT,*ERR);
+ open OUT, '>&', $stdout_fh or ::die_bug("Can't dup STDOUT: $!");
+ open ERR, '>&', $stderr_fh or ::die_bug("Can't dup STDERR: $!");
+ # The eval is needed to catch exception from open3
+ eval {
+ if(not $pid = ::open3($stdin_fh, ">&OUT", ">&ERR", "-")) {
+ # Each child gets its own process group to make it safe to killall
+ eval{ setpgrp(0,0) };
+ eval{ setpriority(0,0,$opt::nice) };
+ exec($Global::shell,"-c",$command)
+ || ::die_bug("open3-$stdin_fh ".substr($command,0,200));
+ }
+ };
+ return $pid;
+ }
+
+ sub open3_setpgrp_external {
+ # Run open3 on $command wrapped with a perl script doing setpgrp
+ # Works on systems that do not support open3(,,,"-")
+ # Input:
+ # $stdin_fh = Filehandle to use as STDIN
+ # $stdout_fh = Filehandle to use as STDOUT
+ # $stderr_fh = Filehandle to use as STDERR
+ # $command = Command to run
+ # Returns:
+ # $pid = Process group of job started
+ my ($stdin_fh,$stdout_fh,$stderr_fh,$command) = @_;
+ local (*OUT,*ERR);
+ open OUT, '>&', $stdout_fh or ::die_bug("Can't dup STDOUT: $!");
+ open ERR, '>&', $stderr_fh or ::die_bug("Can't dup STDERR: $!");
+
+ my $pid;
+ my @setpgrp_wrap =
+ ('perl','-e',
+ "eval\{setpgrp\}\;eval\{setpriority\(0,0,$opt::nice\)\}\;".
+ "exec '$Global::shell', '-c', \@ARGV");
+ # The eval is needed to catch exception from open3
+ eval {
+ $pid = ::open3($stdin_fh, ">&OUT", ">&ERR", @setpgrp_wrap, $command)
+ || ::die_bug("open3-$stdin_fh");
+ 1;
+ };
+ return $pid;
+ }
+
+ sub redefine_open3_setpgrp {
+ my $setgprp_cache = shift;
+ # Select and run open3_setpgrp_internal/open3_setpgrp_external
+ no warnings 'redefine';
+ my ($outfh,$name) = ::tmpfile(SUFFIX => ".tst");
+ # Test to see if open3(x,x,x,"-") is fully supported
+ # Can an exported bash function be called via open3?
+ my $script = 'if($pid=::open3($i,$o,$e,"-")) { wait; } '.
+ 'else { exec("bash","-c","testfun && true"); }';
+ my $bash =
+ ::shell_quote_scalar_default(
+ "testfun() { rm $name; }; export -f testfun; ".
+ "perl -MIPC::Open3 -e ".
+ ::Q(::Q($script))
+ );
+ my $redefine_eval;
+ # Redirect STDERR temporarily,
+ # so errors on MacOS X are ignored.
+ open my $saveerr, ">&STDERR";
+ open STDERR, '>', "/dev/null";
+ # Run the test
+ ::debug("init",qq{bash -c $bash 2>/dev/null});
+ qx{ bash -c $bash 2>/dev/null };
+ open STDERR, ">&", $saveerr;
+
+ if(-e $name) {
+ # Does not support open3(x,x,x,"-")
+ # or does not have bash:
+ # Use (slow) external version
+ unlink($name);
+ $redefine_eval = '*open3_setpgrp = \&open3_setpgrp_external';
+ ::debug("init","open3_setpgrp_external chosen\n");
+ } else {
+ # Supports open3(x,x,x,"-")
+ # This is 0.5 ms faster to run
+ $redefine_eval = '*open3_setpgrp = \&open3_setpgrp_internal';
+ ::debug("init","open3_setpgrp_internal chosen\n");
+ }
+ if(open(my $fh, ">", $setgprp_cache)) {
+ print $fh $redefine_eval;
+ close $fh;
+ } else {
+ ::debug("init","Cannot write to $setgprp_cache");
+ }
+ eval $redefine_eval;
+ }
+
+ sub open3_setpgrp {
+ my $setgprp_cache = $Global::cache_dir . "/tmp/sshlogin/" .
+ ::hostname() . "/setpgrp_func";
+ sub read_cache() {
+ -e $setgprp_cache || return 0;
+ local $/ = undef;
+ open(my $fh, "<", $setgprp_cache) || return 0;
+ eval <$fh> || return 0;
+ close $fh;
+ return 1;
+ }
+ if(not read_cache()) {
+ redefine_open3_setpgrp($setgprp_cache);
+ }
+ # The sub is now redefined. Call it
+ return open3_setpgrp(@_);
+ }
+
+ my $job = shift;
+ # Get the shell command to be executed (possibly with ssh in front).
+ my $command = $job->wrapped();
+ my $pid;
+
+ if($Global::interactive or $Global::stderr_verbose) {
+ $job->interactive_start();
+ }
+ # Must be run after $job->interactive_start():
+ # $job->interactive_start() may call $job->skip()
+ if($job->{'commandline'}{'skip'}
+ or
+ not $job->filter()) {
+ # $job->skip() was called or job filtered
+ $command = "true";
+ }
+ $job->openoutputfiles();
+ $job->print_verbose_dryrun();
+ my($stdout_fh,$stderr_fh) = ($job->fh(1,"w"),$job->fh(2,"w"));
+ if($opt::dryrun or $opt::sqlmaster) { $command = "true"; }
+ $ENV{'PARALLEL_SEQ'} = $job->seq();
+ $ENV{'PARALLEL_PID'} = $$;
+ $ENV{$opt::process_slot_var} = -1 +
+ ($ENV{'PARALLEL_JOBSLOT'} = $job->slot());
+ $ENV{'PARALLEL_TMP'} = ::tmpname("par");
+ $job->add_rm($ENV{'PARALLEL_TMP'});
+ $job->fill_templates();
+ $ENV{'SSHPASS'} = $job->{'sshlogin'}->{'password'};
+ ::debug("run", $Global::total_running, " processes . Starting (",
+ $job->seq(), "): $command\n");
+ if($opt::pipe) {
+ my ($stdin_fh) = ::gensym();
+ $pid = open3_setpgrp($stdin_fh,$stdout_fh,$stderr_fh,$command);
+ if($opt::roundrobin and not $opt::keeporder) {
+ # --keep-order will make sure the order will be reproducible
+ ::set_fh_non_blocking($stdin_fh);
+ }
+ $job->set_fh(0,"w",$stdin_fh);
+ if($opt::tee or $opt::shard or $opt::bin) { $job->set_virgin(0); }
+ } elsif(($opt::tty or $opt::open_tty) and -c "/dev/tty" and
+ open(my $devtty_fh, "<", "/dev/tty")) {
+ # Give /dev/tty to the command if no one else is using it
+ # The eval is needed to catch exception from open3
+ local (*IN,*OUT,*ERR);
+ open OUT, '>&', $stdout_fh or ::die_bug("Can't dup STDOUT: $!");
+ open ERR, '>&', $stderr_fh or ::die_bug("Can't dup STDERR: $!");
+ *IN = $devtty_fh;
+ # The eval is needed to catch exception from open3
+ my @wrap = ('perl','-e',
+ "eval\{setpriority\(0,0,$opt::nice\)\}\;".
+ "exec '$Global::shell', '-c', \@ARGV");
+ eval {
+ $pid = ::open3("<&IN", ">&OUT", ">&ERR", @wrap, $command)
+ || ::die_bug("open3-/dev/tty");
+ 1;
+ };
+ close $devtty_fh;
+ $job->set_virgin(0);
+ } elsif($Global::semaphore) {
+ # Allow sem to read from stdin
+ $pid = open3_setpgrp("<&STDIN",$stdout_fh,$stderr_fh,$command);
+ $job->set_virgin(0);
+ } else {
+ $pid = open3_setpgrp(::gensym(),$stdout_fh,$stderr_fh,$command);
+ $job->set_virgin(0);
+ }
+ if($pid) {
+ # A job was started
+ $Global::total_running++;
+ $Global::total_started++;
+ $job->set_pid($pid);
+ $job->set_starttime();
+ $Global::running{$job->pid()} = $job;
+ if($opt::timeout) {
+ $Global::timeoutq->insert($job);
+ }
+ $Global::newest_job = $job;
+ $Global::newest_starttime = ::now();
+ return $job;
+ } else {
+ # No more processes
+ ::debug("run", "Cannot spawn more jobs.\n");
+ return undef;
+ }
+}
+
+sub interactive_start($) {
+ my $self = shift;
+ my $command = $self->wrapped();
+ if($Global::interactive) {
+ my $answer;
+ ::status_no_nl("$command ?...");
+ do{
+ my $tty_fh = ::open_or_exit("<","/dev/tty");
+ $answer = <$tty_fh>;
+ close $tty_fh;
+ # Sometime we get an empty string (not even \n)
+ # Do not know why, so let us just ignore it and try again
+ } while(length $answer < 1);
+ if (not ($answer =~ /^\s*y/i)) {
+ $self->{'commandline'}->skip();
+ }
+ } else {
+ print $Global::original_stderr "$command\n";
+ }
+}
+
+{
+ my $tmuxsocket;
+ my $qsocket;
+
+ sub tmux_wrap($) {
+ # Wrap command with tmux for session pPID
+ # Input:
+ # $actual_command = the actual command being run (incl ssh wrap)
+ my $self = shift;
+ my $actual_command = shift;
+ # Temporary file name. Used for fifo to communicate exit val
+ my $tmpfifo = ::tmpname("tmx");
+ $self->add_rm($tmpfifo);
+ if(length($tmpfifo) >=100) {
+ ::error(255,"tmux does not support sockets with path > 100.");
+ }
+ if($opt::tmuxpane) {
+ # Move the command into a pane in window 0
+ $actual_command = $ENV{'PARALLEL_TMUX'}.' joinp -t :0 ; '.
+ $ENV{'PARALLEL_TMUX'}.' select-layout -t :0 tiled ; '.
+ $actual_command;
+ }
+ my $visual_command = $self->replaced();
+ my $title = $visual_command;
+ if($visual_command =~ /\0/) {
+ ::error(255,"Command line contains NUL. ".
+ "tmux is confused by NUL.");
+ }
+ # ; causes problems
+ # ascii 194-245 annoys tmux
+ $title =~ tr/[\011-\016;\302-\365]/ /s;
+ $title = ::Q($title);
+
+ my $l_act = length($actual_command);
+ my $l_tit = length($title);
+ my $l_fifo = length($tmpfifo);
+ # The line to run contains 118 chars of extra code + the title 2x
+ my $l_tot = 2 * $l_tit + $l_act + $l_fifo;
+
+ my $quoted_space75 = ::Q(" ")x75;
+ while($l_tit < 1000 and
+ (
+ (890 < $l_tot and $l_tot < 1350)
+ or
+ (9250 < $l_tot and $l_tot < 9800)
+ )) {
+ # tmux blocks for certain lengths:
+ # 900 < title + command < 1200
+ # 9250 < title + command < 9800
+ # but only if title < 1000, so expand the title with 75 spaces
+ # The measured lengths are:
+ # 996 < (title + whole command) < 1127
+ # 9331 < (title + whole command) < 9636
+ $title .= $quoted_space75;
+ $l_tit = length($title);
+ $l_tot = 2 * $l_tit + $l_act + $l_fifo;
+ }
+
+ my $tmux;
+ $ENV{'PARALLEL_TMUX'} ||= "tmux";
+ if(not $tmuxsocket) {
+ $tmuxsocket = ::tmpname("tms");
+ $qsocket = ::Q($tmuxsocket);
+ ::debug("tmux", "Start: $ENV{'PARALLEL_TMUX'} -S $qsocket attach");
+ if($opt::fg) {
+ if(not fork) {
+ # Run tmux in the foreground
+ # Wait for the socket to appear
+ while (not -e $tmuxsocket) { }
+ `$ENV{'PARALLEL_TMUX'} -S $qsocket attach`;
+ exit;
+ }
+ }
+ ::status("See output with: $ENV{'PARALLEL_TMUX'} -S $qsocket attach");
+ }
+ $tmux = "sh -c ".::Q(
+ $ENV{'PARALLEL_TMUX'}.
+ " -S $qsocket new-session -s p$$ -d \"sleep .2\" >/dev/null 2>&1").";" .
+ $ENV{'PARALLEL_TMUX'}.
+ " -S $qsocket new-window -t p$$ -n $title";
+
+ ::debug("tmux", "title len:", $l_tit, " act ", $l_act, " max ",
+ $Limits::Command::line_max_len, " tot ",
+ $l_tot, "\n");
+ return "mkfifo ".::Q($tmpfifo)." && $tmux ".
+ # Run in tmux
+ ::Q
+ (
+ "(".$actual_command.');'.
+ # The triple print is needed - otherwise the testsuite fails
+ q[ perl -e 'while($t++<3){ print $ARGV[0],"\n" }' $?h/$status >> ].
+ ::Q($tmpfifo)."&".
+ "echo $title; echo \007Job finished at: `date`;sleep 10"
+ ).
+ # Run outside tmux
+ # Read a / separated line: 0h/2 for csh, 2/0 for bash.
+ # If csh the first will be 0h, so use the second as exit value.
+ # Otherwise just use the first value as exit value.
+ q{; exec perl -e '$/="/";$_=<>;$c=<>;unlink $ARGV; }.
+ q{/(\d+)h/ and exit($1);exit$c' }.::Q($tmpfifo);
+ }
+}
+
+sub is_already_in_results($) {
+ # Do we already have results for this job?
+ # Returns:
+ # $job_already_run = bool whether there is output for this or not
+ my $job = $_[0];
+ if($Global::csvsep) {
+ if($opt::joblog) {
+ # OK: the joblog can be used to see if the job was run
+ return 0
+ } else {
+ ::warning_once(
+ "--resume --results .csv/.tsv/.json is not supported yet\n");
+ # TODO read and parse the file
+ return 0
+ }
+ }
+ my $out = $job->{'commandline'}->results_out();
+ ::debug("run", "Test ${out}stdout", -e "${out}stdout", "\n");
+ return(-e $out."stdout" or -f $out);
+}
+
+sub is_already_in_joblog($) {
+ my $job = shift;
+ return vec($Global::job_already_run,$job->seq(),1);
+}
+
+sub set_job_in_joblog($) {
+ my $job = shift;
+ vec($Global::job_already_run,$job->seq(),1) = 1;
+}
+
+sub should_be_retried($) {
+ # Should this job be retried?
+ # Returns
+ # 0 - do not retry
+ # 1 - job queued for retry
+ my $self = shift;
+ if (not defined $opt::retries) { return 0; }
+ if(not $self->exitstatus() and not $self->exitsignal()) {
+ # Completed with success. If there is a recorded failure: forget it
+ $self->reset_failed_here();
+ return 0;
+ } else {
+ # The job failed. Should it be retried?
+ $self->add_failed_here();
+ my $retries = $self->{'commandline'}->
+ replace_placeholders([$opt::retries],0,0);
+ # 0 = Inf
+ if($retries == 0) { $retries = 2**31; }
+ # Ignore files already unlinked to avoid memory leak
+ $self->{'unlink'} = [ grep { -e $_ } @{$self->{'unlink'}} ];
+ map { -e $_ or delete $Global::unlink{$_} } keys %Global::unlink;
+ if($self->total_failed() == $retries) {
+ # This has been retried enough
+ return 0;
+ } else {
+ # This command should be retried
+ $self->set_endtime(undef);
+ $self->reset_exitstatus();
+ $Global::JobQueue->unget($self);
+ ::debug("run", "Retry ", $self->seq(), "\n");
+ return 1;
+ }
+ }
+}
+
+{
+ my (%print_later,$job_seq_to_print);
+
+ sub print_earlier_jobs($) {
+ # Print jobs whose output is postponed due to --keep-order
+ # Returns: N/A
+ my $job = shift;
+ $print_later{$job->seq()} = $job;
+ $job_seq_to_print ||= 1;
+ my $returnsize = 0;
+ ::debug("run", "Looking for: $job_seq_to_print ",
+ "This: ", $job->seq(), "\n");
+ for(;vec($Global::job_already_run,$job_seq_to_print,1);
+ $job_seq_to_print++) {}
+ while(my $j = $print_later{$job_seq_to_print}) {
+ $returnsize += $j->print();
+ if($j->endtime()) {
+ # Job finished - look at the next
+ delete $print_later{$job_seq_to_print};
+ $job_seq_to_print++;
+ next;
+ } else {
+ # Job not finished yet - look at it again next round
+ last;
+ }
+ }
+ return $returnsize;
+ }
+}
+
+sub print($) {
+ # Print the output of the jobs
+ # Returns: N/A
+ my $self = shift;
+
+ ::debug("print", ">>joboutput ", $self->replaced(), "\n");
+ if($opt::dryrun) {
+ # Nothing was printed to this job:
+ # cleanup tmp files if --files was set
+ ::rm($self->fh(1,"name"));
+ }
+ if($opt::pipe and $self->virgin() and not $opt::tee) {
+ # Skip --joblog, --dryrun, --verbose
+ } else {
+ if($opt::ungroup) {
+ # NULL returnsize = 0 returnsize
+ $self->returnsize() or $self->add_returnsize(0);
+ if($Global::joblog and defined $self->{'exitstatus'}) {
+ # Add to joblog when finished
+ $self->print_joblog();
+ # Printing is only relevant for grouped/--line-buffer output.
+ $opt::ungroup and return;
+ }
+ }
+ # Check for disk full
+ ::exit_if_disk_full();
+ }
+
+ my $returnsize = $self->returnsize();
+ my @fdno;
+ if($opt::latestline) {
+ @fdno = (1);
+ } else {
+ @fdno = (sort { $a <=> $b } keys %Global::fh);
+ }
+ for my $fdno (@fdno) {
+ # Sort by file descriptor numerically: 1,2,3,..,9,10,11
+ $fdno == 0 and next;
+ my $out_fh = $Global::fh{$fdno};
+ my $in_fh = $self->fh($fdno,"r");
+ if(not $in_fh) {
+ if(not $Job::file_descriptor_warning_printed{$fdno}++) {
+ # ::warning("File descriptor $fdno not defined\n");
+ }
+ next;
+ }
+ ::debug("print", "File descriptor $fdno (", $self->fh($fdno,"name"), "):\n");
+ if($Global::linebuffer) {
+ # Line buffered print out
+ $self->print_linebuffer($fdno,$in_fh,$out_fh);
+ } elsif($Global::files) {
+ $self->print_files($fdno,$in_fh,$out_fh);
+ } elsif($opt::results) {
+ $self->print_results($fdno,$in_fh,$out_fh);
+ } else {
+ $self->print_normal($fdno,$in_fh,$out_fh);
+ }
+ flush $out_fh;
+ }
+ ::debug("print", "<<joboutput\n");
+ if(defined $self->{'exitstatus'}
+ and not ($self->virgin() and $opt::pipe)) {
+ if($Global::joblog and not $opt::sqlworker) {
+ # Add to joblog when finished
+ $self->print_joblog();
+ }
+ if($opt::sqlworker and not $opt::results) {
+ $Global::sql->output($self);
+ }
+ if($Global::csvsep) {
+ # Add output to CSV when finished
+ $self->print_csv();
+ }
+ if($Global::jsonout) {
+ $self->print_json();
+ }
+ }
+ return $returnsize - $self->returnsize();
+}
+
+{
+ my %jsonmap;
+
+ sub print_json($) {
+ my $self = shift;
+ sub jsonquote($) {
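+ # Escape a string for use inside a JSON string value
+ # Example (illustrative): qq(say "hi"\n) => say \"hi\"\u000a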
+ my $a = shift;
+ if(not $jsonmap{"\001"}) {
+ map { $jsonmap{sprintf("%c",$_)} =
+ sprintf '\u%04x', $_ } 0..31;
+ }
+ $a =~ s/\\/\\\\/g;
+ $a =~ s/\"/\\"/g;
+ $a =~ s/([\000-\037])/$jsonmap{$1}/g;
+ return $a;
+ }
+
+ my $cmd;
+ if($Global::verbose <= 1) {
+ $cmd = jsonquote($self->replaced());
+ } else {
+ # Verbose level > 1: Print the rsync and stuff
+ $cmd = jsonquote(join " ", @{$self->{'commandline'}});
+ }
+ my $record_ref = $self->{'commandline'}{'arg_list_flat_orig'};
+
+ # Memory optimization: Overwrite with the joined output
+ $self->{'output'}{1} = join("", @{$self->{'output'}{1}});
+ $self->{'output'}{2} = join("", @{$self->{'output'}{2}});
+ # {
+ # "Seq": 12,
+ # "Host": "/usr/bin/ssh foo@lo",
+ # "Starttime": 1608344711.743,
+ # "JobRuntime": 0.01,
+ # "Send": 0,
+ # "Receive": 10,
+ # "Exitval": 0,
+ # "Signal": 0,
+ # "Command": "echo 1",
+ # "V": [
+ # "1"
+ # ],
+ # "Stdout": "1\n",
+ # "Stderr": ""
+ # }
+ #
+ printf($Global::csv_fh
+ q({ "Seq": %s, "Host": "%s", "Starttime": %s, "JobRuntime": %s, ).
+ q("Send": %s, "Receive": %s, "Exitval": %s, "Signal": %s, ).
+ q("Command": "%s", "V": [ %s ], "Stdout": "%s", "Stderr": "%s" }).
+ "\n",
+ $self->seq(),
+ jsonquote($self->sshlogin()->string()),
+ $self->starttime(), sprintf("%0.3f",$self->runtime()),
+ $self->transfersize(), $self->returnsize(),
+ $self->exitstatus(), $self->exitsignal(), $cmd,
+ (join ",",
+ map { '"'.jsonquote($_).'"' } @$record_ref[1..$#$record_ref],
+ ),
+ jsonquote($self->{'output'}{1}),
+ jsonquote($self->{'output'}{2})
+ );
+ }
+}
+
+{
+ my $header_printed;
+
+ sub print_csv($) {
+ my $self = shift;
+ my $cmd;
+ if($Global::verbose <= 1) {
+ $cmd = $self->replaced();
+ } else {
+ # Verbose level > 1: Print the rsync and stuff
+ $cmd = join " ", @{$self->{'commandline'}};
+ }
+ my $record_ref = $self->{'commandline'}{'arg_list_flat_orig'};
+
+ if(not $header_printed) {
+ # Variable headers
+ # Normal => V1..Vn
+ # --header : => first value from column
+ my @V;
+ if($opt::header) {
+ my $i = 1;
+ @V = (map { $Global::input_source_header{$i++} }
+ @$record_ref[1..$#$record_ref]);
+ } else {
+ my $V = "V1";
+ @V = (map { $V++ } @$record_ref[1..$#$record_ref]);
+ }
+ print $Global::csv_fh
+ (map { $$_ }
+ combine_ref("Seq", "Host", "Starttime", "JobRuntime",
+ "Send", "Receive", "Exitval", "Signal", "Command",
+ @V,
+ "Stdout","Stderr"
+ )),"\n";
+ $header_printed++;
+ }
+ # Memory optimization: Overwrite with the joined output
+ $self->{'output'}{1} = join("", @{$self->{'output'}{1}});
+ $self->{'output'}{2} = join("", @{$self->{'output'}{2}});
+ print $Global::csv_fh
+ (map { $$_ }
+ combine_ref
+ ($self->seq(),
+ $self->sshlogin()->string(),
+ $self->starttime(), sprintf("%0.3f",$self->runtime()),
+ $self->transfersize(), $self->returnsize(),
+ $self->exitstatus(), $self->exitsignal(), \$cmd,
+ \@$record_ref[1..$#$record_ref],
+ \$self->{'output'}{1},
+ \$self->{'output'}{2})),"\n";
+ }
+}
+
+sub combine_ref($) {
+ # Inspired by Text::CSV_PP::_combine (by Makamaka Hannyaharamitu)
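+ # Example (illustrative, $Global::csvsep = ","):
+ #   combine_ref("a", "b,c", 'd"e') => refs that join to: a,"b,c","d""e"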
+ my @part = @_;
+ my $sep = $Global::csvsep;
+ my $quot = '"';
+ my @out = ();
+
+ my $must_be_quoted;
+ for my $column (@part) {
+ # Memory optimization: Content transferred as reference
+ if(ref $column ne "SCALAR") {
+ # Convert all columns to scalar references
+ my $v = $column;
+ $column = \$v;
+ }
+ if(not defined $$column) {
+ $$column = '';
+ next;
+ }
+
+ $must_be_quoted = 0;
+
+ if($$column =~ s/$quot/$quot$quot/go){
+ # Replace " => ""
+ $must_be_quoted ||=1;
+ }
+ if($$column =~ /[\s\Q$sep\E]/o){
+ # Put quotes around if the column contains ,
+ $must_be_quoted ||=1;
+ }
+
+ $Global::use{"bytes"} ||= eval "use bytes; 1;";
+ if ($$column =~ /\0/) {
+ # Contains \0 => put quotes around
+ $must_be_quoted ||=1;
+ }
+ if($must_be_quoted){
+ push @out, \$sep, \$quot, $column, \$quot;
+ } else {
+ push @out, \$sep, $column;
+ }
+ }
+ # Remove the first $sep: ,val,"val" => val,"val"
+ shift @out;
+ return @out;
+}
+
+sub print_files($) {
+ # Print the name of the file containing stdout on stdout
+ # Uses:
+ # $opt::pipe
+ # $opt::group = Print when job is done
+ # $opt::linebuffer = Print ASAP
+ # Returns: N/A
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+
+ # If the job is dead: close printing fh. Needed for --compress
+ close $self->fh($fdno,"w");
+ if($? and $opt::compress) {
+ ::error($opt::compress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ if($opt::compress) {
+ # Kill the decompressor which will not be needed
+ CORE::kill "TERM", $self->fh($fdno,"rpid");
+ }
+ close $in_fh;
+
+ if($opt::pipe and $self->virgin()) {
+ # Nothing was printed to this job:
+ # cleanup unused tmp files because --files was set
+ for my $fdno (1,2) {
+ ::rm($self->fh($fdno,"name"));
+ ::rm($self->fh($fdno,"unlink"));
+ }
+ } elsif($fdno == 1 and $self->fh($fdno,"name")) {
+ print $out_fh $self->tag(),$self->fh($fdno,"name"), $Global::files_sep;
+ if($Global::membuffer) {
+ push @{$self->{'output'}{$fdno}},
+ $self->tag(), $self->fh($fdno,"name");
+ }
+ $self->add_returnsize(-s $self->fh($fdno,"name"));
+ # Mark as printed - do not print again
+ $self->set_fh($fdno,"name",undef);
+ }
+}
+
+
+# Different print types
+# (--ll | --ll --bar | --lb | --group | --parset | --sql-worker)
+# (--files | --results (.json|.csv|.tsv) )
+# --color-failed
+# --color
+# --keep-order
+# --tag
+# --bar
+{
+ my ($up,$eol,$currow,$maxrow);
+ my ($minvisible,%print_later,%notvisible);
+ my (%binmodeset,%tab);
+
+ sub latestline_init() {
+ # cursor_up cuu1 = up one line
+ $up = `sh -c "tput cuu1 </dev/tty" 2>/dev/null`;
+ chomp($up);
+ $eol = `sh -c "tput el </dev/tty" 2>/dev/null`;
+ chomp($eol);
+ if($eol eq "") { $eol = "\033[K"; }
+ $currow = 1;
+ $maxrow = 1;
+ $minvisible = 1;
+ for(0..8) {
+ $tab{$_} = " "x(8-($_%8));
+ }
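+	# %tab maps a column offset (mod 8) to the spaces needed to reach
+	# the next tab stop, e.g. offset 3 => 5 spaces, offset 0 => 8.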
+ }
+
+ sub mbtrunc($$) {
+ # Simple mbtrunc to avoid using Text::WideChar::Util
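+	# Illustrative example: "ヌー平行" is 4 chars but 8 display columns
+	# wide, so mbtrunc("ヌー平行",4) returns "ヌー" (4 columns).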
+ my $str = shift;
+ my $len = shift;
+ if(::mbswidth($str) == length($str)) {
+ $str = substr($str,0,$len);
+ } else {
+ # mb chars (ヌー平行) are wider than 1 char on screen
+ # We need at most $len chars - they may be wide
+ $str =~ s/(.{$len}).*/$1/;
+ my $rlen = int((::mbswidth($str) - $len)/2+0.5);
+ do {
+ $str =~ s/.{$rlen}$//;
+ $rlen = int((::mbswidth($str) - $len)/2+0.5);
+ } while($rlen >= 1);
+ }
+ return $str;
+ }
+
+ sub print_latest_line($) {
+ my $self = shift;
+ my $out_fh = shift;
+ if(not defined $self->{$out_fh,'latestline'}) { return; }
+ my $row = $self->row();
+ # Is row visible?
+ if(not ($minvisible <= $row
+ and
+ $row < $minvisible + ::terminal_rows() - 1)) {
+ return;
+ }
+ if(not $binmodeset{$out_fh}++) {
+ # Enable utf8 if possible
+ eval q{ binmode $out_fh, "encoding(utf8)"; };
+ }
+ my ($color,$reset_color) = $self->color();
+ my $termcol = ::terminal_columns();
+ my $untabify_tag = ::decode_utf8($self->untabtag());
+ my $untabify_str =
+ ::untabify(::decode_utf8($self->{$out_fh,'latestline'}));
+ # -1 to make space for $truncated_str
+ my $maxtaglen = $termcol - 1;
+ $untabify_tag = mbtrunc($untabify_tag,$maxtaglen);
+ my $taglen = ::mbswidth($untabify_tag);
+ my $maxstrlen = $termcol - $taglen - 1;
+ $untabify_str = mbtrunc($untabify_str,$maxstrlen);
+ my $strlen = ::mbswidth($untabify_str);
+ my $truncated_tag = "";
+ my $truncated_str = "";
+ if($termcol - $taglen < 2) {
+ $truncated_tag = ">";
+ } else {
+ if($termcol - $taglen - $strlen <= 2) {
+ $truncated_str = ">";
+ }
+ }
+ $maxrow = ($row > $maxrow) ? $row : $maxrow;
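+	# Cursor math (illustrative): if the cursor is on $currow = 5 and
+	# this job's row is $row = 2, print $up 3 times to move up, then
+	# \r and $eol to clear the line, and end with enough \n to park
+	# the cursor just below the last status row ($maxrow).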
+ printf($out_fh
+ ("%s%s%s%s". # up down \r eol
+ "%s%s". # tag trunc_tag
+ "%s%s%s%s". # color line trunc reset_color
+ "%s" # down
+ ),
+ "$up"x($currow - $row), "\n"x($row - $currow), "\r", $eol,
+ $untabify_tag,$truncated_tag,
+ $color, $untabify_str, $truncated_str, $reset_color,
+ "\n"x($maxrow - $row + 1));
+ $currow = $maxrow + 1;
+ }
+
+ sub print_linebuffer($) {
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+ if(defined $self->{'exitstatus'}) {
+ # If the job is dead: close printing fh. Needed for --compress
+ close $self->fh($fdno,"w");
+ if($opt::compress) {
+ if($?) {
+ ::error($opt::compress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+		# Use blocking reads in the final round
+ for my $fdno (1,2) { ::set_fh_blocking($self->fh($fdno,'r')); }
+ }
+ if($opt::latestline) { $print_later{$self->row()} = $self; }
+ }
+ if(not $self->virgin()) {
+ if($Global::files or ($opt::results and not $Global::csvsep)) {
+ # Print filename
+ if($fdno == 1 and not $self->fh($fdno,"printed")) {
+ print $out_fh $self->tag(),$self->fh($fdno,"name"),"\n";
+ if($Global::membuffer) {
+ push(@{$self->{'output'}{$fdno}}, $self->tag(),
+ $self->fh($fdno,"name"));
+ }
+ $self->set_fh($fdno,"printed",1);
+ }
+ # No need for reading $in_fh, as it is from "cat >/dev/null"
+ } else {
+ # Read halflines and print full lines
+ my $outputlength = 0;
+ my $halfline_ref = $self->{'halfline'}{$fdno};
+ my ($buf,$i,$rv);
+ # 1310720 gives 1.2 GB/s
+ # 131072 gives 0.9 GB/s
+ # The optimal block size differs
+ # It has been measured on:
+ # AMD 6376: 60800 (>70k is also reasonable)
+ # Intel i7-3632QM: 52-59k, 170-175k
+ # seq 64 | ppar --_test $1 --lb \
+ # 'yes {} `seq 1000`|head -c 10000000' >/dev/null
+ while($rv = sysread($in_fh, $buf, 60800)) {
+ $outputlength += $rv;
+ # TODO --recend
+ # Treat both \n and \r as line end
+ # Only test for \r if there is no \n
+ # Test:
+ # perl -e '$a="x"x1000000;
+ # $b="$a\r$a\n$a\r$a\n";
+ # map { print $b,$_ } 1..10'
+ $i = ((rindex($buf,"\n")+1) || (rindex($buf,"\r")+1));
+ if($i) {
+ if($opt::latestline) {
+ # Keep the latest full line
+ my $l = join('', @$halfline_ref,
+ substr($buf,0,$i-1));
+ # "ab\rb\n" = "bb", but we cannot process that correctly.
+ # Line may be:
+ # foo \r bar \n
+ # foo \r bar \r baz \r
+			    # If so: Keep only the part after the last \r
+ $l =~ s/.*\r//g;
+ my $j = ((rindex($l,"\n")+1) ||
+ (rindex($l,"\r")+1));
+ $self->{$out_fh,'latestline'} = substr($l,$j);
+ # Remove the processed part
+ # by keeping the unprocessed part
+ @$halfline_ref = (substr($buf,$i));
+ } else {
+ # One or more complete lines were found
+ if($Global::color) {
+ my $print = join("",@$halfline_ref,
+ substr($buf,0,$i));
+ chomp($print);
+ my ($color,$reset_color) = $self->color();
+ my $colortag = $color.$self->tag();
+ # \n => reset \n color tag
+ $print =~ s{([\n\r])(?=.|$)}
+ {$reset_color$1$colortag}gs;
+ print($out_fh $colortag, $print,
+ $reset_color, "\n");
+ } elsif($opt::tag or defined $opt::tagstring) {
+ # Replace ^ with $tag within the full line
+ if($Global::cache_replacement_eval) {
+ # Replace with the same value for tag
+ my $tag = $self->tag();
+ unshift @$halfline_ref, $tag;
+ # TODO --recend that can be partially in
+ # @$halfline_ref
+ substr($buf,0,$i-1) =~
+ s/([\n\r])(?=.|$)/$1$tag/gs;
+ } else {
+ # Replace with freshly computed tag-value
+ unshift @$halfline_ref, $self->tag();
+ substr($buf,0,$i-1) =~
+ s/([\n\r])(?=.|$)/$1.$self->tag()/gse;
+ }
+ # The length changed,
+ # so find the new ending pos
+ $i = ::max((rindex($buf,"\n")+1),
+ (rindex($buf,"\r")+1));
+ # Print the partial line (halfline)
+ # and the last half
+ print $out_fh @$halfline_ref, substr($buf,0,$i);
+ } else {
+ # Print the partial line (halfline)
+ # and the last half
+ print $out_fh @$halfline_ref, substr($buf,0,$i);
+ }
+ # Buffer in memory for SQL and CSV-output
+ if($Global::membuffer) {
+ push(@{$self->{'output'}{$fdno}},
+ @$halfline_ref, substr($buf,0,$i));
+ }
+ # Remove the printed part by keeping the unprinted
+ @$halfline_ref = (substr($buf,$i));
+ }
+ } else {
+ # No newline, so append to the halfline
+ push @$halfline_ref, $buf;
+ }
+ }
+ $self->add_returnsize($outputlength);
+ if($opt::latestline) { $self->print_latest_line($out_fh); }
+ }
+ if(defined $self->{'exitstatus'}) {
+ if($Global::files or ($opt::results and not $Global::csvsep)) {
+ $self->add_returnsize(-s $self->fh($fdno,"name"));
+ } else {
+ if($opt::latestline) {
+ # Force re-computing color if --colorfailed
+ if($opt::colorfailed) { delete $self->{'color'}; }
+ if($self->{$out_fh,'latestline'} ne "") {
+ $self->print_latest_line($out_fh);
+ }
+ if(@{$self->{'halfline'}{$fdno}}) {
+ my $l = join('', @{$self->{'halfline'}{$fdno}});
+ if($l ne "") {
+ $self->{$out_fh,'latestline'} = $l;
+ }
+ } else {
+ $self->{$out_fh,'latestline'} = undef;
+ }
+ # Print latest line from jobs that are already done
+ while($print_later{$minvisible}) {
+ $print_later{$minvisible}->print_latest_line($out_fh);
+ delete $print_later{$minvisible};
+ $minvisible++;
+ }
+ # Print latest line from jobs that are on screen now
+ for(my $row = $minvisible;
+ $row < $minvisible -1 + ::terminal_rows();
+ $row++) {
+ $print_later{$row} and
+ $print_later{$row}->print_latest_line($out_fh);
+ }
+ } else {
+ # If the job is dead: print the remaining partial line
+ # read remaining (already done for $opt::latestline)
+ my $halfline_ref = $self->{'halfline'}{$fdno};
+ if(grep /./, @$halfline_ref) {
+ my $returnsize = 0;
+ for(@{$self->{'halfline'}{$fdno}}) {
+ $returnsize += length $_;
+ }
+ $self->add_returnsize($returnsize);
+ if($opt::tag or defined $opt::tagstring) {
+			# Prepend $tag to the remaining half line
+ unshift @$halfline_ref, $self->tag();
+ }
+ # Print the partial line (halfline)
+ print $out_fh @{$self->{'halfline'}{$fdno}};
+ # Buffer in memory for SQL and CSV-output
+ if($Global::membuffer) {
+ push(@{$self->{'output'}{$fdno}}, @$halfline_ref);
+ }
+ @$halfline_ref = ();
+ }
+ }
+ }
+ if($self->fh($fdno,"rpid") and
+ CORE::kill 0, $self->fh($fdno,"rpid")) {
+ # decompress still running
+ } else {
+ # decompress done: close fh
+ close $in_fh;
+ if($? and $opt::compress) {
+ ::error($opt::decompress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ }
+ }
+ }
+ }
+}
+
+sub free_ressources() {
+ my $self = shift;
+ if(not $opt::ungroup) {
+ my $fh;
+ for my $fdno (sort { $a <=> $b } keys %Global::fh) {
+ $fh = $self->fh($fdno,"w");
+ $fh and close $fh;
+ $fh = $self->fh($fdno,"r");
+ $fh and close $fh;
+ }
+ }
+}
+
+sub print_parset($) {
+ # Wrap output with shell script code to set as variables
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+ my $outputlength = 0;
+
+ ::debug("parset","print $Global::parset");
+ if($Global::parset eq "assoc") {
+ # Start: (done in parse_parset())
+ # eval "`echo 'declare -A myassoc; myassoc=(
+ # Each: (done here)
+ # [$'a\tb']=$'a\tb\tc ddd'
+ # End: (done in wait_and_exit())
+ # )'`"
+ print '[',::Q($self->{'commandline'}->
+ replace_placeholders(["\257<\257>"],0,0)),']=';
+ } elsif($Global::parset eq "array") {
+ # Start: (done in parse_parset())
+ # eval "`echo 'myassoc=(
+ # Each: (done here)
+ # $'a\tb\tc ddd'
+ # End: (done in wait_and_exit())
+ # )'`"
+ } elsif($Global::parset eq "var") {
+ # Start: (done in parse_parset())
+ # <empty>
+ # Each: (done here)
+ # var=$'a\tb\tc ddd'
+ # End: (done in wait_and_exit())
+ # <empty>
+ if(not @Global::parset_vars) {
+ ::error(255,"Too few named destination variables");
+ }
+ print shift @Global::parset_vars,"=";
+ }
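+    # Illustrative example: "parset a,b echo ::: 1 2" makes the job for
+    # the first argument print "a=1" and the job for the second print
+    # "b=2"; the parset shell helper then evals these assignments.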
+ local $/ = "\n";
+ my $tag = $self->tag();
+ my @out;
+ while(<$in_fh>) {
+ $outputlength += length $_;
+ # Tag lines with \r, too
+ $_ =~ s/(?<=[\r])(?=.|$)/$tag/gs;
+ push @out, $tag,$_;
+ }
+ # Remove last newline
+ # This often makes it easier to use the output in shell
+    @out and $out[$#out] =~ s/\n$//s;
+ print ::Q(join("",@out)),"\n";
+ return $outputlength;
+}
+
+sub print_normal($) {
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+ my $buf;
+ close $self->fh($fdno,"w");
+ if($? and $opt::compress) {
+ ::error($opt::compress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ if(not $self->virgin()) {
+ seek $in_fh, 0, 0;
+ # $in_fh is now ready for reading at position 0
+ my $outputlength = 0;
+ my @output;
+
+ if($Global::parset and $fdno == 1) {
+ $outputlength += $self->print_parset($fdno,$in_fh,$out_fh);
+ } elsif(defined $opt::tag or defined $opt::tagstring
+ or $Global::color or $opt::colorfailed) {
+ if($Global::color or $opt::colorfailed) {
+ my ($color,$reset_color) = $self->color();
+ my $colortag = $color.$self->tag();
+ # Read line by line
+ local $/ = "\n";
+ while(<$in_fh>) {
+ $outputlength += length $_;
+ # Tag lines with \r, too
+ chomp;
+ s{([\n\r])(?=.|$)}{$reset_color$1$colortag}gs;
+ print $out_fh $colortag,$_,$reset_color,"\n";
+ }
+ } else {
+ my $tag = $self->tag();
+ my $pretag = 1;
+ my $s;
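+		# Illustrative example: with tag "T\t" and sysread returning
+		# "a\nb" then "c\n": the first print is "T\ta\nT\tb" and
+		# $pretag becomes 0 (the buffer ended mid-line), so "c\n" is
+		# printed untagged - it continues the line started by "b".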
+ while(sysread($in_fh,$buf,32767)) {
+ $outputlength += length $buf;
+ $buf =~ s/(?<=[\r\n])(?=.)/$tag/gs;
+ print $out_fh ($pretag ? $tag : ""),$buf;
+ if($Global::membuffer) {
+ push @{$self->{'output'}{$fdno}},
+ ($pretag ? $tag : ""),$buf;
+ }
+ # Should next print start with a tag?
+ $s = substr($buf, -1);
+ # This is faster than ($s eq "\n") || ($s eq "\r")
+ $pretag = ($s eq "\n") ? 1 : ($s eq "\r");
+ }
+ }
+ } else {
+ # Most efficient way of copying data from $in_fh to $out_fh
+ # Intel i7-3632QM: 25k-
+ while(sysread($in_fh,$buf,32767)) {
+ print $out_fh $buf;
+ $outputlength += length $buf;
+ if($Global::membuffer) {
+ push @{$self->{'output'}{$fdno}}, $buf;
+ }
+ }
+ }
+ if($fdno == 1) {
+ $self->add_returnsize($outputlength);
+ }
+ close $in_fh;
+ if($? and $opt::compress) {
+ ::error($opt::decompress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ }
+}
+
+sub print_results($) {
+ my $self = shift;
+ my ($fdno,$in_fh,$out_fh) = @_;
+ my $buf;
+ close $self->fh($fdno,"w");
+ if($? and $opt::compress) {
+ ::error($opt::compress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ if(not $self->virgin()) {
+ seek $in_fh, 0, 0;
+ # $in_fh is now ready for reading at position 0
+ my $outputlength = 0;
+ my @output;
+
+ if($Global::membuffer) {
+ # Read data into membuffer
+ if($opt::tag or $opt::tagstring) {
+ # Read line by line
+ local $/ = "\n";
+ my $tag = $self->tag();
+ while(<$in_fh>) {
+ $outputlength += length $_;
+ # Tag lines with \r, too
+ $_ =~ s/(?<=[\r])(?=.|$)/$tag/gs;
+ push @{$self->{'output'}{$fdno}}, $tag, $_;
+ }
+ } else {
+ # Most efficient way of copying data from $in_fh to $out_fh
+ while(sysread($in_fh,$buf,60000)) {
+ $outputlength += length $buf;
+ push @{$self->{'output'}{$fdno}}, $buf;
+ }
+ }
+ } else {
+ # Not membuffer: No need to read the file
+ if($opt::compress) {
+ $outputlength = -1;
+ } else {
+ # Determine $outputlength = file length
+ seek($in_fh, 0, 2) || ::die_bug("cannot seek result");
+ $outputlength = tell($in_fh);
+ }
+ }
+ if($fdno == 1) { $self->add_returnsize($outputlength); }
+ close $in_fh;
+ if($? and $opt::compress) {
+ ::error($opt::decompress_program." failed.");
+ $self->set_exitstatus(255);
+ }
+ }
+}
+
+sub print_joblog($) {
+ my $self = shift;
+ my $cmd;
+ if($Global::verbose <= 1) {
+ $cmd = $self->replaced();
+ } else {
+ # Verbose level > 1: Print the rsync and stuff
+ $cmd = $self->wrapped();
+ }
+ # Newlines make it hard to parse the joblog
+ $cmd =~ s/\n/\0/g;
+ print $Global::joblog
+ join("\t", $self->seq(), $self->sshlogin()->string(),
+ $self->starttime(), sprintf("%10.3f",$self->runtime()),
+ $self->transfersize(), $self->returnsize(),
+ $self->exitstatus(), $self->exitsignal(), $cmd
+ ). "\n";
+ flush $Global::joblog;
+ $self->set_job_in_joblog();
+}
+
+sub tag($) {
+ my $self = shift;
+ if(not defined $self->{'tag'} or not $Global::cache_replacement_eval) {
+ if(defined $opt::tag or defined $opt::tagstring) {
+ $self->{'tag'} =
+ ($self->{'commandline'}->
+ replace_placeholders([$opt::tagstring],0,0)).
+ "\t";
+ } else {
+ # No tag
+ $self->{'tag'} = "";
+ }
+ }
+ return $self->{'tag'};
+}
+
+sub untabtag($) {
+ # tag with \t replaced with spaces
+ my $self = shift;
+ my $tag = $self->tag();
+ if(not defined $self->{'untab'}{$tag}) {
+ $self->{'untab'}{$tag} = ::untabify($tag);
+ }
+ return $self->{'untab'}{$tag};
+}
+
+{
+ my (@color,$eol,$reset_color,$init);
+
+ sub init_color() {
+ if(not $init) {
+ $init = 1;
+ # color combinations that are readable: black/white text
+ # on colored background, but not white on yellow
+ my @color_combinations =
+ # Force each color code to have the same length in chars
+ # This will make \t work as expected
+ ((map { [sprintf("%03d",$_),"000"] }
+ 6..7,9..11,13..15,40..51,75..87,113..123,147..159,
+ 171..182,185..231,249..254),
+ (map { [sprintf("%03d",$_),231] }
+ 1..9,12..13,16..45,52..81,88..114,124..149,
+ 160..178,180,182..184,196..214,232..250));
+ # reorder list so adjacent colors are dissimilar
+ # %23 and %7 were found experimentally
+ my @order = reverse sort {
+ (($a%23) <=> ($b%23))
+ or
+ (($b%7) <=> ($a%7));
+ } 0..$#color_combinations;
+ @order = @order[54 .. $#color_combinations, 0 .. 53];
+ @color = map {
+ # TODO Can this be done with `tput` codes?
+ "\033[48;5;".$_->[0].";38;5;".$_->[1]."m"
+ } @color_combinations[ @order ];
+
+ # clr_eol el = clear to end of line
+ $eol = `sh -c "tput el </dev/tty" 2>/dev/null`;
+ chomp($eol);
+ if($eol eq "") { $eol = "\033[K"; }
+ # exit_attribute_mode sgr0 = turn off all attributes
+ $reset_color = `sh -c "tput sgr0 </dev/tty" 2>/dev/null`;
+ chomp($reset_color);
+ if($reset_color eq "") { $reset_color = "\033[m"; }
+ }
+ }
+
+ sub color($) {
+ my $self = shift;
+ if(not defined $self->{'color'}) {
+ if($Global::color) {
+ # Choose a value based on the seq
+ $self->{'color'} = $color[$self->seq() % ($#color+1)].$eol;
+ $self->{'reset_color'} = $reset_color;
+ } else {
+ $self->{'color'} = "";
+ $self->{'reset_color'} = "";
+ }
+ if($opt::colorfailed) {
+ if($self->exitstatus()) {
+ # White on Red
+ # Can this be done more generally?
+ $self->{'color'} =
+ "\033[48;5;"."196".";38;5;"."231"."m".$eol;
+ $self->{'reset_color'} = $reset_color;
+ }
+ }
+ }
+ return ($self->{'color'},$self->{'reset_color'});
+ }
+}
+
+sub hostgroups($) {
+ my $self = shift;
+ if(not defined $self->{'hostgroups'}) {
+ $self->{'hostgroups'} =
+ $self->{'commandline'}->{'arg_list'}[0][0]->{'hostgroups'};
+ }
+ return @{$self->{'hostgroups'}};
+}
+
+sub exitstatus($) {
+ my $self = shift;
+ return $self->{'exitstatus'};
+}
+
+sub set_exitstatus($$) {
+ my $self = shift;
+ my $exitstatus = shift;
+ if($exitstatus) {
+ # Overwrite status if non-zero
+ $self->{'exitstatus'} = $exitstatus;
+ } else {
+ # Set status but do not overwrite
+ # Status may have been set by --timeout
+ $self->{'exitstatus'} ||= $exitstatus;
+ }
+ $opt::sqlworker and
+ $Global::sql->update("SET Exitval = ? WHERE Seq = ".$self->seq(),
+ $exitstatus);
+}
+
+sub reset_exitstatus($) {
+ my $self = shift;
+ undef $self->{'exitstatus'};
+}
+
+sub exitsignal($) {
+ my $self = shift;
+ return $self->{'exitsignal'};
+}
+
+sub set_exitsignal($$) {
+ my $self = shift;
+ my $exitsignal = shift;
+ $self->{'exitsignal'} = $exitsignal;
+ $opt::sqlworker and
+ $Global::sql->update("SET _Signal = ? WHERE Seq = ".$self->seq(),
+ $exitsignal);
+}
+
+{
+ my $total_jobs;
+
+ sub should_we_halt {
+ # Should we halt? Immediately? Gracefully?
+	# Returns:
+	#   $Global::halt_when ("now" or "soon") if we should halt
+	#   "" if we should not halt
+ my $job = shift;
+ my $limit;
+ if($Global::semaphore) {
+ # Emulate Bash's +128 if there is a signal
+ $Global::halt_exitstatus =
+ ($job->exitstatus()
+ or
+ $job->exitsignal() ? $job->exitsignal() + 128 : 0);
+ }
+ if($job->exitstatus() or $job->exitsignal()) {
+ # Job failed
+ $Global::exitstatus++;
+ $Global::total_failed++;
+ if($Global::halt_fail) {
+ ::status("$Global::progname: This job failed:",
+ $job->replaced());
+ $limit = $Global::total_failed;
+ }
+ } elsif($Global::halt_success) {
+ ::status("$Global::progname: This job succeeded:",
+ $job->replaced());
+ $limit = $Global::total_completed - $Global::total_failed;
+ }
+ if($Global::halt_done) {
+ ::status("$Global::progname: This job finished:",
+ $job->replaced());
+ $limit = $Global::total_completed;
+ }
+ if(not defined $limit) {
+	    return "";
+ }
+ # --halt # => 1..100 (number of jobs failed, 101 means > 100)
+ # --halt % => 1..100 (pct of jobs failed)
+ if($Global::halt_pct and not $Global::halt_count) {
+ $total_jobs ||= $Global::JobQueue->total_jobs();
+ # From the pct compute the number of jobs that must fail/succeed
+ $Global::halt_count = $total_jobs * $Global::halt_pct;
+ }
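+	# Illustrative example: --halt soon,fail=20% with 50 jobs in total
+	# gives $Global::halt_count = 10, so the 10th failed job triggers
+	# the halt.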
+ if($limit >= $Global::halt_count) {
+	    # At least N jobs have failed/succeeded/completed
+	    # or at least N% have failed/succeeded/completed
+ # So we should prepare for exit
+ if($Global::halt_fail or $Global::halt_done) {
+ # Set exit status
+ if(not defined $Global::halt_exitstatus) {
+ if($Global::halt_pct) {
+ # --halt now,fail=X% or soon,fail=X%
+ # --halt now,done=X% or soon,done=X%
+ $Global::halt_exitstatus =
+ ::ceil($Global::total_failed / $total_jobs * 100);
+ } elsif($Global::halt_count) {
+ # --halt now,fail=X or soon,fail=X
+ # --halt now,done=X or soon,done=X
+ $Global::halt_exitstatus =
+ ::min($Global::total_failed,101);
+ }
+ if($Global::halt_count and $Global::halt_count == 1) {
+ # --halt now,fail=1 or soon,fail=1
+ # --halt now,done=1 or soon,done=1
+ # Emulate Bash's +128 if there is a signal
+ $Global::halt_exitstatus =
+ ($job->exitstatus()
+ or
+ $job->exitsignal() ? $job->exitsignal() + 128 : 0);
+ }
+ }
+ ::debug("halt","Pct: ",$Global::halt_pct,
+ " count: ",$Global::halt_count,
+ " status: ",$Global::halt_exitstatus,"\n");
+ } elsif($Global::halt_success) {
+ $Global::halt_exitstatus = 0;
+ }
+ if($Global::halt_when eq "soon") {
+ $Global::start_no_new_jobs ||= 1;
+ if(scalar(keys %Global::running) > 0) {
+ # Only warn if there are more jobs running
+ ::status
+ ("$Global::progname: Starting no more jobs. ".
+ "Waiting for ". (keys %Global::running).
+ " jobs to finish.");
+ }
+ }
+ return($Global::halt_when);
+ }
+ return "";
+ }
+}
+
+
+package CommandLine;
+
+sub new($) {
+ my $class = shift;
+ my $seq = shift;
+ my $commandref = shift;
+ $commandref || die;
+ my $arg_queue = shift;
+ my $context_replace = shift;
+ my $max_number_of_args = shift; # for -N and normal (-n1)
+ my $transfer_files = shift;
+ my $return_files = shift;
+ my $template_names = shift;
+ my $template_contents = shift;
+ my $replacecount_ref = shift;
+ my $len_ref = shift;
+ my %replacecount = %$replacecount_ref;
+ my %len = %$len_ref;
+ for (keys %$replacecount_ref) {
+ # Total length of this replacement string {} replaced with all args
+ $len{$_} = 0;
+ }
+ return bless {
+ 'command' => $commandref,
+ 'seq' => $seq,
+ 'len' => \%len,
+ 'arg_list' => [],
+ 'arg_list_flat' => [],
+ 'arg_list_flat_orig' => [undef],
+ 'arg_queue' => $arg_queue,
+ 'max_number_of_args' => $max_number_of_args,
+ 'replacecount' => \%replacecount,
+ 'context_replace' => $context_replace,
+ 'transfer_files' => $transfer_files,
+ 'return_files' => $return_files,
+ 'template_names' => $template_names,
+ 'template_contents' => $template_contents,
+ 'replaced' => undef,
+ }, ref($class) || $class;
+}
+
+sub flush_cache() {
+ my $self = shift;
+ for my $arglist (@{$self->{'arg_list'}}) {
+ for my $arg (@$arglist) {
+ $arg->flush_cache();
+ }
+ }
+ $self->{'arg_queue'}->flush_cache();
+ $self->{'replaced'} = undef;
+}
+
+sub seq($) {
+ my $self = shift;
+ return $self->{'seq'};
+}
+
+sub set_seq($$) {
+ my $self = shift;
+ $self->{'seq'} = shift;
+}
+
+sub slot($) {
+ # Find the number of a free job slot and return it
+ # Uses:
+ # @Global::slots - list with free jobslots
+ # Returns:
+ # $jobslot = number of jobslot
+ my $self = shift;
+ if(not $self->{'slot'}) {
+ if(not @Global::slots) {
+ # $max_slot_number will typically be $Global::max_jobs_running
+ push @Global::slots, ++$Global::max_slot_number;
+ }
+ $self->{'slot'} = shift @Global::slots;
+ }
+ return $self->{'slot'};
+}
+
+{
+ my $already_spread;
+ my $darwin_max_len;
+
+ sub populate($) {
+ # Add arguments from arg_queue until the number of arguments or
+ # max line length is reached
+ # Uses:
+ # $Global::usable_command_line_length
+ # $opt::cat
+ # $opt::fifo
+ # $Global::JobQueue
+ # $opt::m
+ # $opt::X
+ # $Global::max_jobs_running
+ # Returns: N/A
+ my $self = shift;
+ my $next_arg;
+ my $max_len = $Global::usable_command_line_length || die;
+ if($^O eq "darwin") {
+ # Darwin's limit is affected by:
+ # * number of environment names (variables+functions)
+ # * size of environment
+ # * the length of arguments:
+ # a one-char argument lowers the limit by 5
+ # To be safe assume all arguments are one-char
+ # The max_len is cached between runs, but if the size of
+ # the environment is different we need to recompute the
+ # usable max length for this run of GNU Parallel
+ # See https://unix.stackexchange.com/a/604943/2972
+ if(not $darwin_max_len) {
+ my $envc = (keys %ENV);
+ my $envn = length join"",(keys %ENV);
+ my $envv = length join"",(values %ENV);
+ $darwin_max_len = -146+($max_len - $envn - $envv) - $envc*10;
+		::debug("init",
+			"length: $darwin_max_len ".
+			"-146+($max_len - $envn - $envv) - $envc*10");
+ }
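+	    # Illustrative numbers: 50 environment entries whose names and
+	    # values total 2000 bytes, with $max_len = 1048576, give
+	    # -146 + (1048576-2000) - 50*10 = 1045930 usable bytes.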
+ $max_len = $darwin_max_len;
+ }
+ if($opt::cat or $opt::fifo) {
+ # Get the empty arg added by --pipepart (if any)
+ $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->get();
+ # $PARALLEL_TMP will point to a tempfile that will be used as {}
+ $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->
+ unget([Arg->new('"$PARALLEL_TMP"')]);
+ }
+ while (not $self->{'arg_queue'}->empty()) {
+ $next_arg = $self->{'arg_queue'}->get();
+ if(not defined $next_arg) {
+ next;
+ }
+ $self->push($next_arg);
+ if($self->len() >= $max_len) {
+ # Command length is now > max_length
+ # If there are arguments: remove the last
+ # If there are no arguments: Error
+ # TODO stuff about -x opt_x
+ if($self->number_of_args() > 1) {
+ # There is something to work on
+ $self->{'arg_queue'}->unget($self->pop());
+ last;
+ } else {
+ my $args = join(" ", map { $_->orig() } @$next_arg);
+ ::error(255,"Command line too long (".
+ $self->len(). " >= ".
+ $max_len.
+ ") at input ".
+ $self->{'arg_queue'}->arg_number().
+ ": ".
+ ((length $args > 50) ?
+ (substr($args,0,50))."..." :
+ $args));
+ }
+ }
+
+ if(defined $self->{'max_number_of_args'}) {
+ if($self->number_of_args() >=
+ $self->{'max_number_of_args'}) {
+ last;
+ }
+ }
+ }
+ if(($opt::m or $opt::X) and not $already_spread
+ and $self->{'arg_queue'}->empty() and $Global::max_jobs_running) {
+ # -m or -X and EOF => Spread the arguments over all jobslots
+ # (unless they are already spread)
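+	    # Illustrative example: 100 args collected and 4 jobslots =>
+	    # each remaining command line gets ceil(100/4) = 25 args.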
+ $already_spread ||= 1;
+ if($self->number_of_args() > 1) {
+ $self->{'max_number_of_args'} =
+ ::ceil($self->number_of_args()/$Global::max_jobs_running);
+ $Global::JobQueue->{'commandlinequeue'}->{'max_number_of_args'} =
+ $self->{'max_number_of_args'};
+ $self->{'arg_queue'}->unget($self->pop_all());
+ while($self->number_of_args() < $self->{'max_number_of_args'}) {
+ $self->push($self->{'arg_queue'}->get());
+ }
+ }
+ $Global::JobQueue->flush_total_jobs();
+ }
+
+ if($opt::sqlmaster) {
+ # Insert the V1..Vn for this $seq in SQL table
+ # instead of generating one
+ $Global::sql->insert_records($self->seq(), $self->{'command'},
+ $self->{'arg_list_flat_orig'});
+ }
+ }
+}
+
+sub push($) {
+ # Add one or more records as arguments
+ # Returns: N/A
+ my $self = shift;
+ my $record = shift;
+ push @{$self->{'arg_list_flat_orig'}}, map { $_->orig() } @$record;
+ push @{$self->{'arg_list_flat'}}, @$record;
+ push @{$self->{'arg_list'}}, $record;
+ # Make @arg available for {= =}
+ *Arg::arg = $self->{'arg_list_flat_orig'};
+
+ my $quote_arg = ($Global::quote_replace and not $Global::quoting);
+ my $col;
+ for my $perlexpr (keys %{$self->{'replacecount'}}) {
+ if($perlexpr =~ /^(-?\d+)(?:\D.*|)$/) {
+ # Positional replacement string
+ # Deal with negative positional replacement string
+ $col = ($1 < 0) ? $1 : $1-1;
+ if(defined($record->[$col])) {
+ $self->{'len'}{$perlexpr} +=
+ length $record->[$col]->replace($perlexpr,$quote_arg,$self);
+ }
+ } else {
+ for my $arg (@$record) {
+ if(defined $arg) {
+ $self->{'len'}{$perlexpr} +=
+ length $arg->replace($perlexpr,$quote_arg,$self);
+ }
+ }
+ }
+ }
+}
+
+sub pop($) {
+ # Remove last argument
+ # Returns:
+ # the last record
+ my $self = shift;
+ my $record = pop @{$self->{'arg_list'}};
+ # pop off arguments from @$record
+ splice @{$self->{'arg_list_flat_orig'}}, -($#$record+1), $#$record+1;
+ splice @{$self->{'arg_list_flat'}}, -($#$record+1), $#$record+1;
+ my $quote_arg = ($Global::quote_replace and not $Global::quoting);
+ for my $perlexpr (keys %{$self->{'replacecount'}}) {
+ if($perlexpr =~ /^(\d+) /) {
+ # Positional
+ defined($record->[$1-1]) or next;
+ $self->{'len'}{$perlexpr} -=
+ length $record->[$1-1]->replace($perlexpr,$quote_arg,$self);
+ } else {
+ for my $arg (@$record) {
+ if(defined $arg) {
+ $self->{'len'}{$perlexpr} -=
+ length $arg->replace($perlexpr,$quote_arg,$self);
+ }
+ }
+ }
+ }
+ return $record;
+}
+
+sub pop_all($) {
+ # Remove all arguments and zeros the length of replacement perlexpr
+ # Returns:
+ # all records
+ my $self = shift;
+ my @popped = @{$self->{'arg_list'}};
+ for my $perlexpr (keys %{$self->{'replacecount'}}) {
+ $self->{'len'}{$perlexpr} = 0;
+ }
+ $self->{'arg_list'} = [];
+ $self->{'arg_list_flat_orig'} = [undef];
+ $self->{'arg_list_flat'} = [];
+ return @popped;
+}
+
+sub number_of_args($) {
+ # The number of records
+ # Returns:
+ # number of records
+ my $self = shift;
+ # This is really the number of records
+ return $#{$self->{'arg_list'}}+1;
+}
+
+sub number_of_recargs($) {
+ # The number of args in records
+ # Returns:
+ # number of args records
+ my $self = shift;
+ my $sum = 0;
+ my $nrec = scalar @{$self->{'arg_list'}};
+ if($nrec) {
+ $sum = $nrec * (scalar @{$self->{'arg_list'}[0]});
+ }
+ return $sum;
+}
+
+sub args_as_string($) {
+ # Returns:
+ # all unmodified arguments joined with ' ' (similar to {})
+ my $self = shift;
+ return (join " ", map { $_->orig() }
+ map { @$_ } @{$self->{'arg_list'}});
+}
+
+sub results_out($) {
+ sub max_file_name_length {
+ # Figure out the max length of a subdir
+ # TODO and the max total length
+ # Ext4 = 255,130816
+ # Uses:
+ # $Global::max_file_length is set
+ # Returns:
+ # $Global::max_file_length
+ my $testdir = shift;
+
+ my $upper = 100_000_000;
+ # Dir length of 8 chars is supported everywhere
+ my $len = 8;
+ my $dir = "d"x$len;
+ do {
+ rmdir($testdir."/".$dir);
+ $len *= 16;
+ $dir = "d"x$len;
+ } while ($len < $upper and mkdir $testdir."/".$dir);
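+	# Illustrative run on ext4 (255-char names): the probe grows
+	# 8 -> 128 -> 2048; mkdir succeeds at 128 and fails at 2048, so
+	# the search below runs between 128 and 2048 and stops within 5
+	# of the true limit (255).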
+ # Then search for the actual max length between $len/16 and $len
+ my $min = $len/16;
+ my $max = $len;
+ while($max-$min > 5) {
+ # If we are within 5 chars of the exact value:
+ # it is not worth the extra time to find the exact value
+ my $test = int(($min+$max)/2);
+ $dir = "d"x$test;
+ if(mkdir $testdir."/".$dir) {
+ rmdir($testdir."/".$dir);
+ $min = $test;
+ } else {
+ $max = $test;
+ }
+ }
+ $Global::max_file_length = $min;
+ return $min;
+ }
+
+ my $self = shift;
+ my $out = $self->replace_placeholders([$opt::results],0,0);
+ if($out eq $opt::results) {
+ # $opt::results simple string: Append args_as_dirname
+ my $args_as_dirname = $self->args_as_dirname(0);
+ # Output in: prefix/name1/val1/name2/val2/stdout
+ $out = $opt::results."/".$args_as_dirname;
+ if(-d $out or eval{ File::Path::mkpath($out); }) {
+ # OK
+ } else {
+ # mkpath failed: Argument too long or not quoted
+ # Set $Global::max_file_length, which will keep the individual
+ # dir names shorter than the max length
+ max_file_name_length($opt::results);
+ # Quote dirnames with +
+ $args_as_dirname = $self->args_as_dirname(1);
+ # prefix/name1/val1/name2/val2/
+ $out = $opt::results."/".$args_as_dirname;
+ File::Path::mkpath($out);
+ }
+ $out .="/";
+ } else {
+ if($out =~ m:/$:s) {
+ # / = dir
+ if(-d $out or eval{ File::Path::mkpath($out); }) {
+ # OK
+ } else {
+ ::error(255,"Cannot make dir '$out'.");
+ }
+ } else {
+ $out =~ m:(.*)/:s;
+ File::Path::mkpath($1);
+ }
+ }
+ return $out;
+}
+
+{
+ my %map;
+ my %stringmap;
+ my $sep;
+
+ # test: '' . .. a. a.. + ++ 0..255 on fat12 ext4
+ sub args_as_dirname($) {
+ # Returns:
+ # all arguments joined with '/' (similar to {})
+ # Chars that are not safe on all file systems are quoted.
+ sub init() {
+ # ext4: / \t \n \0 \\ \r
+ # fat: 0..31 " * / : < > ? \ | Maybe also: # [ ] ; = ,
+ # exfat: 128..255
+ # Other FS: , [ ] { } ( ) ! ; " ' * ? < > |
+ #
+ # Quote these as:
+ # + = ++
+ # \0 = +0
+ # \t = +t
+ # \\ = +b (backslash)
+ # \n = +n
+ # \r = +r
+ # / = +z (zlash)
+ # ? = +y (whY?)
+ # " = +d (double quote)
+ # ' = +q (quote)
+ # * = +a (asterisk)
+ # < = +l (less than)
+ # > = +g (greater than)
+ # : = +k (kolon)
+ # ! = +x (eXclamation)
+ # | = +p (pipe)
+ # # = +h (hash)
+ # ; = +s (semicolon)
+ # = = +e (equal)
+ # , = +c (comma)
+ # 1..32 128..255 = +XX (hex value)
+ # [ ] = +e +f
+ # ( ) = +i +j
+ # { } = +v +w
+ # Quote '' as +m (eMpty)
+ # Quote . as +_
+ # Quote .. as +__
+ # (Unused: ou)
+ %map = qw(
+ + ++
+ \0 +0
+ \t +t
+ \\ +b
+ \n +n
+ \r +r
+ / +z
+ ? +y
+ " +d
+ ' +q
+ * +a
+ < +l
+ > +g
+ : +k
+ ! +x
+ | +p
+ # +h
+ ; +s
+ = +e
+ , +c
+ [ +e
+ ( +i
+ { +v
+ ] +f
+ ) +j
+ } +w
+ );
+ # 1..32 128..255 = +XX (hex value)
+ map { $map{sprintf "%c",$_} = sprintf "+%02x",$_ } 1..32, 128..255;
+ # Default value = itself
+ map { $map{sprintf "%c",$_} ||= sprintf "%c",$_ } 0..255;
+ # Quote '' as +m (eMpty)
+ $stringmap{""} = "+m";
+ # Quote . as +_
+ $stringmap{"."} = "+_";
+ # Quote .. as +__
+ $stringmap{".."} = "+__";
+ # Set dir separator
+ eval 'use File::Spec; $sep = File::Spec->catfile("", "");';
+ $sep ||= '/';
+ }
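+	# Illustrative example: the argument "a?b.tar" becomes the dirname
+	# component "a+yb.tar" when fully quoted; "" becomes "+m",
+	# "." becomes "+_" and ".." becomes "+__".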
+ # If $Global::max_file_length: Keep subdirs < $Global::max_file_length
+ my $self = shift;
+ my $quote = shift;
+ my @res = ();
+ if(not $sep) { init(); }
+
+ for my $rec_ref (@{$self->{'arg_list'}}) {
+ # If headers are used, sort by them.
+ # Otherwise keep the order from the command line.
+ my @header_indexes_sorted = header_indexes_sorted($#$rec_ref+1);
+ for my $n (@header_indexes_sorted) {
+ CORE::push(@res,
+ $Global::input_source_header{$n},
+ $quote ?
+ (
+ grep { $_ ne "\0noarg" } map {
+ my $s = $_;
+				   # Quote every character using %map
+ $s =~ s/(.)/$map{$1}/gs;
+ if($Global::max_file_length) {
+ # Keep each subdir shorter than the longest
+ # allowed file name
+ $s = substr($s,0,$Global::max_file_length);
+ }
+ $s; }
+ $rec_ref->[$n-1]->orig()
+ ) :
+ (
+ grep { $_ ne "\0noarg" } map {
+ my $s = $_;
+ # Quote / as +z and + as ++
+ $s =~ s/($sep|\+)/$map{$1}/gos;
+ if($Global::max_file_length) {
+ # Keep each subdir shorter than the longest
+ # allowed file name
+ $s = substr($s,0,$Global::max_file_length);
+ }
+ $s; }
+ $rec_ref->[$n-1]->orig()
+ )
+ );
+ }
+ }
+ return join $sep, map { $stringmap{$_} || $_ } @res;
+ }
+}
+
+sub header_indexes_sorted($) {
+ # Sort headers first by number then by name.
+ # E.g.: 1a 1b 11a 11b
+ # Returns:
+ # Indexes of %Global::input_source_header sorted
+ my $max_col = shift;
+
+ no warnings 'numeric';
+ for my $col (1 .. $max_col) {
+ # Make sure the header is defined. If it is not: use column number
+ if(not defined $Global::input_source_header{$col}) {
+ $Global::input_source_header{$col} = $col;
+ }
+ }
+ my @header_indexes_sorted = sort {
+ # Sort headers numerically then asciibetically
+ $Global::input_source_header{$a} <=> $Global::input_source_header{$b}
+ or
+ $Global::input_source_header{$a} cmp $Global::input_source_header{$b}
+ } 1 .. $max_col;
+ return @header_indexes_sorted;
+}
+
+sub len($) {
+ # Uses:
+ # @opt::shellquote
+ # The length of the command line with args substituted
+ my $self = shift;
+ my $len = 0;
+ # Add length of the original command with no args
+ # Length of command w/ all replacement args removed
+ $len += $self->{'len'}{'noncontext'} + @{$self->{'command'}} -1;
+ ::debug("length", "noncontext + command: $len\n");
+ # MacOS has an overhead of 8 bytes per argument
+ my $darwin = ($^O eq "darwin") ? 8 : 0;
+ my $recargs = $self->number_of_recargs();
+ if($self->{'context_replace'}) {
+ # Context is duplicated for each arg
+ $len += $recargs * $self->{'len'}{'context'};
+ for my $replstring (keys %{$self->{'replacecount'}}) {
+	    # If the replacement string occurs more than once: multiply its length
+ $len += $self->{'len'}{$replstring} *
+ $self->{'replacecount'}{$replstring};
+ ::debug("length", $replstring, " ", $self->{'len'}{$replstring}, "*",
+ $self->{'replacecount'}{$replstring}, "\n");
+ }
+ # echo 11 22 33 44 55 66 77 88 99 1010
+ # echo 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10
+ # 5 + ctxgrp*arg
+ ::debug("length", "Ctxgrp: ", $self->{'len'}{'contextgroups'},
+ " Groups: ", $self->{'len'}{'noncontextgroups'}, "\n");
+ # Add space between context groups
+ $len += ($recargs-1) * ($self->{'len'}{'contextgroups'});
+ if($darwin) {
+ $len += $recargs * $self->{'len'}{'contextgroups'} * $darwin;
+ }
+ } else {
+ # Each replacement string may occur several times
+ # Add the length for each time
+ $len += 1*$self->{'len'}{'context'};
+ ::debug("length", "context+noncontext + command: $len\n");
+ for my $replstring (keys %{$self->{'replacecount'}}) {
+ # (space between recargs + length of replacement)
+ # * number this replacement is used
+ $len += ($recargs -1 + $self->{'len'}{$replstring}) *
+ $self->{'replacecount'}{$replstring};
+ if($darwin) {
+ $len += ($recargs * $self->{'replacecount'}{$replstring}
+ * $darwin);
+ }
+ }
+ }
+ if(defined $Global::parallel_env) {
+ # If we are using --env, add the prefix for that, too.
+ $len += length $Global::parallel_env;
+ }
+ if($Global::quoting) {
+ # Pessimistic length if -q is set
+ # Worse than worst case: ' => "'" + " => '"'
+ # TODO can we count the number of expanding chars?
+ # and count them in arguments, too?
+ $len *= 3;
+ }
+ if(@opt::shellquote) {
+ # Pessimistic length if --shellquote is set
+ # Worse than worst case: ' => "'"
+ for(@opt::shellquote) {
+ $len *= 3;
+ }
+ $len *= 5;
+ }
+ if(@opt::sshlogin) {
+ # Pessimistic length if remote
+ # Worst case is BASE64 encoding 3 bytes -> 4 bytes
+ $len = int($len*4/3);
+ }
+ return $len;
+}
+
+sub replaced($) {
+ # Uses:
+ # $Global::quote_replace
+ # $Global::quoting
+ # Returns:
+ # $replaced = command with place holders replaced and prepended
+ my $self = shift;
+ if(not defined $self->{'replaced'}) {
+ # Don't quote arguments if the input is the full command line
+ my $quote_arg = ($Global::quote_replace and not $Global::quoting);
+ # or if ($opt::cat or $opt::pipe) as they use $PARALLEL_TMP
+ $quote_arg = ($opt::cat || $opt::fifo) ? 0 : $quote_arg;
+ $self->{'replaced'} = $self->
+ replace_placeholders($self->{'command'},$Global::quoting,
+ $quote_arg);
+ my $len = length $self->{'replaced'};
+ if ($len != $self->len()) {
+ ::debug("length", $len, " != ", $self->len(),
+ " ", $self->{'replaced'}, "\n");
+ } else {
+ ::debug("length", $len, " == ", $self->len(),
+ " ", $self->{'replaced'}, "\n");
+ }
+ }
+ return $self->{'replaced'};
+}
+
+sub replace_placeholders($$$$) {
+ # Replace foo{}bar with fooargbar
+ # Input:
+ # $targetref = command as shell words
+ # $quote = should everything be quoted?
+ # $quote_arg = should replaced arguments be quoted?
+ # Uses:
+ # @Arg::arg = arguments as strings to be use in {= =}
+ # Returns:
+ # @target with placeholders replaced
+ my $self = shift;
+ my $targetref = shift;
+ my $quote = shift;
+ my $quote_arg = shift;
+ my %replace;
+
+ # Token description:
+ # \0spc = unquoted space
+ # \0end = last token element
+ # \0ign = dummy token to be ignored
+ # \257<...\257> = replacement expression
+ # " " = quoted space, that splits -X group
+ # text = normal text - possibly part of -X group
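+    # Illustrative example: the template ("echo", "{}.gz") has had {}
+    # rewritten to \257<\257> by now, and tokenizes to:
+    #   "echo", "\0spc", "\257<\257>", ".gz"
+    # With the single argument "foo" the context group "\257<\257>.gz"
+    # is replaced once, yielding "echo foo.gz".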
+ my $spacer = 0;
+ my @tokens = grep { length $_ > 0 } map {
+ if(/^\257<|^ $/) {
+ # \257<...\257> or space
+ $_
+ } else {
+ # Split each space/tab into a token
+ split /(?=\s)|(?<=\s)/
+ }
+ }
+ # Split \257< ... \257> into own token
+ map { split /(?=\257<)|(?<=\257>)/ }
+ # Insert "\0spc" between every element
+ # This space should never be quoted
+ map { $spacer++ ? ("\0spc",$_) : $_ }
+ map { $_ eq "" ? "\0empty" : $_ }
+ @$targetref;
+
+ if(not @tokens) {
+ # @tokens is empty: Return empty array
+ return @tokens;
+ }
+ ::debug("replace", "Tokens ".join":",@tokens,"\n");
+ # Make it possible to use $arg[2] in {= =}
+ *Arg::arg = $self->{'arg_list_flat_orig'};
+ # Flat list:
+ # $self->{'arg_list'} = [ [Arg11, Arg12], [Arg21, Arg22], [Arg31, Arg32] ]
+ # $self->{'arg_list_flat'} = [ Arg11, Arg12, Arg21, Arg22, Arg31, Arg32 ]
+ if(not @{$self->{'arg_list_flat'}}) {
+ @{$self->{'arg_list_flat'}} = Arg->new("");
+ }
+ my $argref = $self->{'arg_list_flat'};
+ # Number of arguments - used for positional arguments
+ my $n = $#$argref+1;
+
+ # $self is actually a CommandLine-object,
+ # but it looks nice to be able to say {= $job->slot() =}
+ my $job = $self;
+ # @replaced = tokens with \257< \257> replaced
+ my @replaced;
+ if($self->{'context_replace'}) {
+ my @ctxgroup;
+ for my $t (@tokens,"\0end") {
+ # \0end = last token was end of tokens.
+ if($t eq "\t" or $t eq " " or $t eq "\0end" or $t eq "\0spc") {
+ # Context group complete: Replace in it
+ if(grep { /^\257</ } @ctxgroup) {
+ # Context group contains a replacement string:
+ # Copy once per arg
+ my $space = "\0ign";
+ for my $arg (@$argref) {
+ my $normal_replace;
+ # Push output
+ # Put unquoted space before each context group
+ # except the first
+ CORE::push @replaced, $space, map {
+ $a = $_;
+ if($a =~
+ s{\257<(-?\d+)?(.*)\257>}
+ {
+ if($1) {
+ # Positional replace
+ # Find the relevant arg and replace it
+ ($argref->[$1 > 0 ? $1-1 : $n+$1] ? # If defined: replace
+ $argref->[$1 > 0 ? $1-1 : $n+$1]->
+ replace($2,$quote_arg,$self)
+ : "");
+ } else {
+ # Normal replace
+ $normal_replace ||= 1;
+ ($arg ? $arg->replace($2,$quote_arg,$self) : "");
+ }
+ }sgxe) {
+ # Token is \257<..\257>
+ } else {
+ if($Global::escape_string_present) {
+ # Command line contains \257:
+ # Unescape it \257\256 => \257
+ $a =~ s/\257\256/\257/g;
+ }
+ }
+ $a
+ } @ctxgroup;
+ $normal_replace or last;
+ $space = "\0spc";
+ }
+ } else {
+		# Context group has no replacement string: Copy it once
+ CORE::push @replaced, map {
+ $Global::escape_string_present and s/\257\256/\257/g; $_;
+ } @ctxgroup;
+ }
+ # New context group
+ @ctxgroup=();
+ }
+ if($t eq "\0spc" or $t eq " ") {
+ CORE::push @replaced,$t;
+ } else {
+ CORE::push @ctxgroup,$t;
+ }
+ }
+ } else {
+ # @group = @token
+ # Replace in group
+ # Push output
+ # repquote = no if {} first on line, no if $quote, yes otherwise
+ for my $t (@tokens) {
+ if($t =~ /^\257</) {
+ my $space = "\0ign";
+ for my $arg (@$argref) {
+ my $normal_replace;
+ $a = $t;
+ $a =~
+ s{\257<(-?\d+)?(.*)\257>}
+ {
+ if($1) {
+ # Positional replace
+ # Find the relevant arg and replace it
+ ($argref->[$1 > 0 ? $1-1 : $n+$1] ?
+ # If defined: replace
+ $argref->[$1 > 0 ? $1-1 : $n+$1]->
+ replace($2,$quote_arg,$self)
+ : "");
+ } else {
+ # Normal replace
+ $normal_replace ||= 1;
+ ($arg ? $arg->replace($2,$quote_arg,$self) : "");
+ }
+ }sgxe;
+ CORE::push @replaced, $space, $a;
+ $normal_replace or last;
+ $space = "\0spc";
+ }
+ } else {
+ # No replacement
+ CORE::push @replaced, map {
+ $Global::escape_string_present and s/\257\256/\257/g; $_;
+ } $t;
+ }
+ }
+ }
+ *Arg::arg = [];
+ ::debug("replace","Replaced: ".join":",@replaced,"\n");
+
+ # Put tokens into groups that may be quoted.
+ my @quotegroup;
+ my @quoted;
+ for (map { $_ eq "\0empty" ? "" : $_ }
+ grep { $_ ne "\0ign" and $_ ne "\0noarg" and $_ ne "'\0noarg'" }
+ @replaced, "\0end") {
+ if($_ eq "\0spc" or $_ eq "\0end") {
+ # \0spc splits quotable groups
+ if($quote) {
+ if(@quotegroup) {
+		    CORE::push @quoted, ::Q(join"",@quotegroup);
+ }
+ } else {
+ CORE::push @quoted, join"",@quotegroup;
+ }
+ @quotegroup = ();
+ } else {
+ CORE::push @quotegroup, $_;
+ }
+ }
+ ::debug("replace","Quoted: ".join":",@quoted,"\n");
+ return wantarray ? @quoted : "@quoted";
+}
+
+sub skip($) {
+ # Skip this job
+ my $self = shift;
+ $self->{'skip'} = 1;
+}
+
+
+package CommandLineQueue;
+
+sub new($) {
+ my $class = shift;
+ my $commandref = shift;
+ my $read_from = shift;
+ my $context_replace = shift || 0;
+ my $max_number_of_args = shift;
+ my $transfer_files = shift;
+ my $return_files = shift;
+ my $template_names = shift;
+ my $template_contents = shift;
+ my @unget = ();
+ my $posrpl;
+ my ($replacecount_ref, $len_ref);
+ my @command = @$commandref;
+ my $seq = 1;
+ # Replace replacement strings with {= perl expr =}
+ # '{=' 'perlexpr' '=}' => '{= perlexpr =}'
+ @command = merge_rpl_parts(@command);
+
+ # Protect matching inside {= perl expr =}
+ # by replacing {= and =} with \257< and \257>
+ # in options that can contain replacement strings:
+ # @command, --transferfile, --return,
+ # --tagstring, --workdir, --results
+ for(@command, @$transfer_files, @$return_files,
+ @$template_names, @$template_contents,
+ $opt::tagstring, $opt::workdir, $opt::results, $opt::retries,
+ @opt::filter) {
+ # Skip if undefined
+ defined($_) or next;
+ # Escape \257 => \257\256
+ $Global::escape_string_present += s/\257/\257\256/g;
+ # Needs to match rightmost left parens (Perl defaults to leftmost)
+ # to deal with: {={==} and {={==}=}
+ # Replace {= -> \257< and =} -> \257>
+ #
+ # Complex way to do:
+ # s/{=(.*)=}/\257<$1\257>/g
+ # which would not work
+ s[\Q$Global::parensleft\E # Match {=
+ # Match . unless the next string is {= or =}
+ # needed to force matching the shortest {= =}
+ ((?:(?! \Q$Global::parensleft\E|\Q$Global::parensright\E ).)*?)
+ \Q$Global::parensright\E ] # Match =}
+ {\257<$1\257>}gxs;
+ for my $rpl (sort { length $b <=> length $a } keys %Global::rpl) {
+ # Replace long --rpl's before short ones, as a short may be a
+ # substring of a long:
+ # --rpl '% s/a/b/' --rpl '%% s/b/a/'
+ #
+ # Replace the shorthand string (--rpl)
+ # with the {= perl expr =}
+ #
+ # Avoid searching for shorthand strings inside existing {= perl expr =}
+ #
+ # Replace $$1 in {= perl expr =} with groupings in shorthand string
+ #
+ # --rpl '{/(\.\S+)/(\.\S+)} s/$$1/$$2/g;'
+ # echo {/.tar/.gz} ::: UU.tar.gz
+ my ($prefix,$grp_regexp,$postfix) =
+ $rpl =~ /^( [^(]* ) # Prefix - e.g. {%%
+ ( \(.*\) )? # Group capture regexp - e.g (.*)
+ ( [^)]* )$ # Postfix - e.g }
+ /xs;
+ $grp_regexp ||= '';
+ my $rplval = $Global::rpl{$rpl};
+ while(s{( (?: ^|\257> ) (?: [^\257]*|[\257][^<>] )*? )
+ # Don't replace after \257 unless \257>
+ \Q$prefix\E $grp_regexp \Q$postfix\E}
+ {
+ # The start remains the same
+ my $unchanged = $1;
+ # Dummy entry to start at 1.
+ my @grp = (1);
+ # $2 = first ()-group in $grp_regexp
+ # Put $2 in $grp[1], Put $3 in $grp[2]
+ # so first ()-group in $grp_regexp is $grp[1];
+ for(my $i = 2; defined $grp[$#grp]; $i++) {
+ push @grp, eval '$'.$i;
+ }
+ my $rv = $rplval;
+ # replace $$1 with $_pAr_gRp1, $$2 with $_pAr_gRp2
+ # in the code to be executed
+ $rv =~ s/\$\$ (\d+)/\$_pAr_gRp$1/gx;
+ # prepend with $_pAr_gRp1 = perlquote($1),
+ my $set_args = "";
+ for(my $i = 1;defined $grp[$i]; $i++) {
+ $set_args .= "\$_pAr_gRp$i = \"" .
+ ::perl_quote_scalar($grp[$i]) . "\";";
+ }
+ $unchanged . "\257<" . $set_args . $rv . "\257>"
+ }gxes) {
+ }
+ # Do the same for the positional replacement strings
+ $posrpl = $rpl;
+ if($posrpl =~ s/^\{//) {
+ # Only do this if the shorthand start with {
+ $prefix=~s/^\{//;
+ # Don't replace after \257 unless \257>
+ while(s{( (?: ^|\257> ) (?: [^\257]*|[\257][^<>] )*? )
+ \{(-?\d+) \s* \Q$prefix\E $grp_regexp \Q$postfix\E}
+ {
+ # The start remains the same
+ my $unchanged = $1;
+ my $position = $2;
+ # Dummy entry to start at 1.
+ my @grp = (1);
+ # $3 = first ()-group in $grp_regexp
+ # Put $3 in $grp[1], Put $4 in $grp[2]
+ # so first ()-group in $grp_regexp is $grp[1];
+ for(my $i = 3; defined $grp[$#grp]; $i++) {
+ push @grp, eval '$'.$i;
+ }
+ my $rv = $rplval;
+ # replace $$1 with $_pAr_gRp1, $$2 with $_pAr_gRp2
+ # in the code to be executed
+ $rv =~ s/\$\$ (\d+)/\$_pAr_gRp$1/gx;
+ # prepend with $_pAr_gRp1 = perlquote($1),
+ my $set_args = "";
+ for(my $i = 1;defined $grp[$i]; $i++) {
+ $set_args .= "\$_pAr_gRp$i = \"" .
+ ::perl_quote_scalar($grp[$i]) . "\";";
+ }
+ $unchanged . "\257<" . $position . $set_args . $rv . "\257>"
+ }gxes) {
+ }
+ }
+ }
+ }
+ # Add {} if no replacement strings in @command
+ ($replacecount_ref, $len_ref, @command) =
+ replacement_counts_and_lengths($transfer_files, $return_files,
+ $template_names, $template_contents,
+ @command);
+ if("@command" =~ /^[^ \t\n=]*\257</) {
+ # Replacement string is (part of) the command (and not just
+ # argument or variable definition V1={})
+ # E.g. parallel {}, parallel my_{= s/_//=}, parallel {2}
+	# Do not quote (otherwise it will fail if the input contains spaces)
+ $Global::quote_replace = 0;
+ }
+
+ if($opt::sqlmaster and $Global::sql->append()) {
+ $seq = $Global::sql->max_seq() + 1;
+ }
+
+ return bless {
+ ('unget' => \@unget,
+ 'command' => \@command,
+ 'replacecount' => $replacecount_ref,
+ 'arg_queue' => RecordQueue->new($read_from,$opt::colsep),
+ 'context_replace' => $context_replace,
+ 'len' => $len_ref,
+ 'max_number_of_args' => $max_number_of_args,
+ 'size' => undef,
+ 'transfer_files' => $transfer_files,
+ 'return_files' => $return_files,
+ 'template_names' => $template_names,
+ 'template_contents' => $template_contents,
+ 'seq' => $seq,
+ )
+ }, ref($class) || $class;
+}
+
+sub merge_rpl_parts($) {
+ # '{=' 'perlexpr' '=}' => '{= perlexpr =}'
+ # Input:
+ # @in = the @command as given by the user
+ # Uses:
+ # $Global::parensleft
+ # $Global::parensright
+ # Returns:
+ # @command with parts merged to keep {= and =} as one
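+    # Illustrative example: the shell-split command
+    #	('echo', '{=', 's/a/b/', '=}')
+    # is merged back into ('echo', '{= s/a/b/ =}').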
+ my @in = @_;
+ my @out;
+ my $l = quotemeta($Global::parensleft);
+ my $r = quotemeta($Global::parensright);
+
+ while(@in) {
+ my $s = shift @in;
+ $_ = $s;
+ # Remove matching (right most) parens
+ while(s/(.*)$l.*?$r/$1/os) {}
+ if(/$l/o) {
+ # Missing right parens
+ while(@in) {
+ $s .= " ".shift @in;
+ $_ = $s;
+ while(s/(.*)$l.*?$r/$1/os) {}
+ if(not /$l/o) {
+ last;
+ }
+ }
+ }
+ push @out, $s;
+ }
+ return @out;
+}
+
+sub replacement_counts_and_lengths($$@) {
+ # Count the number of different replacement strings.
+ # Find the lengths of context for context groups and non-context
+ # groups.
+ # If no {} found in @command: add it to @command
+ #
+ # Input:
+ # \@transfer_files = array of filenames to transfer
+ # \@return_files = array of filenames to return
+ # \@template_names = array of names to copy to
+ # \@template_contents = array of contents to write
+ # @command = command template
+ # Output:
+ # \%replacecount, \%len, @command
+ my $transfer_files = shift;
+ my $return_files = shift;
+ my $template_names = shift;
+ my $template_contents = shift;
+ my @command = @_;
+ my (%replacecount,%len);
+ my $sum = 0;
+ while($sum == 0) {
+ # Count how many times each replacement string is used
+ my @cmd = @command;
+ my $contextlen = 0;
+ my $noncontextlen = 0;
+ my $contextgroups = 0;
+ for my $c (@cmd) {
+ while($c =~ s/ \257<( (?: [^\257]*|[\257][^<>] )*?)\257> /\000/xs) {
+ # %replacecount = { "perlexpr" => number of times seen }
+ # e.g { "s/a/b/" => 2 }
+ $replacecount{$1}++;
+ $sum++;
+ }
+ # Measure the length of the context around the {= perl expr =}
+ # Use that {=...=} has been replaced with \000 above
+ # So there is no need to deal with \257<
+ while($c =~ s/ (\S*\000\S*) //xs) {
+ my $w = $1;
+ $w =~ tr/\000//d; # Remove all \000's
+ $contextlen += length($w);
+ $contextgroups++;
+ }
+ # All {= perl expr =} have been removed: The rest is non-context
+ $noncontextlen += length $c;
+ }
+ for(@$transfer_files, @$return_files,
+ @$template_names, @$template_contents,
+ @opt::filter,
+ $opt::tagstring, $opt::workdir, $opt::results, $opt::retries) {
+ # Options that can contain replacement strings
+ defined($_) or next;
+ my $t = $_;
+ while($t =~ s/ \257<( (?: [^\257]*|[\257][^<>] )* )\257> //xs) {
+ # %replacecount = { "perlexpr" => number of times seen }
+ # e.g { "$_++" => 2 }
+ # But for tagstring we just need to mark it as seen
+ $replacecount{$1} ||= 1;
+ }
+ }
+ if($opt::bar) {
+ # If the command does not contain {} force it to be computed
+ # as it is being used by --bar
+ $replacecount{""} ||= 1;
+ }
+
+ $len{'context'} = 0+$contextlen;
+ $len{'noncontext'} = $noncontextlen;
+ $len{'contextgroups'} = $contextgroups;
+ $len{'noncontextgroups'} = @cmd-$contextgroups;
+ ::debug("length", "@command Context: ", $len{'context'},
+ " Non: ", $len{'noncontext'}, " Ctxgrp: ", $len{'contextgroups'},
+ " NonCtxGrp: ", $len{'noncontextgroups'}, "\n");
+ if($sum == 0) {
+ if(not @command) {
+ # Default command = {}
+ @command = ("\257<\257>");
+ } elsif(($opt::pipe or $opt::pipepart)
+ and not $opt::fifo and not $opt::cat) {
+	    # With --pipe / --pipepart you can have no replacement
+ last;
+ } else {
+ # Append {} to the command if there are no {...}'s and no {=...=}
+ push @command, ("\257<\257>");
+ }
+ }
+ }
+ return(\%replacecount,\%len,@command);
+}
+
+sub get($) {
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ my $cmd_line = shift @{$self->{'unget'}};
+ return ($cmd_line);
+ } else {
+ if($opt::sqlworker) {
+ # Get the sequence number from the SQL table
+ $self->set_seq($SQL::next_seq);
+ # Get the command from the SQL table
+ $self->{'command'} = $SQL::command_ref;
+ my @command;
+ # Recompute replace counts based on the read command
+ ($self->{'replacecount'},
+ $self->{'len'}, @command) =
+ replacement_counts_and_lengths($self->{'transfer_files'},
+ $self->{'return_files'},
+						$self->{'template_names'},
+ $self->{'template_contents'},
+ @$SQL::command_ref);
+ if("@command" =~ /^[^ \t\n=]*\257</) {
+ # Replacement string is (part of) the command (and not just
+ # argument or variable definition V1={})
+ # E.g. parallel {}, parallel my_{= s/_//=}, parallel {2}
+		# Do not quote (otherwise it will fail if the input contains spaces)
+ $Global::quote_replace = 0;
+ }
+ }
+
+ my $cmd_line = CommandLine->new($self->seq(),
+ $self->{'command'},
+ $self->{'arg_queue'},
+ $self->{'context_replace'},
+ $self->{'max_number_of_args'},
+ $self->{'transfer_files'},
+ $self->{'return_files'},
+ $self->{'template_names'},
+ $self->{'template_contents'},
+ $self->{'replacecount'},
+ $self->{'len'},
+ );
+ $cmd_line->populate();
+ ::debug("run","cmd_line->number_of_args ",
+ $cmd_line->number_of_args(), "\n");
+ if(not $Global::no_more_input and ($opt::pipe or $opt::pipepart)) {
+ if($cmd_line->replaced() eq "") {
+ # Empty command - pipe requires a command
+ ::error(255,"--pipe/--pipepart must have a command to pipe ".
+ "into (e.g. 'cat').");
+ }
+ } elsif($cmd_line->number_of_args() == 0) {
+ # We did not get more args - maybe at EOF string?
+ return undef;
+ }
+ $self->set_seq($self->seq()+1);
+ return $cmd_line;
+ }
+}
+
+sub unget($) {
+ my $self = shift;
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}}) &&
+ $self->{'arg_queue'}->empty();
+ ::debug("run", "CommandLineQueue->empty $empty");
+ return $empty;
+}
+
+sub seq($) {
+ my $self = shift;
+ return $self->{'seq'};
+}
+
+sub set_seq($$) {
+ my $self = shift;
+ $self->{'seq'} = shift;
+}
+
+sub quote_args($) {
+ my $self = shift;
+    # If there is no command: emulate |bash
+ return $self->{'command'};
+}
+
+
+package Limits::Command;
+
+# Maximal command line length (for -m and -X)
+sub max_length($) {
+ # Find the max_length of a command line and cache it
+ # Returns:
+ # number of chars on the longest command line allowed
+ if(not $Limits::Command::line_max_len) {
+ # Disk cache of max command line length
+ my $len_cache = $Global::cache_dir . "/tmp/sshlogin/" . ::hostname() .
+ "/linelen";
+ my $cached_limit;
+ local $/ = undef;
+ if(open(my $fh, "<", $len_cache)) {
+ $cached_limit = <$fh>;
+ $cached_limit || ::warning("Invalid content in $len_cache");
+ close $fh;
+ }
+ if(not $cached_limit) {
+ $cached_limit = real_max_length();
+ # If $HOME is write protected: Do not fail
+ my $dir = ::dirname($len_cache);
+ -d $dir or eval { File::Path::mkpath($dir); };
+ open(my $fh, ">", $len_cache.$$);
+ print $fh $cached_limit;
+ close $fh;
+	    rename($len_cache.$$, $len_cache) || ::die_bug("rename cache file");
+ }
+ $Limits::Command::line_max_len = tmux_length($cached_limit);
+ }
+ return int($Limits::Command::line_max_len);
+}
+
+sub real_max_length() {
+ # Find the max_length of a command line
+ # Returns:
+ # The maximal command line length with 1 byte arguments
+ # return find_max(" c");
+ return find_max("c");
+}
+
+sub find_max($) {
+ my $string = shift;
+ # This is slow on Cygwin, so give Cygwin users a warning
+ if($^O eq "cygwin" or $^O eq "msys") {
+ ::warning("Finding the maximal command line length. ".
+ "This may take up to 1 minute.")
+ }
+ # Use an upper bound of 100 MB if the shell allows for infinite
+ # long lengths
+ my $upper = 100_000_000;
+ my $lower;
+ # 1000 is supported everywhere, so the search can start anywhere 1..999
+ # 324 makes the search much faster on Cygwin, so let us use that
+ my $len = 324;
+ do {
+ if($len > $upper) { return $len };
+ $lower = $len;
+ $len *= 4;
+ ::debug("init", "Maxlen: $lower<$len<$upper(".($upper-$lower)."): ");
+ } while (is_acceptable_command_line_length($len,$string));
+ # Then search for the actual max length between
+ # last successful length ($len/16) and upper bound
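+    # Illustrative example: with a 2 MB limit the probes test 1296,
+    # 5184, ..., 1327104 (ok) and 5308416 (fails), so binary_find_max()
+    # searches between 331776 and 5308416.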
+ return binary_find_max(int($len/16),$len,$string);
+}
+
+
+# Prototype forwarding
+sub binary_find_max($$$);
+sub binary_find_max($$$) {
+ # Given a lower and upper bound find the max (length or args) of a
+ # command line
+ # Returns:
+ # number of chars on the longest command line allowed
+ my ($lower, $upper, $string) = (@_);
+ if($lower == $upper
+ or $lower == $upper-1
+ or $lower/$upper > 0.99) {
+ # $lower is +- 1 or within 1%: Don't search more
+ return $lower;
+ }
+ # Unevenly split binary search which is faster for Microsoft Windows.
+ # Guessing too high is cheap. Guessing too low is expensive.
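+    # Illustrative example: with $lower = 1000, $upper = 5000 and
+    # $split = 0.93 (Cygwin) the first probe is at 4720 instead of 3000.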
+ my $split = ($^O eq "cygwin" or $^O eq "msys") ? 0.93 : 0.5;
+ my $middle = int (($upper-$lower)*$split + $lower);
+ ::debug("init", "Maxlen: $lower<$middle<$upper(".($upper-$lower)."): ");
+ if (is_acceptable_command_line_length($middle,$string)) {
+ return binary_find_max($middle,$upper,$string);
+ } else {
+ return binary_find_max($lower,$middle,$string);
+ }
+}
+
+{
+ my $prg;
+
+ sub is_acceptable_command_line_length($$) {
+ # Test if a command line of this length can run
+ # in the current environment
+ # If the string is " x" it tests how many args are allowed
+ # Returns:
+ # 0 if the command line length is too long
+ # 1 otherwise
+ my $len = shift;
+ my $string = shift;
+ if($Global::parallel_env) {
+ $len += length $Global::parallel_env;
+ }
+ # Force using non-built-in command
+ $prg ||= ::which("echo") || ::error(255,'echo must be in $PATH');
+ my $l = length ::qqx("$prg ".${string}x(($len-1-length $prg)/length $string));
+ if($l < $len/2) {
+ # The command returned OK but did not output $len chars,
+ # so treat it as a failure (CentOS 3 does this craziness)
+ return 0
+ }
+ ::debug("init", "$len=$?\n");
+ return not $?;
+ }
+}
+
+sub tmux_length($) {
+ # If $opt::tmux set, find the limit for tmux
+ # tmux 1.8 has a 2kB limit
+ # tmux 1.9 has a 16kB limit
+ # tmux 2.0 has a 16kB limit
+ # tmux 2.1 has a 16kB limit
+ # tmux 2.2 has a 16kB limit
+ # Input:
+ # $len = maximal command line length
+ # Returns:
+ # $tmux_len = maximal length runnable in tmux
+ local $/ = "\n";
+ my $len = shift;
+ if($opt::tmux) {
+ $ENV{'PARALLEL_TMUX'} ||= "tmux";
+ if(not ::which($ENV{'PARALLEL_TMUX'})) {
+ ::error(255,$ENV{'PARALLEL_TMUX'}." not found in \$PATH.");
+ }
+ my @out;
+ for my $l (1, 2020, 16320, 30000, $len) {
+ my $tmpfile = ::tmpname("tms");
+ my $qtmp = ::Q($tmpfile);
+ my $tmuxcmd = $ENV{'PARALLEL_TMUX'}.
+ " -S $qtmp new-session -d -n echo $l".
+ ("t"x$l). " && echo $l; rm -f $qtmp";
+ push @out, ::qqx($tmuxcmd);
+ ::rm($tmpfile);
+ }
+ ::debug("tmux","tmux-out ",@out);
+ chomp @out;
+ # The arguments are given 3 times on the command line
+ # and the tmux wrapping is around 30 chars
+ # (29 for tmux 1.9, 33 for tmux 1.8)
+ my $tmux_len = ::max(@out);
+ $len = ::min($len,int($tmux_len/4-33));
+ ::debug("tmux","tmux-length ",$len);
+ }
+ return $len;
+}
+
+
+package RecordQueue;
+
+sub new($) {
+ my $class = shift;
+ my $fhs = shift;
+ my $colsep = shift;
+ my @unget = ();
+ my $arg_sub_queue;
+ if($opt::sqlworker) {
+ # Open SQL table
+ $arg_sub_queue = SQLRecordQueue->new();
+ } elsif(defined $colsep) {
+ # Open one file with colsep or CSV
+ $arg_sub_queue = RecordColQueue->new($fhs);
+ } else {
+ # Open one or more files if multiple -a
+ $arg_sub_queue = MultifileQueue->new($fhs);
+ }
+ return bless {
+ 'unget' => \@unget,
+ 'arg_number' => 0,
+ 'arg_sub_queue' => $arg_sub_queue,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ # Returns:
+ # reference to array of Arg-objects
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ $self->{'arg_number'}++;
+ # Flush cached computed replacements in Arg-objects
+ # To fix: parallel --bar echo {%} ::: a b c ::: d e f
+ my $ret = shift @{$self->{'unget'}};
+ if($ret) {
+ map { $_->flush_cache() } @$ret;
+ }
+ return $ret;
+ }
+ my $ret = $self->{'arg_sub_queue'}->get();
+ if($ret) {
+ if(grep { index($_->orig(),"\0") > 0 } @$ret) {
+ # Allow for \0 in position 0 because GNU Parallel uses "\0noarg"
+ # to mean no-string
+ ::warning("A NUL character in the input was replaced with \\0.",
+ "NUL cannot be passed through in the argument list.",
+ "Did you mean to use the --null option?");
+ for(grep { index($_->orig(),"\0") > 0 } @$ret) {
+ # Replace \0 with \\0
+ my $a = $_->orig();
+ $a =~ s/\0/\\0/g;
+ $_->set_orig($a);
+ }
+ }
+ if(defined $Global::max_number_of_args
+ and $Global::max_number_of_args == 0) {
+ ::debug("run", "Read 1 but return 0 args\n");
+ # \0noarg => nothing (not the empty string)
+ map { $_->set_orig("\0noarg"); } @$ret;
+ }
+ # Flush cached computed replacements in Arg-objects
+ # To fix: parallel --bar echo {%} ::: a b c ::: d e f
+ map { $_->flush_cache() } @$ret;
+ }
+ return $ret;
+}
+
+sub unget($) {
+ my $self = shift;
+ ::debug("run", "RecordQueue-unget\n");
+ $self->{'arg_number'} -= @_;
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}}) &&
+ $self->{'arg_sub_queue'}->empty();
+ ::debug("run", "RecordQueue->empty $empty");
+ return $empty;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ for my $record (@{$self->{'unget'}}) {
+ for my $arg (@$record) {
+ $arg->flush_cache();
+ }
+ }
+ $self->{'arg_sub_queue'}->flush_cache();
+}
+
+sub arg_number($) {
+ my $self = shift;
+ return $self->{'arg_number'};
+}
+
+
+package RecordColQueue;
+
+sub new($) {
+ my $class = shift;
+ my $fhs = shift;
+ my @unget = ();
+ my $arg_sub_queue = MultifileQueue->new($fhs);
+ return bless {
+ 'unget' => \@unget,
+ 'arg_sub_queue' => $arg_sub_queue,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ # Returns:
+ # reference to array of Arg-objects
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ if($self->{'arg_sub_queue'}->empty()) {
+ return undef;
+ }
+ my $in_record = $self->{'arg_sub_queue'}->get();
+ if(defined $in_record) {
+ my @out_record = ();
+ for my $arg (@$in_record) {
+ ::debug("run", "RecordColQueue::arg $arg\n");
+ my $line = $arg->orig();
+ ::debug("run", "line='$line'\n");
+ if($line ne "") {
+ if($opt::csv) {
+ # Parse CSV and put it into a record
+ chomp $line;
+ if(not $Global::csv->parse($line)) {
+ die "CSV has unexpected format: ^$line^";
+ }
+ for($Global::csv->fields()) {
+ push @out_record, Arg->new($_);
+ }
+ } else {
+ # Split --colsep into record
+ for my $s (split /$opt::colsep/o, $line, -1) {
+ push @out_record, Arg->new($s);
+ }
+ }
+ } else {
+ push @out_record, Arg->new("");
+ }
+ }
+ return \@out_record;
+ } else {
+ return undef;
+ }
+}
+
+sub unget($) {
+ my $self = shift;
+ ::debug("run", "RecordColQueue-unget '@_'\n");
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}}) &&
+ $self->{'arg_sub_queue'}->empty();
+ ::debug("run", "RecordColQueue->empty $empty");
+ return $empty;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ for my $arg (@{$self->{'unget'}}) {
+ $arg->flush_cache();
+ }
+ $self->{'arg_sub_queue'}->flush_cache();
+}
+
+
+package SQLRecordQueue;
+
+sub new($) {
+ my $class = shift;
+ my @unget = ();
+ return bless {
+ 'unget' => \@unget,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ # Returns:
+ # reference to array of Arg-objects
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ return $Global::sql->get_record();
+}
+
+sub unget($) {
+ my $self = shift;
+ ::debug("run", "SQLRecordQueue-unget '@_'\n");
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ if(@{$self->{'unget'}}) { return 0; }
+ my $get = $self->get();
+ if(defined $get) {
+ $self->unget($get);
+ }
+ my $empty = not $get;
+ ::debug("run", "SQLRecordQueue->empty $empty");
+ return $empty;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ for my $record (@{$self->{'unget'}}) {
+ for my $arg (@$record) {
+ $arg->flush_cache();
+ }
+ }
+}
+
+
+package MultifileQueue;
+
+@Global::unget_argv=();
+
+sub new($$) {
+ my $class = shift;
+ my $fhs = shift;
+ for my $fh (@$fhs) {
+ if(-t $fh and -t ($Global::status_fd || *STDERR)) {
+ ::warning(
+ "Input is read from the terminal. You are either an expert",
+ "(in which case: YOU ARE AWESOME!) or maybe you forgot",
+ "::: or :::: or -a or to pipe data into parallel. If so",
+ "consider going through the tutorial: man parallel_tutorial",
+ "Press CTRL-D to exit.");
+ }
+ }
+ return bless {
+ 'unget' => \@Global::unget_argv,
+ 'fhs' => $fhs,
+ 'arg_matrix' => undef,
+ }, ref($class) || $class;
+}
+
+sub get($) {
+ my $self = shift;
+ if($opt::link) {
+ return $self->link_get();
+ } else {
+ return $self->nest_get();
+ }
+}
+
+sub unget($) {
+ my $self = shift;
+ ::debug("run", "MultifileQueue-unget '@_'\n");
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty($) {
+ my $self = shift;
+ my $empty = (not @Global::unget_argv) &&
+ not @{$self->{'unget'}};
+ for my $fh (@{$self->{'fhs'}}) {
+ $empty &&= eof($fh);
+ }
+ ::debug("run", "MultifileQueue->empty $empty ");
+ return $empty;
+}
+
+sub flush_cache($) {
+ my $self = shift;
+ for my $record (@{$self->{'unget'}}, @{$self->{'arg_matrix'}}) {
+ for my $arg (@$record) {
+ $arg->flush_cache();
+ }
+ }
+}
+
+sub link_get($) {
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ my @record = ();
+ my $prepend;
+ my $empty = 1;
+ for my $i (0..$#{$self->{'fhs'}}) {
+ my $fh = $self->{'fhs'}[$i];
+ my $arg = read_arg_from_fh($fh);
+ if(defined $arg) {
+ # Record $arg for recycling at end of file
+ push @{$self->{'arg_matrix'}[$i]}, $arg;
+ push @record, $arg;
+ $empty = 0;
+ } else {
+ ::debug("run", "EOA ");
+ # End of file: Recycle arguments
+ push @{$self->{'arg_matrix'}[$i]}, shift @{$self->{'arg_matrix'}[$i]};
+ # return last @{$args->{'args'}{$fh}};
+ push @record, @{$self->{'arg_matrix'}[$i]}[-1];
+ }
+ }
+ if($empty) {
+ return undef;
+ } else {
+ return \@record;
+ }
+}
+
+sub nest_get($) {
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ my @record = ();
+ my $prepend;
+ my $empty = 1;
+ my $no_of_inputsources = $#{$self->{'fhs'}} + 1;
+ if(not $self->{'arg_matrix'}) {
+ # Initialize @arg_matrix with one arg from each file
+ # read one line from each file
+ my @first_arg_set;
+ my $all_empty = 1;
+ for (my $fhno = 0; $fhno < $no_of_inputsources ; $fhno++) {
+ my $arg = read_arg_from_fh($self->{'fhs'}[$fhno]);
+ if(defined $arg) {
+ $all_empty = 0;
+ }
+ $self->{'arg_matrix'}[$fhno][0] = $arg || Arg->new("");
+ push @first_arg_set, $self->{'arg_matrix'}[$fhno][0];
+ }
+ if($all_empty) {
+ # All filehandles were at eof or eof-string
+ return undef;
+ }
+ return [@first_arg_set];
+ }
+
+ # Treat the case with one input source special. For multiple
+ # input sources we need to remember all previously read values to
+ # generate all combinations. But for one input source we can
+ # forget the value after first use.
+ if($no_of_inputsources == 1) {
+ my $arg = read_arg_from_fh($self->{'fhs'}[0]);
+ if(defined($arg)) {
+ return [$arg];
+ }
+ return undef;
+ }
+ for (my $fhno = $no_of_inputsources - 1; $fhno >= 0; $fhno--) {
+ if(eof($self->{'fhs'}[$fhno])) {
+ next;
+ } else {
+ # read one
+ my $arg = read_arg_from_fh($self->{'fhs'}[$fhno]);
+ defined($arg) || next; # If we just read an EOF string: Treat this as EOF
+ my $len = $#{$self->{'arg_matrix'}[$fhno]} + 1;
+ $self->{'arg_matrix'}[$fhno][$len] = $arg;
+ # make all new combinations
+ my @combarg = ();
+ for (my $fhn = 0; $fhn < $no_of_inputsources; $fhn++) {
+ push(@combarg, [0, $#{$self->{'arg_matrix'}[$fhn]}],
+ # Is input source --link'ed to the next?
+ $opt::linkinputsource[$fhn+1]);
+ }
+ # Find only combinations with this new entry
+ $combarg[2*$fhno] = [$len,$len];
+ # map combinations
+ # [ 1, 3, 7 ], [ 2, 4, 1 ]
+ # =>
+ # [ m[0][1], m[1][3], m[2][7] ], [ m[0][2], m[1][4], m[2][1] ]
+ my @mapped;
+ for my $c (expand_combinations(@combarg)) {
+ my @a;
+ for my $n (0 .. $no_of_inputsources - 1 ) {
+ push @a, $self->{'arg_matrix'}[$n][$$c[$n]];
+ }
+ push @mapped, \@a;
+ }
+ # append the mapped to the ungotten arguments
+ push @{$self->{'unget'}}, @mapped;
+ # get the first
+ if(@mapped) {
+ return shift @{$self->{'unget'}};
+ }
+ }
+ }
+ # all are eof or at EOF string; return from the unget queue
+ return shift @{$self->{'unget'}};
+}
+
+{
+ my $cr_count = 0;
+ my $nl_count = 0;
+ my $dos_crnl_determined;
+ sub read_arg_from_fh($) {
+ # Read one Arg from filehandle
+ # Returns:
+ # Arg-object with one read line
+ # undef if end of file
+ my $fh = shift;
+ my $prepend;
+ my $arg;
+ my $half_record = 0;
+ do {{
+ # This construct makes it 10% faster
+ if(not defined ($arg = <$fh>)) {
+ if(defined $prepend) {
+ return Arg->new($prepend);
+ } else {
+ return undef;
+ }
+ }
+ if(not $dos_crnl_determined and not defined $opt::d) {
+ # Warn if input has CR-NL and -d is not set
+ if($arg =~ /\r$/) {
+ $cr_count++;
+ } else {
+ $nl_count++;
+ }
+ if($cr_count == 3 or $nl_count == 3) {
+ $dos_crnl_determined = 1;
+ if($nl_count == 0 and $cr_count == 3) {
+ ::warning('The first three values end in CR-NL. '.
+ 'Consider using -d "\r\n"');
+ }
+ }
+ }
+ if($opt::csv) {
+ # We need to read a full CSV line.
+ if(($arg =~ y/"/"/) % 2 ) {
+ # The number of " on the line is uneven:
+ # If we were in a half_record => we have a full record now
+ # If we were outside a half_record =>
+ # we are in a half record now
+ $half_record = not $half_record;
+ }
+ if($half_record) {
+ # CSV half-record with quoting:
+ # col1,"col2 2""x3"" board newline <-this one
+ # cont",col3
+ $prepend .= $arg;
+ redo;
+ } else {
+ # Now we have a full CSV record
+ }
+ }
+ # Remove delimiter
+ chomp $arg;
+ if($Global::end_of_file_string and
+ $arg eq $Global::end_of_file_string) {
+ # Ignore the rest of input file
+ close $fh;
+ ::debug("run", "EOF-string ($arg) met\n");
+ if(defined $prepend) {
+ return Arg->new($prepend);
+ } else {
+ return undef;
+ }
+ }
+ if(defined $prepend) {
+ $arg = $prepend.$arg; # For line continuation
+ undef $prepend;
+ }
+ if($Global::ignore_empty) {
+ if($arg =~ /^\s*$/) {
+ redo; # Try the next line
+ }
+ }
+ if($Global::max_lines) {
+ if($arg =~ /\s$/) {
+ # Trailing space => continued on next line
+ $prepend = $arg;
+ redo;
+ }
+ }
+ }} while (1 == 0); # Dummy loop {{}} for redo
+ if(defined $arg) {
+ return Arg->new($arg);
+ } else {
+ ::die_bug("multiread arg undefined");
+ }
+ }
+}
+
+# Prototype forwarding
+sub expand_combinations(@);
+sub expand_combinations(@) {
+ # Input:
+ # ([xmin,xmax], [ymin,ymax], ...)
+ # Returns: ([x,y,...],[x,y,...])
+ # where xmin <= x <= xmax and ymin <= y <= ymax
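+ # The flag after each [min,max] tells whether the next input source
+ # is --link'ed to this one; linked columns reuse the same index.
+ # Example (indices, not values):
+ #   expand_combinations([0,1],0,[0,1],0) => [0,0],[0,1],[1,0],[1,1]
+ #   expand_combinations([0,1],1,[0,1],0) => [0,0],[1,1]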
+ my $minmax_ref = shift;
+ my $link = shift; # This is linked to the next input source
+ my $xmin = $$minmax_ref[0];
+ my $xmax = $$minmax_ref[1];
+ my @p;
+ if(@_) {
+ my @rest = expand_combinations(@_);
+ if($link) {
+ # Linked to next col with --link/:::+/::::+
+ # TODO BUG does not wrap values if not same number of vals
+ push(@p, map { [$$_[0], @$_] }
+ grep { $xmin <= $$_[0] and $$_[0] <= $xmax } @rest);
+ } else {
+ # If there are more columns: Compute those recursively
+ for(my $x = $xmin; $x <= $xmax; $x++) {
+ push @p, map { [$x, @$_] } @rest;
+ }
+ }
+ } else {
+ for(my $x = $xmin; $x <= $xmax; $x++) {
+ push @p, [$x];
+ }
+ }
+ return @p;
+}
+
+
+package Arg;
+
+sub new($) {
+ my $class = shift;
+ my $orig = shift;
+ my @hostgroups;
+ if($opt::hostgroups) {
+ if($orig =~ s:@(.+)::) {
+ # We found hostgroups on the arg
+ @hostgroups = split(/\+|,/, $1);
+ if(not grep { defined $Global::hostgroups{$_} } @hostgroups) {
+ # This hostgroup is not defined using -S
+ # Add it
+ ::warning("Adding hostgroups: @hostgroups");
+ # Add sshlogin
+ for(grep { not defined $Global::hostgroups{$_} } @hostgroups) {
+ my $sshlogin = SSHLogin->new($_);
+ my $sshlogin_string = $sshlogin->string();
+ $Global::host{$sshlogin_string} = $sshlogin;
+ $Global::hostgroups{$sshlogin_string} = 1;
+ }
+ }
+ } else {
+ # No hostgroup on the arg => any hostgroup
+ @hostgroups = (keys %Global::hostgroups);
+ }
+ }
+ return bless {
+ 'orig' => $orig,
+ 'hostgroups' => \@hostgroups,
+ }, ref($class) || $class;
+}
+
+sub Q($) {
+ # Q alias for ::shell_quote_scalar
+ my $ret = ::Q($_[0]);
+ no warnings 'redefine';
+ *Q = \&::Q;
+ return $ret;
+}
+
+sub pQ($) {
+ # pQ alias for ::perl_quote_scalar
+ my $ret = ::pQ($_[0]);
+ no warnings 'redefine';
+ *pQ = \&::pQ;
+ return $ret;
+}
+
+sub hash($) {
+ $Global::use{"DBI"} ||= eval "use B; 1;";
+ B::hash(@_);
+}
+
+sub total_jobs() {
+ return $Global::JobQueue->total_jobs();
+}
+
+{
+ my %perleval;
+ my $job;
+ sub skip() {
+ # shorthand for $job->skip();
+ $job->skip();
+ }
+ sub slot() {
+ # shorthand for $job->slot();
+ $job->slot();
+ }
+ sub seq() {
+ # shorthand for $job->seq();
+ $job->seq();
+ }
+ sub uq() {
+ # Do not quote this arg
+ $Global::unquote_arg = 1;
+ }
+ sub yyyy_mm_dd_hh_mm_ss(@) {
+ # ISO8601 2038-01-19T03:14:08
+ ::strftime("%Y-%m-%dT%H:%M:%S", localtime(shift || time()));
+ }
+ sub yyyy_mm_dd_hh_mm(@) {
+ # ISO8601 2038-01-19T03:14
+ ::strftime("%Y-%m-%dT%H:%M", localtime(shift || time()));
+ }
+ sub yyyy_mm_dd(@) {
+ # ISO8601 2038-01-19
+ ::strftime("%Y-%m-%d", localtime(shift || time()));
+ }
+ sub hh_mm_ss(@) {
+ # ISO8601 03:14:08
+ ::strftime("%H:%M:%S", localtime(shift || time()));
+ }
+ sub hh_mm(@) {
+ # ISO8601 03:14
+ ::strftime("%H:%M", localtime(shift || time()));
+ }
+ sub yyyymmddhhmmss(@) {
+ # ISO8601 20380119 + ISO8601 031408
+ ::strftime("%Y%m%d%H%M%S", localtime(shift || time()));
+ }
+ sub yyyymmddhhmm(@) {
+ # ISO8601 20380119 + ISO8601 0314
+ ::strftime("%Y%m%d%H%M", localtime(shift || time()));
+ }
+ sub yyyymmdd(@) {
+ # ISO8601 20380119
+ ::strftime("%Y%m%d", localtime(shift || time()));
+ }
+ sub hhmmss(@) {
+ # ISO8601 031408
+ ::strftime("%H%M%S", localtime(shift || time()));
+ }
+ sub hhmm(@) {
+ # ISO8601 0314
+ ::strftime("%H%M", localtime(shift || time()));
+ }
+
+ sub replace($$$$) {
+ # Calculates the corresponding value for a given perl expression
+ # Returns:
+ # The calculated string (quoted if asked for)
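+ # E.g. for the replacement string {= s/\.gz$// =} $perlexpr is
+ # 's/\.gz$//'; with input "foo.gz", $_ is set to "foo.gz", the
+ # expression is eval'ed, and "foo" is returned (and cached)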
+ my $self = shift;
+ my $perlexpr = shift; # E.g. $_=$_ or s/.gz//
+ my $quote = shift; # should the string be quoted?
+ # This is actually a CommandLine-object,
+ # but it looks nice to be able to say {= $job->slot() =}
+ $job = shift;
+ # Positional replace treated as normal replace
+ $perlexpr =~ s/^(-?\d+)? *//;
+ if(not $Global::cache_replacement_eval
+ or
+ not $self->{'cache'}{$perlexpr}) {
+ # Only compute the value once
+ # Use $_ as the variable to change
+ local $_;
+ if($Global::trim eq "n") {
+ $_ = $self->{'orig'};
+ } else {
+ # Trim the input
+ $_ = trim_of($self->{'orig'});
+ }
+ ::debug("replace", "eval ", $perlexpr, " ", $_, "\n");
+ if(not $perleval{$perlexpr}) {
+ # Make an anonymous function of the $perlexpr
+ # And more importantly: Compile it only once
+ if($perleval{$perlexpr} =
+ eval('sub { no strict; no warnings; my $job = shift; '.
+ $perlexpr.' }')) {
+ # All is good
+ } else {
+ # The eval failed. Maybe $perlexpr is invalid perl?
+ ::error(255,"Cannot use $perlexpr: $@");
+ }
+ }
+ # Execute the function
+ $perleval{$perlexpr}->($job);
+ $self->{'cache'}{$perlexpr} = $_;
+ if($Global::unquote_arg) {
+ # uq() was called in perlexpr
+ $self->{'cache'}{'unquote'}{$perlexpr} = 1;
+ # Reset for next perlexpr
+ $Global::unquote_arg = 0;
+ }
+ }
+ # Return the value quoted if needed
+ if($self->{'cache'}{'unquote'}{$perlexpr}) {
+ return($self->{'cache'}{$perlexpr});
+ } else {
+ return($quote ? Q($self->{'cache'}{$perlexpr})
+ : $self->{'cache'}{$perlexpr});
+ }
+ }
+}
+
+sub flush_cache($) {
+ # Flush cache of computed values
+ my $self = shift;
+ $self->{'cache'} = undef;
+}
+
+sub orig($) {
+ my $self = shift;
+ return $self->{'orig'};
+}
+
+sub set_orig($$) {
+ my $self = shift;
+ $self->{'orig'} = shift;
+}
+
+sub trim_of($) {
+ # Removes white space as specified by --trim:
+ # n = nothing
+ # l = start
+ # r = end
+ # lr|rl = both
+ # Returns:
+ # string with white space removed as needed
+ my @strings = map { defined $_ ? $_ : "" } (@_);
+ my $arg;
+ if($Global::trim eq "n") {
+ # skip
+ } elsif($Global::trim eq "l") {
+ for my $arg (@strings) { $arg =~ s/^\s+//; }
+ } elsif($Global::trim eq "r") {
+ for my $arg (@strings) { $arg =~ s/\s+$//; }
+ } elsif($Global::trim eq "rl" or $Global::trim eq "lr") {
+ for my $arg (@strings) { $arg =~ s/^\s+//; $arg =~ s/\s+$//; }
+ } else {
+ ::error(255,"--trim must be one of: r l rl lr.");
+ }
+ return wantarray ? @strings : "@strings";
+}
+
+
+package TimeoutQueue;
+
+sub new($) {
+ my $class = shift;
+ my $delta_time = shift;
+ my ($pct);
+ if($delta_time =~ /(\d+(\.\d+)?)%/) {
+ # Timeout in percent
+ $pct = $1/100;
+ $delta_time = 1_000_000;
+ }
+ $delta_time = ::multiply_time_units($delta_time);
+
+ return bless {
+ 'queue' => [],
+ 'delta_time' => $delta_time,
+ 'pct' => $pct,
+ 'remedian_idx' => 0,
+ 'remedian_arr' => [],
+ 'remedian' => undef,
+ }, ref($class) || $class;
+}
+
+sub delta_time($) {
+ my $self = shift;
+ return $self->{'delta_time'};
+}
+
+sub set_delta_time($$) {
+ my $self = shift;
+ $self->{'delta_time'} = shift;
+}
+
+sub remedian($) {
+ my $self = shift;
+ return $self->{'remedian'};
+}
+
+sub set_remedian($$) {
+ # Set median of the last 999^3 (=997002999) values using Remedian
+ #
+ # Rousseeuw, Peter J., and Gilbert W. Bassett Jr. "The remedian: A
+ # robust averaging method for large data sets." Journal of the
+ # American Statistical Association 85.409 (1990): 97-104.
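+ #
+ # The incoming value goes into level-0 slot $i%999; the median of
+ # level 0 goes into level-1 slot int($i/999)%999, and so on. Three
+ # levels of 999 slots approximate the median of up to 999^3 values
+ # while keeping only 3*999 numbers in memory.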
+ my $self = shift;
+ my $val = shift;
+ my $i = $self->{'remedian_idx'}++;
+ my $rref = $self->{'remedian_arr'};
+ $rref->[0][$i%999] = $val;
+ # Numeric sort: the values are runtimes
+ $rref->[1][$i/999%999] = (sort { $a <=> $b } @{$rref->[0]})[$#{$rref->[0]}/2];
+ $rref->[2][$i/999/999%999] = (sort { $a <=> $b } @{$rref->[1]})[$#{$rref->[1]}/2];
+ $self->{'remedian'} = (sort { $a <=> $b } @{$rref->[2]})[$#{$rref->[2]}/2];
+}
+
+sub update_median_runtime($) {
+ # Update delta_time based on runtime of finished job if timeout is
+ # a percentage
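+ # E.g. with --timeout 200% a job is killed if it runs longer than
+ # twice the median runtime of the jobs finished so far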
+ my $self = shift;
+ my $runtime = shift;
+ if($self->{'pct'}) {
+ $self->set_remedian($runtime);
+ $self->{'delta_time'} = $self->{'pct'} * $self->remedian();
+ ::debug("run", "Timeout: $self->{'delta_time'}s ");
+ }
+}
+
+sub process_timeouts($) {
+ # Check if there was a timeout
+ my $self = shift;
+ # $self->{'queue'} is sorted by start time
+ while (@{$self->{'queue'}}) {
+ my $job = $self->{'queue'}[0];
+ if($job->endtime()) {
+ # Job already finished. No need to timeout the job
+ # This could be because of --keep-order
+ shift @{$self->{'queue'}};
+ } elsif($job->is_timedout($self->{'delta_time'})) {
+ # Need to shift off queue before kill
+ # because kill calls usleep that calls process_timeouts
+ shift @{$self->{'queue'}};
+ ::warning("This job was killed because it timed out:",
+ $job->replaced());
+ $job->kill();
+ } else {
+ # Because they are sorted by start time the rest are later
+ last;
+ }
+ }
+}
+
+sub insert($) {
+ my $self = shift;
+ my $in = shift;
+ push @{$self->{'queue'}}, $in;
+}
+
+
+package SQL;
+
+sub new($) {
+ my $class = shift;
+ my $dburl = shift;
+ $Global::use{"DBI"} ||= eval "use DBI; 1;";
+ # +DBURL = append to this DBURL
+ my $append = $dburl=~s/^\+//;
+ my %options = parse_dburl(get_alias($dburl));
+ my %driveralias = ("sqlite" => "SQLite",
+ "sqlite3" => "SQLite",
+ "pg" => "Pg",
+ "postgres" => "Pg",
+ "postgresql" => "Pg",
+ "csv" => "CSV",
+ "oracle" => "Oracle",
+ "ora" => "Oracle");
+ my $driver = $driveralias{$options{'databasedriver'}} ||
+ $options{'databasedriver'};
+ my $database = $options{'database'};
+ my $host = $options{'host'} ? ";host=".$options{'host'} : "";
+ my $port = $options{'port'} ? ";port=".$options{'port'} : "";
+ my $dsn = "DBI:$driver:dbname=$database$host$port";
+ my $userid = $options{'user'};
+ my $password = $options{'password'};
+ if(not grep /$driver/, DBI->available_drivers) {
+ ::error(255,"$driver not supported. ".
+ "Are you missing a perl DBD::$driver module?");
+ }
+ my $dbh;
+ if($driver eq "CSV") {
+ # CSV does not use normal dsn
+ if(-d $database) {
+ $dbh = DBI->connect("dbi:CSV:", "", "", { f_dir => "$database", })
+ or die $DBI::errstr;
+ } else {
+ ::error(255,"$database is not a directory.");
+ }
+ } else {
+ $dbh = DBI->connect($dsn, $userid, $password,
+ { RaiseError => 1, AutoInactiveDestroy => 1 })
+ or die $DBI::errstr;
+ }
+ $dbh->{'PrintWarn'} = $Global::debug || 0;
+ $dbh->{'PrintError'} = $Global::debug || 0;
+ $dbh->{'RaiseError'} = 1;
+ $dbh->{'ShowErrorStatement'} = 1;
+ $dbh->{'HandleError'} = sub {};
+ if(not defined $options{'table'}) {
+ ::error(255,"The DBURL ($dburl) must contain a table.");
+ }
+
+ return bless {
+ 'dbh' => $dbh,
+ 'driver' => $driver,
+ 'max_number_of_args' => undef,
+ 'table' => $options{'table'},
+ 'append' => $append,
+ }, ref($class) || $class;
+}
+
+# Prototype forwarding
+sub get_alias($);
+sub get_alias($) {
+ my $alias = shift;
+ $alias =~ s/^(sql:)*//; # Accept aliases prepended with sql:
+ if ($alias !~ /^:/) {
+ return $alias;
+ }
+
+ # Find the alias
+ my $path;
+ if (-l $0) {
+ ($path) = readlink($0) =~ m|^(.*)/|;
+ } else {
+ ($path) = $0 =~ m|^(.*)/|;
+ }
+
+ my @deprecated = ("$ENV{HOME}/.dburl.aliases",
+ "$path/dburl.aliases", "$path/dburl.aliases.dist");
+ for (@deprecated) {
+ if(-r $_) {
+ ::warning("$_ is deprecated. ".
+ "Use .sql/aliases instead (read man sql).");
+ }
+ }
+ my @urlalias=();
+ check_permissions("$ENV{HOME}/.sql/aliases");
+ check_permissions("$ENV{HOME}/.dburl.aliases");
+ my @search = ("$ENV{HOME}/.sql/aliases",
+ "$ENV{HOME}/.dburl.aliases", "/etc/sql/aliases",
+ "$path/dburl.aliases", "$path/dburl.aliases.dist");
+ for my $alias_file (@search) {
+ # local $/ needed if -0 set
+ local $/ = "\n";
+ if(-r $alias_file) {
+ my $in = ::open_or_exit("<",$alias_file);
+ push @urlalias, <$in>;
+ close $in;
+ }
+ }
+ my ($alias_part,$rest) = $alias=~/(:\w*)(.*)/;
+ # If we saw this before: we have an alias loop
+ if(grep {$_ eq $alias_part } @Private::seen_aliases) {
+ ::error("$alias_part is a cyclic alias.");
+ exit -1;
+ } else {
+ push @Private::seen_aliases, $alias_part;
+ }
+
+ my $dburl;
+ for (@urlalias) {
+ /^$alias_part\s+(\S+.*)/ and do { $dburl = $1; last; }
+ }
+
+ if($dburl) {
+ return get_alias($dburl.$rest);
+ } else {
+ ::error("$alias is not defined in @search");
+ exit(-1);
+ }
+}
+
+sub check_permissions($) {
+ my $file = shift;
+
+ if(-e $file) {
+ if(not -o $file) {
+ my $username = (getpwuid($<))[0];
+ ::warning("$file should be owned by $username: ".
+ "chown $username $file");
+ }
+ my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,
+ $atime,$mtime,$ctime,$blksize,$blocks) = stat($file);
+ if($mode & 077) {
+ my $username = (getpwuid($<))[0];
+ ::warning("$file should be only be readable by $username: ".
+ "chmod 600 $file");
+ }
+ }
+}
+
+sub parse_dburl($) {
+ my $url = shift;
+ my %options = ();
+ # sql:mysql://[[user][:password]@][host][:port]/[database[/table][?query]]
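+ # Example:
+ #   parse_dburl("mysql://scott:tiger@db.example.com:3306/mydb/tbl")
+ #   => (databasedriver => "mysql", user => "scott",
+ #       password => "tiger", host => "db.example.com",
+ #       port => "3306", database => "mydb", table => "tbl")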
+
+ if($url=~m!^(?:sql:)? # You can prefix with 'sql:'
+ ((?:oracle|ora|mysql|pg|postgres|postgresql)(?:s|ssl|)|
+ (?:sqlite|sqlite2|sqlite3|csv)):// # Databasedriver ($1)
+ (?:
+ ([^:@/][^:@]*|) # Username ($2)
+ (?:
+ :([^@]*) # Password ($3)
+ )?
+ @)?
+ ([^:/]*)? # Hostname ($4)
+ (?:
+ :
+ ([^/]*)? # Port ($5)
+ )?
+ (?:
+ /
+ ([^/?]*)? # Database ($6)
+ )?
+ (?:
+ /
+ ([^?]*)? # Table ($7)
+ )?
+ (?:
+ \?
+ (.*)? # Query ($8)
+ )?
+ $!ix) {
+ $options{databasedriver} = ::undef_if_empty(lc(uri_unescape($1)));
+ $options{user} = ::undef_if_empty(uri_unescape($2));
+ $options{password} = ::undef_if_empty(uri_unescape($3));
+ $options{host} = ::undef_if_empty(uri_unescape($4));
+ $options{port} = ::undef_if_empty(uri_unescape($5));
+ $options{database} = ::undef_if_empty(uri_unescape($6));
+ $options{table} = ::undef_if_empty(uri_unescape($7));
+ $options{query} = ::undef_if_empty(uri_unescape($8));
+ ::debug("sql", "dburl $url\n");
+ ::debug("sql", "databasedriver ", $options{databasedriver},
+ " user ", $options{user},
+ " password ", $options{password}, " host ", $options{host},
+ " port ", $options{port}, " database ", $options{database},
+ " table ", $options{table}, " query ", $options{query}, "\n");
+ } else {
+ ::error("$url is not a valid DBURL");
+ exit 255;
+ }
+ return %options;
+}
+
+sub uri_unescape($) {
+ # Copied from http://cpansearch.perl.org/src/GAAS/URI-1.55/URI/Escape.pm
+ # to avoid depending on URI::Escape
+ # This section is (C) Gisle Aas.
+ # Note from RFC1630: "Sequences which start with a percent sign
+ # but are not followed by two hexadecimal characters are reserved
+ # for future extension"
+ my $str = shift;
+ if (@_ && wantarray) {
+ # not executed for the common case of a single argument
+ my @str = ($str, @_); # need to copy
+ foreach (@str) {
+ s/%([0-9A-Fa-f]{2})/chr(hex($1))/eg;
+ }
+ return @str;
+ }
+ $str =~ s/%([0-9A-Fa-f]{2})/chr(hex($1))/eg if defined $str;
+ $str;
+}
+
+sub run($) {
+ my $self = shift;
+ my $stmt = shift;
+ if($self->{'driver'} eq "CSV") {
+ $stmt=~ s/;$//;
+ if($stmt eq "BEGIN" or
+ $stmt eq "COMMIT") {
+ return undef;
+ }
+ }
+ my @retval;
+ my $dbh = $self->{'dbh'};
+ ::debug("sql","$opt::sqlmaster$opt::sqlworker run $stmt\n");
+ # Execute with the rest of the args - if any
+ my $rv;
+ my $sth;
+ my $lockretry = 0;
+ while($lockretry < 10) {
+ $sth = $dbh->prepare($stmt);
+ if($sth
+ and
+ eval { $rv = $sth->execute(@_) }) {
+ last;
+ } else {
+ if($@ =~ /no such table|Table .* doesn.t exist|relation ".*" does not exist/
+ or
+ $DBI::errstr =~ /no such table|Table .* doesn.t exist|relation ".*" does not exist/) {
+ # This is fine:
+ # It is just a worker that reported back too late -
+ # another worker had finished the job first
+ # and the table was then dropped
+ $rv = $sth = 0;
+ last;
+ }
+ if($DBI::errstr =~ /locked/) {
+ ::debug("sql", "Lock retry: $lockretry");
+ $lockretry++;
+ ::usleep(rand()*300);
+ } elsif(not $sth) {
+ # Try again
+ $lockretry++;
+ } else {
+ ::error(255,$DBI::errstr);
+ }
+ }
+ }
+ if($lockretry >= 10) {
+ ::die_bug("retry > 10: $DBI::errstr");
+ }
+ if($rv < 0 and $DBI::errstr){
+ ::error(255,$DBI::errstr);
+ }
+ return $sth;
+}
+
+sub get($) {
+ my $self = shift;
+ my $sth = $self->run(@_);
+ my @retval;
+ # If $sth = 0 it means the table was dropped by another process
+ while($sth) {
+ my @row = $sth->fetchrow_array();
+ @row or last;
+ push @retval, \@row;
+ }
+ return \@retval;
+}
+
+sub table($) {
+ my $self = shift;
+ return $self->{'table'};
+}
+
+sub append($) {
+ my $self = shift;
+ return $self->{'append'};
+}
+
+sub update($) {
+ my $self = shift;
+ my $stmt = shift;
+ my $table = $self->table();
+ $self->run("UPDATE $table $stmt",@_);
+}
+
+sub output($) {
+ my $self = shift;
+ my $commandline = shift;
+
+ $self->update("SET Stdout = ?, Stderr = ? WHERE Seq = ".
+ $commandline->seq(),
+ join("",@{$commandline->{'output'}{1}}),
+ join("",@{$commandline->{'output'}{2}}));
+}
+
+sub max_number_of_args($) {
+ # Maximal number of args for this table
+ my $self = shift;
+ if(not $self->{'max_number_of_args'}) {
+ # Read the number of args from the SQL table
+ my $table = $self->table();
+ my $v = $self->get("SELECT * FROM $table LIMIT 1;");
+ my @reserved_columns = qw(Seq Host Starttime JobRuntime Send
+ Receive Exitval _Signal Command Stdout Stderr);
+ if(not $v) {
+ ::error("$table contains no records");
+ }
+ # Count the number of Vx columns
+ $self->{'max_number_of_args'} = $#{$v->[0]} - $#reserved_columns;
+ }
+ return $self->{'max_number_of_args'};
+}
+
+sub set_max_number_of_args($$) {
+ my $self = shift;
+ $self->{'max_number_of_args'} = shift;
+}
+
+sub create_table($) {
+ my $self = shift;
+ if($self->append()) { return; }
+ my $max_number_of_args = shift;
+ $self->set_max_number_of_args($max_number_of_args);
+ my $table = $self->table();
+ $self->run(qq(DROP TABLE IF EXISTS $table;));
+ # BIGINT and TEXT are not supported in these databases or are too small
+ my %vartype = (
+ "Oracle" => { "BIGINT" => "NUMBER(19,0)",
+ "TEXT" => "CLOB", },
+ "mysql" => { "TEXT" => "BLOB", },
+ "CSV" => { "BIGINT" => "INT",
+ "FLOAT" => "REAL", },
+ );
+ my $BIGINT = $vartype{$self->{'driver'}}{"BIGINT"} || "BIGINT";
+ my $TEXT = $vartype{$self->{'driver'}}{"TEXT"} || "TEXT";
+ my $FLOAT = $vartype{$self->{'driver'}}{"FLOAT"} || "FLOAT(44)";
+ my $v_def = join "", map { "V$_ $TEXT," } (1..$self->max_number_of_args());
+ $self->run(qq{CREATE TABLE $table
+ (Seq $BIGINT,
+ Host $TEXT,
+ Starttime $FLOAT,
+ JobRuntime $FLOAT,
+ Send $BIGINT,
+ Receive $BIGINT,
+ Exitval $BIGINT,
+ _Signal $BIGINT,
+ Command $TEXT,}.
+ $v_def.
+ qq{Stdout $TEXT,
+ Stderr $TEXT);});
+}
+
+sub insert_records($) {
+ my $self = shift;
+ my $seq = shift;
+ my $command_ref = shift;
+ my $record_ref = shift;
+ my $table = $self->table();
+ # For SQL encode the command with \257 space as split points
+ my $command = join("\257 ",@$command_ref);
+ my @v_cols = map { ", V$_" } (1..$self->max_number_of_args());
+ # Four extra values due to Seq, Command, Exitval, Send
+ my $v_vals = join ",", map { "?" } (1..$self->max_number_of_args()+4);
+ $self->run("INSERT INTO $table (Seq,Command,Exitval,Send @v_cols) ".
+ "VALUES ($v_vals);", $seq, $command, -1000,
+ 0, @$record_ref[1..$#$record_ref]);
+}
+
+
+sub get_record($) {
+ my $self = shift;
+ my @retval;
+ my $table = $self->table();
+ my @v_cols = map { ", V$_" } (1..$self->max_number_of_args());
+ my $rand = "Reserved-".$$.rand();
+ my $v;
+ my $more_pending;
+
+ do {
+ if($self->{'driver'} eq "CSV") {
+ # Sub SELECT is not supported in CSV
+ # So to minimize the race condition below select a job at random
+ my $r = $self->get("SELECT Seq, Command @v_cols FROM $table ".
+ "WHERE Exitval = -1000 LIMIT 100;");
+ $v = [ sort { rand() > 0.5 } @$r ];
+ } else {
+ # Avoid race condition where multiple workers get the same job
+ # by setting Stdout to a unique string
+ # (SELECT * FROM (...) AS dummy) is needed due to silliness in MySQL
+ $self->update("SET Stdout = ?,Exitval = ? ".
+ "WHERE Seq = (".
+ " SELECT * FROM (".
+ " SELECT min(Seq) FROM $table WHERE Exitval = -1000".
+ " ) AS dummy".
+ ") AND Exitval = -1000;", $rand, -1210);
+ # If a parallel worker overwrote the unique string this will get nothing
+ $v = $self->get("SELECT Seq, Command @v_cols FROM $table ".
+ "WHERE Stdout = ?;", $rand);
+ }
+ if($v->[0]) {
+ my $val_ref = $v->[0];
+ # Mark record as taken
+ my $seq = shift @$val_ref;
+ # Save the sequence number to use when running the job
+ $SQL::next_seq = $seq;
+ $self->update("SET Exitval = ? WHERE Seq = ".$seq, -1220);
+ # Command is encoded with '\257 space' as splitting char
+ my @command = split /\257 /, shift @$val_ref;
+ $SQL::command_ref = \@command;
+ for (@$val_ref) {
+ push @retval, Arg->new($_);
+ }
+ } else {
+ # If the record was updated by another job in parallel,
+ # then we may not be done, so see if there are more jobs pending
+ $more_pending =
+ $self->get("SELECT Seq FROM $table WHERE Exitval = ?;", -1210);
+ }
+ } while (not $v->[0] and $more_pending->[0]);
+
+ if(@retval) {
+ return \@retval;
+ } else {
+ return undef;
+ }
+}
+
+sub total_jobs($) {
+ my $self = shift;
+ my $table = $self->table();
+ my $v = $self->get("SELECT count(*) FROM $table;");
+ if($v->[0]) {
+ return $v->[0]->[0];
+ } else {
+ ::die_bug("SQL::total_jobs");
+ }
+}
+
+sub max_seq($) {
+ my $self = shift;
+ my $table = $self->table();
+ my $v = $self->get("SELECT max(Seq) FROM $table;");
+ if($v->[0]) {
+ return $v->[0]->[0];
+ } else {
+ ::die_bug("SQL::max_seq");
+ }
+}
+
+sub finished($) {
+ # Check if there are any jobs left in the SQL table that do not
+ # have a "real" exitval
+ my $self = shift;
+ if($opt::wait or $Global::start_sqlworker) {
+ my $table = $self->table();
+ my $rv = $self->get("select Seq,Exitval from $table ".
+ "where Exitval <= -1000 limit 1");
+ return not $rv->[0];
+ } else {
+ return 1;
+ }
+}
+
+package Semaphore;
+
+# This package provides a counting semaphore
+#
+# If a process dies without releasing the semaphore the next process
+# that needs that entry will clean up dead semaphores
+#
+# The semaphores are stored in $PARALLEL_HOME/semaphores/id-<name>. Each
+# file in $PARALLEL_HOME/semaphores/id-<name>/ is named after the process
+# ID of the process holding the entry. If the process dies, the entry
+# can be taken by another process.
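+#
+# Example layout for a semaphore named "myid" held by two processes
+# on host "hosta" (the pids are illustrative):
+#   $PARALLEL_HOME/semaphores/id-myid/id-myid       (the id file)
+#   $PARALLEL_HOME/semaphores/id-myid/12345@hosta   (holder, pid 12345)
+#   $PARALLEL_HOME/semaphores/id-myid/12346@hosta   (holder, pid 12346)
+# The pid files are hard links to the id file, so the id file's link
+# count tracks the number of holders.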
+
+sub new($) {
+ my $class = shift;
+ my $id = shift;
+ my $count = shift;
+ $id =~ s/([^-_a-z0-9])/unpack("H*",$1)/ige; # Convert non-word chars to hex
+ $id = "id-".$id; # To distinguish it from a process id
+ my $parallel_locks = $Global::cache_dir . "/semaphores";
+ -d $parallel_locks or ::mkdir_or_die($parallel_locks);
+ my $lockdir = "$parallel_locks/$id";
+ my $lockfile = $lockdir.".lock";
+ if(-d $parallel_locks and -w $parallel_locks
+ and -r $parallel_locks and -x $parallel_locks) {
+ # skip
+ } else {
+ ::error(255,"Semaphoredir must be writable: '$parallel_locks'");
+ }
+
+ if($count < 1) { ::die_bug("semaphore-count: $count"); }
+ return bless {
+ 'lockfile' => $lockfile,
+ 'lockfh' => Symbol::gensym(),
+ 'lockdir' => $lockdir,
+ 'id' => $id,
+ 'idfile' => $lockdir."/".$id,
+ 'pid' => $$,
+ 'pidfile' => $lockdir."/".$$.'@'.::hostname(),
+ 'count' => $count + 1 # nlinks returns a link for the 'id-' as well
+ }, ref($class) || $class;
+}
+
+sub remove_dead_locks($) {
+ my $self = shift;
+ my $lockdir = $self->{'lockdir'};
+
+ for my $d (glob "$lockdir/*") {
+ $d =~ m:$lockdir/([0-9]+)\@([-\._a-z0-9]+)$:o or next;
+ my ($pid, $host) = ($1, $2);
+ if($host eq ::hostname()) {
+ if(kill 0, $pid) {
+ ::debug("sem", "Alive: $pid $d\n");
+ } else {
+ ::debug("sem", "Dead: $d\n");
+ ::rm($d);
+ }
+ }
+ }
+}
+
+sub acquire($) {
+ my $self = shift;
+ my $sleep = 1; # 1 ms
+ my $start_time = time;
+ while(1) {
+ # Can we get a lock?
+ $self->atomic_link_if_count_less_than() and last;
+ $self->remove_dead_locks();
+ # Retry slower and slower up to 1 second
+ $sleep = ($sleep < 1000) ? ($sleep * 1.1) : ($sleep);
+ # Random to avoid every sleeping job waking up at the same time
+ ::usleep(rand()*$sleep);
+ if($opt::semaphoretimeout) {
+ if($opt::semaphoretimeout > 0
+ and
+ time - $start_time > $opt::semaphoretimeout) {
+ # Timeout: Take the semaphore anyway
+ ::warning("Semaphore timed out. Stealing the semaphore.");
+ if(not -e $self->{'idfile'}) {
+ open (my $fh, ">", $self->{'idfile'}) or
+ ::die_bug("timeout_write_idfile: $self->{'idfile'}");
+ close $fh;
+ }
+ link $self->{'idfile'}, $self->{'pidfile'};
+ last;
+ }
+ if($opt::semaphoretimeout < 0
+ and
+ time - $start_time > -$opt::semaphoretimeout) {
+ # Timeout: Exit
+ ::warning("Semaphore timed out. Exiting.");
+ exit(1);
+ last;
+ }
+ }
+ }
+ ::debug("sem", "acquired $self->{'pid'}\n");
+}
+
+sub release($) {
+ my $self = shift;
+ ::rm($self->{'pidfile'});
+ if($self->nlinks() == 1) {
+ # This is the last link, so atomic cleanup
+ $self->lock();
+ if($self->nlinks() == 1) {
+ ::rm($self->{'idfile'});
+ rmdir $self->{'lockdir'};
+ }
+ $self->unlock();
+ }
+ ::debug("run", "released $self->{'pid'}\n");
+}
+
+sub pid_change($) {
+ # This should do what release()+acquire() would do without having
+ # to re-acquire the semaphore
+ my $self = shift;
+
+ my $old_pidfile = $self->{'pidfile'};
+ $self->{'pid'} = $$;
+ $self->{'pidfile'} = $self->{'lockdir'}."/".$$.'@'.::hostname();
+ my $retval = link $self->{'idfile'}, $self->{'pidfile'};
+ ::debug("sem","link($self->{'idfile'},$self->{'pidfile'})=$retval\n");
+ ::rm($old_pidfile);
+}
+
+sub atomic_link_if_count_less_than($) {
+ # Link $file1 to $file2 if nlinks to $file1 < $count
+ my $self = shift;
+ my $retval = 0;
+ $self->lock();
+ my $nlinks = $self->nlinks();
+ ::debug("sem","$nlinks<$self->{'count'} ");
+ if($nlinks < $self->{'count'}) {
+ -d $self->{'lockdir'} or ::mkdir_or_die($self->{'lockdir'});
+ if(not -e $self->{'idfile'}) {
+ open (my $fh, ">", $self->{'idfile'}) or
+ ::die_bug("write_idfile: $self->{'idfile'}");
+ close $fh;
+ }
+ $retval = link $self->{'idfile'}, $self->{'pidfile'};
+ ::debug("sem","link($self->{'idfile'},$self->{'pidfile'})=$retval\n");
+ }
+ $self->unlock();
+ ::debug("sem", "atomic $retval");
+ return $retval;
+}
+
+sub nlinks($) {
+ my $self = shift;
+ if(-e $self->{'idfile'}) {
+ return (stat(_))[3];
+ } else {
+ return 0;
+ }
+}
+
+sub lock($) {
+ my $self = shift;
+ my $sleep = 100; # 100 ms
+ my $total_sleep = 0;
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+ my $locked = 0;
+ while(not $locked) {
+ if(tell($self->{'lockfh'}) == -1) {
+ # File not open
+ open($self->{'lockfh'}, ">", $self->{'lockfile'})
+ or ::debug("run", "Cannot open $self->{'lockfile'}");
+ }
+ if($self->{'lockfh'}) {
+ # File is open
+ chmod 0666, $self->{'lockfile'}; # assuming you want it a+rw
+ if(flock($self->{'lockfh'}, LOCK_EX()|LOCK_NB())) {
+ # The file is locked: No need to retry
+ $locked = 1;
+ last;
+ } else {
+ if ($! =~ m/Function not implemented/) {
+ ::warning("flock: $!",
+ "Will wait for a random while.");
+ ::usleep(rand(5000));
+ # File cannot be locked: No need to retry
+ $locked = 2;
+ last;
+ }
+ }
+ }
+ # Locking failed in first round
+ # Sleep and try again
+ $sleep = ($sleep < 1000) ? ($sleep * 1.1) : ($sleep);
+ # Random to avoid every sleeping job waking up at the same time
+ ::usleep(rand()*$sleep);
+ $total_sleep += $sleep;
+ if($opt::semaphoretimeout) {
+ if($opt::semaphoretimeout > 0
+ and
+ $total_sleep/1000 > $opt::semaphoretimeout) {
+ # Timeout: Take the semaphore anyway
+ ::warning("Semaphore timed out. Taking the semaphore.");
+ $locked = 3;
+ last;
+ }
+ if($opt::semaphoretimeout < 0
+ and
+ $total_sleep/1000 > -$opt::semaphoretimeout) {
+ # Timeout: Exit
+ ::warning("Semaphore timed out. Exiting.");
+ $locked = 4;
+ last;
+ }
+ } else {
+ if($total_sleep/1000 > 30) {
+ ::warning("Semaphore stuck for 30 seconds. ".
+ "Consider using --semaphoretimeout.");
+ }
+ }
+ }
+ ::debug("run", "locked $self->{'lockfile'}");
+}
+
+sub unlock($) {
+ my $self = shift;
+ ::rm($self->{'lockfile'});
+ close $self->{'lockfh'};
+ ::debug("run", "unlocked\n");
+}
+
+# Keep perl -w happy
+
+$opt::x = $Semaphore::timeout = $Semaphore::wait =
+$Job::file_descriptor_warning_printed = $Global::envdef = @Arg::arg =
+$Global::max_slot_number = $opt::session;
+
+package main;
+
+
+sub main() {
+ unpack_combined_executable();
+ save_stdin_stdout_stderr();
+ save_original_signal_handler();
+ parse_options();
+ ::debug("init", "Open file descriptors: ", join(" ",keys %Global::fh), "\n");
+ my $number_of_args;
+ if($Global::max_number_of_args) {
+ $number_of_args = $Global::max_number_of_args;
+ } elsif ($opt::X or $opt::m or $opt::xargs) {
+ $number_of_args = undef;
+ } else {
+ $number_of_args = 1;
+ }
+
+ my @command = @ARGV;
+ my @input_source_fh;
+ if($opt::pipepart) {
+ if($opt::tee) {
+ @input_source_fh = map { open_or_exit("<",$_) } @opt::a;
+ # Remove the first: It will be the file piped.
+ shift @input_source_fh;
+ if(not @input_source_fh and not $opt::pipe) {
+ @input_source_fh = (*STDIN);
+ }
+ } else {
+ # -a is used for data - not for command line args
+ @input_source_fh = map { open_or_exit("<",$_) } "/dev/null";
+ }
+ } else {
+ @input_source_fh = map { open_or_exit("<",$_) } @opt::a;
+ if(not @input_source_fh and not $opt::pipe) {
+ @input_source_fh = (*STDIN);
+ }
+ }
+
+ if($opt::skip_first_line) {
+ # Skip the first line for the first file handle
+ my $fh = $input_source_fh[0];
+ <$fh>;
+ }
+
+ set_input_source_header(\@command,\@input_source_fh);
+ if($opt::filter_hosts and (@opt::sshlogin or @opt::sshloginfile)) {
+ # Parallel check all hosts are up. Remove hosts that are down
+ filter_hosts();
+ }
+ if($opt::sqlmaster and $opt::sqlworker) {
+ # Start a real --sqlworker in the background later
+ $Global::start_sqlworker = 1;
+ $opt::sqlworker = undef;
+ }
+
+ $Global::start_time = ::now();
+ if($opt::nonall or $opt::onall) {
+ onall(\@input_source_fh,@command);
+ wait_and_exit(min(undef_as_zero($Global::exitstatus),254));
+ }
+
+ $Global::JobQueue = JobQueue->new(
+ \@command, \@input_source_fh, $Global::ContextReplace,
+ $number_of_args, \@Global::transfer_files, \@Global::ret_files,
+ \@Global::template_names, \@Global::template_contents
+ );
+
+ if($opt::sqlmaster) {
+ # Create SQL table to hold joblog + output
+ # Figure out how many arguments are in a job
+ # (It is affected by --colsep, -N, $number_source_fh)
+ my $record_queue = $Global::JobQueue->{'commandlinequeue'}{'arg_queue'};
+ my $record = $record_queue->get();
+ my $no_of_values = $number_of_args * (1+$#{$record});
+ $record_queue->unget($record);
+ $Global::sql->create_table($no_of_values);
+ if($opt::sqlworker) {
+ # Start a real --sqlworker in the background later
+ $Global::start_sqlworker = 1;
+ $opt::sqlworker = undef;
+ }
+ }
+
+ if($opt::pipepart) {
+ pipepart_setup();
+ } elsif($opt::pipe) {
+ if($opt::tee) {
+ pipe_tee_setup();
+ } elsif($opt::shard or $opt::bin) {
+ pipe_shard_setup();
+ } elsif($opt::groupby) {
+ pipe_group_by_setup();
+ }
+ }
+
+ if($opt::eta or $opt::bar or $opt::shuf or $Global::halt_pct) {
+ # Count the number of jobs or shuffle all jobs
+ # before starting any.
+ # Must be done after ungetting any --pipepart jobs.
+ $Global::JobQueue->total_jobs();
+ }
+ # Compute $Global::max_jobs_running
+ # Must be done after ungetting any --pipepart jobs.
+ max_jobs_running();
+ init_run_jobs();
+ my $sem;
+ if($Global::semaphore) {
+ $sem = acquire_semaphore();
+ }
+ $SIG{TERM} = $Global::original_sig{TERM};
+ $SIG{HUP} = \&start_no_new_jobs;
+
+ if($opt::progress) {
+ ::status_no_nl(init_progress());
+ }
+ if($opt::tee or $opt::shard or $opt::bin) {
+ # All jobs must be running in parallel for --tee/--shard/--bin
+ while(start_more_jobs()) {}
+ $Global::start_no_new_jobs = 1;
+ if(not $Global::JobQueue->empty()) {
+ if($opt::tee) {
+ ::error(255,"--tee requires --jobs to be higher. ".
+ "Try --jobs 0.");
+ } elsif($opt::bin) {
+ ::error(255,"--bin requires --jobs to be higher than ".
+ "the number of", "arguments. Increase --jobs.");
+ } elsif($opt::shard) {
+ ::error(255,"--shard requires --jobs to be higher ".
+ "than the number of",
+ "arguments. Increase --jobs.");
+ } else {
+ ::die_bug("--bin/--shard/--tee should not get here");
+ }
+ }
+ } elsif($opt::pipe and not $opt::pipepart and not $opt::semaphore) {
+ # Fill all jobslots
+ while(start_more_jobs()) {}
+ spreadstdin();
+ } else {
+ # Reap the finished jobs and start more
+ while(reapers() + start_more_jobs()) {}
+ }
+ ::debug("init", "Start draining\n");
+ drain_job_queue(@command);
+ ::debug("init", "Done draining\n");
+ reapers();
+ ::debug("init", "Done reaping\n");
+ if($Global::semaphore) { $sem->release(); }
+ cleanup();
+ ::debug("init", "Halt\n");
+ halt();
+}
+
+main();
diff --git a/src/sem.pod b/src/sem.pod
new file mode 100644
index 0000000..0ca1ecc
--- /dev/null
+++ b/src/sem.pod
@@ -0,0 +1,381 @@
+#!/usr/bin/perl -w
+
+# SPDX-FileCopyrightText: 2021-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GFDL-1.3-or-later
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+=head1 NAME
+
+sem - semaphore for executing shell command lines in parallel
+
+=head1 SYNOPSIS
+
+B<sem> [--fg] [--id <id>] [--semaphoretimeout <secs>] [-j <num>] [--wait] command
+
+=head1 DESCRIPTION
+
+GNU B<sem> is an alias for GNU B<parallel --semaphore>.
+
+GNU B<sem> acts as a counting semaphore. When GNU B<sem> is called
+with I<command>, it starts the command in the background. When I<num>
+commands are already running in the background, GNU B<sem> waits for
+one of them to complete before starting the command.
+
+GNU B<sem> does not read any arguments to build the command (no B<-a>,
+B<:::>, or B<::::>). It simply waits for a semaphore to become
+available and then runs the command given.
+
+Before looking at the options you may want to check out the examples
+after the list of options. That will give you an idea of what GNU
+B<sem> is capable of.
+
+=head1 OPTIONS
+
+=over 9
+
+=item I<command>
+
+Command to execute. The command may be followed by arguments for the
+command.
+
+
+=item B<--bg>
+
+Run the command in the background; GNU B<sem> will thus not wait for
+the command to complete before exiting. This is the default.
+
+In toilet analogy: GNU B<sem> waits for a toilet to be available,
+gives the toilet to a person, and exits immediately.
+
+See also: B<--fg>
+
+
+=item B<--jobs> I<N>
+
+=item B<-j> I<N>
+
+=item B<--max-procs> I<N>
+
+=item B<-P> I<N>
+
+Run up to N commands in parallel. The default is 1, thus acting as a
+mutex.
+
+In toilet analogy: B<-j> is the number of toilets.
+
+
+=item B<--jobs> I<+N>
+
+=item B<-j> I<+N>
+
+=item B<--max-procs> I<+N>
+
+=item B<-P> I<+N>
+
+Add N to the number of CPU cores. Run up to this many jobs in
+parallel. For compute-intensive jobs B<-j> I<+0> is useful, as it will
+run number-of-cpu-cores jobs simultaneously.
+
+
+=item B<--jobs> I<-N>
+
+=item B<-j> I<-N>
+
+=item B<--max-procs> I<-N>
+
+=item B<-P> I<-N>
+
+Subtract N from the number of CPU cores. Run up to this many jobs in
+parallel. If the evaluated number is less than 1 then 1 will be used.
+See also B<--use-cpus-instead-of-cores>.
+
+
+=item B<--jobs> I<N>%
+
+=item B<-j> I<N>%
+
+=item B<--max-procs> I<N>%
+
+=item B<-P> I<N>%
+
+Multiply N% by the number of CPU cores. Run up to this many jobs in
+parallel. If the evaluated number is less than 1 then 1 will be used.
+See also B<--use-cpus-instead-of-cores>.
+
+
+=item B<--jobs> I<procfile>
+
+=item B<-j> I<procfile>
+
+=item B<--max-procs> I<procfile>
+
+=item B<-P> I<procfile>
+
+Read the parameter from a file. Use the content of I<procfile> as the
+parameter for I<-j>. E.g. I<procfile> could contain the string 100% or
++2 or 10.
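+
+For example (I</tmp/procfile> is just an illustrative path):
+
+  echo 50% > /tmp/procfile
+  sem -j /tmp/procfile mycommand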
+
+
+=item B<--pipe>
+
+Pass stdin (standard input) to I<command>.
+
+If I<command> reads from stdin (standard input), use B<--pipe>.
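+
+For example, to count the bytes on stdin while holding the semaphore:
+
+  echo hello | sem --fg --pipe wc -c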
+
+
+=item B<--semaphorename> I<name>
+
+=item B<--id> I<name>
+
+Use B<name> as the name of the semaphore. Default is the name of the
+controlling tty (output from B<tty>).
+
+The default normally works as expected when used interactively, but
+when used in a script I<name> should be set. I<$$> or I<my_task_name>
+is often a good value.
+
+The semaphore is stored in ~/.parallel/semaphores/
+
+In toilet analogy the name corresponds to different types of toilets:
+e.g. male, female, customer, staff.
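+
+For example, using the script's PID as the semaphore name:
+
+  sem --id $$ 'sleep 10'
+  sem --id $$ --wait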
+
+
+=item B<--fg>
+
+Do not put command in background.
+
+In toilet analogy: GNU B<sem> waits for a toilet to be available,
+takes a person to the toilet, waits for the person to finish, and
+exits.
+
+
+=item B<--semaphoretimeout> I<secs>
+
+=item B<--st> I<secs>
+
+If I<secs> > 0: If the semaphore is not released within I<secs>
+seconds, take it anyway.
+
+If I<secs> < 0: If the semaphore is not released within I<secs>
+seconds, exit.
+
+In toilet analogy: I<secs> > 0: If no toilet becomes available within
+I<secs> seconds, pee on the floor. I<secs> < 0: If no toilet becomes
+available within I<secs> seconds, exit without doing anything.
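+
+For example, to give up and exit if the semaphore is not available
+within 10 seconds (I<myid> and I<mycommand> are placeholders):
+
+  sem --id myid --st -10 mycommand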
+
+
+=item B<--wait>
+
+Wait for all commands to complete.
+
+In toilet analogy: Wait until all toilets are empty, then exit.
+
+
+=back
+
+=head1 UNDERSTANDING A SEMAPHORE
+
+Try the following example:
+
+ sem -j 2 'sleep 1;echo 1 finished'; echo sem 1 exited
+ sem -j 2 'sleep 2;echo 2 finished'; echo sem 2 exited
+ sem -j 2 'sleep 3;echo 3 finished'; echo sem 3 exited
+ sem -j 2 'sleep 4;echo 4 finished'; echo sem 4 exited
+ sem --wait; echo sem --wait done
+
+In toilet analogy this uses 2 toilets (B<-j 2>). GNU B<sem> takes '1'
+to a toilet, and exits immediately. While '1' is sleeping, another GNU
+B<sem> takes '2' to a toilet, and exits immediately.
+
+While '1' and '2' are sleeping, another GNU B<sem> waits for a free
+toilet. When '1' finishes, a toilet becomes available, and this GNU
+B<sem> stops waiting, and takes '3' to a toilet, and exits
+immediately.
+
+While '2' and '3' are sleeping, another GNU B<sem> waits for a free
+toilet. When '2' finishes, a toilet becomes available, and this GNU
+B<sem> stops waiting, and takes '4' to a toilet, and exits
+immediately.
+
+Finally another GNU B<sem> waits for all toilets to become free.
+
+
+=head1 EXAMPLE: Gzipping *.log
+
+Run one gzip process per CPU core. Block until a CPU core becomes
+available.
+
+ for i in *.log ; do
+ echo $i
+ sem -j+0 gzip $i ";" echo done
+ done
+ sem --wait
+
+=head1 EXAMPLE: Protecting pod2html from itself
+
+pod2html creates two files: pod2htmd.tmp and pod2htmi.tmp which it
+does not clean up. It uses these two files for a short time. But if
+you run multiple pod2html in parallel (e.g. in a Makefile with make
+-j) there is a risk that two different instances of pod2html will
+write to the files at the same time:
+
+ # This may fail due to shared pod2htmd.tmp/pod2htmi.tmp files
+ foo.html:
+ pod2html foo.pod --outfile foo.html
+
+ bar.html:
+ pod2html bar.pod --outfile bar.html
+
+ $ make -j foo.html bar.html
+
+You need to protect pod2html from running twice at the same time.
+B<sem> running as a mutex will make sure only one runs:
+
+ foo.html:
+ sem --id pod2html pod2html foo.pod --outfile foo.html
+
+ bar.html:
+ sem --id pod2html pod2html bar.pod --outfile bar.html
+
+ clean: foo.html bar.html
+ sem --id pod2html --wait
+ rm -f pod2htmd.tmp pod2htmi.tmp
+
+ $ make -j foo.html bar.html clean
+
+=head1 BUGS
+
+None known.
+
+
+=head1 REPORTING BUGS
+
+Report bugs to <bug-parallel@gnu.org>.
+
+
+=head1 AUTHOR
+
+Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+
+=head1 LICENSE
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this
+documentation under the terms of the GNU Free Documentation License,
+Version 1.3 or any later version published by the Free Software
+Foundation; with no Invariant Sections, with no Front-Cover Texts, and
+with no Back-Cover Texts. A copy of the license is included in the
+file LICENSES/GFDL-1.3-or-later.txt.
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 2
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=back
+
+=over 9
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file
+LICENSES/CC-BY-SA-4.0.txt.
+
+
+=head1 DEPENDENCIES
+
+GNU B<sem> uses Perl, and the Perl modules Getopt::Long,
+Symbol, Fcntl.
+
+
+=head1 SEE ALSO
+
+B<parallel>(1)
+
+=cut
diff --git a/src/sql b/src/sql
new file mode 100755
index 0000000..3e657ab
--- /dev/null
+++ b/src/sql
@@ -0,0 +1,1279 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2008-2010 Ole Tange, http://ole.tange.dk
+#
+# Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and
+# Free Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+#
+# SPDX-FileCopyrightText: 2008-2024 Ole Tange, http://ole.tange.dk and Free Software and Foundation, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# SPDX-License-Identifier: GFDL-1.3-or-later
+
+=head1 NAME
+
+sql - execute a command on a database determined by a dburl
+
+=head1 SYNOPSIS
+
+B<sql> [options] I<dburl> [I<commands>]
+
+B<sql> [options] I<dburl> < commandfile
+
+B<#!/usr/bin/sql> B<--shebang> [options] I<dburl>
+
+=head1 DESCRIPTION
+
+GNU B<sql> aims to give a simple, unified interface for accessing
+databases through all the different databases' command line
+clients. So far the focus has been on giving a common way of
+specifying login information (protocol, username, password, hostname,
+and port number), showing sizes (database and table size), and
+running queries.
+
+The database is addressed using a DBURL. If I<commands> are left out
+you will get that database's interactive shell.
+
+GNU B<sql> is often used in combination with GNU B<parallel>.
+
+=over 9
+
+=item I<dburl>
+
+A DBURL has the following syntax:
+[sql:]vendor://
+[[user][:password]@][host][:port]/[database][?sqlquery]
+
+See the section DBURL below.
+
+=item I<commands>
+
+The SQL commands to run. Each argument will have a newline
+appended.
+
+Example: "SELECT * FROM foo;" "SELECT * FROM bar;"
+
+If the arguments contain '\n' or '\x0a', these will be replaced with
+a newline:
+
+Example: "SELECT * FROM foo;\n SELECT * FROM bar;"
+
+If no commands are given, SQL is read from the keyboard or STDIN.
+
+Example: echo 'SELECT * FROM foo;' | sql mysql:///
+
+
+=item B<--csv>
+
+CSV output.
+
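+For example (in the current implementation B<--csv> takes effect for
+InfluxDB dburls):
+
+  sql --csv influxdb:///foo 'SELECT * FROM bar;'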
+
+=item B<--db-size>
+
+=item B<--dbsize>
+
+Size of database. Show the size of the database on disk. For Oracle
+this requires read access to the table I<dba_data_files>; the user
+I<system> has that access.
+
+
+=item B<--help>
+
+=item B<-h>
+
+Print a summary of the options to GNU B<sql> and exit.
+
+
+=item B<--html>
+
+HTML output. Turn on HTML tabular output.
+
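+For example:
+
+  sql --html mysql:/// 'SELECT * FROM foo;' > foo.html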
+
+=item B<--json>
+
+=item B<--pretty>
+
+JSON output. With B<--pretty> the JSON is pretty-printed.
+
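+For example (in the current implementation JSON output takes effect
+for InfluxDB dburls):
+
+  sql --pretty influxdb:///foo 'SELECT * FROM bar;'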
+
+=item B<--list-databases>
+
+=item B<--listdbs>
+
+=item B<--show-databases>
+
+=item B<--showdbs>
+
+List the databases (table spaces) in the database.
+
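+For example:
+
+  sql --list-databases mysql:///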
+
+=item B<--listproc>
+
+=item B<--proclist>
+
+=item B<--show-processlist>
+
+Show the list of running queries.
+
+
+=item B<--list-tables>
+
+=item B<--show-tables>
+
+=item B<--table-list>
+
+List the tables in the database.
+
+
+=item B<--noheaders>
+
+=item B<--no-headers>
+
+=item B<-n>
+
+Remove headers and footers and print only tuples. Bug in Oracle: it
+still prints the number of rows found.
+
+
+=item B<-p> I<pass-through>
+
+The string following -p will be given to the database connection
+program as arguments. Multiple -p's will be joined with a
+space. Example: pass '-U' and the user name to the program:
+
+I<-p "-U scott"> can also be written I<-p -U -p scott>.
+
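+For example:
+
+  sql -p "-U scott" pg://pg.example.com/pgdb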
+
+=item B<--precision> <I<rfc3339|h|m|s|ms|u|ns>>
+
+Precision of timestamps.
+
+Specify the format of the output timestamps: rfc3339, h, m, s, ms, u
+or ns.
+
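+For example, to get timestamps in RFC 3339 format from InfluxDB:
+
+  sql --precision rfc3339 influxdb:///foo 'SELECT * FROM bar;'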
+
+=item B<-r>
+
+Try 3 times. Short version of I<--retries 3>.
+
+
+=item B<--retries> I<ntimes>
+
+Try I<ntimes> times. If the client program returns with an error,
+retry the command. Default is I<--retries 1>.
+
+
+=item B<--sep> I<string>
+
+=item B<-s> I<string>
+
+Field separator. Use I<string> as separator between columns.
+
+
+=item B<--skip-first-line>
+
+Do not use the first line of input (used by GNU B<sql> itself
+when called with B<--shebang>).
+
+
+=item B<--table-size>
+
+=item B<--tablesize>
+
+Size of tables. Show the size of the tables in the database.
+
+
+=item B<--verbose>
+
+=item B<-v>
+
+Print which command is sent.
+
+
+=item B<--version>
+
+=item B<-V>
+
+Print the version of GNU B<sql> and exit.
+
+
+=item B<--shebang>
+
+=item B<-Y>
+
+GNU B<sql> can be called as a shebang (#!) command as the first line
+of a script, like this:
+
+ #!/usr/bin/sql -Y mysql:///
+
+ SELECT * FROM foo;
+
+For this to work B<--shebang> or B<-Y> must be set as the first option.
+
+=back
+
+=head1 DBURL
+
+A DBURL has the following syntax:
+
+ [sql:]vendor://[[user][:password]@][host][:port]/[database][?sqlquery]
+
+To quote special characters use the %-encoding specified in
+http://tools.ietf.org/html/rfc3986#section-2.1 (e.g. a password
+containing '/' would use '%2F').
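+
+For example, the password 'tig/er' would be given as:
+
+  mysql://scott:tig%2Fer@my.example.com/mydb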
+
+Examples:
+
+ mysql://scott:tiger@my.example.com/mydb
+ influxdb://scott:tiger@influxdb.example.com/foo
+ sql:oracle://scott:tiger@ora.example.com/xe
+ postgresql://scott:tiger@pg.example.com/pgdb
+ pg:///
+ postgresqlssl://scott@pg.example.com:3333/pgdb
+ sql:sqlite2:////tmp/db.sqlite?SELECT * FROM foo;
+ sqlite3:///../db.sqlite3?SELECT%20*%20FROM%20foo;
+
+Currently supported vendors: MySQL (mysql), MySQL with SSL (mysqls,
+mysqlssl), Oracle (oracle, ora), PostgreSQL (postgresql, pg, pgsql,
+postgres), PostgreSQL with SSL (postgresqlssl, pgs, pgsqlssl,
+postgresssl, pgssl, postgresqls, pgsqls, postgress), SQLite2 (sqlite,
+sqlite2), SQLite3 (sqlite3), InfluxDB 1.x (influx, influxdb), InfluxDB
+with SSL (influxdbssl, influxdbs, influxs, influxssl)
+
+Aliases must start with ':' and are read from
+/etc/sql/aliases and ~/.sql/aliases. The user's own
+~/.sql/aliases should only be readable by the user.
+
+Example of aliases:
+
+ :myalias1 pg://scott:tiger@pg.example.com/pgdb
+ :myalias2 ora://scott:tiger@ora.example.com/xe
+ # Short form of mysql://`whoami`:nopassword@localhost:3306/`whoami`
+ :myalias3 mysql:///
+ # Short form of mysql://`whoami`:nopassword@localhost:33333/mydb
+ :myalias4 mysql://:33333/mydb
+ # Alias for an alias
+ :m :myalias4
+  # The shortest alias possible
+ : sqlite2:////tmp/db.sqlite
+ # Including an SQL query
+ :query sqlite:////tmp/db.sqlite?SELECT * FROM foo;
+
+=head1 EXAMPLES
+
+=head2 Get an interactive prompt
+
+The most basic use of GNU B<sql> is to get an interactive prompt:
+
+B<sql sql:oracle://scott:tiger@ora.example.com/xe>
+
+If you have set up an alias you can do:
+
+B<sql :myora>
+
+
+=head2 Run a query
+
+To run a query directly from the command line:
+
+B<sql :myalias "SELECT * FROM foo;">
+
+Oracle requires newlines after each statement. This can be done like
+this:
+
+B<sql :myora "SELECT * FROM foo;" "SELECT * FROM bar;">
+
+Or this:
+
+B<sql :myora "SELECT * FROM foo;\nSELECT * FROM bar;">
+
+
+=head2 Copy a PostgreSQL database
+
+To copy a PostgreSQL database use pg_dump to generate the dump and GNU
+B<sql> to import it:
+
+B<pg_dump pg_database | sql pg://scott:tiger@pg.example.com/pgdb>
+
+
+=head2 Empty all tables in a MySQL database
+
+Using GNU B<parallel> it is easy to empty all tables without dropping them:
+
+B<sql -n mysql:/// 'show tables' | parallel sql mysql:/// DELETE FROM {};>
+
+
+=head2 Drop all tables in a PostgreSQL database
+
+To drop all tables in a PostgreSQL database do:
+
+B<sql -n pg:/// '\dt' | parallel --colsep '\|' -r sql pg:/// DROP TABLE {2};>
+
+
+=head2 Run as a script
+
+Instead of doing:
+
+B<sql mysql:/// < sqlfile>
+
+you can combine the sqlfile with the DBURL to make a
+UNIX-script. Create a script called I<demosql>:
+
+B<#!/usr/bin/sql -Y mysql:///>
+
+B<SELECT * FROM foo;>
+
+Then do:
+
+B<chmod +x demosql; ./demosql>
+
+
+=head2 Use --colsep to process multiple columns
+
+Use GNU B<parallel>'s B<--colsep> to separate columns:
+
+B<sql -s '\t' :myalias 'SELECT * FROM foo;' | parallel --colsep '\t' do_stuff {4} {1}>
+
+
+=head2 Retry if the connection fails
+
+If access to the database occasionally fails, B<--retries> can help
+make sure the query succeeds:
+
+B<sql --retries 5 :myalias 'SELECT * FROM really_big_foo;'>
+
+
+=head2 Get info about the running database system
+
+Show how big the database is:
+
+B<sql --db-size :myalias>
+
+List the tables:
+
+B<sql --list-tables :myalias>
+
+List the size of the tables:
+
+B<sql --table-size :myalias>
+
+List the running processes:
+
+B<sql --show-processlist :myalias>
+
+
+=head1 REPORTING BUGS
+
+GNU B<sql> is part of GNU B<parallel>. Report bugs to <bug-parallel@gnu.org>.
+
+
+=head1 AUTHOR
+
+When using GNU B<sql> for a publication please cite:
+
+O. Tange (2011): GNU SQL - A Command Line Tool for Accessing Different
+Databases Using DBURLs, ;login: The USENIX Magazine, April 2011:29-32.
+
+Copyright (C) 2008-2010 Ole Tange http://ole.tange.dk
+
+Copyright (C) 2010-2024 Ole Tange, http://ole.tange.dk and Free
+Software Foundation, Inc.
+
+=head1 LICENSE
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this
+documentation under the terms of the GNU Free Documentation License,
+Version 1.3 or any later version published by the Free Software
+Foundation; with no Invariant Sections, with no Front-Cover Texts, and
+with no Back-Cover Texts. A copy of the license is included in the
+file LICENSES/GFDL-1.3-or-later.txt.
+
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 9
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file
+LICENSES/CC-BY-SA-4.0.txt.
+
+=head1 DEPENDENCIES
+
+GNU B<sql> uses Perl. If B<mysql> is installed, MySQL dburls will
+work. If B<psql> is installed, PostgreSQL dburls will work. If
+B<sqlite> is installed, SQLite2 dburls will work. If B<sqlite3> is
+installed, SQLite3 dburls will work. If B<sqlplus> is installed,
+Oracle dburls will work. If B<influx> is installed, InfluxDB dburls
+will work. If B<rlwrap> is installed, GNU B<sql> will have a command
+history for Oracle.
+
+
+=head1 FILES
+
+~/.sql/aliases - user's own aliases with DBURLs
+
+/etc/sql/aliases - common aliases with DBURLs
+
+
+=head1 SEE ALSO
+
+B<mysql>(1), B<psql>(1), B<rlwrap>(1), B<sqlite>(1), B<sqlite3>(1),
+B<sqlplus>(1), B<influx>(1)
+
+=cut
+
+use Getopt::Long;
+use strict;
+use File::Temp qw/tempfile tempdir/;
+
+sub shell_quote($) {
+ # Quote for other shells (Bourne compatibles)
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $shell_quoted = string quoted as needed by the shell
+ my $s = $_[0];
+ if($s =~ /[^-_.+a-z0-9\/]/i) {
+ $s =~ s/'/'"'"'/g; # "-quote single quotes
+ $s = "'$s'"; # '-quote entire string
+ $s =~ s/^''//; # Remove unneeded '' at ends
+ $s =~ s/''$//; # (faster than s/^''|''$//g)
+ return $s;
+ } elsif ($s eq "") {
+ return "''";
+ } else {
+ # No quoting needed
+ return $s;
+ }
+}
+
+parse_options();
+
+my $pass_through_options = (defined $::opt_p) ? join(" ",@{$::opt_p}) : "";
+my $dburl_or_alias = shift;
+if (not defined $dburl_or_alias) { Usage("No DBURL given"); exit -1; }
+my %dburl = parse_dburl(get_alias($dburl_or_alias));
+
+my $interactive_command;
+my $batch_command;
+
+my $database_driver = database_driver_alias($dburl{'databasedriver'});
+if($database_driver eq "mysql" or
+ $database_driver eq "mysqlssl") {
+ ($batch_command,$interactive_command) =
+ mysql_commands($database_driver,%dburl);
+} elsif($database_driver eq "postgresql" or
+ $database_driver eq "postgresqlssl") {
+ ($batch_command,$interactive_command) =
+ postgresql_commands($database_driver,%dburl);
+} elsif($database_driver eq "oracle") {
+ ($batch_command,$interactive_command) =
+ oracle_commands($database_driver,%dburl);
+} elsif($database_driver eq "sqlite" or
+ $database_driver eq "sqlite3") {
+ ($batch_command,$interactive_command) =
+ sqlite_commands($database_driver,%dburl);
+} elsif($database_driver eq "influx" or
+ $database_driver eq "influxssl") {
+ ($batch_command,$interactive_command) =
+ influx_commands($database_driver,%dburl);
+}
+
+my $err;
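+# Number of attempts: --retries n => n attempts; -r => 3 attempts;
+# otherwise a single attempt.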
+my $retries;
+$retries ||= defined $::opt_retries ? $::opt_retries : undef;
+$retries ||= defined $::opt_retry ? $::opt_retry * 3 : undef;
+$retries ||= 1;
+
+if(defined $::opt_processlist) {
+ unshift @ARGV, processlist($database_driver,%dburl);
+}
+
+if(defined $::opt_tablelist) {
+ unshift @ARGV, tablelist($database_driver,%dburl);
+}
+
+if(defined $::opt_dblist) {
+ unshift @ARGV, dblist($database_driver,%dburl);
+}
+
+if(defined $::opt_dbsize) {
+ unshift @ARGV, dbsize($database_driver,%dburl);
+}
+
+if(defined $::opt_tablesize) {
+ unshift @ARGV, tablesize($database_driver,%dburl);
+}
+
+my $queryfile = "";
+if($dburl{'query'}) {
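+    # The DBURL contained a ?query: save it in a tempfile and feed it
+    # to the batch command before anything arriving on stdin.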
+ my $fh;
+ ($fh,$queryfile) = tempfile(SUFFIX => ".sql");
+ print $fh $dburl{'query'},"\n";
+ close $fh;
+ my $qfile = shell_quote($queryfile);
+ $batch_command = "(cat $qfile;rm $qfile; cat) | $batch_command";
+}
+
+
+
+do {
+ if(@ARGV) {
+ # SQL Commands given as arguments:
+ # Run those commands
+ $::opt_debug and print "[ | $batch_command]\n";
+ $::opt_verbose and print "[ | $batch_command]\n";
+ if($database_driver eq "influx" or $database_driver eq "influxssl") {
+ # Influx currently cannot read commands from stdin
+ for(@ARGV) {
+ s/\\n/\n/g;
+ s/\\x0a/\n/gi;
+ $::opt_debug and print "[$batch_command -execute $_]\n";
+ system("$batch_command -execute ".shell_quote($_));
+ }
+ } else {
+ open(M,"| $batch_command") ||
+ die("mysql/psql/sqlplus/influx not in path");
+ for(@ARGV) {
+ s/\\n/\n/g;
+ s/\\x0a/\n/gi;
+ print M "$_\n";
+ }
+ close M;
+ }
+ } else {
+ if (is_stdin_terminal()) {
+ # Run interactively
+ $::opt_debug and print "[$interactive_command]\n";
+ $::opt_verbose and print "[$interactive_command]\n";
+ system("$interactive_command");
+ } else {
+ # Let the command read from stdin
+ $::opt_debug and print "[$batch_command]\n";
+ $::opt_verbose and print "[$batch_command]\n";
+ if($database_driver eq "influx" or $database_driver eq "influxssl") {
+ # Influx currently cannot read commands from stdin
+ while(<STDIN>) {
+ s/\\n/\n/g;
+ s/\\x0a/\n/gi;
+ $::opt_debug and print "[$batch_command -execute $_]\n";
+ system("$batch_command -execute ".shell_quote($_));
+ }
+ } else{
+ system("$batch_command");
+ }
+ }
+ }
+ $err = $?>>8;
+} while (--$retries and $err);
+
+$queryfile and unlink $queryfile;
+
+$Global::Initfile && unlink $Global::Initfile;
+exit ($err);
+
+sub parse_options {
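+    # Parse the command line into $::opt_* globals. Must run before
+    # anything else: with -Y/--shebang the script re-execs itself to
+    # split the single #! argument line into separate options.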
+ $Global::version = 20240222;
+ $Global::progname = 'sql';
+
+ # This must be done first as this may exec myself
+ if(defined $ARGV[0] and ($ARGV[0]=~/^-Y/ or $ARGV[0]=~/^--shebang /)) {
+ # Program is called from #! line in script
+ $ARGV[0]=~s/^-Y //; # remove -Y if on its own
+ $ARGV[0]=~s/^-Y/-/; # remove -Y if bundled with other options
+ $ARGV[0]=~s/^--shebang //; # remove --shebang if it is set
+ my $argfile = pop @ARGV;
+ # exec myself to split @ARGV into separate fields
+ exec "$0 --skip-first-line < $argfile @ARGV";
+ }
+ Getopt::Long::Configure ("bundling","require_order");
+ GetOptions("passthrough|p=s@" => \$::opt_p,
+ "sep|s=s" => \$::opt_s,
+ "html" => \$::opt_html,
+ "show-processlist|proclist|listproc|showqueries|show-queries"
+ => \$::opt_processlist,
+ "show-tables|showtables|listtables|list-tables|".
+ "tablelist|table-list|show-measurements|showmeasurements|".
+ "list-measurements|listmeasurements" => \$::opt_tablelist,
+ "dblist|".
+ "listdb|listdbs|list-db|list-dbs|list-database|".
+ "list-databases|listdatabases|listdatabase|showdb|".
+ "showdbs|show-db|show-dbs|show-database|show-databases|".
+ "showdatabases|showdatabase" => \$::opt_dblist,
+ "db-size|dbsize" => \$::opt_dbsize,
+ "table-size|tablesize" => \$::opt_tablesize,
+ "json|j" => \$::opt_json,
+ "pretty" => \$::opt_pretty,
+ "csv" => \$::opt_csv,
+ "precision=s" => \$::opt_precision,
+ "noheaders|no-headers|n" => \$::opt_n,
+ "r" => \$::opt_retry,
+ "retries=s" => \$::opt_retries,
+ "debug|D" => \$::opt_debug,
+ # Shebang #!/usr/bin/parallel -Yotheroptions
+ "Y|shebang" => \$::opt_shebang,
+ "skip-first-line" => \$::opt_skip_first_line,
+ # GNU requirements
+ "help|h" => \$::opt_help,
+ "version|V" => \$::opt_version,
+ "verbose|v" => \$::opt_verbose,
+ ) || die_usage();
+
+ if(defined $::opt_help) { die_usage(); }
+ if(defined $::opt_version) { version(); exit(0); }
+ $Global::debug = $::opt_debug;
+}
+
+sub database_driver_alias {
+ my $driver = shift;
+ my %database_driver_alias = ("mysql" => "mysql",
+ "mysqls" => "mysqlssl",
+ "mysqlssl" => "mysqlssl",
+ "oracle" => "oracle",
+ "ora" => "oracle",
+ "oracles" => "oraclessl",
+ "oras" => "oraclessl",
+ "oraclessl" => "oraclessl",
+ "orassl" => "oraclessl",
+ "postgresql" => "postgresql",
+ "pgsql" => "postgresql",
+ "postgres" => "postgresql",
+ "pg" => "postgresql",
+ "postgresqlssl" => "postgresqlssl",
+ "pgsqlssl" => "postgresqlssl",
+ "postgresssl" => "postgresqlssl",
+ "pgssl" => "postgresqlssl",
+ "postgresqls" => "postgresqlssl",
+ "pgsqls" => "postgresqlssl",
+ "postgress" => "postgresqlssl",
+ "pgs" => "postgresqlssl",
+ "sqlite" => "sqlite",
+ "sqlite2" => "sqlite",
+ "sqlite3" => "sqlite3",
+ "influx" => "influx",
+ "influxdb" => "influx",
+ "influxssl" => "influxssl",
+ "influxdbssl" => "influxssl",
+ "influxs" => "influxssl",
+ "influxdbs" => "influxssl",
+ );
+ return $database_driver_alias{$driver};
+}
+
+sub mysql_commands {
+ my ($database_driver,%opt) = (@_);
+ find_command_in_path("mysql") || die ("mysql not in path");
+ if(defined($::opt_s)) { die "Field separator not implemented for mysql" }
+ my $password = defined($opt{'password'}) ? "--password=".$opt{'password'} : "";
+ my $host = defined($opt{'host'}) ? "--host=".$opt{'host'} : "";
+ my $port = defined($opt{'port'}) ? "--port=".$opt{'port'} : "";
+ my $user = defined($opt{'user'}) ? "--user=".$opt{'user'} : "";
+ my $database = defined($opt{'database'}) ? $opt{'database'} : $ENV{'USER'};
+ my $html = defined($::opt_html) ? "--html" : "";
+ my $no_headers = defined($::opt_n) ? "--skip-column-names" : "";
+ my $ssl = "";
+ if ($database_driver eq "mysqlssl") { $ssl="--ssl"; }
+ my($credential_fh,$tmp) = tempfile(SUFFIX => ".sql");
+ chmod (0600,$tmp);
+ print $credential_fh ("[client]\n",
+ $user && "user=$opt{'user'}\n",
+ $password && "password=$opt{'password'}\n",
+ $host && "host=$opt{'host'}\n");
+ close $credential_fh;
+
+ # Prepend with a remover of the credential tempfile
+ my $qtmp = shell_quote($tmp);
+ $batch_command =
+ "((sleep 1; rm $qtmp) & ".
+ "mysql --defaults-extra-file=$qtmp $pass_through_options ".
+ "$no_headers $html $ssl $host $user $port $database)";
+ $interactive_command = $batch_command;
+ return($batch_command,$interactive_command);
+}
+
+sub postgresql_commands {
+ my ($database_driver,%opt) = (@_);
+ find_command_in_path("psql") || die ("psql not in path");
+ my $sep = ($::opt_s) ? "-A --field-separator '$::opt_s'" : "";
+ my $password = defined($opt{'password'}) ?
+ "PGPASSWORD=".$opt{'password'} : "";
+ my $host = defined($opt{'host'}) ? "-h ".$opt{'host'} : "";
+ my $port = defined($opt{'port'}) ? "-p ".$opt{'port'} : "";
+ my $user = defined($opt{'user'}) ? "-U ".$opt{'user'} : "";
+ my $database = defined($opt{'database'}) ? "-d ".$opt{'database'} : "";
+ my $html = defined($::opt_html) ? "--html" : "";
+ my $no_headers = defined($::opt_n) ? "--tuples-only" : "";
+ my $ssl = "";
+ if ($database_driver eq "postgresqlssl") { $ssl="PGSSLMODE=require"; }
+ $batch_command =
+ "$ssl $password psql $pass_through_options $sep $no_headers ".
+ "$html $host $user $port $database";
+ $interactive_command = $batch_command;
+
+ return($batch_command,$interactive_command);
+}
+
+sub oracle_commands {
+ my ($database_driver,%opt) = (@_);
+ # oracle://user:pass@grum:1521/XE becomes:
+ # sqlplus 'user/pass@(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = grum)(PORT = 1521)) (CONNECT_DATA =(SERVER = DEDICATED)(SERVICE_NAME = XE) ))'
+ my $sqlplus = find_command_in_path("sqlplus") ||
+ find_command_in_path("sqlplus64") or
+ die("sqlplus/sqlplus64 not in path");
+
+ # Readline support: if rlwrap installed run rlwrap sqlplus
+ my $rlwrap = find_command_in_path("rlwrap");
+
+ # Set good defaults in the inifile
+ $Global::Initfile = "/tmp/$$.sql.init";
+ open(INIT,">".$Global::Initfile) || die;
+ print INIT "SET LINESIZE 32767\n";
+ $::opt_debug and print "SET LINESIZE 32767\n";
+ print INIT "SET WRAP OFF\n";
+ $::opt_debug and print "SET WRAP OFF\n";
+ if(defined($::opt_html)) {
+ print INIT "SET MARK HTML ON\n";
+ $::opt_debug and print "SET MARK HTML ON\n";
+ }
+ if(defined($::opt_n)) {
+ print INIT "SET PAGES 0\n";
+ $::opt_debug and print "SET PAGES 0\n";
+ } else {
+ print INIT "SET PAGES 50000\n";
+ $::opt_debug and print "SET PAGES 50000\n";
+ }
+ if(defined($::opt_s)) {
+ print INIT "SET COLSEP $::opt_s\n";
+ $::opt_debug and print "SET COLSEP $::opt_s\n";
+ }
+ close INIT;
+
+ my $password = defined($opt{'password'}) ? "/".$opt{'password'} : "";
+ my $host = defined($opt{'host'}) ? $opt{'host'} : "localhost";
+ my $port = defined($opt{'port'}) ? $opt{'port'} : "1521";
+ my $user = defined($opt{'user'}) ? $opt{'user'} : "";
+ # Database is called service in Oracle lingo
+ my $service = defined($opt{'database'}) ? "(SERVICE_NAME = ".$opt{'database'}.")" : "";
+ my $tns = "(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = $host)(PORT = $port)) ".
+ "(CONNECT_DATA =(SERVER = DEDICATED)$service ))";
+ my $ssl = "";
+ # -L: Do not re-ask for password if it is wrong
+ my $common_options = "-L $pass_through_options ".
+ "'$user$password\@$tns' \@$Global::Initfile";
+ my $batch_command = "$sqlplus -S ".$common_options;
+ my $interactive_command = "$rlwrap $sqlplus ".$common_options;
+ return($batch_command,$interactive_command);
+}
+
+sub sqlite_commands {
+ my ($database_driver,%opt) = (@_);
+ if(not find_command_in_path($database_driver)) {
+ print STDERR "Database driver '$database_driver' not supported\n";
+ exit -1;
+ }
+ my $sep = defined($::opt_s) ? "-separator '$::opt_s'" : "";
+ my $password = defined($opt{'password'}) ? "--password=".$opt{'password'} : "";
+ my $host = defined($opt{'host'}) ? "--host=".$opt{'host'} : "";
+ my $port = defined($opt{'port'}) ? "--port=".$opt{'port'} : "";
+ my $user = defined($opt{'user'}) ? "--user=".$opt{'user'} : "";
+ my $database = defined($opt{'database'}) ? $opt{'database'} : "";
+ my $html = defined($::opt_html) ? "-html" : "";
+ my $no_headers = defined($::opt_n) ? "-noheader" : "-header";
+ my $ssl = "";
+ $batch_command =
+ "$database_driver $pass_through_options $sep ".
+ "$no_headers $html $database";
+ $interactive_command = $batch_command;
+ return($batch_command,$interactive_command);
+}
+
+sub influx_commands {
+ my ($database_driver,%opt) = (@_);
+ my $influx = find_command_in_path("influx") ||
+ die ("influx not in path");
+ if(defined($::opt_s)) {
+ die "Field separator not implemented for influx";
+ }
+ my $password =
+ defined($opt{'password'}) ? "-password=".$opt{'password'} : "";
+ my $host = defined($opt{'host'}) ? "-host=".$opt{'host'} : "";
+ my $port = defined($opt{'port'}) ? "-port=".$opt{'port'} : "";
+ my $user = defined($opt{'user'}) ? "-username=".$opt{'user'} : "";
+ my $database = defined($opt{'database'}) ?
+ "-database $opt{'database'}" : "-database $ENV{'USER'}";
+
+ my $format = defined($::opt_json) ? "-format json" :
+ defined($::opt_pretty) ? "-format json -pretty" :
+ defined($::opt_csv) ? "-format csv" : "";
+ my $precision = defined($::opt_precision) ?
+ "-precision $::opt_precision" : "";
+
+ my $no_headers = defined($::opt_n) ? "--skip-column-names" : "";
+ my $ssl = "";
+ if($database_driver eq "influxssl") { $ssl="--ssl"; }
+
+ $batch_command =
+ "$influx $pass_through_options $no_headers $format ".
+ "$precision $ssl $host $user $password $port $database";
+ $interactive_command = $batch_command;
+ return($batch_command,$interactive_command);
+}
+
+# Return the code for 'show processlist' in the chosen database dialect
+sub processlist {
+ my $dbdriver = shift;
+ my %dburl = @_;
+ my %statement =
+ ("mysql" => "show processlist;",
+ "postgresql" => ("SELECT ".
+ " datname AS database,".
+ " usename AS username,".
+ " now()-xact_start AS runtime,".
+ " current_query ".
+ "FROM pg_stat_activity ".
+ "ORDER BY runtime DESC;"),
+ "oracle" => ("SELECT ".
+ ' CPU_TIME/100000, SYS.V_$SQL.SQL_TEXT, USERNAME '.
+ "FROM ".
+ ' SYS.V_$SQL, SYS.V_$SESSION '.
+ "WHERE ".
+ ' SYS.V_$SQL.SQL_ID = SYS.V_$SESSION.SQL_ID(+) '.
+ "AND username IS NOT NULL ".
+ "ORDER BY CPU_TIME DESC;"),
+ "influx" => "show queries;",
+ );
+ if($statement{$dbdriver}) {
+ return $statement{$dbdriver};
+ } else {
+ print STDERR "processlist is not implemented for $dbdriver\n";
+ exit 1;
+ }
+}
+
+# Return the code for 'show tables' in the chosen database dialect
+sub tablelist {
+ my $dbdriver = shift;
+ my %dburl = @_;
+ my %statement =
+ ("mysql" => "show tables;",
+ "postgresql" => '\dt',
+ "oracle" => ("SELECT object_name ".
+ "FROM user_objects ".
+ "WHERE object_type = 'TABLE';"),
+ "sqlite" => ".tables",
+ "sqlite3" => ".tables",
+ "influx" => "show measurements;",
+ );
+ if($statement{$dbdriver}) {
+ return $statement{$dbdriver};
+ } else {
+ print STDERR "tablelist is not implemented for $dbdriver\n";
+ exit 1;
+ }
+}
+
+# Return the code for 'show databases' in the chosen database dialect
+sub dblist {
+ my $dbdriver = shift;
+ my %dburl = @_;
+ my %statement =
+ ("mysql" => "show databases;",
+ "postgresql" => ("SELECT datname FROM pg_database ".
+ "WHERE datname NOT IN ('template0','template1','postgres') ".
+ "ORDER BY datname ASC;"),
+ "oracle" => ("select * from user_tablespaces;"),
+ "influx" => "show databases;",
+ );
+ if($statement{$dbdriver}) {
+ return $statement{$dbdriver};
+ } else {
+ print STDERR "dblist is not implemented for $dbdriver\n";
+ exit 1;
+ }
+}
+
+# Return the code for 'show database size' in the chosen database dialect
+sub dbsize {
+ my $dbdriver = shift;
+ my %dburl = @_;
+ my %statement;
+ if(defined $dburl{'database'}) {
+ %statement =
+ ("mysql" => (
+ 'SELECT '.
+ ' table_schema "database", '.
+ ' SUM(data_length+index_length) "bytes", '.
+ ' SUM(data_length+index_length)/1024/1024 "megabytes" '.
+ 'FROM information_schema.TABLES '.
+ "WHERE table_schema = '$dburl{'database'}'".
+ 'GROUP BY table_schema;'),
+ "postgresql" => (
+ "SELECT '$dburl{'database'}' AS database, ".
+ "pg_database_size('$dburl{'database'}') AS bytes, ".
+	    "pg_size_pretty(pg_database_size('$dburl{'database'}')) AS human_readable "),
+ "sqlite" => (
+ "SELECT ".(undef_as_zero(-s $dburl{'database'}))." AS bytes;"),
+ "sqlite3" => (
+ "SELECT ".(undef_as_zero(-s $dburl{'database'}))." AS bytes;"),
+ );
+ } else {
+ %statement =
+ ("mysql" => (
+ 'SELECT '.
+ ' table_schema "database", '.
+ ' SUM(data_length+index_length) "bytes", '.
+ ' SUM(data_length+index_length)/1024/1024 "megabytes" '.
+ 'FROM information_schema.TABLES '.
+ 'GROUP BY table_schema;'),
+ "postgresql" => (
+ 'SELECT datname AS database, pg_database_size(datname) AS bytes, '.
+	'pg_size_pretty(pg_database_size(datname)) AS human_readable '.
+ 'FROM (SELECT datname FROM pg_database) a;'),
+ "sqlite" => (
+ "SELECT 0 AS bytes;"),
+ "sqlite3" => (
+ "SELECT 0 AS bytes;"),
+ );
+ }
+ if($statement{$dbdriver}) {
+ return $statement{$dbdriver};
+ } else {
+ print STDERR "dbsize is not implemented for $dbdriver\n";
+ exit 1;
+ }
+}
+
+
+# Return the code for 'show table size' in the chosen database dialect
+sub tablesize {
+ my $dbdriver = shift;
+ my $database = shift;
+ my %statement =
+ ("postgresql" => (
+ "SELECT relname, relpages*8 AS kb, reltuples::int AS \"live+dead rows\" ".
+ "FROM pg_class c ".
+ "ORDER BY relpages DESC;"),
+ "mysql" => (
+ "select table_name, TABLE_ROWS, DATA_LENGTH,INDEX_LENGTH from INFORMATION_SCHEMA.tables;"),
+ );
+ if($statement{$dbdriver}) {
+ return $statement{$dbdriver};
+ } else {
+ print STDERR "table size is not implemented for $dbdriver\n";
+ exit 1;
+ }
+}
+
+sub is_stdin_terminal {
+ return (-t STDIN);
+}
+
+sub find_command_in_path {
+ # Find the command if it exists in the current path
+ my $command = shift;
+ my $path = `which $command`;
+ chomp $path;
+ return $path;
+}
+
+sub Usage {
+ if(@_) {
+ print "Error:\n";
+ print map{ "$_\n" } @_;
+ print "\n";
+ }
+ print "sql [-hnr] [--table-size] [--db-size] [-p pass-through] [-s string] dburl [command]\n";
+}
+
+sub get_alias {
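+    # Resolve an alias of the form ':name' into its DBURL by searching
+    # the alias files. Aliases may point to other aliases; cyclic
+    # definitions are detected and abort the program.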
+ my $alias = shift;
+ $alias =~ s/^(sql:)*//; # Accept aliases prepended with sql:
+ if ($alias !~ /^:/) {
+ return $alias;
+ }
+
+ # Find the alias
+ my $path;
+ if (-l $0) {
+ ($path) = readlink($0) =~ m|^(.*)/|;
+ } else {
+ ($path) = $0 =~ m|^(.*)/|;
+ }
+
+ my @deprecated = ("$ENV{HOME}/.dburl.aliases",
+ "$path/dburl.aliases", "$path/dburl.aliases.dist");
+ for (@deprecated) {
+ if(-r $_) {
+ print STDERR "$_ is deprecated. Use .sql/aliases instead (read man sql)\n";
+ }
+ }
+ my @urlalias=();
+ check_permissions("$ENV{HOME}/.sql/aliases");
+ check_permissions("$ENV{HOME}/.dburl.aliases");
+ my @search = ("$ENV{HOME}/.sql/aliases",
+ "$ENV{HOME}/.dburl.aliases", "/etc/sql/aliases",
+ "$path/dburl.aliases", "$path/dburl.aliases.dist");
+ for my $alias_file (@search) {
+ if(-r $alias_file) {
+ open(my $fh, "<", $alias_file) || die;
+ push @urlalias, <$fh>;
+ close $fh;
+ }
+ }
+ my ($alias_part,$rest) = $alias=~/(:\w*)(.*)/;
+ # If we saw this before: we have an alias loop
+ if(grep {$_ eq $alias_part } @Private::seen_aliases) {
+ print STDERR "$alias_part is a cyclic alias\n";
+ exit -1;
+ } else {
+ push @Private::seen_aliases, $alias_part;
+ }
+
+ my $dburl;
+ for (@urlalias) {
+ /^$alias_part\s+(\S+.*)/ and do { $dburl = $1; last; }
+ }
+
+ if($dburl) {
+ return get_alias($dburl.$rest);
+ } else {
+ Usage("$alias is not defined in @search");
+ exit(-1);
+ }
+}
+
+sub check_permissions {
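+    # Warn if an alias file (which may contain passwords) is not
+    # owned by the user or is readable by group/other.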
+ my $file = shift;
+
+ if(-e $file) {
+ if(not -o $file) {
+ my $username = (getpwuid($<))[0];
+ print STDERR "$file should be owned by $username: chown $username $file\n";
+ }
+ my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,
+ $atime,$mtime,$ctime,$blksize,$blocks) = stat($file);
+ if($mode & 077) {
+ my $username = (getpwuid($<))[0];
+	    print STDERR "$file should only be readable by $username: chmod 600 $file\n";
+ }
+ }
+}
+
+sub parse_dburl {
+ my $url = shift;
+ my %options = ();
+ # sql:mysql://[[user][:password]@][host][:port]/[database[?sql query]]
+
+ if($url=~m!(?:sql:)? # You can prefix with 'sql:'
+ ((?:oracle|ora|mysql|pg|postgres|postgresql|influx|influxdb)(?:s|ssl|)|
+ (?:sqlite|sqlite2|sqlite3)):// # Databasedriver ($1)
+ (?:
+ ([^:@/][^:@]*|) # Username ($2)
+ (?:
+ :([^@]*) # Password ($3)
+ )?
+ @)?
+ ([^:/]*)? # Hostname ($4)
+ (?:
+ :
+ ([^/]*)? # Port ($5)
+ )?
+ (?:
+ /
+ ([^?/]*)? # Database ($6)
+ )?
+ (?:
+ /?
+ \?
+ (.*)? # Query ($7)
+ )?
+ !x) {
+ $options{databasedriver} = undef_if_empty(uri_unescape($1));
+ $options{user} = undef_if_empty(uri_unescape($2));
+ $options{password} = undef_if_empty(uri_unescape($3));
+ $options{host} = undef_if_empty(uri_unescape($4));
+ $options{port} = undef_if_empty(uri_unescape($5));
+ $options{database} = undef_if_empty(uri_unescape($6))
+ || $options{user};
+ $options{query} = undef_if_empty(uri_unescape($7));
+ debug("dburl $url\n");
+ debug("databasedriver ",$options{databasedriver}, " user ", $options{user},
+ " password ", $options{password}, " host ", $options{host},
+ " port ", $options{port}, " database ", $options{database},
+ " query ",$options{query}, "\n");
+ } else {
+ Usage("$url is not a valid DBURL");
+ exit -1;
+ }
+ return %options;
+}
+
+sub uri_unescape {
+ # Copied from http://cpansearch.perl.org/src/GAAS/URI-1.55/URI/Escape.pm
+ # to avoid depending on URI::Escape
+ # This section is (C) Gisle Aas.
+ # Note from RFC1630: "Sequences which start with a percent sign
+ # but are not followed by two hexadecimal characters are reserved
+ # for future extension"
+ my $str = shift;
+ if (@_ && wantarray) {
+ # not executed for the common case of a single argument
+ my @str = ($str, @_); # need to copy
+ foreach (@str) {
+ s/%([0-9A-Fa-f]{2})/chr(hex($1))/eg;
+ }
+ return @str;
+ }
+ $str =~ s/%([0-9A-Fa-f]{2})/chr(hex($1))/eg if defined $str;
+ $str;
+}
+
+sub undef_if_empty {
+ if(defined($_[0]) and $_[0] eq "") {
+ return undef;
+ }
+ return $_[0];
+}
+
+sub undef_as_zero {
+ my $a = shift;
+ return $a ? $a : 0;
+}
+
+sub version {
+ # Returns: N/A
+ print join("\n",
+ "GNU $Global::progname $Global::version",
+	       "Copyright (C) 2008-2024",
+ "Ole Tange and Free Software Foundation, Inc.",
+ "License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>",
+ "This is free software: you are free to change and redistribute it.",
+ "GNU $Global::progname comes with no warranty.",
+ "",
+ "Web site: http://www.gnu.org/software/${Global::progname}\n",
+ "When using GNU $Global::progname for a publication please cite:\n",
+ "O. Tange (2011): GNU SQL - A Command Line Tool for Accessing Different",
+ "Databases Using DBURLs, ;login: The USENIX Magazine, April 2011:29-32.\n"
+ );
+}
+
+sub die_usage {
+ # Returns: N/A
+ usage();
+ exit(255);
+}
+
+sub usage {
+ # Returns: N/A
+ print "Usage:\n";
+ print "$Global::progname [options] dburl [sqlcommand]\n";
+ print "$Global::progname [options] dburl < sql_command_file\n";
+ print "\n";
+ print "See 'man $Global::progname' for the options\n";
+}
+
+sub debug {
+ # Returns: N/A
+ $Global::debug or return;
+ @_ = grep { defined $_ ? $_ : "" } @_;
+ print @_;
+}
+
+$::opt_skip_first_line = $::opt_shebang = 0;