author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 18:50:47 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 18:50:47 +0000
commit     9face96b899b3d93226ab52c4098afa5e3797396 (patch)
tree       f6e560a1021c00d5aeeab61edc3e4a09194d820a
parent     Initial commit. (diff)
Adding upstream version 1.2.5+ds1. (refs: upstream/1.2.5+ds1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--  .gitignore  1
-rw-r--r--  .travis.yml  24
-rw-r--r--  LICENSE  192
-rw-r--r--  Makefile  11
-rw-r--r--  OWNERS  6
-rw-r--r--  README.md  109
-rw-r--r--  builder.go  677
-rw-r--r--  builder_test.go  1048
-rw-r--r--  cmd/imagebuilder/imagebuilder.go  236
-rw-r--r--  constants.go  9
-rw-r--r--  dispatchers.go  711
-rw-r--r--  dispatchers_test.go  800
-rw-r--r--  doc.go  6
-rw-r--r--  dockerclient/archive.go  768
-rw-r--r--  dockerclient/archive_test.go  535
-rw-r--r--  dockerclient/client.go  1526
-rw-r--r--  dockerclient/conformance_test.go  1258
-rw-r--r--  dockerclient/conformance_unix_test.go  21
-rw-r--r--  dockerclient/conformance_windows_test.go  15
-rw-r--r--  dockerclient/copyinfo.go  181
-rw-r--r--  dockerclient/copyinfo_test.go  266
-rw-r--r--  dockerclient/directory.go  87
-rw-r--r--  dockerclient/testdata/Dockerfile.add  11
-rw-r--r--  dockerclient/testdata/Dockerfile.args  7
-rw-r--r--  dockerclient/testdata/Dockerfile.badhealthcheck  2
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_1  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_10  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_11  6
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_12  6
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_13  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_14  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_2  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_3  6
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_4  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_5  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_6  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_7  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_8  5
-rw-r--r--  dockerclient/testdata/Dockerfile.copyfrom_9  5
-rw-r--r--  dockerclient/testdata/Dockerfile.edgecases  48
-rw-r--r--  dockerclient/testdata/Dockerfile.env  22
-rw-r--r--  dockerclient/testdata/Dockerfile.envargconflict  8
-rw-r--r--  dockerclient/testdata/Dockerfile.envsubst  4
-rw-r--r--  dockerclient/testdata/Dockerfile.escape  2
-rw-r--r--  dockerclient/testdata/Dockerfile.exposedefault  2
-rw-r--r--  dockerclient/testdata/Dockerfile.healthcheck  7
-rw-r--r--  dockerclient/testdata/Dockerfile.mount  2
-rw-r--r--  dockerclient/testdata/Dockerfile.multiarg  4
-rw-r--r--  dockerclient/testdata/Dockerfile.multistage  24
-rw-r--r--  dockerclient/testdata/Dockerfile.novolume  5
-rw-r--r--  dockerclient/testdata/Dockerfile.novolumenorun  3
-rw-r--r--  dockerclient/testdata/Dockerfile.noworkdir  4
-rw-r--r--  dockerclient/testdata/Dockerfile.reusebase  6
-rw-r--r--  dockerclient/testdata/Dockerfile.run.args  5
-rw-r--r--  dockerclient/testdata/Dockerfile.shell  3
-rw-r--r--  dockerclient/testdata/Dockerfile.target  8
-rw-r--r--  dockerclient/testdata/Dockerfile.unknown  3
-rw-r--r--  dockerclient/testdata/Dockerfile.unset  5
-rw-r--r--  dockerclient/testdata/Dockerfile.volumeexists  5
-rw-r--r--  dockerclient/testdata/add/Dockerfile  18
-rw-r--r--  dockerclient/testdata/add/Dockerfile.addall  2
-rw-r--r--  dockerclient/testdata/add/Dockerfile.addslash  2
-rw-r--r--  dockerclient/testdata/add/Dockerfile.copy  13
-rw-r--r--  dockerclient/testdata/add/archived-bz2.txt  1
-rw-r--r--  dockerclient/testdata/add/archived-gz.txt  1
-rw-r--r--  dockerclient/testdata/add/archived-xz.txt  1
-rw-r--r--  dockerclient/testdata/add/archived.tar  bin 0 -> 10240 bytes
-rw-r--r--  dockerclient/testdata/add/archived.tar.bz2  bin 0 -> 233 bytes
-rw-r--r--  dockerclient/testdata/add/archived.tar.gz  bin 0 -> 221 bytes
-rw-r--r--  dockerclient/testdata/add/archived.tar.xz  bin 0 -> 256 bytes
-rw-r--r--  dockerclient/testdata/add/archived.txt  1
-rw-r--r--  dockerclient/testdata/copy/Dockerfile  3
-rw-r--r--  dockerclient/testdata/copy/script  2
-rw-r--r--  dockerclient/testdata/copyblahblub/Dockerfile  4
-rw-r--r--  dockerclient/testdata/copyblahblub/Dockerfile2  4
-rw-r--r--  dockerclient/testdata/copyblahblub/Dockerfile3  4
-rw-r--r--  dockerclient/testdata/copyblahblub/firstdir/seconddir/dir-a/file-a  1
-rw-r--r--  dockerclient/testdata/copyblahblub/firstdir/seconddir/dir-b/file-b  1
-rw-r--r--  dockerclient/testdata/copychmod/Dockerfile  4
-rw-r--r--  dockerclient/testdata/copychmod/file  1
-rw-r--r--  dockerclient/testdata/copychmod/file2  1
-rw-r--r--  dockerclient/testdata/copychmod/file3  1
-rw-r--r--  dockerclient/testdata/copychown/Dockerfile  20
-rw-r--r--  dockerclient/testdata/copychown/script  2
-rw-r--r--  dockerclient/testdata/copychown/script2  2
-rw-r--r--  dockerclient/testdata/copydir/Dockerfile  3
-rw-r--r--  dockerclient/testdata/copydir/dir/file  0
-rw-r--r--  dockerclient/testdata/copyempty/.script  2
-rw-r--r--  dockerclient/testdata/copyempty/Dockerfile  2
-rw-r--r--  dockerclient/testdata/copyempty/Dockerfile2  2
-rw-r--r--  dockerclient/testdata/copyempty/script1  2
-rw-r--r--  dockerclient/testdata/copyempty/script2  2
-rw-r--r--  dockerclient/testdata/copyfrom/Dockerfile  15
-rw-r--r--  dockerclient/testdata/copyrename/Dockerfile  3
-rw-r--r--  dockerclient/testdata/copyrename/file1  2
-rw-r--r--  dockerclient/testdata/dir/Dockerfile  4
-rw-r--r--  dockerclient/testdata/dir/file  0
-rw-r--r--  dockerclient/testdata/dir/subdir/file2  0
-rw-r--r--  dockerclient/testdata/ignore/.dockerignore  2
-rw-r--r--  dockerclient/testdata/ignore/Dockerfile  2
-rw-r--r--  dockerclient/testdata/ignore/file  0
-rw-r--r--  dockerclient/testdata/ignore/file2  0
-rw-r--r--  dockerclient/testdata/ignore/file3  0
-rw-r--r--  dockerclient/testdata/multistage/Dockerfile  16
-rw-r--r--  dockerclient/testdata/multistage/Dockerfile.arg-scope  9
-rw-r--r--  dockerclient/testdata/multistage/Dockerfile.env  15
-rw-r--r--  dockerclient/testdata/multistage/Dockerfile.heading-arg  18
-rw-r--r--  dockerclient/testdata/multistage/Dockerfile.heading-redefine  7
-rw-r--r--  dockerclient/testdata/multistage/Dockerfile.ref  6
-rw-r--r--  dockerclient/testdata/multistage/Dockerfile.relative-copy_1  7
-rw-r--r--  dockerclient/testdata/multistage/Dockerfile.relative-copy_2  7
-rw-r--r--  dockerclient/testdata/multistage/dir/a.txt  0
-rw-r--r--  dockerclient/testdata/overlapdir/Dockerfile.with_slash  2
-rw-r--r--  dockerclient/testdata/overlapdir/Dockerfile.without_slash  2
-rw-r--r--  dockerclient/testdata/overlapdir/existing/etc/file-in-existing-dir  0
-rw-r--r--  dockerclient/testdata/singlefile/Dockerfile  2
-rw-r--r--  dockerclient/testdata/user-workdir/Dockerfile.notused  5
-rw-r--r--  dockerclient/testdata/user-workdir/Dockerfile.used  7
-rw-r--r--  dockerclient/testdata/volume/Dockerfile  7
-rw-r--r--  dockerclient/testdata/volume/file  1
-rw-r--r--  dockerclient/testdata/volume/file2  1
-rw-r--r--  dockerclient/testdata/volumerun/Dockerfile  7
-rw-r--r--  dockerclient/testdata/volumerun/file  1
-rw-r--r--  dockerclient/testdata/volumerun/file2  1
-rw-r--r--  dockerclient/testdata/volumerun/file4  1
-rw-r--r--  dockerclient/testdata/wildcard/Dockerfile  3
-rw-r--r--  dockerclient/testdata/wildcard/dir2/file.a  1
-rw-r--r--  dockerclient/testdata/wildcard/dir2/file.b  1
-rw-r--r--  dockerclient/testdata/wildcard/dir2/file.c  1
-rw-r--r--  dockerclient/testdata/wildcard/dir2/file2.b  1
-rw-r--r--  dockerfile/NOTICE  26
-rw-r--r--  dockerfile/command/command.go  46
-rw-r--r--  dockerfile/parser/dumper/main.go  32
-rw-r--r--  dockerfile/parser/json_test.go  59
-rw-r--r--  dockerfile/parser/line_parsers.go  398
-rw-r--r--  dockerfile/parser/line_parsers_test.go  74
-rw-r--r--  dockerfile/parser/parser.go  355
-rw-r--r--  dockerfile/parser/parser_test.go  154
-rw-r--r--  dockerfile/parser/split_command.go  118
-rw-r--r--  dockerfile/parser/testfile-line/Dockerfile  35
-rw-r--r--  dockerfile/parser/testfiles-negative/env_no_value/Dockerfile  3
-rw-r--r--  dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile  1
-rw-r--r--  dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile  11
-rw-r--r--  dockerfile/parser/testfiles/ADD-COPY-with-JSON/result  10
-rw-r--r--  dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile  26
-rw-r--r--  dockerfile/parser/testfiles/brimstone-consuldock/result  5
-rw-r--r--  dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile  52
-rw-r--r--  dockerfile/parser/testfiles/brimstone-docker-consul/result  9
-rw-r--r--  dockerfile/parser/testfiles/continue-at-eof/Dockerfile  3
-rw-r--r--  dockerfile/parser/testfiles/continue-at-eof/result  2
-rw-r--r--  dockerfile/parser/testfiles/continueIndent/Dockerfile  36
-rw-r--r--  dockerfile/parser/testfiles/continueIndent/result  10
-rw-r--r--  dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile  54
-rw-r--r--  dockerfile/parser/testfiles/cpuguy83-nagios/result  40
-rw-r--r--  dockerfile/parser/testfiles/docker/Dockerfile  102
-rw-r--r--  dockerfile/parser/testfiles/docker/result  24
-rw-r--r--  dockerfile/parser/testfiles/env/Dockerfile  23
-rw-r--r--  dockerfile/parser/testfiles/env/result  16
-rw-r--r--  dockerfile/parser/testfiles/escape-after-comment/Dockerfile  9
-rw-r--r--  dockerfile/parser/testfiles/escape-after-comment/result  3
-rw-r--r--  dockerfile/parser/testfiles/escape-nonewline/Dockerfile  7
-rw-r--r--  dockerfile/parser/testfiles/escape-nonewline/result  3
-rw-r--r--  dockerfile/parser/testfiles/escape/Dockerfile  6
-rw-r--r--  dockerfile/parser/testfiles/escape/result  3
-rw-r--r--  dockerfile/parser/testfiles/escapes/Dockerfile  14
-rw-r--r--  dockerfile/parser/testfiles/escapes/result  6
-rw-r--r--  dockerfile/parser/testfiles/flags/Dockerfile  10
-rw-r--r--  dockerfile/parser/testfiles/flags/result  10
-rw-r--r--  dockerfile/parser/testfiles/health/Dockerfile  10
-rw-r--r--  dockerfile/parser/testfiles/health/result  9
-rw-r--r--  dockerfile/parser/testfiles/influxdb/Dockerfile  15
-rw-r--r--  dockerfile/parser/testfiles/influxdb/result  11
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile  1
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result  1
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile  1
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result  1
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile  1
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result  1
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile  1
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result  1
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile  1
-rw-r--r--  dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result  1
-rw-r--r--  dockerfile/parser/testfiles/json/Dockerfile  8
-rw-r--r--  dockerfile/parser/testfiles/json/result  8
-rw-r--r--  dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile  7
-rw-r--r--  dockerfile/parser/testfiles/kartar-entrypoint-oddities/result  7
-rw-r--r--  dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile  48
-rw-r--r--  dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result  29
-rw-r--r--  dockerfile/parser/testfiles/mail/Dockerfile  16
-rw-r--r--  dockerfile/parser/testfiles/mail/result  14
-rw-r--r--  dockerfile/parser/testfiles/multiple-volumes/Dockerfile  3
-rw-r--r--  dockerfile/parser/testfiles/multiple-volumes/result  2
-rw-r--r--  dockerfile/parser/testfiles/mumble/Dockerfile  7
-rw-r--r--  dockerfile/parser/testfiles/mumble/result  4
-rw-r--r--  dockerfile/parser/testfiles/nginx/Dockerfile  14
-rw-r--r--  dockerfile/parser/testfiles/nginx/result  11
-rw-r--r--  dockerfile/parser/testfiles/tf2/Dockerfile  23
-rw-r--r--  dockerfile/parser/testfiles/tf2/result  20
-rw-r--r--  dockerfile/parser/testfiles/weechat/Dockerfile  9
-rw-r--r--  dockerfile/parser/testfiles/weechat/result  6
-rw-r--r--  dockerfile/parser/testfiles/znc/Dockerfile  7
-rw-r--r--  dockerfile/parser/testfiles/znc/result  5
-rw-r--r--  evaluator.go  163
-rw-r--r--  go.mod  49
-rw-r--r--  go.sum  190
-rw-r--r--  imagebuilder.spec  63
-rw-r--r--  imageprogress/progress.go  315
-rw-r--r--  imageprogress/progress_test.go  216
-rw-r--r--  imageprogress/pull.go  45
-rw-r--r--  imageprogress/push.go  29
-rw-r--r--  internals.go  120
-rw-r--r--  internals_test.go  77
-rw-r--r--  shell_parser.go  332
-rw-r--r--  signal/README.md  1
-rw-r--r--  signal/signal.go  25
-rw-r--r--  signal/signals.go  79
-rw-r--r--  strslice/strslice.go  30
217 files changed, 12841 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..56e98ad
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+/imagebuilder
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..9885aee
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+dist: jammy
+
+services:
+ - docker
+
+go:
+ - "1.19"
+ - "1.20"
+
+before_install:
+ - sudo apt-get update -q -y
+ - docker pull busybox
+ - docker pull centos:7
+ - chmod -R go-w ./dockerclient/testdata
+
+script:
+ - make build
+ - make test
+ - travis_wait 45 make test-conformance
+
+notifications:
+ irc: "chat.freenode.net#openshift-dev"
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..ea21aad
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2016 Docker, Inc.
+ Copyright 2016 The OpenShift Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..dea84bb
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,11 @@
+build:
+ go build ./cmd/imagebuilder
+.PHONY: build
+
+test:
+ go test ./...
+.PHONY: test
+
+test-conformance:
+ go test -v -tags conformance -timeout 45m ./dockerclient
+.PHONY: test-conformance
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..db859b7
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,6 @@
+approvers:
+- TomSweeneyRedHat
+- mrunalp
+- nalind
+- rhatdan
+- smarterclayton
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..f663dcd
--- /dev/null
+++ b/README.md
@@ -0,0 +1,109 @@
+OCI Image Builder
+==========================
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/openshift/imagebuilder)](https://goreportcard.com/report/github.com/openshift/imagebuilder)
+[![GoDoc](https://godoc.org/github.com/openshift/imagebuilder?status.png)](https://godoc.org/github.com/openshift/imagebuilder)
+[![Travis](https://app.travis-ci.com/openshift/imagebuilder.svg?branch=master)](https://app.travis-ci.com/github/openshift/imagebuilder)
+[![Join the chat at freenode:openshift-dev](https://img.shields.io/badge/irc-freenode%3A%20%23openshift--dev-blue.svg)](http://webchat.freenode.net/?channels=%23openshift-dev)
+
+Please test your images (and add to our conformance suite)!
+
+This library supports using the Dockerfile syntax to build OCI- and Docker-compatible
+images without invoking a container build command such as `buildah bud` or `docker build`. It is intended to give
+clients more control over how they build container images, including:
+
+* Instead of building one layer per line, run all instructions in the
+ same container
+* Set HostConfig settings like network and memory controls that
+ are not available when running container builds
+* Mount external files into the build that are not persisted as part of
+ the final image (e.g., "secrets")
+* If there are no RUN commands in the Dockerfile, the container is created
+ and committed, but never started.
+
+The final image should be 99.9% compatible with regular container builds,
+but bugs are always possible.
+
+Future goals include:
+
+* Output OCI compatible images
+* Support other container execution engines, like runc or rkt
+* Better conformance testing
+* Windows support
+
+## Install and Run
+
+To download and install the library and the binary, set up a Go build environment and, with `GOPATH` set, run:
+
+```
+$ go install github.com/openshift/imagebuilder/cmd/imagebuilder@latest
+```
+
+The included command-line tool takes one argument, a path to a directory containing a Dockerfile. The `-t` option
+can be used to specify the tag for the resulting image:
+
+```
+$ imagebuilder [-t TAG] DIRECTORY
+```
+
+To mount a file into the build that will not be present in the final output image, run:
+
+```
+$ imagebuilder --mount ~/secrets/private.key:/etc/keys/private.key path/to/my/code testimage
+```
+
+Any processes in the Dockerfile will have access to `/etc/keys/private.key`, but that file will not be part of the committed image.
+
+You can also customize which Dockerfile is run, or run multiple Dockerfiles in sequence (the FROM is ignored on
+later files):
+
+```
+$ imagebuilder -f Dockerfile:Dockerfile.extra .
+```
+
+will build the current directory and combine the first Dockerfile with the second. The FROM in the second Dockerfile
+is ignored.
+
+Note that imagebuilder adds the built image to the `docker` daemon's internal storage. If you use `podman` you must first pull the image into its local storage:
+
+```
+$ podman pull docker-daemon:<IMAGE>:<TAG> # must contain either a tag or a digest
+```
+
+## Code Example
+
+```go
+f, err := os.Open("path/to/Dockerfile")
+if err != nil {
+ return err
+}
+defer f.Close()
+
+e := builder.NewClientExecutor(o.Client)
+e.Out, e.ErrOut = os.Stdout, os.Stderr
+e.AllowPull = true
+e.Directory = "context/directory"
+e.Tag = "name/of-image:and-tag"
+e.AuthFn = nil // ... pass a function to retrieve authorization info
+e.LogFn = func(format string, args ...interface{}) {
+ fmt.Fprintf(e.ErrOut, "--> %s\n", fmt.Sprintf(format, args...))
+}
+
+buildErr := e.Build(f, map[string]string{"arg1":"value1"})
+if err := e.Cleanup(); err != nil {
+ fmt.Fprintf(e.ErrOut, "error: Unable to clean up build: %v\n", err)
+}
+
+return buildErr
+```
+
+For a fuller example, see OpenShift's experimental `dockerbuild` [command with mount secrets](https://github.com/openshift/origin/blob/26c9e032ff42f613fe10649cd7c5fa1b4c33501b/pkg/cmd/cli/cmd/dockerbuild/dockerbuild.go).
+
+## Run conformance tests (very slow):
+
+```
+docker rmi busybox; docker pull busybox
+docker rmi centos:7; docker pull centos:7
+chmod -R go-w ./dockerclient/testdata
+go test ./dockerclient -tags conformance -timeout 30m
+```
diff --git a/builder.go b/builder.go
new file mode 100644
index 0000000..b01ed0c
--- /dev/null
+++ b/builder.go
@@ -0,0 +1,677 @@
+package imagebuilder
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ docker "github.com/fsouza/go-dockerclient"
+
+ "github.com/openshift/imagebuilder/dockerfile/command"
+ "github.com/openshift/imagebuilder/dockerfile/parser"
+)
+
+// Copy defines a copy operation required on the container.
+type Copy struct {
+ // If true, this is a copy from the file system to the container. If false,
+ // the copy is from the context.
+ FromFS bool
+ // If set, this is a copy from the named stage or image to the container.
+ From string
+ Src []string
+ Dest string
+ Download bool
+ // If set, the owner:group for the destination. This value is passed
+ // to the executor for handling.
+ Chown string
+ Chmod string
+}
+
+// Run defines a run operation required in the container.
+type Run struct {
+ Shell bool
+ Args []string
+ // Mounts are mounts specified through the --mount flag inside the Containerfile
+ Mounts []string
+ // Network specifies the network mode to run the container with
+ Network string
+}
+
+type Executor interface {
+ Preserve(path string) error
+ // EnsureContainerPath should ensure that the directory exists, creating any components required
+ EnsureContainerPath(path string) error
+ // EnsureContainerPathAs should ensure that the directory exists, creating any components required
+ // with the specified owner and mode, if either is specified
+ EnsureContainerPathAs(path, user string, mode *os.FileMode) error
+ Copy(excludes []string, copies ...Copy) error
+ Run(run Run, config docker.Config) error
+ UnrecognizedInstruction(step *Step) error
+}
+
+type logExecutor struct{}
+
+func (logExecutor) Preserve(path string) error {
+ log.Printf("PRESERVE %s", path)
+ return nil
+}
+
+func (logExecutor) EnsureContainerPath(path string) error {
+ log.Printf("ENSURE %s", path)
+ return nil
+}
+
+func (logExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
+ if mode != nil {
+ log.Printf("ENSURE %s AS %q with MODE=%q", path, user, *mode)
+ } else {
+ log.Printf("ENSURE %s AS %q", path, user)
+ }
+ return nil
+}
+
+func (logExecutor) Copy(excludes []string, copies ...Copy) error {
+ for _, c := range copies {
+ log.Printf("COPY %v -> %s (from:%s download:%t), chown: %s, chmod %s", c.Src, c.Dest, c.From, c.Download, c.Chown, c.Chmod)
+ }
+ return nil
+}
+
+func (logExecutor) Run(run Run, config docker.Config) error {
+ log.Printf("RUN %v %v %t (%v)", run.Args, run.Mounts, run.Shell, config.Env)
+ return nil
+}
+
+func (logExecutor) UnrecognizedInstruction(step *Step) error {
+ log.Printf("Unknown instruction: %s", strings.ToUpper(step.Command))
+ return nil
+}
+
+type noopExecutor struct{}
+
+func (noopExecutor) Preserve(path string) error {
+ return nil
+}
+
+func (noopExecutor) EnsureContainerPath(path string) error {
+ return nil
+}
+
+func (noopExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
+ return nil
+}
+
+func (noopExecutor) Copy(excludes []string, copies ...Copy) error {
+ return nil
+}
+
+func (noopExecutor) Run(run Run, config docker.Config) error {
+ return nil
+}
+
+func (noopExecutor) UnrecognizedInstruction(step *Step) error {
+ return nil
+}
+
+type VolumeSet []string
+
+func (s *VolumeSet) Add(path string) bool {
+ if path == "/" {
+ set := len(*s) != 1 || (*s)[0] != ""
+ *s = []string{""}
+ return set
+ }
+ path = strings.TrimSuffix(path, "/")
+ var adjusted []string
+ for _, p := range *s {
+ if p == path || strings.HasPrefix(path, p+"/") {
+ return false
+ }
+ if strings.HasPrefix(p, path+"/") {
+ continue
+ }
+ adjusted = append(adjusted, p)
+ }
+ adjusted = append(adjusted, path)
+ *s = adjusted
+ return true
+}
+
+func (s VolumeSet) Has(path string) bool {
+ if path == "/" {
+ return len(s) == 1 && s[0] == ""
+ }
+ path = strings.TrimSuffix(path, "/")
+ for _, p := range s {
+ if p == path {
+ return true
+ }
+ }
+ return false
+}
+
+func (s VolumeSet) Covers(path string) bool {
+ if path == "/" {
+ return len(s) == 1 && s[0] == ""
+ }
+ path = strings.TrimSuffix(path, "/")
+ for _, p := range s {
+ if p == path || strings.HasPrefix(path, p+"/") {
+ return true
+ }
+ }
+ return false
+}
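The three methods above implement a small path-covering set: `Add` collapses descendants into their ancestors, `Has` reports exact membership, and `Covers` reports whether a path falls under any stored entry. A minimal usage sketch, consistent with the cases exercised by `TestVolumeSet` later in this commit:

```go
package main

import (
	"fmt"

	"github.com/openshift/imagebuilder"
)

func main() {
	s := imagebuilder.VolumeSet{}
	fmt.Println(s.Add("/var/lib")) // true: new path
	fmt.Println(s.Add("/var"))     // true: subsumes and removes "/var/lib"
	fmt.Println(s.Add("/var/log")) // false: already covered by "/var"

	fmt.Println(s.Has("/var"))        // true: stored exactly
	fmt.Println(s.Has("/var/lib"))    // false: covered, but not stored
	fmt.Println(s.Covers("/var/lib")) // true: descendant of "/var"
	fmt.Println(s.Covers("/va"))      // false: string prefix, not a path component
}
```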
+
+var (
+ LogExecutor = logExecutor{}
+ NoopExecutor = noopExecutor{}
+)
+
+type Stages []Stage
+
+func (stages Stages) ByName(name string) (Stage, bool) {
+ for _, stage := range stages {
+ if stage.Name == name {
+ return stage, true
+ }
+ }
+ return Stage{}, false
+}
+
+// Get just the target stage.
+func (stages Stages) ByTarget(target string) (Stages, bool) {
+ if len(target) == 0 {
+ return stages, true
+ }
+ for i, stage := range stages {
+ if stage.Name == target {
+ return stages[i : i+1], true
+ }
+ }
+ return nil, false
+}
+
+// Get all the stages up to and including the target.
+func (stages Stages) ThroughTarget(target string) (Stages, bool) {
+ if len(target) == 0 {
+ return stages, true
+ }
+ for i, stage := range stages {
+ if stage.Name == target {
+ return stages[0 : i+1], true
+ }
+ }
+ return nil, false
+}
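A short sketch of how the two selectors differ, using an inline three-stage Containerfile (the stage names are made up for illustration): `ByTarget` yields only the named stage, while `ThroughTarget` also yields everything before it.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/openshift/imagebuilder"
)

func main() {
	node, err := imagebuilder.ParseDockerfile(strings.NewReader(
		"FROM busybox AS base\nFROM base AS mytarget\nFROM base AS final\n"))
	if err != nil {
		log.Fatal(err)
	}
	stages, err := imagebuilder.NewStages(node, imagebuilder.NewBuilder(nil))
	if err != nil {
		log.Fatal(err)
	}

	only, _ := stages.ByTarget("mytarget")      // just the named stage
	upTo, _ := stages.ThroughTarget("mytarget") // "base" and "mytarget"
	fmt.Println(len(only), len(upTo))           // 1 2
}
```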
+
+type Stage struct {
+ Position int
+ Name string
+ Builder *Builder
+ Node *parser.Node
+}
+
+func NewStages(node *parser.Node, b *Builder) (Stages, error) {
+ var stages Stages
+ var allDeclaredArgs []string
+ for _, root := range SplitBy(node, command.Arg) {
+ argNode := root.Children[0]
+ if argNode.Value == command.Arg {
+ // extract declared variable
+ s := strings.SplitN(argNode.Original, " ", 2)
+ if len(s) == 2 && (strings.ToLower(s[0]) == command.Arg) {
+ allDeclaredArgs = append(allDeclaredArgs, s[1])
+ }
+ }
+ }
+ if err := b.extractHeadingArgsFromNode(node); err != nil {
+ return stages, err
+ }
+ for i, root := range SplitBy(node, command.From) {
+ name, _ := extractNameFromNode(root.Children[0])
+ if len(name) == 0 {
+ name = strconv.Itoa(i)
+ }
+ stages = append(stages, Stage{
+ Position: i,
+ Name: name,
+ Builder: b.builderForStage(allDeclaredArgs),
+ Node: root,
+ })
+ }
+ return stages, nil
+}
+
+func (b *Builder) extractHeadingArgsFromNode(node *parser.Node) error {
+ var args []*parser.Node
+ var children []*parser.Node
+ extract := true
+ for _, child := range node.Children {
+ if extract && child.Value == command.Arg {
+ args = append(args, child)
+ } else {
+ if child.Value == command.From {
+ extract = false
+ }
+ children = append(children, child)
+ }
+ }
+
+ // Set children equal to everything except the leading ARG nodes
+ node.Children = children
+
+ // Use a separate builder to evaluate the heading args
+ tempBuilder := NewBuilder(b.UserArgs)
+
+ // Evaluate all the heading arg commands
+ for _, c := range args {
+ step := tempBuilder.Step()
+ if err := step.Resolve(c); err != nil {
+ return err
+ }
+ if err := tempBuilder.Run(step, NoopExecutor, false); err != nil {
+ return err
+ }
+ }
+
+ // Add all of the defined heading args to the original builder's HeadingArgs map
+ for k, v := range tempBuilder.Args {
+ if _, ok := tempBuilder.AllowedArgs[k]; ok {
+ b.HeadingArgs[k] = v
+ }
+ }
+
+ return nil
+}
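A sketch of the heading-ARG behavior this enables, matching `TestHeadingArg` later in this commit: an ARG declared before the first FROM can be referenced in the FROM line itself, and a user-supplied build argument overrides its default. Note that `From` consumes the node, so the file is re-parsed for the second case.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/openshift/imagebuilder"
)

func main() {
	const containerfile = "ARG FOO=latest\nFROM busybox:$FOO\n"

	node, err := imagebuilder.ParseDockerfile(strings.NewReader(containerfile))
	if err != nil {
		log.Fatal(err)
	}
	from, err := imagebuilder.NewBuilder(nil).From(node)
	if err != nil {
		log.Fatal(err)
	}

	// Re-parse; error handling elided for brevity in the override case.
	node2, _ := imagebuilder.ParseDockerfile(strings.NewReader(containerfile))
	from2, _ := imagebuilder.NewBuilder(map[string]string{"FOO": "1.36"}).From(node2)

	fmt.Println(from, from2) // busybox:latest busybox:1.36
}
```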
+
+func extractNameFromNode(node *parser.Node) (string, bool) {
+ if node.Value != command.From {
+ return "", false
+ }
+ n := node.Next
+ if n == nil || n.Next == nil {
+ return "", false
+ }
+ n = n.Next
+ if !strings.EqualFold(n.Value, "as") || n.Next == nil || len(n.Next.Value) == 0 {
+ return "", false
+ }
+ return n.Next.Value, true
+}
+
+func (b *Builder) builderForStage(globalArgsList []string) *Builder {
+ stageBuilder := newBuilderWithGlobalAllowedArgs(b.UserArgs, globalArgsList)
+ for k, v := range b.HeadingArgs {
+ stageBuilder.HeadingArgs[k] = v
+ }
+ return stageBuilder
+}
+
+type Builder struct {
+ RunConfig docker.Config
+
+ Env []string
+ Args map[string]string
+ HeadingArgs map[string]string
+ UserArgs map[string]string
+ CmdSet bool
+ Author string
+ // Certain instructions, such as `FROM`, may reference an `ARG`
+ // declared before this stage or not declared in this stage at all,
+ // so while processing an instruction like `FROM ${SOME_ARG}` we
+ // verify whether the argument is declared anywhere in the
+ // Containerfile.
+ GlobalAllowedArgs []string
+
+ AllowedArgs map[string]bool
+ Volumes VolumeSet
+ Excludes []string
+
+ PendingVolumes VolumeSet
+ PendingRuns []Run
+ PendingCopies []Copy
+
+ Warnings []string
+ // Raw platform string specified with `FROM --platform` of the stage
+ // It's up to the implementation or client to parse and use this field
+ Platform string
+}
+
+func NewBuilder(args map[string]string) *Builder {
+ return newBuilderWithGlobalAllowedArgs(args, []string{})
+}
+
+func newBuilderWithGlobalAllowedArgs(args map[string]string, globalallowedargs []string) *Builder {
+ allowed := make(map[string]bool)
+ for k, v := range builtinAllowedBuildArgs {
+ allowed[k] = v
+ }
+ userArgs := make(map[string]string)
+ initialArgs := make(map[string]string)
+ for k, v := range args {
+ userArgs[k] = v
+ initialArgs[k] = v
+ }
+ return &Builder{
+ Args: initialArgs,
+ UserArgs: userArgs,
+ HeadingArgs: make(map[string]string),
+ AllowedArgs: allowed,
+ GlobalAllowedArgs: globalallowedargs,
+ }
+}
+
+func ParseFile(path string) (*parser.Node, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return ParseDockerfile(f)
+}
+
+// Step creates a new step from the current state.
+func (b *Builder) Step() *Step {
+ // Include build arguments in the table of variables that we'll use in
+ // Resolve(), but override them with values from the actual
+ // environment in case there's any conflict.
+ return &Step{Env: mergeEnv(b.Arguments(), mergeEnv(b.Env, b.RunConfig.Env))}
+}
+
+// Run executes a step, transforming the current builder and
+// invoking any Copy or Run operations. noRunsRemaining is an
+// optimization hint that allows the builder to avoid performing
+// unnecessary work.
+func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error {
+ fn, ok := evaluateTable[step.Command]
+ if !ok {
+ return exec.UnrecognizedInstruction(step)
+ }
+ if err := fn(b, step.Args, step.Attrs, step.Flags, step.Original); err != nil {
+ return err
+ }
+
+ copies := b.PendingCopies
+ b.PendingCopies = nil
+ runs := b.PendingRuns
+ b.PendingRuns = nil
+
+ // Once a VOLUME is defined, future ADD/COPY instructions are
+ // all that may mutate that path. Instruct the executor to preserve
+ // the path. The executor must handle invalidating preserved info.
+ for _, path := range b.PendingVolumes {
+ if b.Volumes.Add(path) && !noRunsRemaining {
+ if err := exec.Preserve(path); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := exec.Copy(b.Excludes, copies...); err != nil {
+ return err
+ }
+
+ if len(b.RunConfig.WorkingDir) > 0 {
+ if err := exec.EnsureContainerPathAs(b.RunConfig.WorkingDir, b.RunConfig.User, nil); err != nil {
+ return err
+ }
+ }
+
+ for _, run := range runs {
+ config := b.Config()
+ config.Env = step.Env
+ if err := exec.Run(run, *config); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
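Run is the heart of the evaluator. A sketch of the driving loop, mirroring `TestRun` later in this commit (the Dockerfile path is a placeholder): resolve the FROM first, then feed each remaining instruction through `Step`, `Resolve`, and `Run` with an `Executor` such as `LogExecutor`, which logs every action instead of performing it.

```go
package main

import (
	"log"

	"github.com/openshift/imagebuilder"
)

func main() {
	node, err := imagebuilder.ParseFile("path/to/Dockerfile")
	if err != nil {
		log.Fatal(err)
	}
	b := imagebuilder.NewBuilder(nil)
	from, err := b.From(node) // strips the FROM and returns the base image
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("building from %s", from)
	for _, child := range node.Children {
		step := b.Step()
		if err := step.Resolve(child); err != nil {
			log.Fatal(err)
		}
		if err := b.Run(step, imagebuilder.LogExecutor, false); err != nil {
			log.Fatal(err)
		}
	}
	log.Printf("final config: %#v", b.Config())
}
```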
+
+// RequiresStart returns true if a running container environment is necessary
+// to invoke the provided commands
+func (b *Builder) RequiresStart(node *parser.Node) bool {
+ for _, child := range node.Children {
+ if child.Value == command.Run {
+ return true
+ }
+ }
+ return false
+}
+
+// Config returns a snapshot of the current RunConfig intended for
+// use with a container commit.
+func (b *Builder) Config() *docker.Config {
+ config := b.RunConfig
+ if config.OnBuild == nil {
+ config.OnBuild = []string{}
+ }
+ if config.Entrypoint == nil {
+ config.Entrypoint = []string{}
+ }
+ config.Image = ""
+ return &config
+}
+
+// Arguments returns the currently active arguments.
+func (b *Builder) Arguments() []string {
+ var envs []string
+ for key, val := range b.Args {
+ if _, ok := b.AllowedArgs[key]; ok {
+ envs = append(envs, fmt.Sprintf("%s=%s", key, val))
+ }
+ }
+ return envs
+}
+
+// ErrNoFROM is returned if the Dockerfile did not contain a FROM
+// statement.
+var ErrNoFROM = fmt.Errorf("no FROM statement found")
+
+// From returns the image this dockerfile depends on, or an error
+// if no FROM is found or if multiple FROM are specified. If a
+// single from is found the passed node is updated with only
+// the remaining statements. The builder's RunConfig.Image field
+// is set to the first From found, or left unchanged if already
+// set.
+func (b *Builder) From(node *parser.Node) (string, error) {
+ if err := b.extractHeadingArgsFromNode(node); err != nil {
+ return "", err
+ }
+ children := SplitChildren(node, command.From)
+ switch {
+ case len(children) == 0:
+ return "", ErrNoFROM
+ case len(children) > 1:
+ return "", fmt.Errorf("multiple FROM statements are not supported")
+ default:
+ step := b.Step()
+ if err := step.Resolve(children[0]); err != nil {
+ return "", err
+ }
+ if err := b.Run(step, NoopExecutor, false); err != nil {
+ return "", err
+ }
+ return b.RunConfig.Image, nil
+ }
+}
+
+// FromImage updates the builder to use the provided image (resetting RunConfig
+// and recording the image environment), and updates the node with any ONBUILD
+// statements extracted from the parent image.
+func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error {
+ SplitChildren(node, command.From)
+
+ b.RunConfig = *image.Config
+ b.Env = mergeEnv(b.Env, b.RunConfig.Env)
+ b.RunConfig.Env = nil
+
+ // Check to see if we have a default PATH, note that windows won't
+ // have one as it's set by HCS
+ if runtime.GOOS != "windows" && !hasEnvName(b.Env, "PATH") {
+ b.RunConfig.Env = append(b.RunConfig.Env, "PATH="+defaultPathEnv)
+ }
+
+ // Join the image onbuild statements into node
+ if image.Config == nil || len(image.Config.OnBuild) == 0 {
+ return nil
+ }
+ extra, err := ParseDockerfile(bytes.NewBufferString(strings.Join(image.Config.OnBuild, "\n")))
+ if err != nil {
+ return err
+ }
+ for _, child := range extra.Children {
+ switch strings.ToUpper(child.Value) {
+ case "ONBUILD":
+ return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+ case "MAINTAINER", "FROM":
+ return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", child.Value)
+ }
+ }
+ node.Children = append(extra.Children, node.Children...)
+ // Since we've processed the OnBuild statements, clear them from the runconfig state.
+ b.RunConfig.OnBuild = nil
+ return nil
+}
+
+// SplitChildren removes any children with the provided value from node
+// and returns them as an array. node.Children is updated.
+func SplitChildren(node *parser.Node, value string) []*parser.Node {
+ var split []*parser.Node
+ var children []*parser.Node
+ for _, child := range node.Children {
+ if child.Value == value {
+ split = append(split, child)
+ } else {
+ children = append(children, child)
+ }
+ }
+ node.Children = children
+ return split
+}
+
+func SplitBy(node *parser.Node, value string) []*parser.Node {
+ var split []*parser.Node
+ var current *parser.Node
+ for _, child := range node.Children {
+ if current == nil || child.Value == value {
+ copied := *node
+ current = &copied
+ current.Children = nil
+ current.Next = nil
+ split = append(split, current)
+ }
+ current.Children = append(current.Children, child)
+ }
+ return split
+}
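A sketch of the difference between these two helpers: `SplitChildren` extracts the matching instructions and keeps the rest in place, while `SplitBy` partitions the whole child list into groups that each begin at a matching instruction (this is how `NewStages` groups instructions per FROM).

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/openshift/imagebuilder"
	"github.com/openshift/imagebuilder/dockerfile/command"
)

func main() {
	node, err := imagebuilder.ParseDockerfile(strings.NewReader(
		"FROM busybox\nRUN date\nFROM alpine\nRUN true\n"))
	if err != nil {
		log.Fatal(err)
	}

	groups := imagebuilder.SplitBy(node, command.From)
	fmt.Println(len(groups)) // 2: one group per FROM

	froms := imagebuilder.SplitChildren(groups[0], command.From)
	fmt.Println(len(froms), len(groups[0].Children)) // 1 1: FROM extracted, RUN kept
}
```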
+
+// StepFunc is invoked with the result of a resolved step.
+type StepFunc func(*Builder, []string, map[string]bool, []string, string) error
+
+var evaluateTable = map[string]StepFunc{
+ command.Env: env,
+ command.Label: label,
+ command.Maintainer: maintainer,
+ command.Add: add,
+ command.Copy: dispatchCopy, // copy() is a go builtin
+ command.From: from,
+ command.Onbuild: onbuild,
+ command.Workdir: workdir,
+ command.Run: run,
+ command.Cmd: cmd,
+ command.Entrypoint: entrypoint,
+ command.Expose: expose,
+ command.Volume: volume,
+ command.User: user,
+ command.StopSignal: stopSignal,
+ command.Arg: arg,
+ command.Healthcheck: healthcheck,
+ command.Shell: shell,
+}
+
+// builtinAllowedBuildArgs is a list of built-in allowed build args
+var builtinAllowedBuildArgs = map[string]bool{
+ "HTTP_PROXY": true,
+ "http_proxy": true,
+ "HTTPS_PROXY": true,
+ "https_proxy": true,
+ "FTP_PROXY": true,
+ "ftp_proxy": true,
+ "NO_PROXY": true,
+ "no_proxy": true,
+}
+
+// ParseIgnore returns a list of the excludes in the specified path.
+// The path should be a file in the .dockerignore format.
+// Extracted from fsouza/go-dockerclient and modified to drop comments and
+// empty lines.
+func ParseIgnore(path string) ([]string, error) {
+ var excludes []string
+
+ ignores, err := ioutil.ReadFile(path)
+ if err != nil {
+ return excludes, err
+ }
+ for _, ignore := range strings.Split(string(ignores), "\n") {
+ if len(ignore) == 0 || ignore[0] == '#' {
+ continue
+ }
+ ignore = strings.Trim(ignore, "/")
+ if len(ignore) > 0 {
+ excludes = append(excludes, ignore)
+ }
+ }
+ return excludes, nil
+}
+
+// ParseDockerignore returns a list of the excludes in the .containerignore or .dockerignore file.
+func ParseDockerignore(root string) ([]string, error) {
+ excludes, err := ParseIgnore(filepath.Join(root, ".containerignore"))
+ if err != nil && os.IsNotExist(err) {
+ excludes, err = ParseIgnore(filepath.Join(root, ".dockerignore"))
+ }
+ if err != nil && os.IsNotExist(err) {
+ return excludes, nil
+ }
+ return excludes, err
+}
+
+// ExportEnv creates an export statement for a shell that contains all of the
+// provided environment.
+func ExportEnv(env []string) string {
+ if len(env) == 0 {
+ return ""
+ }
+ out := "export"
+ for _, e := range env {
+ if len(e) == 0 {
+ continue
+ }
+ out += " " + BashQuote(e)
+ }
+ return out + "; "
+}
+
+// BashQuote escapes the provided string and surrounds it with double quotes.
+// TODO: verify that these are all we have to escape.
+func BashQuote(env string) string {
+ out := []rune{'"'}
+ for _, r := range env {
+ switch r {
+ case '$', '\\', '"':
+ out = append(out, '\\', r)
+ default:
+ out = append(out, r)
+ }
+ }
+ out = append(out, '"')
+ return string(out)
+}
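Rounding out builder.go, a short sketch of the quoting helpers: `ExportEnv` renders an environment as an `export ...; ` prefix suitable for prepending to a shell command, and `BashQuote` escapes `$`, `\`, and `"` inside double quotes.

```go
package main

import (
	"fmt"

	"github.com/openshift/imagebuilder"
)

func main() {
	prefix := imagebuilder.ExportEnv([]string{"GOPATH=/go", "MESSAGE=hello world"})
	fmt.Println(prefix + "env")
	// export "GOPATH=/go" "MESSAGE=hello world"; env

	fmt.Println(imagebuilder.BashQuote("PRICE=$5"))
	// "PRICE=\$5"
}
```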
diff --git a/builder_test.go b/builder_test.go
new file mode 100644
index 0000000..be7e5d3
--- /dev/null
+++ b/builder_test.go
@@ -0,0 +1,1048 @@
+package imagebuilder
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ docker "github.com/fsouza/go-dockerclient"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/openshift/imagebuilder/dockerfile/parser"
+)
+
+func TestVolumeSet(t *testing.T) {
+ testCases := []struct {
+ inputs []string
+ changed []bool
+ result []string
+ covered []string
+ uncovered []string
+ }{
+ {
+ inputs: []string{"/var/lib", "/var"},
+ changed: []bool{true, true},
+ result: []string{"/var"},
+
+ covered: []string{"/var/lib", "/var/", "/var"},
+ uncovered: []string{"/var1", "/", "/va"},
+ },
+ {
+ inputs: []string{"/var", "/", "/"},
+ changed: []bool{true, true, false},
+ result: []string{""},
+
+ covered: []string{"/var/lib", "/var/", "/var", "/"},
+ },
+ {
+ inputs: []string{"/var", "/var/lib"},
+ changed: []bool{true, false},
+ result: []string{"/var"},
+ },
+ }
+ for i, testCase := range testCases {
+ s := VolumeSet{}
+ for j, path := range testCase.inputs {
+ if s.Add(path) != testCase.changed[j] {
+ t.Errorf("%d: adding %d %s should have resulted in change %t", i, j, path, testCase.changed[j])
+ }
+ }
+ if !reflect.DeepEqual(testCase.result, []string(s)) {
+ t.Errorf("%d: got %v", i, s)
+ }
+ for _, path := range testCase.covered {
+ if !s.Covers(path) {
+ t.Errorf("%d: not covered %s", i, path)
+ }
+ }
+ for _, path := range testCase.uncovered {
+ if s.Covers(path) {
+ t.Errorf("%d: covered %s", i, path)
+ }
+ }
+ }
+}
+
+func TestByTarget(t *testing.T) {
+ n, err := ParseFile("dockerclient/testdata/Dockerfile.target")
+ if err != nil {
+ t.Fatal(err)
+ }
+ stages, err := NewStages(n, NewBuilder(nil))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(stages) != 3 {
+ t.Fatalf("expected 3 stages, got %d", len(stages))
+ }
+ t.Logf("stages: %#v", stages)
+
+ stages1, found := stages.ByTarget("mytarget")
+ if !found {
+ t.Fatal("First target not found")
+ }
+ if len(stages1) != 1 {
+ t.Fatalf("expected 1 stages, got %d", len(stages1))
+ }
+ t.Logf("stages1: %#v", stages1)
+
+ stages2, found := stages.ByTarget("mytarget2")
+ if !found {
+ t.Fatal("Second target not found")
+ }
+ if len(stages2) != 1 {
+ t.Fatalf("expected 1 stages, got %d", len(stages2))
+ }
+ t.Logf("stages2: %#v", stages2)
+}
+
+func TestThroughTarget(t *testing.T) {
+ n, err := ParseFile("dockerclient/testdata/Dockerfile.target")
+ if err != nil {
+ t.Fatal(err)
+ }
+ stages, err := NewStages(n, NewBuilder(nil))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(stages) != 3 {
+ t.Fatalf("expected 3 stages, got %d", len(stages))
+ }
+ t.Logf("stages: %#v", stages)
+
+ stages1, found := stages.ThroughTarget("mytarget")
+ if !found {
+ t.Fatal("First target not found")
+ }
+ if len(stages1) != 2 {
+ t.Fatalf("expected 2 stages, got %d", len(stages1))
+ }
+ t.Logf("stages1: %#v", stages1)
+
+ stages2, found := stages.ThroughTarget("mytarget2")
+ if !found {
+ t.Fatal("Second target not found")
+ }
+ if len(stages2) != 3 {
+ t.Fatalf("expected 3 stages, got %d", len(stages2))
+ }
+ t.Logf("stages2: %#v", stages2)
+}
+
+func TestMultiStageParse(t *testing.T) {
+ n, err := ParseFile("dockerclient/testdata/multistage/Dockerfile")
+ if err != nil {
+ t.Fatal(err)
+ }
+ stages, err := NewStages(n, NewBuilder(nil))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(stages) != 3 {
+ t.Fatalf("expected 3 stages, got %d", len(stages))
+ }
+ t.Logf("stages: %#v", stages)
+}
+
+func TestMultiStageParseHeadingArg(t *testing.T) {
+ n, err := ParseFile("dockerclient/testdata/multistage/Dockerfile.heading-arg")
+ if err != nil {
+ t.Fatal(err)
+ }
+ stages, err := NewStages(n, NewBuilder(map[string]string{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(stages) != 3 {
+ t.Fatalf("expected 3 stages, got %d", len(stages))
+ }
+
+ fromImages := []string{"golang:1.9", "busybox:latest", "golang:1.9"}
+ for i, stage := range stages {
+ from, err := stage.Builder.From(stage.Node)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if expected := fromImages[i]; from != expected {
+ t.Fatalf("expected %s, got %s", expected, from)
+ }
+ }
+
+ t.Logf("stages: %#v", stages)
+}
+
+func TestHeadingArg(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ args map[string]string
+ expectedFrom string
+ }{
+ {name: "default", args: map[string]string{}, expectedFrom: "busybox:latest"},
+ {name: "override", args: map[string]string{"FOO": "bar"}, expectedFrom: "busybox:bar"},
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ n, err := ParseDockerfile(strings.NewReader(`ARG FOO=latest
+ARG BAR=baz
+FROM busybox:$FOO
+ARG BAZ=banana
+RUN echo $FOO $BAR`))
+ if err != nil {
+ t.Fatal(err)
+ }
+ b := NewBuilder(tc.args)
+ from, err := b.From(n)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if from != tc.expectedFrom {
+ t.Fatalf("expected %s, got %s", tc.expectedFrom, from)
+ }
+ })
+ }
+}
+
+// Test if `FROM some-${SOME-BUILT-IN-ARG}` args gets resolved correctly.
+func TestArgResolutionOfDefaultVariables(t *testing.T) {
+ // Get architecture from host
+ var localspec = platforms.DefaultSpec()
+ for _, tc := range []struct {
+ dockerfile string
+ name string
+ args map[string]string
+ expectedFrom string
+ }{
+ {name: "use-default-built-arg",
+ dockerfile: "FROM platform-${TARGETARCH}",
+ args: map[string]string{"FOO": "bar"},
+ expectedFrom: "platform-" + localspec.Architecture},
+ // Override should not work since we did not declare
+ {name: "override-default-built-arg-without-declaration",
+ dockerfile: "FROM platform-${TARGETARCH}",
+ args: map[string]string{"TARGETARCH": "bar"},
+ expectedFrom: "platform-" + localspec.Architecture},
+ {name: "override-default-built-arg",
+ dockerfile: "ARG TARGETARCH\nFROM platform-${TARGETARCH}",
+ args: map[string]string{"TARGETARCH": "bar"},
+ expectedFrom: "platform-bar"},
+ {name: "random-built-arg",
+ dockerfile: "ARG FOO\nFROM ${FOO}",
+ args: map[string]string{"FOO": "bar"},
+ expectedFrom: "bar"},
+ // Arg should not be resolved since we did not declare
+ {name: "random-built-arg-without-declaration",
+ dockerfile: "FROM ${FOO}",
+ args: map[string]string{"FOO": "bar"},
+ expectedFrom: ""},
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ n, err := ParseDockerfile(strings.NewReader(tc.dockerfile))
+ if err != nil {
+ t.Fatal(err)
+ }
+ stages, err := NewStages(n, NewBuilder(tc.args))
+ if err != nil {
+ t.Fatal(err)
+ }
+ from, err := stages[0].Builder.From(n)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if from != tc.expectedFrom {
+ t.Fatalf("expected %s, got %s", tc.expectedFrom, from)
+ }
+ })
+ }
+}
+
+func resolveNodeArgs(b *Builder, node *parser.Node) error {
+ for _, c := range node.Children {
+ if c.Value != "arg" {
+ continue
+ }
+ step := b.Step()
+ if err := step.Resolve(c); err != nil {
+ return err
+ }
+ if err := b.Run(step, NoopExecutor, false); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func builderHasArgument(b *Builder, argString string) bool {
+ for _, arg := range b.Arguments() {
+ if arg == argString {
+ return true
+ }
+ }
+ return false
+}
+
+func TestMultiStageHeadingArgRedefine(t *testing.T) {
+ n, err := ParseFile("dockerclient/testdata/multistage/Dockerfile.heading-redefine")
+ if err != nil {
+ t.Fatal(err)
+ }
+ stages, err := NewStages(n, NewBuilder(map[string]string{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(stages) != 2 {
+ t.Fatalf("expected 2 stages, got %d", len(stages))
+ }
+
+ for _, stage := range stages {
+ if err := resolveNodeArgs(stage.Builder, stage.Node); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ firstStageHasArg := false
+ for _, arg := range stages[0].Builder.Arguments() {
+ if match, err := regexp.MatchString(`FOO=.*`, arg); err == nil && match {
+ firstStageHasArg = true
+ break
+ } else if err != nil {
+ t.Fatal(err)
+ }
+ }
+ if firstStageHasArg {
+ t.Fatalf("expected FOO to not be present in first stage")
+ }
+
+ if !builderHasArgument(stages[1].Builder, "FOO=latest") {
+ t.Fatalf("expected FOO=latest in second stage arguments list, got %v", stages[1].Builder.Arguments())
+ }
+}
+
+func TestMultiStageHeadingArgRedefineOverride(t *testing.T) {
+ n, err := ParseFile("dockerclient/testdata/multistage/Dockerfile.heading-redefine")
+ if err != nil {
+ t.Fatal(err)
+ }
+ stages, err := NewStages(n, NewBuilder(map[string]string{"FOO": "7"}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(stages) != 2 {
+ t.Fatalf("expected 2 stages, got %d", len(stages))
+ }
+
+ for _, stage := range stages {
+ if err := resolveNodeArgs(stage.Builder, stage.Node); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ firstStageHasArg := false
+ for _, arg := range stages[0].Builder.Arguments() {
+ if match, err := regexp.MatchString(`FOO=.*`, arg); err == nil && match {
+ firstStageHasArg = true
+ break
+ } else if err != nil {
+ t.Fatal(err)
+ }
+ }
+ if firstStageHasArg {
+ t.Fatalf("expected FOO to not be present in first stage")
+ }
+
+ if !builderHasArgument(stages[1].Builder, "FOO=7") {
+ t.Fatalf("expected FOO=7 in second stage arguments list, got %v", stages[1].Builder.Arguments())
+ }
+}
+
+func TestArgs(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ dockerfile string
+ args map[string]string
+ expectedValue string
+ }{
+ {
+ name: "argOverride",
+ dockerfile: "FROM centos\nARG FOO=stuff\nARG FOO=things\n",
+ args: map[string]string{},
+ expectedValue: "FOO=things",
+ },
+ {
+ name: "argOverrideWithBuildArgs",
+ dockerfile: "FROM centos\nARG FOO=stuff\nARG FOO=things\n",
+ args: map[string]string{"FOO": "bar"},
+ expectedValue: "FOO=bar",
+ },
+ {
+ name: "multiple args in single step",
+ dockerfile: "FROM centos\nARG FOO=stuff WORLD=hello\n",
+ args: map[string]string{},
+ expectedValue: "WORLD=hello",
+ },
+ {
+ name: "multiple args in single step",
+ dockerfile: "FROM centos\nARG FOO=stuff WORLD=hello\n",
+ args: map[string]string{},
+ expectedValue: "FOO=stuff",
+ },
+ {
+ name: "headingArgRedefine",
+ dockerfile: "ARG FOO=stuff\nFROM centos\nARG FOO\n",
+ args: map[string]string{},
+ expectedValue: "FOO=stuff",
+ },
+ {
+ name: "headingArgRedefineWithBuildArgs",
+ dockerfile: "ARG FOO=stuff\nFROM centos\nARG FOO\n",
+ args: map[string]string{"FOO": "bar"},
+ expectedValue: "FOO=bar",
+ },
+ {
+ name: "headingArgRedefineDefault",
+ dockerfile: "ARG FOO=stuff\nFROM centos\nARG FOO=defaultfoovalue\n",
+ args: map[string]string{},
+ expectedValue: "FOO=defaultfoovalue",
+ },
+ {
+ name: "headingArgRedefineDefaultWithBuildArgs",
+ dockerfile: "ARG FOO=stuff\nFROM centos\nARG FOO=defaultfoovalue\n",
+ args: map[string]string{"FOO": "bar"},
+ expectedValue: "FOO=bar",
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ node, err := ParseDockerfile(strings.NewReader(tc.dockerfile))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b := NewBuilder(tc.args)
+ if err := resolveNodeArgs(b, node); err != nil {
+ t.Fatal(err)
+ }
+
+ if !builderHasArgument(b, tc.expectedValue) {
+ t.Fatalf("expected %s to be contained in arguments list: %v", tc.expectedValue, b.Arguments())
+ }
+ })
+ }
+}
+
+func TestMultiStageArgScope(t *testing.T) {
+ n, err := ParseFile("dockerclient/testdata/multistage/Dockerfile.arg-scope")
+ if err != nil {
+ t.Fatal(err)
+ }
+ args := map[string]string{
+ "SECRET": "secretthings",
+ "BAR": "notsecretthings",
+ }
+ stages, err := NewStages(n, NewBuilder(args))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(stages) != 2 {
+ t.Fatalf("expected 2 stages, got %d", len(stages))
+ }
+
+ for _, stage := range stages {
+ if err := resolveNodeArgs(stage.Builder, stage.Node); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if !builderHasArgument(stages[0].Builder, "SECRET=secretthings") {
+ t.Fatalf("expected SECRET=secretthings to be contained in first stage arguments list: %v", stages[0].Builder.Arguments())
+ }
+
+ secondStageArguments := stages[1].Builder.Arguments()
+ secretInSecondStage := false
+ for _, arg := range secondStageArguments {
+ if match, err := regexp.MatchString(`SECRET=.*`, arg); err == nil && match {
+ secretInSecondStage = true
+ break
+ } else if err != nil {
+ t.Fatal(err)
+ }
+ }
+ if secretInSecondStage {
+ t.Fatalf("expected SECRET to not be present in second stage")
+ }
+
+ if !builderHasArgument(stages[1].Builder, "FOO=test") {
+ t.Fatalf("expected FOO=test to be present in second stage arguments list: %v", secondStageArguments)
+ }
+ if !builderHasArgument(stages[1].Builder, "BAR=notsecretthings") {
+ t.Fatalf("expected BAR=notsecretthings to be present in second stage arguments list: %v", secondStageArguments)
+ }
+}
+
+func TestRun(t *testing.T) {
+ f, err := os.Open("dockerclient/testdata/Dockerfile.add")
+ if err != nil {
+ t.Fatal(err)
+ }
+ node, err := ParseDockerfile(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b := NewBuilder(nil)
+ from, err := b.From(node)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if from != "busybox" {
+ t.Fatalf("unexpected from: %s", from)
+ }
+ for _, child := range node.Children {
+ step := b.Step()
+ if err := step.Resolve(child); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Run(step, LogExecutor, false); err != nil {
+ t.Fatal(err)
+ }
+ }
+ t.Logf("config: %#v", b.Config())
+ t.Log(node.Dump())
+}
+
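+// testExecutor is a fake executor that records the callbacks it receives so
+// tests can assert on the resulting copies, runs, configs, and unrecognized
+// steps; when Err is set, every method returns it.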
+type testExecutor struct {
+ Preserved []string
+ Copies []Copy
+ Runs []Run
+ Configs []docker.Config
+ Unrecognized []Step
+ Err error
+}
+
+func (e *testExecutor) Preserve(path string) error {
+ e.Preserved = append(e.Preserved, path)
+ return e.Err
+}
+
+func (e *testExecutor) EnsureContainerPath(path string) error {
+ return e.Err
+}
+
+func (e *testExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
+ return e.Err
+}
+
+func (e *testExecutor) Copy(excludes []string, copies ...Copy) error {
+ e.Copies = append(e.Copies, copies...)
+ return e.Err
+}
+func (e *testExecutor) Run(run Run, config docker.Config) error {
+ e.Runs = append(e.Runs, run)
+ e.Configs = append(e.Configs, config)
+ return e.Err
+}
+func (e *testExecutor) UnrecognizedInstruction(step *Step) error {
+ e.Unrecognized = append(e.Unrecognized, *step)
+ return e.Err
+}
+
+func TestBuilder(t *testing.T) {
+ testCases := []struct {
+ Args map[string]string
+ Dockerfile string
+ From string
+ Copies []Copy
+ Runs []Run
+ Unrecognized []Step
+ Config docker.Config
+ Image *docker.Image
+ FromErrFn func(err error) bool
+ RunErrFn func(err error) bool
+ }{
+ {
+ Dockerfile: "dockerclient/testdata/dir/Dockerfile",
+ From: "busybox",
+ Copies: []Copy{
+ {Src: []string{"."}, Dest: "/", Download: false},
+ {Src: []string{"."}, Dest: "/dir"},
+ {Src: []string{"subdir/"}, Dest: "/test/", Download: false},
+ },
+ Config: docker.Config{
+ Image: "busybox",
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/ignore/Dockerfile",
+ From: "busybox",
+ Copies: []Copy{
+ {Src: []string{"."}, Dest: "/"},
+ },
+ Config: docker.Config{
+ Image: "busybox",
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.env",
+ From: "busybox",
+ Config: docker.Config{
+ Env: []string{"name=value", "name2=value2a value2b", "name1=value1", "name3=value3a\\n\"value3b\"", "name4=value4a\\nvalue4b"},
+ Image: "busybox",
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.edgecases",
+ From: "busybox",
+ Copies: []Copy{
+ {Src: []string{"."}, Dest: "/", Download: true},
+ {Src: []string{"."}, Dest: "/test/copy"},
+ },
+ Runs: []Run{
+ {Shell: false, Args: []string{"ls", "-la"}},
+ {Shell: false, Args: []string{"echo", "'1234'"}},
+ {Shell: true, Args: []string{"echo \"1234\""}},
+ {Shell: true, Args: []string{"echo 1234"}},
+ {Shell: true, Args: []string{"echo '1234' && echo \"456\" && echo 789"}},
+ {Shell: true, Args: []string{"sh -c 'echo root:testpass > /tmp/passwd'"}},
+ {Shell: true, Args: []string{"mkdir -p /test /test2 /test3/test"}},
+ },
+ Config: docker.Config{
+ User: "docker:root",
+ ExposedPorts: map[docker.Port]struct{}{"6000/tcp": {}, "3000/tcp": {}, "9000/tcp": {}, "5000/tcp": {}},
+ Env: []string{"SCUBA=1 DUBA 3"},
+ Cmd: []string{"/bin/sh", "-c", "echo 'test' | wc -"},
+ Image: "busybox",
+ Volumes: map[string]struct{}{"/test2": {}, "/test3": {}, "/test": {}},
+ WorkingDir: "/test",
+ OnBuild: []string{"RUN [\"echo\", \"test\"]", "RUN echo test", "COPY . /"},
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.unknown",
+ From: "busybox",
+ Unrecognized: []Step{
+ {Command: "health", Message: "HEALTH ", Original: "HEALTH NONE", Args: []string{""}, Flags: []string{}, Env: []string{}},
+ {Command: "unrecognized", Message: "UNRECOGNIZED ", Original: "UNRECOGNIZED", Args: []string{""}, Env: []string{}},
+ },
+ Config: docker.Config{
+ Image: "busybox",
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.exposedefault",
+ From: "busybox",
+ Config: docker.Config{
+ ExposedPorts: map[docker.Port]struct{}{"3469/tcp": {}},
+ Image: "busybox",
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.add",
+ From: "busybox",
+ Copies: []Copy{
+ {Src: []string{"https://github.com/openshift/origin/raw/master/README.md"}, Dest: "/README.md", Download: true},
+ {Src: []string{"https://github.com/openshift/origin/raw/master/LICENSE"}, Dest: "/", Download: true},
+ {Src: []string{"https://github.com/openshift/origin/raw/master/LICENSE"}, Dest: "/A", Download: true},
+ {Src: []string{"https://github.com/openshift/origin/raw/master/LICENSE"}, Dest: "/a", Download: true},
+ {Src: []string{"https://github.com/openshift/origin/raw/master/LICENSE"}, Dest: "/b/a", Download: true},
+ {Src: []string{"https://github.com/openshift/origin/raw/master/LICENSE"}, Dest: "/b/", Download: true},
+ {Src: []string{"https://github.com/openshift/ruby-hello-world/archive/master.zip"}, Dest: "/tmp/", Download: true},
+ },
+ Runs: []Run{
+ {Shell: true, Args: []string{"mkdir ./b"}},
+ },
+ Config: docker.Config{
+ Image: "busybox",
+ User: "root",
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.badhealthcheck",
+ From: "debian",
+ Config: docker.Config{
+ Image: "busybox",
+ },
+ RunErrFn: func(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "HEALTHCHECK requires at least one argument")
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.healthcheck",
+ From: "debian",
+ Config: docker.Config{
+ Image: "debian",
+ Cmd: []string{"/bin/sh", "-c", "/app/main.sh"},
+ Healthcheck: &docker.HealthConfig{
+ StartPeriod: 8 * time.Second,
+ Interval: 5 * time.Second,
+ Timeout: 3 * time.Second,
+ Retries: 3,
+ Test: []string{"CMD-SHELL", "/app/check.sh --quiet"},
+ },
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.envsubst",
+ From: "busybox",
+ Image: &docker.Image{
+ ID: "busybox2",
+ Config: &docker.Config{
+ Env: []string{"FOO=another", "BAR=original"},
+ },
+ },
+ Config: docker.Config{
+ Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "FOO=value"},
+ Labels: map[string]string{"test": "value"},
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.unset",
+ From: "busybox",
+ Image: &docker.Image{
+ ID: "busybox2",
+ Config: &docker.Config{
+ Env: []string{},
+ },
+ },
+ RunErrFn: func(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "is not allowed to be unset")
+ },
+ Config: docker.Config{
+ Env: []string{},
+ Labels: map[string]string{"test": ""},
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.args",
+ Args: map[string]string{"BAR": "first"},
+ From: "busybox",
+ Config: docker.Config{
+ Image: "busybox",
+ Env: []string{"FOO=value", "TEST=", "BAZ=first"},
+ Labels: map[string]string{"test": "value"},
+ },
+ Runs: []Run{
+ {Shell: true, Args: []string{"echo $BAR"}},
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/volume/Dockerfile",
+ From: "busybox",
+ Image: &docker.Image{
+ ID: "busybox2",
+ Config: &docker.Config{},
+ },
+ Config: docker.Config{
+ Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
+ Volumes: map[string]struct{}{
+ "/var": {},
+ "/var/www": {},
+ },
+ },
+ Copies: []Copy{
+ {Src: []string{"file"}, Dest: "/var/www/", Download: true},
+ {Src: []string{"file"}, Dest: "/var/", Download: true},
+ {Src: []string{"file2"}, Dest: "/var/", Download: true},
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/volumerun/Dockerfile",
+ From: "busybox",
+ Config: docker.Config{
+ Image: "busybox",
+ Volumes: map[string]struct{}{
+ "/var/www": {},
+ },
+ },
+ Runs: []Run{
+ {Shell: true, Args: []string{"touch /var/www/file3"}},
+ },
+ Copies: []Copy{
+ {Src: []string{"file"}, Dest: "/var/www/", Download: true},
+ {Src: []string{"file2"}, Dest: "/var/www/", Download: true},
+ {Src: []string{"file4"}, Dest: "/var/www/", Download: true},
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/multistage/Dockerfile",
+ From: "busybox",
+ Config: docker.Config{
+ Image: "busybox",
+ WorkingDir: "/tmp",
+ },
+ FromErrFn: func(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "multiple FROM statements are not supported")
+ },
+ Runs: []Run{
+ {Shell: true, Args: []string{"echo foo > bar"}},
+ },
+ Copies: []Copy{
+ {Src: []string{"file"}, Dest: "/var/www/", Download: true},
+ {Src: []string{"file2"}, Dest: "/var/www/", Download: true},
+ {Src: []string{"file4"}, Dest: "/var/www/", Download: true},
+ },
+ },
+ {
+ Dockerfile: "dockerclient/testdata/Dockerfile.shell",
+ From: "centos:7",
+ Config: docker.Config{
+ Image: "centos:7",
+ Shell: []string{"/bin/bash", "-xc"},
+ },
+ Runs: []Run{
+ {Shell: true, Args: []string{"env"}},
+ },
+ },
+ }
+ for i, test := range testCases {
+ t.Run(fmt.Sprintf("%s %d", test.Dockerfile, i), func(t *testing.T) {
+ data, err := ioutil.ReadFile(test.Dockerfile)
+ if err != nil {
+ t.Fatalf("%d: %v", i, err)
+ }
+ node, err := ParseDockerfile(bytes.NewBuffer(data))
+ if err != nil {
+ t.Fatalf("%d: %v", i, err)
+ }
+ b := NewBuilder(test.Args)
+ from, err := b.From(node)
+ if err != nil {
+ if test.FromErrFn == nil || !test.FromErrFn(err) {
+ t.Errorf("%d: %v", i, err)
+ }
+ return
+ }
+ if test.FromErrFn != nil {
+ t.Errorf("%d: expected an error from From(), didn't get one", i)
+ }
+ if from != test.From {
+ t.Errorf("%d: unexpected FROM: %s", i, from)
+ }
+ if test.Image != nil {
+ if err := b.FromImage(test.Image, node); err != nil {
+ t.Errorf("%d: unexpected error: %v", i, err)
+ }
+ }
+
+ e := &testExecutor{}
+ var lastErr error
+ for j, child := range node.Children {
+ step := b.Step()
+ if err := step.Resolve(child); err != nil {
+ lastErr = fmt.Errorf("%d: %d: %s: resolve: %v", i, j, step.Original, err)
+ break
+ }
+ if err := b.Run(step, e, false); err != nil {
+ lastErr = fmt.Errorf("%d: %d: %s: run: %v", i, j, step.Original, err)
+ break
+ }
+ }
+ if lastErr != nil {
+ if test.RunErrFn == nil || !test.RunErrFn(lastErr) {
+ t.Errorf("%d: unexpected error: %v", i, lastErr)
+ }
+ return
+ }
+ if test.RunErrFn != nil {
+ t.Errorf("%d: expected an error from Resolve()/Run()(), didn't get one", i)
+ }
+ if !reflect.DeepEqual(test.Copies, e.Copies) {
+ t.Errorf("%d: unexpected copies: %#v", i, e.Copies)
+ }
+ if !reflect.DeepEqual(test.Runs, e.Runs) {
+ t.Errorf("%d: unexpected runs: %#v", i, e.Runs)
+ }
+ if !reflect.DeepEqual(test.Unrecognized, e.Unrecognized) {
+ t.Errorf("%d: unexpected unrecognized: %#v", i, e.Unrecognized)
+ }
+ lastConfig := b.RunConfig
+ if !reflect.DeepEqual(test.Config, lastConfig) {
+ data, _ := json.Marshal(lastConfig)
+ expected, _ := json.Marshal(test.Config)
+ t.Errorf("%d: unexpected config: %s should be %s", i, string(data), string(expected))
+ }
+ })
+ }
+}
+
+func TestRunWithEnvArgConflict(t *testing.T) {
+ f, err := os.Open("dockerclient/testdata/Dockerfile.envargconflict")
+ if err != nil {
+ t.Fatal(err)
+ }
+ node, err := ParseDockerfile(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b := NewBuilder(nil)
+ from, err := b.From(node)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if from != "ubuntu:18.04" {
+ t.Fatalf("unexpected from: %s", from)
+ }
+ for _, child := range node.Children {
+ step := b.Step()
+ if err := step.Resolve(child); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Run(step, LogExecutor, false); err != nil {
+ t.Fatal(err)
+ }
+ }
+ configString := fmt.Sprintf("%v", b.Config())
+ expectedValue := "USER_NAME=my_user_env"
+ if !strings.Contains(configString, expectedValue) {
+ t.Fatalf("expected %s to be contained in the Configuration list: %s", expectedValue, configString)
+ }
+ expectedValue = "USER_NAME=my_user_arg"
+ if strings.Contains(configString, expectedValue) {
+ t.Fatalf("expected %s to NOT be contained in the Configuration list: %s", expectedValue, configString)
+ }
+ expectedValue = "/home/my_user_env"
+ if !strings.Contains(configString, expectedValue) {
+ t.Fatalf("expected %s to be contained in the Configuration list: %s", expectedValue, configString)
+ }
+
+ t.Logf("config: %#v", b.Config())
+ t.Log(node.Dump())
+}
+
+func TestRunWithMultiArg(t *testing.T) {
+ f, err := os.Open("dockerclient/testdata/Dockerfile.multiarg")
+ if err != nil {
+ t.Fatal(err)
+ }
+ node, err := ParseDockerfile(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b := NewBuilder(nil)
+ from, err := b.From(node)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if from != "alpine" {
+ t.Fatalf("unexpected from: %s", from)
+ }
+ for _, child := range node.Children {
+ step := b.Step()
+ if err := step.Resolve(child); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Run(step, LogExecutor, false); err != nil {
+ t.Fatal(err)
+ }
+ }
+ configString := fmt.Sprintf("%v", b.Config())
+ expectedValue := "multival=a=1 b=2 c=3 d=4"
+ if !strings.Contains(configString, expectedValue) {
+ t.Fatalf("expected %s to be contained in the Configuration list: %s", expectedValue, configString)
+ }
+
+ t.Logf("config: %#v", b.Config())
+ t.Log(node.Dump())
+}
+
+func TestParseDockerignore(t *testing.T) {
+ dir, err := ioutil.TempDir("", "dockerignore*")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ tests := []struct {
+ input, result []string
+ }{
+ {
+ input: []string{"first", "second", "", "third", "fourth"},
+ result: []string{"first", "second", "third", "fourth"},
+ },
+ {
+ input: []string{"#first", "#second", "", "third", "fourth"},
+ result: []string{"third", "fourth"},
+ },
+ {
+ input: []string{"", "first", "second", "", " #third", "#invalid pattern which shouldn't matter ("},
+ result: []string{"first", "second", " #third"},
+ },
+ {
+ input: []string{"", "first", "second", "", "#third", ""},
+ result: []string{"first", "second"},
+ },
+ {
+ input: []string{"first", "second", "", "th#rd", "fourth", "fifth#"},
+ result: []string{"first", "second", "th#rd", "fourth", "fifth#"},
+ },
+ {
+ input: []string{"/first", "second/", "/third/", "///fourth//", "fif/th#", "/"},
+ result: []string{"first", "second", "third", "fourth", "fif/th#"},
+ },
+ }
+
+ testIgnore := func(ignorefile string) {
+ for _, test := range tests {
+ f, err := os.Create(ignorefile)
+ if err != nil {
+ t.Fatalf("error creating %q: %v", ignorefile, err)
+ }
+ fmt.Fprintf(f, "%s\n", strings.Join(test.input, "\n"))
+ f.Close()
+ excludes, err := ParseDockerignore(dir)
+ if err != nil {
+ t.Fatalf("error reading %q: %v", ignorefile, err)
+ }
+ if err := os.Remove(ignorefile); err != nil {
+ t.Fatalf("failed to remove ignore file: %v", err)
+ }
+ if len(excludes) != len(test.result) {
+ t.Errorf("expected to read back %#v, got %#v", test.result, excludes)
+ continue
+ }
+ for i := range excludes {
+ if excludes[i] != test.result[i] {
+ t.Errorf("expected to read back %#v, got %#v", test.result, excludes)
+ }
+ }
+ }
+ }
+ testIgnore(filepath.Join(dir, ".containerignore"))
+ testIgnore(filepath.Join(dir, ".dockerignore"))
+ // Create empty .dockerignore to test in same directory as .containerignore
+ f, err := os.Create(filepath.Join(dir, ".dockerignore"))
+ if err != nil {
+ t.Fatalf("error creating: %v", err)
+ }
+ f.Close()
+ testIgnore(filepath.Join(dir, ".containerignore"))
+ os.Remove(filepath.Join(dir, ".dockerignore"))
+
+ ignorefile := filepath.Join(dir, "ignore")
+ for _, test := range tests {
+ f, err := os.Create(ignorefile)
+ if err != nil {
+ t.Fatalf("error creating %q: %v", ignorefile, err)
+ }
+ fmt.Fprintf(f, "%s\n", strings.Join(test.input, "\n"))
+ f.Close()
+ excludes, err := ParseIgnore(ignorefile)
+ if err != nil {
+ t.Fatalf("error reading %q: %v", ignorefile, err)
+ }
+ if err := os.Remove(ignorefile); err != nil {
+ t.Fatalf("failed to remove ignore file: %v", err)
+ }
+ if len(excludes) != len(test.result) {
+ t.Errorf("expected to read back %#v, got %#v", test.result, excludes)
+ continue
+ }
+ for i := range excludes {
+ if excludes[i] != test.result[i] {
+ t.Errorf("expected to read back %#v, got %#v", test.result, excludes)
+ }
+ }
+ }
+}
diff --git a/cmd/imagebuilder/imagebuilder.go b/cmd/imagebuilder/imagebuilder.go
new file mode 100644
index 0000000..e3f3067
--- /dev/null
+++ b/cmd/imagebuilder/imagebuilder.go
@@ -0,0 +1,236 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/distribution/reference"
+ dockertypes "github.com/docker/docker/api/types"
+ docker "github.com/fsouza/go-dockerclient"
+ "k8s.io/klog"
+
+ "github.com/openshift/imagebuilder"
+ "github.com/openshift/imagebuilder/dockerclient"
+)
+
+func init() {
+ klog.InitFlags(flag.CommandLine)
+}
+
+func main() {
+ log.SetFlags(0)
+ options := dockerclient.NewClientExecutor(nil)
+ var tags stringSliceFlag
+ var target string
+ var dockerfilePath string
+ var imageFrom string
+ var privileged bool
+ var version bool
+ var mountSpecs stringSliceFlag
+
+ VERSION := "1.2.5"
+ arguments := stringMapFlag{}
+
+ flag.Var(&tags, "t", "The name to assign this image, if any. May be specified multiple times.")
+ flag.Var(&tags, "tag", "The name to assign this image, if any. May be specified multiple times.")
+ flag.Var(&arguments, "build-arg", "An optional list of build-time variables usable as ARG in Dockerfile. Use --build-arg ARG1=VAL1 --build-arg ARG2=VAL2 syntax for passing multiple build args.")
+ flag.StringVar(&dockerfilePath, "f", dockerfilePath, "An optional path to a Dockerfile to use. You may pass multiple docker files using the operating system delimiter.")
+ flag.StringVar(&dockerfilePath, "file", dockerfilePath, "An optional path to a Dockerfile to use. You may pass multiple docker files using the operating system delimiter.")
+ flag.StringVar(&imageFrom, "from", imageFrom, "An optional FROM to use instead of the one in the Dockerfile.")
+ flag.StringVar(&target, "target", "", "The name of a stage within the Dockerfile to build.")
+ flag.Var(&mountSpecs, "mount", "An optional list of files and directories to mount during the build. Use SRC:DST syntax for each path.")
+ flag.BoolVar(&options.AllowPull, "allow-pull", true, "Pull the images that are not present.")
+ flag.BoolVar(&options.IgnoreUnrecognizedInstructions, "ignore-unrecognized-instructions", true, "If an unrecognized Docker instruction is encountered, warn but do not fail the build.")
+ flag.BoolVar(&options.StrictVolumeOwnership, "strict-volume-ownership", false, "Due to limitations in docker `cp`, owner permissions on volumes are lost. This flag will fail builds that might fall victim to this.")
+ flag.BoolVar(&privileged, "privileged", false, "Builds run as privileged containers instead of restricted containers.")
+ flag.BoolVar(&version, "version", false, "Display imagebuilder version.")
+
+ flag.Parse()
+
+ args := flag.Args()
+ if version {
+ fmt.Println(VERSION)
+ return
+ }
+
+ if len(args) != 1 {
+ log.Fatalf("You must provide one argument, the name of a directory to build")
+ }
+
+ options.Directory = args[0]
+ if len(tags) > 0 {
+ options.Tag = tags[0]
+ options.AdditionalTags = tags[1:]
+ }
+ if len(dockerfilePath) == 0 {
+ dockerfilePath = filepath.Join(options.Directory, "Dockerfile")
+ }
+
+ if privileged {
+ if options.HostConfig == nil {
+ options.HostConfig = &docker.HostConfig{}
+ }
+ options.HostConfig.Privileged = true
+ }
+
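+ // Each --mount value is of the form SRC:DST, e.g.
+ // --mount /tmp/build-cache:/cache (illustrative paths).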
+ var mounts []dockerclient.Mount
+ for _, s := range mountSpecs {
+ segments := strings.Split(s, ":")
+ if len(segments) != 2 {
+ log.Fatalf("--mount must be of the form SOURCE:DEST")
+ }
+ mounts = append(mounts, dockerclient.Mount{SourcePath: segments[0], DestinationPath: segments[1]})
+ }
+ options.TransientMounts = mounts
+
+ options.Out, options.ErrOut = os.Stdout, os.Stderr
+ authConfigurations, err := docker.NewAuthConfigurationsFromDockerCfg()
+ if err != nil {
+ log.Fatalf("reading authentication configurations: %v", err)
+ }
+ if authConfigurations == nil {
+ klog.V(4).Infof("No authentication secrets found")
+ }
+
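+ // AuthFn resolves credentials for an image name: it tries the exact
+ // name first, then the registry domain from the normalized reference,
+ // and finally the known docker.io aliases (docker.io, index.docker.io,
+ // https://index.docker.io/v1/).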
+ options.AuthFn = func(name string) ([]dockertypes.AuthConfig, bool) {
+ if authConfigurations != nil {
+ if authConfig, ok := authConfigurations.Configs[name]; ok {
+ klog.V(4).Infof("Found authentication secret for registry %q", name)
+ return []dockertypes.AuthConfig{{
+ Username: authConfig.Username,
+ Password: authConfig.Password,
+ Email: authConfig.Email,
+ ServerAddress: authConfig.ServerAddress,
+ }}, true
+ }
+ if named, err := reference.ParseNormalizedNamed(name); err == nil {
+ domain := reference.Domain(named)
+ if authConfig, ok := authConfigurations.Configs[domain]; ok {
+ klog.V(4).Infof("Found authentication secret for registry %q", domain)
+ return []dockertypes.AuthConfig{{
+ Username: authConfig.Username,
+ Password: authConfig.Password,
+ Email: authConfig.Email,
+ ServerAddress: authConfig.ServerAddress,
+ }}, true
+ }
+ if domain == "docker.io" || strings.HasSuffix(domain, ".docker.io") {
+ var auths []dockertypes.AuthConfig
+ for _, aka := range []string{"docker.io", "index.docker.io", "https://index.docker.io/v1/"} {
+ if aka == domain {
+ continue
+ }
+ if authConfig, ok := authConfigurations.Configs[aka]; ok {
+ klog.V(4).Infof("Found authentication secret for registry %q", aka)
+ auths = append(auths, dockertypes.AuthConfig{
+ Username: authConfig.Username,
+ Password: authConfig.Password,
+ Email: authConfig.Email,
+ ServerAddress: authConfig.ServerAddress,
+ })
+ }
+ }
+ if len(auths) > 0 {
+ return auths, true
+ }
+ }
+ }
+ }
+ return nil, false
+ }
+ options.LogFn = func(format string, args ...interface{}) {
+ if klog.V(2) {
+ log.Printf("Builder: "+format, args...)
+ } else {
+ fmt.Fprintf(options.Out, "--> %s\n", fmt.Sprintf(format, args...))
+ }
+ }
+
+ dockerfiles := filepath.SplitList(dockerfilePath)
+ if len(dockerfiles) == 0 {
+ dockerfiles = []string{filepath.Join(options.Directory, "Dockerfile")}
+ }
+
+ if err := build(dockerfiles[0], dockerfiles[1:], arguments, imageFrom, target, options); err != nil {
+ log.Fatal(err.Error())
+ }
+}
+
+func build(dockerfile string, additionalDockerfiles []string, arguments map[string]string, from string, target string, e *dockerclient.ClientExecutor) error {
+ if err := e.DefaultExcludes(); err != nil {
+ return fmt.Errorf("error: Could not parse default .dockerignore: %v", err)
+ }
+
+ client, err := docker.NewClientFromEnv()
+ if err != nil {
+ return fmt.Errorf("error: No connection to Docker available: %v", err)
+ }
+ e.Client = client
+
+ // TODO: handle signals
+ defer func() {
+ for _, err := range e.Release() {
+ fmt.Fprintf(e.ErrOut, "error: Unable to clean up build: %v\n", err)
+ }
+ }()
+
+ node, err := imagebuilder.ParseFile(dockerfile)
+ if err != nil {
+ return err
+ }
+ for _, s := range additionalDockerfiles {
+ additionalNode, err := imagebuilder.ParseFile(s)
+ if err != nil {
+ return err
+ }
+ node.Children = append(node.Children, additionalNode.Children...)
+ }
+
+ b := imagebuilder.NewBuilder(arguments)
+ stages, err := imagebuilder.NewStages(node, b)
+ if err != nil {
+ return err
+ }
+ stages, ok := stages.ByTarget(target)
+ if !ok {
+ return fmt.Errorf("error: The target %q was not found in the provided Dockerfile", target)
+ }
+
+ lastExecutor, err := e.Stages(b, stages, from)
+ if err != nil {
+ return err
+ }
+
+ return lastExecutor.Commit(stages[len(stages)-1].Builder)
+}
+
+type stringSliceFlag []string
+
+func (f *stringSliceFlag) Set(s string) error {
+ *f = append(*f, s)
+ return nil
+}
+
+func (f *stringSliceFlag) String() string {
+ return strings.Join(*f, " ")
+}
+
+type stringMapFlag map[string]string
+
+func (f *stringMapFlag) String() string {
+ args := []string{}
+ for k, v := range *f {
+ args = append(args, strings.Join([]string{k, v}, "="))
+ }
+ return strings.Join(args, " ")
+}
+
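+// Set records a single NAME=VALUE build argument; for example,
+// --build-arg FOO=bar stores {"FOO": "bar"}. The value may itself
+// contain '=' characters.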
+func (f *stringMapFlag) Set(value string) error {
+ kv := strings.Split(value, "=")
+ (*f)[kv[0]] = kv[1]
+ return nil
+}
diff --git a/constants.go b/constants.go
new file mode 100644
index 0000000..7b41e5a
--- /dev/null
+++ b/constants.go
@@ -0,0 +1,9 @@
+package imagebuilder
+
+const (
+ // in docker/system
+ NoBaseImageSpecifier = "scratch"
+
+ // in docker/system
+ defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+)
diff --git a/dispatchers.go b/dispatchers.go
new file mode 100644
index 0000000..f264876
--- /dev/null
+++ b/dispatchers.go
@@ -0,0 +1,711 @@
+package imagebuilder
+
+// This file contains the dispatchers for each command. Note that
+// `nullDispatch` is not actually a command, but a no-op handler for
+// commands we parse and do nothing with.
+//
+// See evaluator.go for a higher level discussion of the whole evaluator
+// package.
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ docker "github.com/fsouza/go-dockerclient"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/containers/storage/pkg/regexp"
+ "github.com/openshift/imagebuilder/signal"
+ "github.com/openshift/imagebuilder/strslice"
+)
+
+var (
+ obRegex = regexp.Delayed(`(?i)^\s*ONBUILD\s*`)
+)
+
+var localspec = platforms.DefaultSpec()
+
+// https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
+var builtinBuildArgs = map[string]string{
+ "TARGETPLATFORM": localspec.OS + "/" + localspec.Architecture,
+ "TARGETOS": localspec.OS,
+ "TARGETARCH": localspec.Architecture,
+ "TARGETVARIANT": localspec.Variant,
+ "BUILDPLATFORM": localspec.OS + "/" + localspec.Architecture,
+ "BUILDOS": localspec.OS,
+ "BUILDARCH": localspec.Architecture,
+ "BUILDVARIANT": localspec.Variant,
+}
+
+func init() {
+ if localspec.Variant != "" {
+ builtinBuildArgs["TARGETPLATFORM"] = builtinBuildArgs["TARGETPLATFORM"] + "/" + localspec.Variant
+ builtinBuildArgs["BUILDPLATFORM"] = builtinBuildArgs["BUILDPLATFORM"] + "/" + localspec.Variant
+ }
+}
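+
+// For example, on a linux/arm/v7 build host the defaults above resolve to
+// TARGETPLATFORM="linux/arm/v7", TARGETOS="linux", TARGETARCH="arm", and
+// TARGETVARIANT="v7", with matching values for the BUILD* counterparts.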
+
+// ENV foo bar
+//
+// Sets the environment variable foo to bar, and makes ${foo} available for
+// interpolation in the Dockerfile from the next statement on.
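+//
+// For example, `ENV A=1 B=2` reaches this dispatcher as
+// args = ["A", "1", "B", "2"]; the loop below consumes name/value pairs,
+// yielding the entries "A=1" and "B=2".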
+func env(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("ENV")
+ }
+
+ if len(args)%2 != 0 {
+ // should never get here, but just in case
+ return errTooManyArguments("ENV")
+ }
+
+ // TODO/FIXME/NOT USED
+ // Just here to show how to use the builder flags stuff within the
+ // context of a builder command. Will remove once we actually add
+ // a builder command to something!
+ /*
+ flBool1 := b.flags.AddBool("bool1", false)
+ flStr1 := b.flags.AddString("str1", "HI")
+
+ if err := b.flags.Parse(); err != nil {
+ return err
+ }
+
+ fmt.Printf("Bool1:%v\n", flBool1)
+ fmt.Printf("Str1:%v\n", flStr1)
+ */
+
+ for j := 0; j < len(args); j++ {
+ // name ==> args[j]
+ // value ==> args[j+1]
+ newVar := []string{args[j] + "=" + args[j+1]}
+ b.RunConfig.Env = mergeEnv(b.RunConfig.Env, newVar)
+ b.Env = mergeEnv(b.Env, newVar)
+ j++
+ }
+
+ return nil
+}
+
+// MAINTAINER some text <maybe@an.email.address>
+//
+// Sets the maintainer metadata.
+func maintainer(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("MAINTAINER")
+ }
+ b.Author = args[0]
+ return nil
+}
+
+// LABEL some json data describing the image
+//
+// Sets the image label foo to bar.
+func label(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("LABEL")
+ }
+ if len(args)%2 != 0 {
+ // should never get here, but just in case
+ return errTooManyArguments("LABEL")
+ }
+
+ if b.RunConfig.Labels == nil {
+ b.RunConfig.Labels = map[string]string{}
+ }
+
+ for j := 0; j < len(args); j++ {
+ // name ==> args[j]
+ // value ==> args[j+1]
+ b.RunConfig.Labels[args[j]] = args[j+1]
+ j++
+ }
+ return nil
+}
+
+// ADD foo /path
+//
+// Add the file 'foo' to '/path'. Tarball and remote URL (git, http) handling
+// exists here. If you do not wish to have this automatic handling, use COPY.
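+//
+// For example, `ADD --chmod=755 https://example.com/file.tar.gz /opt/` (an
+// illustrative URL) records a pending copy with Download set to true.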
+func add(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) < 2 {
+ return errAtLeastTwoArgument("ADD")
+ }
+ var chown string
+ var chmod string
+ last := len(args) - 1
+ dest := makeAbsolute(args[last], b.RunConfig.WorkingDir)
+ filteredUserArgs := make(map[string]string)
+ for k, v := range b.Args {
+ if _, ok := b.AllowedArgs[k]; ok {
+ filteredUserArgs[k] = v
+ }
+ }
+ userArgs := mergeEnv(envMapAsSlice(filteredUserArgs), b.Env)
+ for _, a := range flagArgs {
+ arg, err := ProcessWord(a, userArgs)
+ if err != nil {
+ return err
+ }
+ switch {
+ case strings.HasPrefix(arg, "--chown="):
+ chown = strings.TrimPrefix(arg, "--chown=")
+ case strings.HasPrefix(arg, "--chmod="):
+ chmod = strings.TrimPrefix(arg, "--chmod=")
+ err = checkChmodConversion(chmod)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flag")
+ }
+ }
+ b.PendingCopies = append(b.PendingCopies, Copy{Src: args[0:last], Dest: dest, Download: true, Chown: chown, Chmod: chmod})
+ return nil
+}
+
+// COPY foo /path
+//
+// Same as 'ADD' but without the tar and remote url handling.
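+//
+// For example, `COPY --from=builder /go/bin/app /usr/local/bin/` records a
+// pending copy whose From is "builder" and whose Download is false.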
+func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) < 2 {
+ return errAtLeastTwoArgument("COPY")
+ }
+ last := len(args) - 1
+ dest := makeAbsolute(args[last], b.RunConfig.WorkingDir)
+ var chown string
+ var chmod string
+ var from string
+ userArgs := mergeEnv(envMapAsSlice(b.Args), b.Env)
+ for _, a := range flagArgs {
+ arg, err := ProcessWord(a, userArgs)
+ if err != nil {
+ return err
+ }
+ switch {
+ case strings.HasPrefix(arg, "--chown="):
+ chown = strings.TrimPrefix(arg, "--chown=")
+ case strings.HasPrefix(arg, "--chmod="):
+ chmod = strings.TrimPrefix(arg, "--chmod=")
+ err = checkChmodConversion(chmod)
+ if err != nil {
+ return err
+ }
+ case strings.HasPrefix(arg, "--from="):
+ from = strings.TrimPrefix(arg, "--from=")
+ default:
+ return fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
+ }
+ }
+ b.PendingCopies = append(b.PendingCopies, Copy{From: from, Src: args[0:last], Dest: dest, Download: false, Chown: chown, Chmod: chmod})
+ return nil
+}
+
+// FROM imagename
+//
+// This sets the image the dockerfile will build on top of.
+func from(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ switch {
+ case len(args) == 1:
+ case len(args) == 3 && len(args[0]) > 0 && strings.EqualFold(args[1], "as") && len(args[2]) > 0:
+
+ default:
+ return fmt.Errorf("FROM requires either one argument, or three: FROM <source> [as <name>]")
+ }
+
+ name := args[0]
+
+ // Support ARG declared before FROM when expanding the image name.
+ argStrs := []string{}
+ for n, v := range b.HeadingArgs {
+ argStrs = append(argStrs, n+"="+v)
+ }
+ defaultArgs := envMapAsSlice(builtinBuildArgs)
+ filteredUserArgs := make(map[string]string)
+ for k, v := range b.UserArgs {
+ for _, a := range b.GlobalAllowedArgs {
+ if a == k {
+ filteredUserArgs[k] = v
+ }
+ }
+ }
+ userArgs := mergeEnv(envMapAsSlice(filteredUserArgs), b.Env)
+ userArgs = mergeEnv(defaultArgs, userArgs)
+ nameArgs := mergeEnv(argStrs, userArgs)
+ var err error
+ if name, err = ProcessWord(name, nameArgs); err != nil {
+ return err
+ }
+
+ // Windows cannot support a container with no base image.
+ if name == NoBaseImageSpecifier {
+ if runtime.GOOS == "windows" {
+ return fmt.Errorf("Windows does not support FROM scratch")
+ }
+ }
+ for _, a := range flagArgs {
+ arg, err := ProcessWord(a, userArgs)
+ if err != nil {
+ return err
+ }
+ switch {
+ case strings.HasPrefix(arg, "--platform="):
+ platformString := strings.TrimPrefix(arg, "--platform=")
+ b.Platform = platformString
+ default:
+ return fmt.Errorf("FROM only supports the --platform flag")
+ }
+ }
+ b.RunConfig.Image = name
+ // TODO: handle onbuild
+ return nil
+}
+
+// ONBUILD RUN echo yo
+//
+// ONBUILD triggers run when the image is used in a FROM statement.
+//
+// ONBUILD handling has a lot of special-case functionality; the heading in
+// evaluator.go and the comments around dispatch() in the same file explain
+// the special cases. Search for 'OnBuild' in internals.go for additional
+// special cases.
+func onbuild(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("ONBUILD")
+ }
+
+ triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
+ switch triggerInstruction {
+ case "ONBUILD":
+ return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+ case "MAINTAINER", "FROM":
+ return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
+ }
+
+ original = obRegex.ReplaceAllString(original, "")
+
+ b.RunConfig.OnBuild = append(b.RunConfig.OnBuild, original)
+ return nil
+}
+
+// WORKDIR /tmp
+//
+// Set the working directory for future RUN/CMD/etc statements.
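+//
+// Relative paths are joined onto the current working directory, so
+// `WORKDIR a` followed by `WORKDIR b` resolves to /a/b.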
+func workdir(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("WORKDIR")
+ }
+
+ // This is from the Dockerfile and will not necessarily be in platform
+ // specific semantics, hence ensure it is converted.
+ workdir := filepath.FromSlash(args[0])
+
+ if !filepath.IsAbs(workdir) {
+ current := filepath.FromSlash(b.RunConfig.WorkingDir)
+ workdir = filepath.Join(string(os.PathSeparator), current, workdir)
+ }
+
+ b.RunConfig.WorkingDir = workdir
+ return nil
+}
+
+// RUN some command yo
+//
+// Run a command and commit the image. Args are automatically prepended with
+// 'sh -c' under Linux or 'cmd /S /C' under Windows, in the event there is
+// only one argument. The difference in processing:
+//
+// RUN echo hi # sh -c echo hi (Linux)
+// RUN echo hi # cmd /S /C echo hi (Windows)
+// RUN [ "echo", "hi" ] # echo hi
+func run(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if b.RunConfig.Image == "" {
+ return fmt.Errorf("Please provide a source image with `from` prior to run")
+ }
+
+ args = handleJSONArgs(args, attributes)
+
+ var mounts []string
+ var network string
+ filteredUserArgs := make(map[string]string)
+ for k, v := range b.Args {
+ if _, ok := b.AllowedArgs[k]; ok {
+ filteredUserArgs[k] = v
+ }
+ }
+ userArgs := mergeEnv(envMapAsSlice(filteredUserArgs), b.Env)
+ for _, a := range flagArgs {
+ arg, err := ProcessWord(a, userArgs)
+ if err != nil {
+ return err
+ }
+ switch {
+ case strings.HasPrefix(arg, "--mount="):
+ mount := strings.TrimPrefix(arg, "--mount=")
+ mounts = append(mounts, mount)
+ case strings.HasPrefix(arg, "--network="):
+ network = strings.TrimPrefix(arg, "--network=")
+ default:
+ return fmt.Errorf("RUN only supports the --mount and --network flag")
+ }
+ }
+
+ run := Run{
+ Args: args,
+ Mounts: mounts,
+ Network: network,
+ }
+
+ if !attributes["json"] {
+ run.Shell = true
+ }
+ b.PendingRuns = append(b.PendingRuns, run)
+ return nil
+}
+
+// CMD foo
+//
+// Set the default command to run in the container (which may be empty).
+// Argument handling is the same as RUN.
+func cmd(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ cmdSlice := handleJSONArgs(args, attributes)
+
+ if !attributes["json"] {
+ if runtime.GOOS != "windows" {
+ cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
+ } else {
+ cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...)
+ }
+ }
+
+ b.RunConfig.Cmd = strslice.StrSlice(cmdSlice)
+ if len(args) != 0 {
+ b.CmdSet = true
+ }
+ return nil
+}
+
+// ENTRYPOINT /usr/sbin/nginx
+//
+// Set the entrypoint (which defaults to sh -c on Linux, or cmd /S /C on Windows) to
+// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx.
+//
+// Handles command processing similar to CMD and RUN, only b.RunConfig.Entrypoint
+// is initialized at NewBuilder time instead of through argument parsing.
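+//
+// For example, `ENTRYPOINT ["/usr/sbin/nginx", "-g", "daemon off;"]` is kept
+// as-is, while the shell form `ENTRYPOINT nginx` becomes
+// ["/bin/sh", "-c", "nginx"] on Linux.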
+func entrypoint(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ parsed := handleJSONArgs(args, attributes)
+
+ switch {
+ case attributes["json"]:
+ // ENTRYPOINT ["echo", "hi"]
+ b.RunConfig.Entrypoint = strslice.StrSlice(parsed)
+ case len(parsed) == 0:
+ // ENTRYPOINT []
+ b.RunConfig.Entrypoint = nil
+ default:
+ // ENTRYPOINT echo hi
+ if runtime.GOOS != "windows" {
+ b.RunConfig.Entrypoint = strslice.StrSlice{"/bin/sh", "-c", parsed[0]}
+ } else {
+ b.RunConfig.Entrypoint = strslice.StrSlice{"cmd", "/S", "/C", parsed[0]}
+ }
+ }
+
+ // when setting the entrypoint if a CMD was not explicitly set then
+ // set the command to nil
+ if !b.CmdSet {
+ b.RunConfig.Cmd = nil
+ }
+ return nil
+}
+
+// EXPOSE 6667/tcp 7000/tcp
+//
+// Expose ports for links and port mappings. This all ends up in
+// b.RunConfig.ExposedPorts for runconfig.
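+//
+// A bare port such as `EXPOSE 3469` is normalized to "3469/tcp", and ports
+// that are already exposed are not added a second time.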
+func expose(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("EXPOSE")
+ }
+
+ if b.RunConfig.ExposedPorts == nil {
+ b.RunConfig.ExposedPorts = make(map[docker.Port]struct{})
+ }
+
+ existing := map[string]struct{}{}
+ for k := range b.RunConfig.ExposedPorts {
+ existing[k.Port()] = struct{}{}
+ }
+
+ for _, port := range args {
+ dp := docker.Port(port)
+ if _, exists := existing[dp.Port()]; !exists {
+ b.RunConfig.ExposedPorts[docker.Port(fmt.Sprintf("%s/%s", dp.Port(), dp.Proto()))] = struct{}{}
+ }
+ }
+ return nil
+}
+
+// USER foo
+//
+// Set the user to 'foo' for future commands and when running the
+// ENTRYPOINT/CMD at container run time.
+func user(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("USER")
+ }
+
+ b.RunConfig.User = args[0]
+ return nil
+}
+
+// VOLUME /foo
+//
+// Expose the volume /foo for use. Will also accept the JSON array form.
+func volume(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("VOLUME")
+ }
+
+ if b.RunConfig.Volumes == nil {
+ b.RunConfig.Volumes = map[string]struct{}{}
+ }
+ for _, v := range args {
+ v = strings.TrimSpace(v)
+ if v == "" {
+ return fmt.Errorf("Volume specified can not be an empty string")
+ }
+ b.RunConfig.Volumes[v] = struct{}{}
+ b.PendingVolumes.Add(v)
+ }
+ return nil
+}
+
+// STOPSIGNAL signal
+//
+// Set the signal that will be used to kill the container.
+func stopSignal(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("STOPSIGNAL")
+ }
+
+ sig := args[0]
+ if err := signal.CheckSignal(sig); err != nil {
+ return err
+ }
+
+ b.RunConfig.StopSignal = sig
+ return nil
+}
+
+// HEALTHCHECK foo
+//
+// Set the default healthcheck command to run in the container (which may be empty).
+// Argument handling is the same as RUN.
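+//
+// For example (mirroring dockerclient/testdata/Dockerfile.healthcheck):
+//
+//	HEALTHCHECK --start-period=8s --interval=5s --timeout=3s --retries=3 \
+//	  CMD /app/check.sh --quiet
+//
+// produces a HealthConfig whose Test is ["CMD-SHELL", "/app/check.sh --quiet"].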
+func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("HEALTHCHECK")
+ }
+ typ := strings.ToUpper(args[0])
+ args = args[1:]
+ if typ == "NONE" {
+ if len(args) != 0 {
+ return fmt.Errorf("HEALTHCHECK NONE takes no arguments")
+ }
+ test := strslice.StrSlice{typ}
+ b.RunConfig.Healthcheck = &docker.HealthConfig{
+ Test: test,
+ }
+ } else {
+ if b.RunConfig.Healthcheck != nil {
+ oldCmd := b.RunConfig.Healthcheck.Test
+ if len(oldCmd) > 0 && oldCmd[0] != "NONE" {
+ b.Warnings = append(b.Warnings, fmt.Sprintf("Note: overriding previous HEALTHCHECK: %v\n", oldCmd))
+ }
+ }
+
+ healthcheck := docker.HealthConfig{}
+
+ flags := flag.NewFlagSet("", flag.ContinueOnError)
+ flags.String("start-period", "", "")
+ flags.String("interval", "", "")
+ flags.String("timeout", "", "")
+ flRetries := flags.String("retries", "", "")
+
+ if err := flags.Parse(flagArgs); err != nil {
+ return err
+ }
+
+ switch typ {
+ case "CMD":
+ cmdSlice := handleJSONArgs(args, attributes)
+ if len(cmdSlice) == 0 {
+ return fmt.Errorf("Missing command after HEALTHCHECK CMD")
+ }
+
+ if !attributes["json"] {
+ typ = "CMD-SHELL"
+ }
+
+ healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...))
+ default:
+ return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
+ }
+
+ period, err := parseOptInterval(flags.Lookup("start-period"))
+ if err != nil {
+ return err
+ }
+ healthcheck.StartPeriod = period
+
+ interval, err := parseOptInterval(flags.Lookup("interval"))
+ if err != nil {
+ return err
+ }
+ healthcheck.Interval = interval
+
+ timeout, err := parseOptInterval(flags.Lookup("timeout"))
+ if err != nil {
+ return err
+ }
+ healthcheck.Timeout = timeout
+
+ if *flRetries != "" {
+ retries, err := strconv.ParseInt(*flRetries, 10, 32)
+ if err != nil {
+ return err
+ }
+ if retries < 1 {
+ return fmt.Errorf("--retries must be at least 1 (not %d)", retries)
+ }
+ healthcheck.Retries = int(retries)
+ } else {
+ healthcheck.Retries = 0
+ }
+ b.RunConfig.Healthcheck = &healthcheck
+ }
+
+ return nil
+}
+
+var targetArgs = []string{"TARGETOS", "TARGETARCH", "TARGETVARIANT"}
+
+// ARG name[=value]
+//
+// Adds the variable foo to the trusted list of variables that can be passed
+// to the builder using the --build-arg flag for expansion/substitution or
+// passing to 'run'. The Dockerfile author may optionally set a default value.
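+//
+// For example, `ARG FOO` makes a heading (pre-FROM) `ARG FOO=...` value
+// visible in the current stage, while `ARG FOO=default` sets a stage
+// default; a value passed via --build-arg overrides either.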
+func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ var (
+ name string
+ value string
+ hasDefault bool
+ )
+
+ for _, argument := range args {
+ arg := argument
+ // 'arg' can just be a name or name-value pair. Note that this is different
+ // from 'env' that handles the split of name and value at the parser level.
+ // The reason for doing it differently for 'arg' is that we support just
+ // defining an arg and not assign it a value (while 'env' always expects a
+ // name-value pair). If possible, it will be good to harmonize the two.
+ if strings.Contains(arg, "=") {
+ parts := strings.SplitN(arg, "=", 2)
+ name = parts[0]
+ value = parts[1]
+ hasDefault = true
+ if name == "TARGETPLATFORM" {
+ p, err := platforms.Parse(value)
+ if err != nil {
+ return fmt.Errorf("error parsing TARGETPLATFORM argument")
+ }
+ for _, val := range targetArgs {
+ b.AllowedArgs[val] = true
+ }
+ b.Args["TARGETPLATFORM"] = p.OS + "/" + p.Architecture
+ b.Args["TARGETOS"] = p.OS
+ b.Args["TARGETARCH"] = p.Architecture
+ b.Args["TARGETVARIANT"] = p.Variant
+ if p.Variant != "" {
+ b.Args["TARGETPLATFORM"] = b.Args["TARGETPLATFORM"] + "/" + p.Variant
+ }
+ }
+ } else if val, ok := builtinBuildArgs[arg]; ok {
+ name = arg
+ value = val
+ hasDefault = true
+ } else {
+ name = arg
+ hasDefault = false
+ }
+ // add the arg to allowed list of build-time args from this step on.
+ b.AllowedArgs[name] = true
+
+ // If there is still no default value, a value can be assigned from the heading args
+ if val, ok := b.HeadingArgs[name]; ok && !hasDefault {
+ b.Args[name] = val
+ }
+
+ // If there is a default value associated with this arg then add it to the
+ // b.buildArgs, later default values for the same arg override earlier ones.
+ // The args passed to builder (UserArgs) override the default value of 'arg'
+ // Don't add them here as they were already set in NewBuilder.
+ if _, ok := b.UserArgs[name]; !ok && hasDefault {
+ b.Args[name] = value
+ }
+ }
+
+ return nil
+}
+
+// SHELL ["powershell", "-command"]
+//
+// Set the non-default shell to use; only the JSON array form is accepted.
+func shell(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ shellSlice := handleJSONArgs(args, attributes)
+ switch {
+ case len(shellSlice) == 0:
+ // SHELL []
+ return errAtLeastOneArgument("SHELL")
+ case attributes["json"]:
+ // SHELL ["powershell", "-command"]
+ b.RunConfig.Shell = strslice.StrSlice(shellSlice)
+ default:
+ // SHELL powershell -command - not JSON
+ return errNotJSON("SHELL")
+ }
+ return nil
+}
+
+func checkChmodConversion(chmod string) error {
+ _, err := strconv.ParseUint(chmod, 8, 32)
+ if err != nil {
+ return fmt.Errorf("Error parsing chmod %s", chmod)
+ }
+ return nil
+}
+
+func errAtLeastOneArgument(command string) error {
+ return fmt.Errorf("%s requires at least one argument", command)
+}
+
+func errAtLeastTwoArgument(command string) error {
+ return fmt.Errorf("%s requires at least two arguments", command)
+}
+
+func errExactlyOneArgument(command string) error {
+ return fmt.Errorf("%s requires exactly one argument", command)
+}
+
+func errTooManyArguments(command string) error {
+ return fmt.Errorf("Bad input to %s, too many arguments", command)
+}
+
+func errNotJSON(command string) error {
+ return fmt.Errorf("%s requires the arguments to be in JSON form", command)
+}
diff --git a/dispatchers_test.go b/dispatchers_test.go
new file mode 100644
index 0000000..d72ab5f
--- /dev/null
+++ b/dispatchers_test.go
@@ -0,0 +1,800 @@
+package imagebuilder
+
+import (
+ "errors"
+ "reflect"
+ "sort"
+ "testing"
+
+ "github.com/containerd/containerd/platforms"
+ docker "github.com/fsouza/go-dockerclient"
+)
+
+func TestDispatchArgDefaultBuiltins(t *testing.T) {
+ mybuilder := *NewBuilder(make(map[string]string))
+ args := []string{"TARGETPLATFORM"}
+ if err := arg(&mybuilder, args, nil, nil, ""); err != nil {
+ t.Errorf("arg error: %v", err)
+ }
+ args = []string{"BUILDARCH"}
+ if err := arg(&mybuilder, args, nil, nil, ""); err != nil {
+ t.Errorf("arg(2) error: %v", err)
+ }
+ localspec := platforms.DefaultSpec()
+ expectedArgs := []string{
+ "BUILDARCH=" + localspec.Architecture,
+ "TARGETPLATFORM=" + localspec.OS + "/" + localspec.Architecture,
+ }
+ got := mybuilder.Arguments()
+ sort.Strings(got)
+ if !reflect.DeepEqual(got, expectedArgs) {
+ t.Errorf("Expected %v, got %v\n", expectedArgs, got)
+ }
+}
+
+func TestDispatchArgTargetPlatform(t *testing.T) {
+ mybuilder := *NewBuilder(make(map[string]string))
+ args := []string{"TARGETPLATFORM=linux/arm/v7"}
+ if err := arg(&mybuilder, args, nil, nil, ""); err != nil {
+ t.Errorf("arg error: %v", err)
+ }
+ expectedArgs := []string{
+ "TARGETARCH=arm",
+ "TARGETOS=linux",
+ "TARGETPLATFORM=linux/arm/v7",
+ "TARGETVARIANT=v7",
+ }
+ got := mybuilder.Arguments()
+ sort.Strings(got)
+ if !reflect.DeepEqual(got, expectedArgs) {
+ t.Errorf("Expected %v, got %v\n", expectedArgs, got)
+ }
+}
+
+func TestDispatchArgTargetPlatformBad(t *testing.T) {
+ mybuilder := *NewBuilder(make(map[string]string))
+ args := []string{"TARGETPLATFORM=bozo"}
+ err := arg(&mybuilder, args, nil, nil, "")
+ expectedErr := errors.New("error parsing TARGETPLATFORM argument")
+ if !reflect.DeepEqual(err, expectedErr) {
+ t.Errorf("Expected %v, got %v\n", expectedErr, err)
+ }
+}
+
+func TestDispatchCopy(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ }
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--from=builder"}
+ original := "COPY --from=builder /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+ expectedPendingCopies := []Copy{
+ {
+ From: "builder",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chown: "",
+ Chmod: "",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, got %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchCopyChown(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ }
+
+ mybuilder2 := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ }
+
+ // Test Bad chown values
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chown=1376:1376"}
+ original := "COPY --chown=1376:1376 /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chown: "6731:6731",
+ },
+ }
+ if reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to not match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+
+ // Test Good chown values
+ flagArgs = []string{"--chown=6731:6731"}
+ original = "COPY --chown=6731:6731 /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder2, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+ expectedPendingCopies = []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chown: "6731:6731",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder2.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder2.PendingCopies)
+ }
+}
+
+func TestDispatchCopyChmod(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ }
+
+ mybuilder2 := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ }
+
+ // Test Bad chmod values
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chmod=888"}
+ original := "COPY --chmod=888 /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ err := dispatchCopy(&mybuilder, args, nil, flagArgs, original)
+ chmod := "888"
+ convErr := checkChmodConversion(chmod)
+ if err != nil && convErr != nil && err.Error() != convErr.Error() {
+ t.Errorf("Expected chmod conversion error, instead got error: %v", err)
+ }
+ if err == nil || convErr == nil {
+ t.Errorf("Expected conversion error for chmod %s", chmod)
+ }
+
+ // Test Good chmod values
+ flagArgs = []string{"--chmod=777"}
+ original = "COPY --chmod=777 /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder2, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chown: "",
+ Chmod: "777",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder2.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder2.PendingCopies)
+ }
+}
+
+func TestDispatchAddChownWithEnvironment(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Env: []string{"CHOWN_VAL=6731:6731"},
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chown=${CHOWN_VAL}"}
+ original := "ADD --chown=${CHOWN_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := add(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: true,
+ Chown: "6731:6731",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchAddChmodWithEnvironment(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Env: []string{"CHMOD_VAL=755"},
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chmod=${CHMOD_VAL}"}
+ original := "ADD --chmod=${CHMOD_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := add(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: true,
+ Chmod: "755",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchAddChownWithArg(t *testing.T) {
+ argsMap := make(map[string]string)
+ allowedArgs := make(map[string]bool)
+ argsMap["CHOWN_VAL"] = "6731:6731"
+ allowedArgs["CHOWN_VAL"] = true
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Args: argsMap,
+ AllowedArgs: allowedArgs,
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chown=${CHOWN_VAL}"}
+ original := "ADD --chown=${CHOWN_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := add(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: true,
+ Chown: "6731:6731",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchAddChmodWithArg(t *testing.T) {
+ argsMap := make(map[string]string)
+ allowedArgs := make(map[string]bool)
+ argsMap["CHMOD_VAL"] = "644"
+ allowedArgs["CHMOD_VAL"] = true
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Args: argsMap,
+ AllowedArgs: allowedArgs,
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chmod=${CHMOD_VAL}"}
+ original := "ADD --chmod=${CHMOD_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := add(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: true,
+ Chmod: "644",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchCopyChownWithEnvironment(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Env: []string{"CHOWN_VAL=6731:6731"},
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chown=${CHOWN_VAL}"}
+ original := "COPY --chown=${CHOWN_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chown: "6731:6731",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchCopyChmodWithEnvironment(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Env: []string{"CHMOD_VAL=660"},
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chmod=${CHMOD_VAL}"}
+ original := "COPY --chmod=${CHMOD_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chmod: "660",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchCopyChownWithArg(t *testing.T) {
+ argsMap := make(map[string]string)
+ argsMap["CHOWN_VAL"] = "6731:6731"
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Args: argsMap,
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chown=${CHOWN_VAL}"}
+ original := "COPY --chown=${CHOWN_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chown: "6731:6731",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchCopyChmodWithArg(t *testing.T) {
+ argsMap := make(map[string]string)
+ argsMap["CHMOD_VAL"] = "444"
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Args: argsMap,
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chmod=${CHMOD_VAL}"}
+ original := "COPY --chmod=${CHMOD_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chmod: "444",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchCopyChownWithSameArgAndEnv(t *testing.T) {
+ argsMap := make(map[string]string)
+ argsMap["CHOWN_VAL"] = "4321:4321"
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Args: argsMap,
+ Env: []string{"CHOWN_VAL=6731:6731"},
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chown=${CHOWN_VAL}"}
+ original := "COPY --chown=${CHOWN_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chown: "6731:6731",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchCopyChmodWithSameArgAndEnv(t *testing.T) {
+ argsMap := make(map[string]string)
+ argsMap["CHMOD_VAL"] = "777"
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ Args: argsMap,
+ Env: []string{"CHMOD_VAL=444"},
+ }
+
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chmod=${CHMOD_VAL}"}
+ original := "COPY --chmod=${CHMOD_VAL} /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager ."
+ if err := dispatchCopy(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchCopy error: %v", err)
+ }
+
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chmod: "444",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+}
+
+func TestDispatchAddChown(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ }
+
+ mybuilder2 := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ }
+
+ // Test bad chown values
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chown=1376:1376"}
+ original := "ADD --chown=1376:1376 /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"
+ if err := add(&mybuilder, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: false,
+ Chown: "6731:6731",
+ },
+ }
+ if reflect.DeepEqual(mybuilder.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to not match %v\n", expectedPendingCopies, mybuilder.PendingCopies)
+ }
+
+ // Test good chown values
+ flagArgs = []string{"--chown=6731:6731"}
+ original = "ADD --chown=6731:6731 /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"
+ if err := add(&mybuilder2, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+ expectedPendingCopies = []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: true,
+ Chown: "6731:6731",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder2.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder2.PendingCopies)
+ }
+}
+
+func TestDispatchAddChmod(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ }
+
+ mybuilder2 := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "alpine",
+ },
+ }
+
+ // Test bad chmod values
+ args := []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager", "."}
+ flagArgs := []string{"--chmod=rwxrwxrwx"}
+ original := "ADD --chmod=rwxrwxrwx /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"
+ err := add(&mybuilder, args, nil, flagArgs, original)
+ chmod := "rwxrwxrwx"
+ convErr := checkChmodConversion(chmod)
+ if err != nil && convErr != nil && err.Error() != convErr.Error() {
+ t.Errorf("Expected chmod conversion error, instead got error: %v", err)
+ }
+ if err == nil || convErr == nil {
+ t.Errorf("Expected conversion error for chmod %s", chmod)
+ }
+
+ // Test good chmod values
+ flagArgs = []string{"--chmod=755"}
+ original = "ADD --chmod=755 /go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"
+ if err := add(&mybuilder2, args, nil, flagArgs, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+ expectedPendingCopies := []Copy{
+ {
+ From: "",
+ Src: []string{"/go/src/github.com/kubernetes-incubator/service-catalog/controller-manager"},
+ Dest: "/root/", // destination must contain a trailing slash
+ Download: true,
+ Chmod: "755",
+ },
+ }
+ if !reflect.DeepEqual(mybuilder2.PendingCopies, expectedPendingCopies) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingCopies, mybuilder2.PendingCopies)
+ }
+}
+
+func TestDispatchRunFlags(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ }
+
+ flags := []string{"--mount=type=bind,target=/foo"}
+ args := []string{"echo \"stuff\""}
+ original := "RUN --mount=type=bind,target=/foo echo \"stuff\""
+
+ if err := run(&mybuilder, args, nil, flags, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+ expectedPendingRuns := []Run{
+ {
+ Shell: true,
+ Args: args,
+ Mounts: []string{"type=bind,target=/foo"},
+ },
+ }
+
+ if !reflect.DeepEqual(mybuilder.PendingRuns, expectedPendingRuns) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingRuns, mybuilder.PendingRuns)
+ }
+}
+
+func TestDispatchNetworkFlags(t *testing.T) {
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ }
+
+ flags := []string{"--network=none"}
+ args := []string{"echo \"stuff\""}
+ original := "RUN --network=none echo \"stuff\""
+
+ if err := run(&mybuilder, args, nil, flags, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+ expectedPendingRuns := []Run{
+ {
+ Shell: true,
+ Args: args,
+ Network: "none",
+ },
+ }
+
+ if !reflect.DeepEqual(mybuilder.PendingRuns, expectedPendingRuns) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingRuns, mybuilder.PendingRuns)
+ }
+}
+
+func TestDispatchRunFlagsWithArgs(t *testing.T) {
+ argsMap := make(map[string]string)
+ allowedArgs := make(map[string]bool)
+ argsMap["TYPE"] = "bind"
+ allowedArgs["TYPE"] = true
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ Args: argsMap,
+ AllowedArgs: allowedArgs,
+ }
+
+ flags := []string{"--mount=type=${TYPE},target=/foo"}
+ args := []string{"echo \"stuff\""}
+ original := "RUN --mount=type=${TYPE},target=/foo echo \"stuff\""
+
+ if err := run(&mybuilder, args, nil, flags, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+ expectedPendingRuns := []Run{
+ {
+ Shell: true,
+ Args: args,
+ Mounts: []string{"type=bind,target=/foo"},
+ },
+ }
+
+ if !reflect.DeepEqual(mybuilder.PendingRuns, expectedPendingRuns) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingRuns, mybuilder.PendingRuns)
+ }
+ // The following run should not resolve the ARG, so type should be empty ("")
+ mybuilder = Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ }
+ if err := run(&mybuilder, args, nil, flags, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+ expectedBadPendingRuns := []Run{
+ {
+ Shell: true,
+ Args: args,
+ Mounts: []string{"type=,target=/foo"},
+ },
+ }
+
+ if !reflect.DeepEqual(mybuilder.PendingRuns, expectedBadPendingRuns) {
+ t.Errorf("Expected %v, to match %v\n", expectedPendingRuns, mybuilder.PendingRuns)
+ }
+}
+
+func TestDispatchFromFlags(t *testing.T) {
+ expectedPlatform := "linux/arm64"
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ }
+
+ flags := []string{"--platform=linux/arm64"}
+ args := []string{""}
+ original := "FROM --platform=linux/arm64 busybox"
+
+ if err := from(&mybuilder, args, nil, flags, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+
+ if mybuilder.Platform != expectedPlatform {
+ t.Errorf("Expected %v, to match %v\n", expectedPlatform, mybuilder.Platform)
+ }
+}
+
+func TestDispatchFromFlagsAndUseBuiltInArgs(t *testing.T) {
+ expectedPlatform := localspec.OS + "/" + localspec.Architecture
+ mybuilder := Builder{
+ RunConfig: docker.Config{
+ WorkingDir: "/root",
+ Cmd: []string{"/bin/sh"},
+ Image: "busybox",
+ },
+ }
+
+ flags := []string{"--platform=$BUILDPLATFORM"}
+ args := []string{""}
+ original := "FROM --platform=$BUILDPLATFORM busybox"
+
+ if err := from(&mybuilder, args, nil, flags, original); err != nil {
+ t.Errorf("dispatchAdd error: %v", err)
+ }
+
+ if mybuilder.Platform != expectedPlatform {
+ t.Errorf("Expected %v, to match %v\n", expectedPlatform, mybuilder.Platform)
+ }
+}
diff --git a/doc.go b/doc.go
new file mode 100644
index 0000000..97028ff
--- /dev/null
+++ b/doc.go
@@ -0,0 +1,6 @@
+// Package imagebuilder uses code from github.com/docker/docker/builder/* to implement
+// a Docker builder that does not create individual layers, but instead creates a
+// single layer.
+//
+// TODO: full windows support
+package imagebuilder
diff --git a/dockerclient/archive.go b/dockerclient/archive.go
new file mode 100644
index 0000000..74a28dd
--- /dev/null
+++ b/dockerclient/archive.go
@@ -0,0 +1,768 @@
+package dockerclient
+
+import (
+ "archive/tar"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/fileutils"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "k8s.io/klog"
+)
+
+var isArchivePath = archive.IsArchivePath
+var dstNeedsToBeDirectoryError = errors.New("copying would overwrite content that was already copied; destination needs to be a directory")
+
+// TransformFileFunc is given a chance to transform an arbitrary input file:
+// it may rewrite the header in place, return replacement data for the file
+// body (signalled by update=true), request that the entry be skipped
+// entirely, or return an error to abort filtering.
+type TransformFileFunc func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error)
+
+// FetchArchiveFunc retrieves an entire second copy of the archive we're
+// processing, so that we can fetch something from it that we discarded
+// earlier. This is expensive, so it is only called when it's needed.
+type FetchArchiveFunc func(pw *io.PipeWriter)
+
+// FilterArchive transforms the provided input archive to a new archive,
+// giving the fn a chance to transform arbitrary files.
+func FilterArchive(r io.Reader, w io.Writer, fn TransformFileFunc) error {
+ tr := tar.NewReader(r)
+ tw := tar.NewWriter(w)
+
+ for {
+ h, err := tr.Next()
+ if err == io.EOF {
+ return tw.Close()
+ }
+ if err != nil {
+ return err
+ }
+
+ var body io.Reader = tr
+ name := h.Name
+ data, ok, skip, err := fn(h, tr)
+ klog.V(6).Infof("Transform %s(0%o) -> %s: data=%t ok=%t skip=%t err=%v", name, h.Mode, h.Name, data != nil, ok, skip, err)
+ if err != nil {
+ return err
+ }
+ if skip {
+ continue
+ }
+ if ok {
+ h.Size = int64(len(data))
+ body = bytes.NewBuffer(data)
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, body); err != nil {
+ return err
+ }
+ }
+}
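+
+// As an illustrative sketch (not part of the upstream code), a caller could
+// pass FilterArchive a transform that strips ownership from every entry; the
+// function name here is hypothetical:
+//
+//	resetOwners := func(h *tar.Header, r io.Reader) ([]byte, bool, bool, error) {
+//		h.Uid, h.Gid = 0, 0           // rewrite the header in place
+//		return nil, false, false, nil // keep the body, keep the entry
+//	}
+//	// err := FilterArchive(in, out, resetOwners)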
+
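+// CreateFileFunc returns the next file to add to a lazily built archive: its
+// header, a reader for its contents, whether more files follow, and any
+// error. A nil header signals the end of the archive.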
+type CreateFileFunc func() (*tar.Header, io.ReadCloser, bool, error)
+
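+// NewLazyArchive streams a tar archive whose entries are produced on demand
+// by fn. A minimal usage sketch (hypothetical values, error handling and
+// imports elided):
+//
+//	rc := NewLazyArchive(func() (*tar.Header, io.ReadCloser, bool, error) {
+//		hdr := &tar.Header{Name: "hello.txt", Mode: 0600, Size: 5}
+//		body := ioutil.NopCloser(strings.NewReader("hello"))
+//		return hdr, body, false, nil // false: this is the only entry
+//	})
+//	defer rc.Close()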
+func NewLazyArchive(fn CreateFileFunc) io.ReadCloser {
+ pr, pw := io.Pipe()
+ tw := tar.NewWriter(pw)
+ go func() {
+ for {
+ h, r, more, err := fn()
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ if h == nil {
+ tw.Flush()
+ pw.Close()
+ return
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ r.Close()
+ pw.CloseWithError(err)
+ return
+ }
+ n, err := io.Copy(tw, &io.LimitedReader{R: r, N: h.Size})
+ r.Close()
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ if n != h.Size {
+ pw.CloseWithError(fmt.Errorf("short read for %s", h.Name))
+ return
+ }
+ if !more {
+ tw.Flush()
+ pw.Close()
+ return
+ }
+ }
+ }()
+ return pr
+}
+
+func archiveFromURL(src, dst, tempDir string, check DirectoryCheck) (io.Reader, io.Closer, error) {
+ // get filename from URL
+ u, err := url.Parse(src)
+ if err != nil {
+ return nil, nil, err
+ }
+ base := path.Base(u.Path)
+ if base == "." {
+ return nil, nil, fmt.Errorf("cannot determine filename from url: %s", u)
+ }
+ resp, err := http.Get(src)
+ if err != nil {
+ return nil, nil, err
+ }
+ archive := NewLazyArchive(func() (*tar.Header, io.ReadCloser, bool, error) {
+ if resp.StatusCode >= 400 {
+ return nil, nil, false, fmt.Errorf("server returned a status code >= 400: %s", resp.Status)
+ }
+
+ header := &tar.Header{
+ Name: sourceToDestinationName(path.Base(u.Path), dst, false),
+ Mode: 0600,
+ }
+ r := resp.Body
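+ // tar headers need the entry size up front, so when the server does not
+ // report a Content-Length, spool the body to a temporary file to measure it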
+ if resp.ContentLength == -1 {
+ f, err := ioutil.TempFile(tempDir, "url")
+ if err != nil {
+ return nil, nil, false, fmt.Errorf("unable to create temporary file for source URL: %v", err)
+ }
+ n, err := io.Copy(f, resp.Body)
+ if err != nil {
+ f.Close()
+ return nil, nil, false, fmt.Errorf("unable to download source URL: %v", err)
+ }
+ if err := f.Close(); err != nil {
+ return nil, nil, false, fmt.Errorf("unable to write source URL: %v", err)
+ }
+ f, err = os.Open(f.Name())
+ if err != nil {
+ return nil, nil, false, fmt.Errorf("unable to open downloaded source URL: %v", err)
+ }
+ r = f
+ header.Size = n
+ } else {
+ header.Size = resp.ContentLength
+ }
+ return header, r, false, nil
+ })
+ return archive, closers{resp.Body.Close, archive.Close}, nil
+}
+
+func archiveFromDisk(directory string, src, dst string, allowDownload bool, excludes []string, check DirectoryCheck) (io.Reader, io.Closer, error) {
+ var err error
+ if filepath.IsAbs(src) {
+ src, err = filepath.Rel(directory, filepath.Join(directory, src))
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ infos, err := CalcCopyInfo(src, directory, true)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // special case when we are archiving a single file at the root
+ if len(infos) == 1 && !infos[0].FileInfo.IsDir() && (infos[0].Path == "." || infos[0].Path == "/") {
+ klog.V(5).Infof("Archiving a file instead of a directory from %s", directory)
+ infos[0].Path = filepath.Base(directory)
+ infos[0].FromDir = false
+ directory = filepath.Dir(directory)
+ }
+
+ options, err := archiveOptionsFor(directory, infos, dst, excludes, allowDownload, check)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe() // the archive we're creating
+
+ includeFiles := options.IncludeFiles
+ var returnedError error
+ go func() {
+ defer pipeWriter.Close()
+ tw := tar.NewWriter(pipeWriter)
+ defer tw.Close()
+ var nonArchives []string
+ for _, includeFile := range includeFiles {
+ if allowDownload && src != "." && src != "/" && isArchivePath(filepath.Join(directory, includeFile)) {
+ // it's an archive -> copy each item to the
+ // archive being written to the pipe writer
+ klog.V(4).Infof("Extracting %s", includeFile)
+ if err := func() error {
+ f, err := os.Open(filepath.Join(directory, includeFile))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ dc, err := archive.DecompressStream(f)
+ if err != nil {
+ return err
+ }
+ defer dc.Close()
+ tr := tar.NewReader(dc)
+ hdr, err := tr.Next()
+ for err == nil {
+ if renamed, ok := options.RebaseNames[includeFile]; ok {
+ hdr.Name = strings.TrimSuffix(renamed, includeFile) + hdr.Name
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Linkname = strings.TrimSuffix(renamed, includeFile) + hdr.Linkname
+ }
+ }
+ tw.WriteHeader(hdr)
+ _, err = io.Copy(tw, tr)
+ if err != nil {
+ break
+ }
+ hdr, err = tr.Next()
+ }
+ if err != nil && err != io.EOF {
+ return err
+ }
+ return nil
+ }(); err != nil {
+ returnedError = err
+ break
+ }
+ continue
+ }
+ nonArchives = append(nonArchives, includeFile)
+ }
+ if len(nonArchives) > 0 && returnedError == nil {
+ // the non-archive items -> add them all to the archive as-is
+ options.IncludeFiles = nonArchives
+ klog.V(4).Infof("Tar of %s %#v", directory, options)
+ rc, err := archive.TarWithOptions(directory, options)
+ if err != nil {
+ returnedError = err
+ return
+ }
+ defer rc.Close()
+ tr := tar.NewReader(rc)
+ hdr, err := tr.Next()
+ for err == nil {
+ tw.WriteHeader(hdr)
+ _, err = io.Copy(tw, tr)
+ if err != nil {
+ break
+ }
+ hdr, err = tr.Next()
+ }
+ if err != nil && err != io.EOF {
+ returnedError = err
+ return
+ }
+ }
+ }()
+
+ // the reader should close the pipe, and also get any error we need to report
+ readWrapper := ioutils.NewReadCloserWrapper(pipeReader, func() error {
+ if err := pipeReader.Close(); err != nil {
+ return err
+ }
+ return returnedError
+ })
+
+ return readWrapper, readWrapper, err
+}
+
+func archiveFromFile(file string, src, dst string, excludes []string, check DirectoryCheck) (io.Reader, io.Closer, error) {
+ var err error
+ if filepath.IsAbs(src) {
+ src, err = filepath.Rel(filepath.Dir(src), src)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ refetch := func(pw *io.PipeWriter) {
+ f, err := os.Open(file)
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ defer f.Close()
+ dc, err := archive.DecompressStream(f)
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ defer dc.Close()
+ _, err = io.Copy(pw, dc)
+ pw.CloseWithError(err)
+ }
+
+ mapper, _, err := newArchiveMapper(src, dst, excludes, false, true, check, refetch, true)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r, err := transformArchive(f, true, mapper.Filter)
+ cc := newCloser(func() error {
+ err := f.Close()
+ if mapper.foundItems == 0 {
+ return fmt.Errorf("%s: %w", src, os.ErrNotExist)
+ }
+ return err
+ })
+ return r, cc, err
+}
+
+func archiveFromContainer(in io.Reader, src, dst string, excludes []string, check DirectoryCheck, refetch FetchArchiveFunc, assumeDstIsDirectory bool) (io.ReadCloser, string, error) {
+ mapper, archiveRoot, err := newArchiveMapper(src, dst, excludes, true, false, check, refetch, assumeDstIsDirectory)
+ if err != nil {
+ return nil, "", err
+ }
+
+ r, err := transformArchive(in, false, mapper.Filter)
+ rc := readCloser{Reader: r, Closer: newCloser(func() error {
+ if mapper.foundItems == 0 {
+ return fmt.Errorf("%s: %w", src, os.ErrNotExist)
+ }
+ return nil
+ })}
+ return rc, archiveRoot, err
+}
+
+func transformArchive(r io.Reader, compressed bool, fn TransformFileFunc) (io.Reader, error) {
+ pr, pw := io.Pipe()
+ go func() {
+ if compressed {
+ in, err := archive.DecompressStream(r)
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ r = in
+ }
+ err := FilterArchive(r, pw, fn)
+ pw.CloseWithError(err)
+ }()
+ return pr, nil
+}
+
+// archivePathMapper returns a function that maps archive entry names onto
+// destination names, covering these source -> destination cases:
+//
+//	*        -> test
+//	a (dir)  -> test
+//	a (file) -> test
+//	a (dir)  -> test/
+//	a (file) -> test/
+func archivePathMapper(src, dst string, isDestDir bool) (fn func(itemCount *int, name string, isDir bool) (string, bool, error)) {
+ srcPattern := filepath.Clean(src)
+ if srcPattern == "." {
+ srcPattern = "*"
+ }
+ pattern := filepath.Base(srcPattern)
+
+ klog.V(6).Infof("creating mapper for srcPattern=%s pattern=%s dst=%s isDestDir=%t", srcPattern, pattern, dst, isDestDir)
+
+ // no wildcards
+ if !containsWildcards(pattern) {
+ return func(itemCount *int, name string, isDir bool) (string, bool, error) {
+ // when extracting from the working directory, Docker prefaces with ./
+ if strings.HasPrefix(name, "."+string(filepath.Separator)) {
+ name = name[2:]
+ }
+ if name == srcPattern {
+ if isDir { // the source is a directory: this directory; skip it
+ return "", false, nil
+ }
+ if isDestDir { // the destination is a directory, put this under it
+ return filepath.Join(dst, filepath.Base(name)), true, nil
+ }
+ // the source is a non-directory: copy to the destination's name
+ if itemCount != nil && *itemCount != 0 { // but we've already written something there
+ return "", false, dstNeedsToBeDirectoryError // tell the caller to start over
+ }
+ return dst, true, nil
+ }
+
+ // source is a directory, this is under it; put this under the destination directory
+ remainder := strings.TrimPrefix(name, srcPattern+string(filepath.Separator))
+ if remainder == name {
+ return "", false, nil
+ }
+ return filepath.Join(dst, remainder), true, nil
+ }
+ }
+
+ // root with pattern
+ prefix := filepath.Dir(srcPattern)
+ if prefix == "." {
+ return func(itemCount *int, name string, isDir bool) (string, bool, error) {
+ // match only on the first segment under the prefix
+ var firstSegment = name
+ if i := strings.Index(name, string(filepath.Separator)); i != -1 {
+ firstSegment = name[:i]
+ }
+ ok, _ := filepath.Match(pattern, firstSegment)
+ if !ok {
+ return "", false, nil
+ }
+ if !isDestDir && !isDir { // the destination is not a directory, put this right there
+ if itemCount != nil && *itemCount != 0 { // but we've already written something there
+ return "", false, dstNeedsToBeDirectoryError // tell the caller to start over
+ }
+ return dst, true, nil
+ }
+ return filepath.Join(dst, name), true, nil
+ }
+ }
+ prefix += string(filepath.Separator)
+
+ // nested with pattern
+ return func(_ *int, name string, isDir bool) (string, bool, error) {
+ remainder := strings.TrimPrefix(name, prefix)
+ if remainder == name {
+ return "", false, nil
+ }
+ // match only on the first segment under the prefix
+ var firstSegment = remainder
+ if i := strings.Index(remainder, string(filepath.Separator)); i != -1 {
+ firstSegment = remainder[:i]
+ }
+ ok, _ := filepath.Match(pattern, firstSegment)
+ if !ok {
+ return "", false, nil
+ }
+ return filepath.Join(dst, remainder), true, nil
+ }
+}
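+
+// For example, with src "subdir" and dst "test" (no wildcards), an entry
+// named "subdir/file" maps to "test/file", while the "subdir" entry itself
+// is skipped because it names the source directory.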
+
+type archiveMapper struct {
+ exclude *fileutils.PatternMatcher
+ rename func(itemCount *int, name string, isDir bool) (string, bool, error)
+ prefix string
+ dst string
+ resetDstMode bool
+ resetOwners bool
+ foundItems int
+ refetch FetchArchiveFunc
+ renameLinks map[string]string
+}
+
+func newArchiveMapper(src, dst string, excludes []string, resetDstMode, resetOwners bool, check DirectoryCheck, refetch FetchArchiveFunc, assumeDstIsDirectory bool) (*archiveMapper, string, error) {
+ ex, err := fileutils.NewPatternMatcher(excludes)
+ if err != nil {
+ return nil, "", err
+ }
+
+ isDestDir := strings.HasSuffix(dst, "/") || path.Base(dst) == "." || strings.HasSuffix(src, "/") || path.Base(src) == "." || assumeDstIsDirectory
+ dst = path.Clean(dst)
+ if !isDestDir && check != nil {
+ isDir, err := check.IsDirectory(dst)
+ if err != nil {
+ return nil, "", err
+ }
+ isDestDir = isDir
+ }
+
+ var prefix string
+ archiveRoot := src
+ srcPattern := "*"
+ switch {
+ case src == "":
+ return nil, "", fmt.Errorf("source may not be empty")
+ case src == ".", src == "/":
+ // no transformation necessary
+ case strings.HasSuffix(src, "/"), strings.HasSuffix(src, "/."):
+ src = path.Clean(src)
+ archiveRoot = src
+ if archiveRoot != "/" && archiveRoot != "." {
+ prefix = path.Base(archiveRoot)
+ }
+ default:
+ src = path.Clean(src)
+ srcPattern = path.Base(src)
+ archiveRoot = path.Dir(src)
+ if archiveRoot != "/" && archiveRoot != "." {
+ prefix = path.Base(archiveRoot)
+ }
+ }
+ if !strings.HasSuffix(archiveRoot, "/") {
+ archiveRoot += "/"
+ }
+
+ mapperFn := archivePathMapper(srcPattern, dst, isDestDir)
+
+ return &archiveMapper{
+ exclude: ex,
+ rename: mapperFn,
+ prefix: prefix,
+ dst: dst,
+ resetDstMode: resetDstMode,
+ resetOwners: resetOwners,
+ refetch: refetch,
+ renameLinks: make(map[string]string),
+ }, archiveRoot, nil
+}
+
+func (m *archiveMapper) Filter(h *tar.Header, r io.Reader) ([]byte, bool, bool, error) {
+ if m.resetOwners {
+ h.Uid, h.Gid = 0, 0
+ }
+ // Trim any leading slash, then the prefix segment (which has no leading or
+ // trailing slashes), then the slash that separated them. Depending on the
+ // source, Docker could return /prefix/ or prefix/.
+ h.Name = strings.TrimPrefix(h.Name, "/")
+ if !strings.HasPrefix(h.Name, m.prefix) {
+ return nil, false, true, nil
+ }
+ h.Name = strings.TrimPrefix(strings.TrimPrefix(h.Name, m.prefix), "/")
+
+ // skip a file if it doesn't match the src
+ isDir := h.Typeflag == tar.TypeDir
+ newName, ok, err := m.rename(&m.foundItems, h.Name, isDir)
+ if err != nil {
+ return nil, false, true, err
+ }
+ if !ok {
+ return nil, false, true, nil
+ }
+ if newName == "." {
+ return nil, false, true, nil
+ }
+ // skip based on excludes
+ if ok, _ := m.exclude.Matches(h.Name); ok {
+ return nil, false, true, nil
+ }
+
+ m.foundItems++
+
+ h.Name = newName
+
+ if m.resetDstMode && isDir && path.Clean(h.Name) == path.Clean(m.dst) {
+ h.Mode = (h.Mode & ^0o777) | 0o755
+ }
+
+ if h.Typeflag == tar.TypeLink {
+ if newTarget, ok := m.renameLinks[h.Linkname]; ok {
+ // we already replaced the original link target, so make this a link to the file we copied
+ klog.V(6).Infof("Replaced link target %s -> %s: ok=%t", h.Linkname, newTarget, ok)
+ h.Linkname = newTarget
+ } else {
+ needReplacement := false
+ // run the link target name through the same mapping the Name
+ // in the target's entry would have gotten
+ linkName := strings.TrimPrefix(h.Linkname, "/")
+ if !strings.HasPrefix(linkName, m.prefix) {
+ // the link target didn't start with the prefix, so it wasn't passed along
+ needReplacement = true
+ }
+ var newTarget string
+ if !needReplacement {
+ linkName = strings.TrimPrefix(strings.TrimPrefix(linkName, m.prefix), "/")
+ var ok bool
+ if newTarget, ok, err = m.rename(nil, linkName, false); err != nil {
+ return nil, false, true, err
+ }
+ if !ok || newTarget == "." {
+ // the link target wasn't passed along
+ needReplacement = true
+ }
+ }
+ if !needReplacement {
+ if ok, _ := m.exclude.Matches(linkName); ok {
+ // link target was skipped based on excludes
+ needReplacement = true
+ }
+ }
+ if !needReplacement {
+ // the link target was passed along, everything's fine
+ klog.V(6).Infof("Transform link target %s -> %s: ok=%t skip=%t", h.Linkname, newTarget, ok, true)
+ h.Linkname = newTarget
+ } else {
+ // the link target wasn't passed along, splice it back in as this file
+ if m.refetch == nil {
+ return nil, false, true, fmt.Errorf("need to create %q as a hard link to %q, but did not copy %q", h.Name, h.Linkname, h.Linkname)
+ }
+ pr, pw := io.Pipe()
+ go m.refetch(pw)
+ tr2 := tar.NewReader(pr)
+ rehdr, err := tr2.Next()
+ for err == nil && rehdr.Name != h.Linkname {
+ rehdr, err = tr2.Next()
+ }
+ if err != nil {
+ pr.Close()
+ return nil, false, true, fmt.Errorf("needed to create %q as a hard link to %q, but got error refetching %q: %v", h.Name, h.Linkname, h.Linkname, err)
+ }
+ buf, err := ioutil.ReadAll(pr)
+ pr.Close()
+ if err != nil {
+ return nil, false, true, fmt.Errorf("needed to create %q as a hard link to %q, but got error refetching contents of %q: %v", h.Name, h.Linkname, h.Linkname, err)
+ }
+ m.renameLinks[h.Linkname] = h.Name
+ h.Typeflag = tar.TypeReg
+ h.Size, h.Mode = rehdr.Size, rehdr.Mode
+ h.Uid, h.Gid = rehdr.Uid, rehdr.Gid
+ h.Uname, h.Gname = rehdr.Uname, rehdr.Gname
+ h.ModTime, h.AccessTime, h.ChangeTime = rehdr.ModTime, rehdr.AccessTime, rehdr.ChangeTime
+ h.Xattrs = nil
+ for k, v := range rehdr.Xattrs {
+ if h.Xattrs == nil {
+ h.Xattrs = make(map[string]string)
+ }
+ h.Xattrs[k] = v
+ }
+ klog.V(6).Infof("Transform link %s -> reg %s", h.Linkname, h.Name)
+ h.Linkname = ""
+ return buf, true, false, nil
+ }
+ }
+ }
+
+ // include all files
+ return nil, false, false, nil
+}
+
+func archiveOptionsFor(directory string, infos []CopyInfo, dst string, excludes []string, allowDownload bool, check DirectoryCheck) (*archive.TarOptions, error) {
+ dst = trimLeadingPath(dst)
+ dstIsDir := strings.HasSuffix(dst, "/") || dst == "." || dst == "/" || strings.HasSuffix(dst, "/.")
+ dst = trimTrailingSlash(dst)
+ dstIsRoot := dst == "." || dst == "/"
+
+ if !dstIsDir && check != nil {
+ isDir, err := check.IsDirectory(dst)
+ if err != nil {
+ return nil, fmt.Errorf("unable to check whether %s is a directory: %v", dst, err)
+ }
+ dstIsDir = isDir
+ }
+
+ options := &archive.TarOptions{
+ ChownOpts: &idtools.IDPair{UID: 0, GID: 0},
+ }
+
+ pm, err := fileutils.NewPatternMatcher(excludes)
+ if err != nil {
+ return nil, err
+ }
+
+ if !dstIsDir {
+ for _, info := range infos {
+ if ok, _ := pm.Matches(info.Path); ok {
+ continue
+ }
+ infoPath := info.Path
+ if directory != "" {
+ infoPath = filepath.Join(directory, infoPath)
+ }
+ if allowDownload && isArchivePath(infoPath) {
+ dstIsDir = true
+ break
+ }
+ }
+ }
+
+ for _, info := range infos {
+ if ok, _ := pm.Matches(info.Path); ok {
+ continue
+ }
+
+ srcIsDir := strings.HasSuffix(info.Path, "/") || info.Path == "." || info.Path == "/" || strings.HasSuffix(info.Path, "/.")
+ infoPath := trimTrailingSlash(info.Path)
+
+ options.IncludeFiles = append(options.IncludeFiles, infoPath)
+ if len(dst) == 0 {
+ continue
+ }
+ if options.RebaseNames == nil {
+ options.RebaseNames = make(map[string]string)
+ }
+
+ klog.V(6).Infof("len=%d info.FromDir=%t info.IsDir=%t dstIsRoot=%t dstIsDir=%t srcIsDir=%t", len(infos), info.FromDir, info.IsDir(), dstIsRoot, dstIsDir, srcIsDir)
+ switch {
+ case len(infos) > 1 && dstIsRoot:
+ // copying multiple things into root, no rename necessary ([Dockerfile, dir] -> [Dockerfile, dir])
+ case len(infos) > 1:
+ // put each input into the target, which is assumed to be a directory ([Dockerfile, dir] -> [a/Dockerfile, a/dir])
+ options.RebaseNames[infoPath] = path.Join(dst, path.Base(infoPath))
+ case info.FileInfo.IsDir():
+ // mapping a directory to a destination, explicit or not ([dir] -> [a])
+ options.RebaseNames[infoPath] = dst
+ case info.FromDir:
+ // this is a file that was part of an explicit directory request, no transformation
+ options.RebaseNames[infoPath] = path.Join(dst, path.Base(infoPath))
+ case dstIsDir:
+ // mapping what is probably a file to a non-root directory ([Dockerfile] -> [dir/Dockerfile])
+ options.RebaseNames[infoPath] = path.Join(dst, path.Base(infoPath))
+ default:
+ // a single file mapped to another single file ([Dockerfile] -> [Dockerfile.2])
+ options.RebaseNames[infoPath] = dst
+ }
+ }
+
+ options.ExcludePatterns = excludes
+ return options, nil
+}
+
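+// sourceToDestinationName computes the in-archive name for a source file
+// copied to dst: for example, src "file" with dst "dir/" yields "dir/file",
+// while dst "renamed" yields "renamed".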
+func sourceToDestinationName(src, dst string, forceDir bool) string {
+ switch {
+ case forceDir, strings.HasSuffix(dst, "/"), path.Base(dst) == ".":
+ return path.Join(dst, src)
+ default:
+ return dst
+ }
+}
+
+// logArchiveOutput prints log info about the provided tar file as it is streamed. If an
+// error occurs the remainder of the pipe is read to prevent blocking.
+func logArchiveOutput(r io.Reader, prefix string) {
+ pr, pw := io.Pipe()
+ r = ioutil.NopCloser(io.TeeReader(r, pw))
+ go func() {
+ err := func() error {
+ tr := tar.NewReader(pr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ return err
+ }
+ klog.Infof("%s %s (%d %s)", prefix, h.Name, h.Size, h.FileInfo().Mode())
+ if _, err := io.Copy(ioutil.Discard, tr); err != nil {
+ return err
+ }
+ }
+ }()
+ if err != io.EOF {
+ klog.Infof("%s: unable to log archive output: %v", prefix, err)
+ io.Copy(ioutil.Discard, pr)
+ }
+ }()
+}
+
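+// closer adapts a plain close function to the io.Closer interface.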
+type closer struct {
+ closefn func() error
+}
+
+func newCloser(closeFunction func() error) *closer {
+ return &closer{closefn: closeFunction}
+}
+
+func (r *closer) Close() error {
+ return r.closefn()
+}
+
+type readCloser struct {
+ io.Reader
+ io.Closer
+}
diff --git a/dockerclient/archive_test.go b/dockerclient/archive_test.go
new file mode 100644
index 0000000..e5cb99d
--- /dev/null
+++ b/dockerclient/archive_test.go
@@ -0,0 +1,535 @@
+package dockerclient
+
+import (
+ "archive/tar"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "testing"
+
+ "github.com/docker/docker/pkg/archive"
+)
+
+type testDirectoryCheck map[string]bool
+
+func (c testDirectoryCheck) IsDirectory(path string) (bool, error) {
+ if c == nil {
+ return false, nil
+ }
+
+ isDir, ok := c[path]
+ if !ok {
+ return false, fmt.Errorf("no path defined for %s", path)
+ }
+ return isDir, nil
+}
+
+type archiveGenerator struct {
+ Headers []*tar.Header
+}
+
+func newArchiveGenerator() *archiveGenerator {
+ return &archiveGenerator{}
+}
+
+func typeflagAsString(b byte) string {
+ switch b {
+ case tar.TypeDir:
+ return "dir"
+ case tar.TypeReg:
+ return "reg"
+ case tar.TypeRegA:
+ return "rega"
+ default:
+ return fmt.Sprintf("%d", b)
+ }
+}
+
+func (g *archiveGenerator) String() string {
+ s := "["
+ for i := range g.Headers {
+ if i > 0 {
+ s += ", "
+ }
+ s += fmt.Sprintf("{%q: %v}", g.Headers[i].Name, typeflagAsString(g.Headers[i].Typeflag))
+ }
+ s += "]"
+ return s
+}
+
+func (g *archiveGenerator) File(name string) *archiveGenerator {
+ g.Headers = append(g.Headers, &tar.Header{Name: name, Typeflag: tar.TypeReg, Size: 1})
+ return g
+}
+
+func (g *archiveGenerator) Dir(name string) *archiveGenerator {
+ g.Headers = append(g.Headers, &tar.Header{Name: name, Typeflag: tar.TypeDir})
+ return g
+}
+
+func (g *archiveGenerator) Reader() io.Reader {
+ pr, pw := io.Pipe()
+ go func() {
+ err := func() error {
+ w := tar.NewWriter(pw)
+ for _, h := range g.Headers {
+ if err := w.WriteHeader(h); err != nil {
+ return err
+ }
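+ // directory entries carry no body; only file entries are given
+ // h.Size bytes of payload below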
+ if h.Typeflag&tar.TypeDir == tar.TypeDir {
+ continue
+ }
+ for i := int64(0); i < h.Size; i++ {
+ if _, err := w.Write([]byte{byte(i)}); err != nil {
+ return err
+ }
+ }
+ }
+ return w.Close()
+ }()
+ pw.CloseWithError(err)
+ }()
+ return pr
+}
+
+func Test_archiveFromFile(t *testing.T) {
+ f, err := ioutil.TempFile("", "test-tar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ rc, err := archive.TarWithOptions("testdata/dir", &archive.TarOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := io.Copy(f, rc); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(f.Name())
+
+ testArchive := f.Name()
+ testCases := []struct {
+ file string
+ gen *archiveGenerator
+ src string
+ closeErr error
+ dst string
+ excludes []string
+ expect []string
+ check map[string]bool
+ }{
+ {
+ file: testArchive,
+ src: "/*",
+ dst: "test",
+ expect: []string{
+ "test/Dockerfile",
+ "test/file",
+ "test/subdir",
+ "test/subdir/file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: ".",
+ dst: "test",
+ expect: []string{
+ "test/Dockerfile",
+ "test/file",
+ "test/subdir",
+ "test/subdir/file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: "fil?",
+ dst: "test",
+ expect: []string{
+ "test/file",
+ },
+ },
+ {
+ file: testArchive,
+ src: "fil?",
+ dst: "",
+ expect: []string{
+ "file",
+ },
+ },
+ {
+ file: testArchive,
+ src: "subdir",
+ dst: "",
+ expect: []string{
+ "file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: "subdir/",
+ dst: "",
+ expect: []string{
+ "file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: "subdir/",
+ dst: "test/",
+ expect: []string{
+ "test",
+ "test/file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: "subdir/file?",
+ dst: "test/",
+ expect: []string{
+ "test/file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: "subdi?",
+ dst: "test",
+ expect: []string{
+ "test/subdir",
+ "test/subdir/file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: "subdi?",
+ dst: "test/",
+ expect: []string{
+ "test/subdir",
+ "test/subdir/file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: "subdi?",
+ dst: "test/",
+ excludes: []string{"**/file*"},
+ expect: []string{
+ "test/subdir",
+ },
+ },
+ {
+ file: testArchive,
+ src: ".",
+ dst: "",
+ excludes: []string{"unknown"},
+ expect: []string{
+ "Dockerfile",
+ "file",
+ "subdir",
+ "subdir/file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: ".",
+ dst: "",
+ excludes: []string{"subdir"},
+ expect: []string{
+ "Dockerfile",
+ "file",
+ },
+ },
+ {
+ file: testArchive,
+ src: ".",
+ dst: "",
+ excludes: []string{"file"},
+ expect: []string{
+ "Dockerfile",
+ "subdir",
+ "subdir/file2",
+ },
+ },
+ {
+ file: testArchive,
+ src: ".",
+ dst: "",
+ excludes: []string{"*/file2"},
+ expect: []string{
+ "Dockerfile",
+ "file",
+ "subdir",
+ },
+ },
+ {
+ file: testArchive,
+ src: "subdir/no-such-file",
+ closeErr: os.ErrNotExist,
+ },
+ }
+ for i := range testCases {
+ testCase := testCases[i]
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ r, c, err := archiveFromFile(
+ testCase.file,
+ testCase.src,
+ testCase.dst,
+ testCase.excludes,
+ testDirectoryCheck(testCase.check),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ tr := tar.NewReader(r)
+ var found []string
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ t.Fatal(err)
+ }
+ found = append(found, h.Name)
+ }
+ closeErr := c.Close()
+ if !errors.Is(closeErr, testCase.closeErr) {
+ t.Fatalf("expected error %v, got %v", testCase.closeErr, closeErr)
+ }
+ sort.Strings(found)
+ if !reflect.DeepEqual(testCase.expect, found) {
+ t.Errorf("unexpected files:\n%v\n%v", testCase.expect, found)
+ }
+ })
+ }
+}
+
+func Test_archiveFromContainer(t *testing.T) {
+ testCases := []struct {
+ gen *archiveGenerator
+ src string
+ closeErr error
+ dst string
+ excludes []string
+ expect []string
+ path string
+ check map[string]bool
+ }{
+ {
+ gen: newArchiveGenerator().File("file").Dir("test").File("test/file2"),
+ src: "/*",
+ dst: "test",
+ check: map[string]bool{"test": true},
+ path: "/",
+ expect: []string{
+ "test/file",
+ "test/test",
+ "test/test/file2",
+ },
+ },
+ {
+ gen: newArchiveGenerator().File("file").Dir("test").File("test/file2"),
+ src: "/",
+ dst: "test",
+ path: "/",
+ expect: []string{
+ "test/file",
+ "test/test",
+ "test/test/file2",
+ },
+ },
+ {
+ gen: newArchiveGenerator().File("file").Dir("test").File("test/file2"),
+ src: ".",
+ dst: "test",
+ path: ".",
+ expect: []string{
+ "test/file",
+ "test/test",
+ "test/test/file2",
+ },
+ },
+ {
+ gen: newArchiveGenerator().File("file").Dir("test").File("test/file2"),
+ src: ".",
+ dst: "test/",
+ path: ".",
+ expect: []string{
+ "test/file",
+ "test/test",
+ "test/test/file2",
+ },
+ },
+ {
+ gen: newArchiveGenerator().File("file").Dir("test").File("test/file2"),
+ src: ".",
+ dst: "/test",
+ path: ".",
+ expect: []string{
+ "/test/file",
+ "/test/test",
+ "/test/test/file2",
+ },
+ },
+ {
+ gen: newArchiveGenerator().File("file").Dir("test").File("test/file2"),
+ src: ".",
+ dst: "/test/",
+ path: ".",
+ expect: []string{
+ "/test/file",
+ "/test/test",
+ "/test/test/file2",
+ },
+ },
+ {
+ gen: newArchiveGenerator().File("b/file").Dir("b/test").File("b/test/file2"),
+ src: "/a/b/",
+ dst: "/b",
+ path: "/a/b",
+ expect: []string{
+ "/b/file",
+ "/b/test",
+ "/b/test/file2",
+ },
+ },
+ {
+ gen: newArchiveGenerator().File("/b/file").Dir("/b/test").File("/b/test/file2"),
+ src: "/a/b/*",
+ dst: "/b",
+ check: map[string]bool{"/b": true},
+ path: "/a/b",
+ expect: []string{
+ "/b/file",
+ "/b/test",
+ "/b/test/file2",
+ },
+ },
+
+ // DownloadFromContainer returns tar archive paths prefixed with a slash when
+ // the base directory is the root
+ {
+ gen: newArchiveGenerator().File("/a").Dir("/b").File("/b/1"),
+ src: "/a",
+ dst: "/",
+ path: "/",
+ expect: []string{
+ "/a",
+ },
+ },
+ {
+ gen: newArchiveGenerator().File("/a").Dir("/b").File("/b/1"),
+ src: "/a",
+ dst: "/a",
+ path: "/",
+ expect: []string{
+ "/a",
+ },
+ },
+ {
+ gen: newArchiveGenerator().Dir("b/").File("b/1").File("b/2"),
+ src: "/a/b/",
+ dst: "/b/",
+ path: "/a/b",
+ expect: []string{
+ "/b",
+ "/b/1",
+ "/b/2",
+ },
+ },
+ {
+ gen: newArchiveGenerator().Dir("").File("b"),
+ src: "/a/b",
+ closeErr: os.ErrNotExist,
+ dst: "/a",
+ path: "/a",
+ expect: nil,
+ },
+ {
+ gen: newArchiveGenerator().File("b"),
+ src: "/a/b",
+ closeErr: os.ErrNotExist,
+ dst: "/a",
+ check: map[string]bool{"/a": true},
+ path: "/a",
+ expect: nil,
+ },
+ {
+ gen: newArchiveGenerator().Dir("a/").File("a/b"),
+ src: "/a/b",
+ dst: "/a",
+ path: "/a",
+ expect: []string{
+ "/a",
+ },
+ },
+ {
+ gen: newArchiveGenerator().Dir("./a").File("./a/b"),
+ src: "a",
+ dst: "/a",
+ path: ".",
+ expect: []string{"/a/b"},
+ },
+ {
+ gen: newArchiveGenerator().Dir("/a").File("/a/b"),
+ src: "/a/c",
+ path: "/a",
+ closeErr: os.ErrNotExist,
+ },
+ {
+ gen: newArchiveGenerator().Dir("/a").File("/a/b"),
+ src: "/a/c*",
+ path: "/a",
+ closeErr: os.ErrNotExist,
+ },
+ }
+ for i := range testCases {
+ testCase := testCases[i]
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ rc, path, err := archiveFromContainer(
+ testCase.gen.Reader(),
+ testCase.src,
+ testCase.dst,
+ testCase.excludes,
+ testDirectoryCheck(testCase.check),
+ func(pw *io.PipeWriter) {
+ _, err := io.Copy(pw, testCase.gen.Reader())
+ pw.CloseWithError(err)
+ },
+ false,
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ if filepath.Clean(path) != testCase.path {
+ t.Errorf("unexpected path for root of archive: %q != expected value %q", filepath.Clean(path), testCase.path)
+ }
+ tr := tar.NewReader(rc)
+ var found []string
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ t.Fatal(err)
+ }
+ found = append(found, h.Name)
+ }
+ closeErr := rc.Close()
+ if !errors.Is(closeErr, testCase.closeErr) {
+ t.Fatalf("expected error %v, got %v", testCase.closeErr, closeErr)
+ }
+ sort.Strings(found)
+ if !reflect.DeepEqual(testCase.expect, found) {
+ t.Errorf("from %q to %q with content: %v\nunexpected files:\nexpected: %v\nfound: %v", testCase.src, testCase.dst, testCase.gen, testCase.expect, found)
+ }
+ })
+ }
+}
diff --git a/dockerclient/client.go b/dockerclient/client.go
new file mode 100644
index 0000000..87dbbe9
--- /dev/null
+++ b/dockerclient/client.go
@@ -0,0 +1,1526 @@
+package dockerclient
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+
+ dockertypes "github.com/docker/docker/api/types"
+ docker "github.com/fsouza/go-dockerclient"
+ "k8s.io/klog"
+
+ "github.com/openshift/imagebuilder"
+ "github.com/openshift/imagebuilder/dockerfile/parser"
+ "github.com/openshift/imagebuilder/imageprogress"
+)
+
+// NewClientFromEnv is exposed to simplify getting a client when vendoring this library.
+func NewClientFromEnv() (*docker.Client, error) {
+ return docker.NewClientFromEnv()
+}
+
+// Mount represents a bind from a path on the current system to a path inside the build container
+type Mount struct {
+ SourcePath string
+ DestinationPath string
+}
+
+// ClientExecutor can run Docker builds from a Docker client.
+type ClientExecutor struct {
+ // Name is an optional name for this executor.
+ Name string
+ // Named is a map of other named executors.
+ Named map[string]*ClientExecutor
+
+ // TempDir is the temporary directory to use for storing file
+ // contents. If unset, the default temporary directory for the
+ // system will be used.
+ TempDir string
+ // Client is a client to a Docker daemon.
+ Client *docker.Client
+ // Directory is the context directory to build from, will use
+ // the current working directory if not set. Ignored if
+ // ContextArchive is set.
+ Directory string
+ // ContextArchive is a compressed or uncompressed tar archive that
+ // should be used as the build context.
+ ContextArchive string
+ // Excludes is a list of file patterns that should be excluded
+ // from the context. It will be set to the contents of the
+ // .dockerignore file if nil.
+ Excludes []string
+ // Tag is an optional value to tag the resulting built image.
+ Tag string
+ // AdditionalTags is an optional array of other tags to apply
+ // to the image.
+ AdditionalTags []string
+ // AllowPull when set will pull images that are not present on
+ // the daemon.
+ AllowPull bool
+ // IgnoreUnrecognizedInstructions, if true, allows instructions
+ // that are not yet supported to be ignored (they will still be printed)
+ IgnoreUnrecognizedInstructions bool
+ // StrictVolumeOwnership if true will fail the build if a RUN
+ // command follows a VOLUME command, since this client cannot
+ // guarantee that the restored contents of the VOLUME directory
+ // will have the right permissions.
+ StrictVolumeOwnership bool
+ // TransientMounts are a set of mounts from outside the build
+ // to the inside that will not be part of the final image. Any
+ // content created inside the mount's DestinationPath will be
+ // omitted from the final image.
+ TransientMounts []Mount
+
+ // The path within the container to perform the transient mount.
+ ContainerTransientMount string
+
+ // The streams used for canonical output.
+ Out, ErrOut io.Writer
+
+ // Container is optional and can be set to a container to use as
+ // the execution environment for a build.
+ Container *docker.Container
+ // Command, if set, will be used as the entrypoint for the new
+ // container. This is ignored if Container is set.
+ Command []string
+ // Image is optional and may be set to control which image is used
+ // as a base for this build. Otherwise the FROM value from the
+ // Dockerfile is read (will be pulled if not locally present).
+ Image *docker.Image
+ // Committed is optional and tracks the temporary image, if one was
+ // created by committing the container when its stage ended.
+ Committed *docker.Image
+
+ // AuthFn will handle authenticating any docker pulls if Image
+ // is set to nil.
+ AuthFn func(name string) ([]dockertypes.AuthConfig, bool)
+ // HostConfig is used to start the container (if necessary).
+ HostConfig *docker.HostConfig
+ // LogFn is an optional command to log information to the end user
+ LogFn func(format string, args ...interface{})
+
+ // Deferred is a list of operations that must be cleaned up at
+ // the end of execution. Use Release() to invoke all of these.
+ Deferred []func() error
+
+ // Volumes handles saving and restoring volumes after RUN
+ // commands are executed.
+ Volumes *ContainerVolumeTracker
+}
+
+// NoAuthFn can be used for AuthFn when no authentication is required in Docker.
+func NoAuthFn(string) ([]dockertypes.AuthConfig, bool) {
+ return nil, false
+}
+
+// NewClientExecutor creates a client executor.
+func NewClientExecutor(client *docker.Client) *ClientExecutor {
+ return &ClientExecutor{
+ Client: client,
+ LogFn: func(string, ...interface{}) {},
+
+ ContainerTransientMount: "/.imagebuilder-transient-mount",
+ }
+}
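+
+// A minimal usage sketch (assuming a reachable Docker daemon; error handling
+// elided, and the context directory is hypothetical):
+//
+//	client, _ := NewClientFromEnv()
+//	e := NewClientExecutor(client)
+//	e.Out, e.ErrOut = os.Stdout, os.Stderr
+//	e.Directory = "/path/to/context"
+//	defer e.Release()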
+
+// DefaultExcludes reads the default list of excluded file patterns from the
+// context directory's .containerignore file, falling back to the context
+// directory's .dockerignore file if the former does not exist.
+func (e *ClientExecutor) DefaultExcludes() error {
+ var err error
+ e.Excludes, err = imagebuilder.ParseDockerignore(e.Directory)
+ return err
+}
+
+// WithName creates a new child executor that will be used whenever a COPY statement
+// uses --from=NAME or --from=POSITION.
+func (e *ClientExecutor) WithName(name string, position int) *ClientExecutor {
+ if e.Named == nil {
+ e.Named = make(map[string]*ClientExecutor)
+ }
+ e.Deferred = append([]func() error{func() error {
+ stage, ok := e.Named[strconv.Itoa(position)]
+ if !ok {
+ return fmt.Errorf("error finding stage %d", position)
+ }
+ errs := stage.Release()
+ if len(errs) > 0 {
+ return fmt.Errorf("%v", errs)
+ }
+ return nil
+ }}, e.Deferred...)
+
+ copied := *e
+ copied.Name = name
+ copied.Container = nil
+ copied.Deferred = nil
+ copied.Image = nil
+ copied.Volumes = nil
+ copied.Committed = nil
+
+ child := &copied
+ e.Named[name] = child
+ e.Named[strconv.Itoa(position)] = child
+ return child
+}
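+
+// For a Dockerfile stage such as "FROM busybox AS builder" at position 0, a
+// later "COPY --from=builder" (or "--from=0") resolves through either key
+// registered here, e.g.:
+//
+//	child := e.WithName("builder", 0)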
+
+// Stages executes all of the provided stages, starting from the base image. It returns the executor of the last stage
+// or an error if a stage fails.
+func (e *ClientExecutor) Stages(b *imagebuilder.Builder, stages imagebuilder.Stages, from string) (*ClientExecutor, error) {
+ var stageExecutor *ClientExecutor
+ for i, stage := range stages {
+ stageExecutor = e.WithName(stage.Name, stage.Position)
+
+ var stageFrom string
+ if i == 0 {
+ stageFrom = from
+ } else {
+ from, err := b.From(stage.Node)
+ if err != nil {
+ return nil, err
+ }
+ if prereq := e.Named[from]; prereq != nil {
+ b, ok := stages.ByName(from)
+ if !ok {
+ return nil, fmt.Errorf("error: Unable to find stage %s builder", from)
+ }
+ if prereq.Committed == nil {
+ config := b.Builder.Config()
+ if prereq.Container.State.Running {
+ klog.V(4).Infof("Stopping container %s ...", prereq.Container.ID)
+ if err := e.Client.StopContainer(prereq.Container.ID, 0); err != nil {
+ return nil, fmt.Errorf("unable to stop build container: %v", err)
+ }
+ prereq.Container.State.Running = false
+ // Starting the container may perform escaping of args, so to be consistent
+ // we also set that here
+ config.ArgsEscaped = true
+ }
+ image, err := e.Client.CommitContainer(docker.CommitContainerOptions{
+ Container: prereq.Container.ID,
+ Run: config,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("unable to commit stage %s container: %v", from, err)
+ }
+ klog.V(4).Infof("Committed %s to %s as basis for image %q: %#v", prereq.Container.ID, image.ID, from, config)
+ // deleting this image will fail with an "image has dependent child images" error
+ // if it ends up being an ancestor of the final image, so don't bother returning
+ // errors from this specific removeImage() call
+ prereq.Deferred = append([]func() error{func() error { e.removeImage(image.ID); return nil }}, prereq.Deferred...)
+ prereq.Committed = image
+ }
+ klog.V(4).Infof("Using image %s based on previous stage %s as image", prereq.Committed.ID, from)
+ from = prereq.Committed.ID
+ }
+ stageFrom = from
+ }
+
+ if err := stageExecutor.Prepare(stage.Builder, stage.Node, stageFrom); err != nil {
+ return nil, err
+ }
+ if err := stageExecutor.Execute(stage.Builder, stage.Node); err != nil {
+ return nil, err
+ }
+
+ // remember the outcome of the stage execution on the container config in case
+ // another stage needs to access incremental state
+ stageExecutor.Container.Config = stage.Builder.Config()
+ }
+ return stageExecutor, nil
+}
+
+// Build is a helper method to perform a Docker build against the
+// provided Docker client. It will load the image if not specified,
+// create a container if one does not already exist, and start a
+// container if the Dockerfile contains RUN commands. It will cleanup
+// any containers it creates directly, and set the e.Committed.ID field
+// to the generated image.
+func (e *ClientExecutor) Build(b *imagebuilder.Builder, node *parser.Node, from string) error {
+ defer e.Release()
+ if err := e.Prepare(b, node, from); err != nil {
+ return err
+ }
+ if err := e.Execute(b, node); err != nil {
+ return err
+ }
+ return e.Commit(b)
+}
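+
+// A minimal sketch of driving Build directly (hypothetical inputs; node must
+// already hold the parsed Dockerfile):
+//
+//	b := imagebuilder.NewBuilder(nil)
+//	if err := e.Build(b, node, ""); err != nil {
+//		// handle the failed build
+//	}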
+
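+// Prepare resolves and loads the base image, applies its configuration to
+// the builder, and creates the build container, starting it only when the
+// Dockerfile requires a running container.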
+func (e *ClientExecutor) Prepare(b *imagebuilder.Builder, node *parser.Node, from string) error {
+ var err error
+
+ // identify the base image
+ if len(from) == 0 {
+ from, err = b.From(node)
+ if err != nil {
+ return err
+ }
+ }
+
+ // load the image
+ if e.Image == nil {
+ if from == imagebuilder.NoBaseImageSpecifier {
+ if runtime.GOOS == "windows" {
+ return fmt.Errorf("building from scratch images is not supported")
+ }
+ from, err = e.CreateScratchImage()
+ if err != nil {
+ return fmt.Errorf("unable to create a scratch image for this build: %v", err)
+ }
+ e.Deferred = append([]func() error{func() error { return e.removeImage(from) }}, e.Deferred...)
+ }
+ klog.V(4).Infof("Retrieving image %q", from)
+ e.Image, err = e.LoadImageWithPlatform(from, b.Platform)
+ if err != nil {
+ return err
+ }
+ }
+
+ // update the builder with any information from the image, including ONBUILD
+ // statements
+ if err := b.FromImage(e.Image, node); err != nil {
+ return err
+ }
+
+ b.RunConfig.Image = from
+ if len(e.Name) > 0 {
+ e.LogFn("FROM %s as %s", from, e.Name)
+ } else {
+ e.LogFn("FROM %s", from)
+ }
+ klog.V(4).Infof("step: FROM %s as %s", from, e.Name)
+
+ b.Excludes = e.Excludes
+
+ var sharedMount string
+
+ defaultShell := b.RunConfig.Shell
+ if len(defaultShell) == 0 {
+ defaultShell = []string{"/bin/sh", "-c"}
+ }
+
+ // create a container to execute in, if necessary
+ mustStart := b.RequiresStart(node)
+ if e.Container == nil {
+ opts := docker.CreateContainerOptions{
+ Config: &docker.Config{
+ Image: from,
+ },
+ HostConfig: &docker.HostConfig{},
+ }
+ if e.HostConfig != nil {
+ opts.HostConfig = e.HostConfig
+ }
+ originalBinds := opts.HostConfig.Binds
+
+ if mustStart {
+ // Transient mounts only make sense on images that will be running processes
+ if len(e.TransientMounts) > 0 {
+ volumeName, err := randSeq(imageSafeCharacters, 24)
+ if err != nil {
+ return err
+ }
+ v, err := e.Client.CreateVolume(docker.CreateVolumeOptions{Name: volumeName})
+ if err != nil {
+ return fmt.Errorf("unable to create volume to mount secrets: %v", err)
+ }
+ e.Deferred = append([]func() error{func() error { return e.Client.RemoveVolume(volumeName) }}, e.Deferred...)
+ sharedMount = v.Mountpoint
+ opts.HostConfig.Binds = append(opts.HostConfig.Binds, volumeName+":"+e.ContainerTransientMount)
+ }
+
+ // TODO: windows support
+ if len(e.Command) > 0 {
+ opts.Config.Cmd = e.Command
+ opts.Config.Entrypoint = nil
+ } else {
+ // TODO: replace me with a better default command
+ opts.Config.Cmd = []string{fmt.Sprintf("%s\nsleep 86400", "#(imagebuilder)")}
+ opts.Config.Entrypoint = append([]string{}, defaultShell...)
+ }
+ }
+
+ if len(opts.Config.Cmd) == 0 {
+ opts.Config.Entrypoint = append(append([]string{}, defaultShell...), "#(imagebuilder)")
+ }
+
+ // copy any source content into the temporary mount path
+ if mustStart && len(e.TransientMounts) > 0 {
+ if len(sharedMount) == 0 {
+ return fmt.Errorf("no mount point available for temporary mounts")
+ }
+ binds, err := e.PopulateTransientMounts(opts, e.TransientMounts, sharedMount)
+ if err != nil {
+ return err
+ }
+ opts.HostConfig.Binds = append(originalBinds, binds...)
+ }
+
+ klog.V(4).Infof("Creating container with %#v %#v", opts.Config, opts.HostConfig)
+ container, err := e.Client.CreateContainer(opts)
+ if err != nil {
+ return fmt.Errorf("unable to create build container: %v", err)
+ }
+ e.Container = container
+ e.Deferred = append([]func() error{func() error { return e.removeContainer(container.ID) }}, e.Deferred...)
+ }
+
+ // TODO: lazy start
+ if mustStart && !e.Container.State.Running {
+ if err := e.Client.StartContainer(e.Container.ID, nil); err != nil {
+ return fmt.Errorf("unable to start build container: %v", err)
+ }
+ e.Container.State.Running = true
+ // TODO: is this racy? may have to loop wait in the actual run step
+ }
+ return nil
+}
+
+// Execute performs all of the provided steps against the initialized container. May be
+// invoked multiple times for a given container.
+func (e *ClientExecutor) Execute(b *imagebuilder.Builder, node *parser.Node) error {
+ for i, child := range node.Children {
+ step := b.Step()
+ if err := step.Resolve(child); err != nil {
+ return err
+ }
+ klog.V(4).Infof("step: %s", step.Original)
+ if e.LogFn != nil {
+ // original may have unescaped %, so perform fmt escaping
+ e.LogFn(strings.Replace(step.Original, "%", "%%", -1))
+ }
+ noRunsRemaining := !b.RequiresStart(&parser.Node{Children: node.Children[i+1:]})
+
+ if err := b.Run(step, e, noRunsRemaining); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Commit saves the completed build as an image with the provided tag. It will
+// stop the container, commit the image, and then remove the container.
+func (e *ClientExecutor) Commit(b *imagebuilder.Builder) error {
+ config := b.Config()
+
+ if e.Container.State.Running {
+ klog.V(4).Infof("Stopping container %s ...", e.Container.ID)
+ if err := e.Client.StopContainer(e.Container.ID, 0); err != nil {
+ return fmt.Errorf("unable to stop build container: %v", err)
+ }
+ e.Container.State.Running = false
+ // Starting the container may perform escaping of args, so to be consistent
+ // we also set that here
+ config.ArgsEscaped = true
+ }
+
+ var repository, tag string
+ if len(e.Tag) > 0 {
+ repository, tag = docker.ParseRepositoryTag(e.Tag)
+ klog.V(4).Infof("Committing built container %s as image %q: %#v", e.Container.ID, e.Tag, config)
+ if e.LogFn != nil {
+ e.LogFn("Committing changes to %s ...", e.Tag)
+ }
+ } else {
+ klog.V(4).Infof("Committing built container %s: %#v", e.Container.ID, config)
+ if e.LogFn != nil {
+ e.LogFn("Committing changes ...")
+ }
+ }
+
+ defer func() {
+ for _, err := range e.Release() {
+ e.LogFn("Unable to cleanup: %v", err)
+ }
+ }()
+
+ image, err := e.Client.CommitContainer(docker.CommitContainerOptions{
+ Author: b.Author,
+ Container: e.Container.ID,
+ Run: config,
+ Repository: repository,
+ Tag: tag,
+ })
+ if err != nil {
+ return fmt.Errorf("unable to commit build container: %v", err)
+ }
+
+ e.Committed = image
+ klog.V(4).Infof("Committed %s to %s", e.Container.ID, image.ID)
+
+ if len(e.Tag) > 0 {
+ for _, s := range e.AdditionalTags {
+ repository, tag := docker.ParseRepositoryTag(s)
+ err := e.Client.TagImage(image.ID, docker.TagImageOptions{
+ Repo: repository,
+ Tag: tag,
+ })
+ if err != nil {
+ e.Deferred = append([]func() error{func() error { return e.removeImage(image.ID) }}, e.Deferred...)
+ return fmt.Errorf("unable to tag %q: %v", s, err)
+ }
+ e.LogFn("Tagged as %s", s)
+ }
+ }
+
+ if e.LogFn != nil {
+ e.LogFn("Done")
+ }
+ return nil
+}
+
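+// PopulateTransientMounts copies each transient mount source into a numbered
+// subdirectory of the shared volume via a temporary container, and returns the
+// read-only bind specifications that expose them at their destination paths.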
+func (e *ClientExecutor) PopulateTransientMounts(opts docker.CreateContainerOptions, transientMounts []Mount, sharedMount string) ([]string, error) {
+ container, err := e.Client.CreateContainer(opts)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create transient container: %v", err)
+ }
+ defer e.removeContainer(container.ID)
+
+ var copies []imagebuilder.Copy
+ for i, mount := range transientMounts {
+ copies = append(copies, imagebuilder.Copy{
+ FromFS: true,
+ Src: []string{mount.SourcePath},
+ Dest: filepath.Join(e.ContainerTransientMount, strconv.Itoa(i)),
+ })
+ }
+ if err := e.CopyContainer(container, nil, copies...); err != nil {
+ return nil, fmt.Errorf("unable to copy transient context into container: %v", err)
+ }
+
+ // mount individual items temporarily
+ var binds []string
+ for i, mount := range e.TransientMounts {
+ binds = append(binds, fmt.Sprintf("%s:%s:%s", filepath.Join(sharedMount, strconv.Itoa(i)), mount.DestinationPath, "ro"))
+ }
+ return binds, nil
+}
+
+// Release deletes any items started by this executor.
+func (e *ClientExecutor) Release() []error {
+ errs := e.Volumes.Release()
+ for _, fn := range e.Deferred {
+ if err := fn(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ e.Deferred = nil
+ return errs
+}
+
+// removeContainer stops and removes the container with the provided ID
+func (e *ClientExecutor) removeContainer(id string) error {
+ e.Client.StopContainer(id, 0)
+ err := e.Client.RemoveContainer(docker.RemoveContainerOptions{
+ ID: id,
+ RemoveVolumes: true,
+ Force: true,
+ })
+ if _, ok := err.(*docker.NoSuchContainer); err != nil && !ok {
+ return fmt.Errorf("unable to cleanup container %s: %v", id, err)
+ }
+ return nil
+}
+
+// removeImage removes the provided image ID
+func (e *ClientExecutor) removeImage(id string) error {
+ if err := e.Client.RemoveImageExtended(id, docker.RemoveImageOptions{
+ Force: true,
+ }); err != nil {
+ return fmt.Errorf("unable to clean up image %s: %v", id, err)
+ }
+ return nil
+}
+
+// CreateScratchImage creates a new, zero-byte layer that is identical to "scratch"
+// except that the resulting image will have two layers.
+func (e *ClientExecutor) CreateScratchImage() (string, error) {
+ random, err := randSeq(imageSafeCharacters, 24)
+ if err != nil {
+ return "", err
+ }
+ name := fmt.Sprintf("scratch%s", random)
+
+ buf := &bytes.Buffer{}
+ w := tar.NewWriter(buf)
+ w.Close()
+
+ return name, e.Client.ImportImage(docker.ImportImageOptions{
+ Repository: name,
+ Source: "-",
+ InputStream: buf,
+ })
+}
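+
+// The import above is roughly what piping an empty tarball into the CLI would
+// do (a hypothetical shell equivalent, shown only for intuition):
+//
+//	tar -cf - --files-from /dev/null | docker import - scratch<random>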
+
+// imageSafeCharacters are characters allowed to be part of a Docker image name.
+const imageSafeCharacters = "abcdefghijklmnopqrstuvwxyz0123456789"
+
+// randSeq returns a sequence of random characters drawn from source. It returns
+// an error if cryptographic randomness is not available or source is more than 255
+// characters.
+func randSeq(source string, n int) (string, error) {
+ if len(source) > 255 {
+ return "", fmt.Errorf("source must be less than 256 bytes long")
+ }
+ random := make([]byte, n)
+ if _, err := io.ReadFull(rand.Reader, random); err != nil {
+ return "", err
+ }
+ for i := range random {
+ random[i] = source[random[i]%byte(len(source))]
+ }
+ return string(random), nil
+}
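+
+// For illustration, randSeq(imageSafeCharacters, 4) might yield "q3zx". Note
+// that the byte-modulo selection slightly favors the first 256%len(source)
+// characters of source unless len(source) divides 256 evenly; for disposable
+// image and volume names that bias is harmless.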
+
+// LoadImage checks the client for an image matching from. If not found,
+// attempts to pull the image and then tries to inspect again.
+func (e *ClientExecutor) LoadImage(from string) (*docker.Image, error) {
+ return e.LoadImageWithPlatform(from, "")
+}
+
+// LoadImageWithPlatform checks the client for an image matching from. If not
+// found, attempts to pull the image for the specified platform and then tries
+// to inspect again.
+func (e *ClientExecutor) LoadImageWithPlatform(from string, platform string) (*docker.Image, error) {
+ image, err := e.Client.InspectImage(from)
+ if err == nil {
+ return image, nil
+ }
+ if err != docker.ErrNoSuchImage {
+ return nil, err
+ }
+
+ if !e.AllowPull {
+ klog.V(4).Infof("image %s did not exist", from)
+ return nil, docker.ErrNoSuchImage
+ }
+
+ repository, tag := docker.ParseRepositoryTag(from)
+ if len(tag) == 0 {
+ tag = "latest"
+ }
+
+ klog.V(4).Infof("attempting to pull %s with auth from repository %s:%s", from, repository, tag)
+
+ // TODO: we may want to abstract looping over multiple credentials
+ auth, _ := e.AuthFn(repository)
+ if len(auth) == 0 {
+ auth = append(auth, dockertypes.AuthConfig{})
+ }
+
+ if e.LogFn != nil {
+ e.LogFn("Image %s was not found, pulling ...", from)
+ }
+
+ var lastErr error
+ outputProgress := func(s string) {
+ e.LogFn("%s", s)
+ }
+ for _, config := range auth {
+ // TODO: handle IDs?
+ var pullErr error
+ func() { // A scope for defer
+ pullWriter := imageprogress.NewPullWriter(outputProgress)
+ defer func() {
+ err := pullWriter.Close()
+ if pullErr == nil {
+ pullErr = err
+ }
+ }()
+
+ pullImageOptions := docker.PullImageOptions{
+ Repository: repository,
+ Tag: tag,
+ OutputStream: pullWriter,
+ Platform: platform,
+ RawJSONStream: true,
+ }
+ if klog.V(5) {
+ pullImageOptions.OutputStream = os.Stderr
+ pullImageOptions.RawJSONStream = false
+ }
+ authConfig := docker.AuthConfiguration{Username: config.Username, ServerAddress: config.ServerAddress, Password: config.Password}
+ pullErr = e.Client.PullImage(pullImageOptions, authConfig)
+ }()
+ if pullErr == nil {
+ break
+ }
+ lastErr = pullErr
+ continue
+ }
+ if lastErr != nil {
+ return nil, fmt.Errorf("unable to pull image (from: %s, tag: %s): %v", repository, tag, lastErr)
+ }
+
+ return e.Client.InspectImage(from)
+}
+
+func (e *ClientExecutor) Preserve(path string) error {
+ if e.Volumes == nil {
+ e.Volumes = NewContainerVolumeTracker()
+ }
+
+ if err := e.EnsureContainerPath(path); err != nil {
+ return err
+ }
+
+ e.Volumes.Add(path)
+ return nil
+}
+
+func (e *ClientExecutor) EnsureContainerPath(path string) error {
+ return e.createOrReplaceContainerPathWithOwner(path, 0, 0, nil)
+}
+
+func (e *ClientExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
+ uid, gid := 0, 0
+
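+ // fall back to root (0:0) ownership when the user cannot be resolved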
+ u, g, err := e.getUser(user)
+ if err == nil {
+ uid = u
+ gid = g
+ }
+
+ return e.createOrReplaceContainerPathWithOwner(path, uid, gid, mode)
+}
+
+func (e *ClientExecutor) createOrReplaceContainerPathWithOwner(path string, uid, gid int, mode *os.FileMode) error {
+ if mode == nil {
+ m := os.FileMode(0755)
+ mode = &m
+ }
+ createPath := func(dest string) error {
+ var writerErr error
+ if !strings.HasSuffix(dest, "/") {
+ dest = dest + "/"
+ }
+ reader, writer := io.Pipe()
+ opts := docker.UploadToContainerOptions{
+ InputStream: reader,
+ Path: "/",
+ Context: context.TODO(),
+ }
+ go func() {
+ defer writer.Close()
+ tarball := tar.NewWriter(writer)
+ defer tarball.Close()
+ writerErr = tarball.WriteHeader(&tar.Header{
+ Name: dest,
+ Typeflag: tar.TypeDir,
+ Mode: int64(*mode),
+ Uid: uid,
+ Gid: gid,
+ })
+ }()
+ klog.V(4).Infof("Uploading empty archive to %q", dest)
+ err := e.Client.UploadToContainer(e.Container.ID, opts)
+ if err != nil {
+ return fmt.Errorf("unable to ensure existence of preserved path %s: %v", dest, err)
+ }
+ if writerErr != nil {
+ return fmt.Errorf("error generating tarball to ensure existence of preserved path %s: %v", dest, writerErr)
+ }
+ return nil
+ }
+ readPath := func(dest string) error {
+ if !strings.HasSuffix(dest, "/") {
+ dest = dest + "/"
+ }
+ err := e.Client.DownloadFromContainer(e.Container.ID, docker.DownloadFromContainerOptions{
+ Path: dest,
+ OutputStream: ioutil.Discard,
+ })
+ return err
+ }
+ var pathsToCreate []string
+ pathToCheck := path
+ for {
+ if err := readPath(pathToCheck); err != nil {
+ pathsToCreate = append([]string{pathToCheck}, pathsToCreate...)
+ }
+ if filepath.Dir(pathToCheck) == pathToCheck {
+ break
+ }
+ pathToCheck = filepath.Dir(pathToCheck)
+ }
+ for _, path := range pathsToCreate {
+ if err := createPath(path); err != nil {
+ return fmt.Errorf("error creating container directory %s: %v", path, err)
+ }
+ }
+ return nil
+}
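+
+// For example, ensuring /a/b/c when only /a already exists probes /a/b/c,
+// /a/b, /a and / via readPath, queues the missing /a/b and /a/b/c (shallowest
+// first), then uploads a one-entry tar directory header for each with the
+// requested owner and mode.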
+
+func (e *ClientExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
+ if e.IgnoreUnrecognizedInstructions {
+ e.LogFn("warning: Unknown instruction: %s", strings.ToUpper(step.Command))
+ return nil
+ }
+ return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(step.Command))
+}
+
+// Run executes a single Run command against the current container using exec().
+// Since exec does not allow ENV or WORKDIR to be set, we force the execution of
+// the user command into a shell and perform those operations beforehand. Since
+// RUN requires /bin/sh, we can use both 'cd' and 'export'.
+func (e *ClientExecutor) Run(run imagebuilder.Run, config docker.Config) error {
+ if len(run.Mounts) > 0 {
+ return fmt.Errorf("RUN --mount not supported")
+ }
+ if run.Network != "" {
+ return fmt.Errorf("RUN --network not supported")
+ }
+
+ args := make([]string, len(run.Args))
+ copy(args, run.Args)
+
+ defaultShell := config.Shell
+ if len(defaultShell) == 0 {
+ if runtime.GOOS == "windows" {
+ defaultShell = []string{"cmd", "/S", "/C"}
+ } else {
+ defaultShell = []string{"/bin/sh", "-c"}
+ }
+ }
+ if runtime.GOOS == "windows" {
+ if len(config.WorkingDir) > 0 {
+ args[0] = fmt.Sprintf("cd %s && %s", imagebuilder.BashQuote(config.WorkingDir), args[0])
+ }
+ // TODO: implement windows ENV
+ args = append(defaultShell, args...)
+ } else {
+ if run.Shell {
+ if len(config.WorkingDir) > 0 {
+ args[0] = fmt.Sprintf("cd %s && %s", imagebuilder.BashQuote(config.WorkingDir), args[0])
+ }
+ if len(config.Env) > 0 {
+ args[0] = imagebuilder.ExportEnv(config.Env) + args[0]
+ }
+ args = append(defaultShell, args...)
+ } else {
+ switch {
+ case len(config.WorkingDir) == 0 && len(config.Env) == 0:
+ // no change necessary
+ case len(args) > 0:
+ setup := "exec \"$@\""
+ if len(config.WorkingDir) > 0 {
+ setup = fmt.Sprintf("cd %s && %s", imagebuilder.BashQuote(config.WorkingDir), setup)
+ }
+ if len(config.Env) > 0 {
+ setup = imagebuilder.ExportEnv(config.Env) + setup
+ }
+ newArgs := make([]string, 0, len(args)+4)
+ newArgs = append(newArgs, defaultShell...)
+ newArgs = append(newArgs, setup, "")
+ newArgs = append(newArgs, args...)
+ args = newArgs
+ }
+ }
+ }
+
+ if e.StrictVolumeOwnership && !e.Volumes.Empty() {
+ return fmt.Errorf("a RUN command was executed after a VOLUME command, which may result in ownership information being lost")
+ }
+ if err := e.Volumes.Save(e.Container.ID, e.TempDir, e.Client); err != nil {
+ return err
+ }
+
+ config.Cmd = args
+ klog.V(4).Infof("Running %#v inside of %s as user %s", config.Cmd, e.Container.ID, config.User)
+ exec, err := e.Client.CreateExec(docker.CreateExecOptions{
+ Cmd: config.Cmd,
+ Container: e.Container.ID,
+ AttachStdout: true,
+ AttachStderr: true,
+ User: config.User,
+ })
+ if err != nil {
+ return err
+ }
+ if err := e.Client.StartExec(exec.ID, docker.StartExecOptions{
+ OutputStream: e.Out,
+ ErrorStream: e.ErrOut,
+ }); err != nil {
+ return err
+ }
+ status, err := e.Client.InspectExec(exec.ID)
+ if err != nil {
+ return err
+ }
+ if status.ExitCode != 0 {
+ klog.V(4).Infof("Failed command (code %d): %v", status.ExitCode, args)
+ return fmt.Errorf("running '%s' failed with exit code %d", strings.Join(run.Args, " "), status.ExitCode)
+ }
+
+ if err := e.Volumes.Restore(e.Container.ID, e.Client); err != nil {
+ return err
+ }
+
+ return nil
+}
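+
+// To make the rewriting above concrete (non-Windows): with WORKDIR /app and
+// ENV FOO=bar in effect, the shell form `RUN make` is executed roughly as
+//
+//	["/bin/sh", "-c", ExportEnv(env) + "cd /app && make"]
+//
+// while the exec form ["make"] is wrapped so the original argv survives:
+//
+//	["/bin/sh", "-c", ExportEnv(env) + "cd /app && exec \"$@\"", "", "make"]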
+
+// Copy implements the executor copy function.
+func (e *ClientExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
+ // copying content into a volume invalidates the archived state of any given directory
+ for _, copy := range copies {
+ e.Volumes.Invalidate(copy.Dest)
+ }
+
+ return e.CopyContainer(e.Container, excludes, copies...)
+}
+
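+// findMissingParents walks from dest up toward the root and returns the path
+// and each of its ancestors that do not yet exist as directories in the
+// container, deepest first.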
+func (e *ClientExecutor) findMissingParents(container *docker.Container, dest string) (parents []string, err error) {
+ destParent := filepath.Clean(dest)
+ for filepath.Dir(destParent) != destParent {
+ exists, err := isContainerPathDirectory(e.Client, container.ID, destParent)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ parents = append(parents, destParent)
+ }
+ destParent = filepath.Dir(destParent)
+ }
+ return parents, nil
+}
+
+func (e *ClientExecutor) getUser(userspec string) (int, int, error) {
+ readFile := func(path string) ([]byte, error) {
+ var buffer, contents bytes.Buffer
+ if err := e.Client.DownloadFromContainer(e.Container.ID, docker.DownloadFromContainerOptions{
+ OutputStream: &buffer,
+ Path: path,
+ Context: context.TODO(),
+ }); err != nil {
+ return nil, err
+ }
+ tr := tar.NewReader(&buffer)
+ hdr, err := tr.Next()
+ if err != nil {
+ return nil, err
+ }
+ if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
+ return nil, fmt.Errorf("expected %q to be a regular file, but it was of type %q", path, string(hdr.Typeflag))
+ }
+ if filepath.FromSlash(hdr.Name) != filepath.Base(path) {
+ return nil, fmt.Errorf("error reading contents of %q: got %q instead", path, hdr.Name)
+ }
+ n, err := io.Copy(&contents, tr)
+ if err != nil {
+ return nil, fmt.Errorf("error reading contents of %q: %v", path, err)
+ }
+ if n != hdr.Size {
+ return nil, fmt.Errorf("size mismatch reading contents of %q: %v", path, err)
+ }
+ hdr, err = tr.Next()
+ if err != nil && !errors.Is(err, io.EOF) {
+ return nil, fmt.Errorf("error reading archive of %q: %v", path, err)
+ }
+ if err == nil {
+ return nil, fmt.Errorf("got unexpected extra content while reading archive of %q", path)
+ }
+ return contents.Bytes(), nil
+ }
+ parse := func(file []byte, matchField int, key string, numFields, readField int) (string, error) {
+ var value *string
+ scanner := bufio.NewScanner(bytes.NewReader(file))
+ for scanner.Scan() {
+ line := scanner.Text()
+ fields := strings.SplitN(line, ":", numFields)
+ if len(fields) != numFields {
+ return "", fmt.Errorf("error parsing line %q: incorrect number of fields", line)
+ }
+ if fields[matchField] != key {
+ continue
+ }
+ v := fields[readField]
+ value = &v
+ }
+ if err := scanner.Err(); err != nil {
+ return "", fmt.Errorf("error scanning file: %v", err)
+ }
+ if value == nil {
+ return "", os.ErrNotExist
+ }
+ return *value, nil
+ }
+
+ spec := strings.SplitN(userspec, ":", 2)
+ if len(spec) == 2 {
+ parsedUid, err := strconv.ParseUint(spec[0], 10, 32)
+ if err != nil {
+ // maybe it's a user name? look up the UID
+ passwdFile, err := readFile("/etc/passwd")
+ if err != nil {
+ return -1, -1, err
+ }
+ uid, err := parse(passwdFile, 0, spec[0], 7, 2)
+ if err != nil {
+ return -1, -1, fmt.Errorf("error reading UID value from passwd file for --chown=%s: %v", spec[0], err)
+ }
+ parsedUid, err = strconv.ParseUint(uid, 10, 32)
+ if err != nil {
+ return -1, -1, fmt.Errorf("error parsing UID value %q from passwd file for --chown=%s", uid, userspec)
+ }
+ }
+ parsedGid, err := strconv.ParseUint(spec[1], 10, 32)
+ if err != nil {
+ // maybe it's a group name? look up the GID
+ groupFile, err := readFile("/etc/group")
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := parse(groupFile, 0, spec[1], 4, 2)
+ if err != nil {
+ return -1, -1, err
+ }
+ parsedGid, err = strconv.ParseUint(gid, 10, 32)
+ if err != nil {
+ return -1, -1, fmt.Errorf("error parsing GID value %q from group file for --chown=%s", gid, userspec)
+ }
+ }
+ return int(parsedUid), int(parsedGid), nil
+ }
+
+ var parsedUid, parsedGid uint64
+ if id, err := strconv.ParseUint(spec[0], 10, 32); err == nil {
+ // it's an ID. use it as both the UID and the GID
+ parsedUid = id
+ parsedGid = id
+ } else {
+ // it's a user name, we'll need to look up their UID and primary GID
+ passwdFile, err := readFile("/etc/passwd")
+ if err != nil {
+ return -1, -1, err
+ }
+ // read the UID and primary GID
+ uid, err := parse(passwdFile, 0, spec[0], 7, 2)
+ if err != nil {
+ return -1, -1, fmt.Errorf("error reading UID value from /etc/passwd for --chown=%s", userspec)
+ }
+ gid, err := parse(passwdFile, 0, spec[0], 7, 3)
+ if err != nil {
+ return -1, -1, fmt.Errorf("error reading GID value from /etc/passwd for --chown=%s", userspec)
+ }
+ if parsedUid, err = strconv.ParseUint(uid, 10, 32); err != nil {
+ return -1, -1, fmt.Errorf("error parsing UID value %q from /etc/passwd for --chown=%s", uid, userspec)
+ }
+ if parsedGid, err = strconv.ParseUint(gid, 10, 32); err != nil {
+ return -1, -1, fmt.Errorf("error parsing GID value %q from /etc/passwd for --chown=%s", gid, userspec)
+ }
+ }
+ return int(parsedUid), int(parsedGid), nil
+}
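+
+// As a worked example, given the /etc/passwd line
+//
+//	builder:x:1001:2002:Builder:/home/builder:/bin/sh
+//
+// getUser("builder") returns (1001, 2002): parse(passwdFile, 0, "builder", 7, 2)
+// extracts the UID field and parse(passwdFile, 0, "builder", 7, 3) the primary GID.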
+
+// CopyContainer copies the provided content into a destination container.
+func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []string, copies ...imagebuilder.Copy) error {
+ chownUid, chownGid := -1, -1
+ chown := func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) {
+ if chownUid != -1 {
+ h.Uid = chownUid
+ }
+ if chownGid != -1 {
+ h.Gid = chownGid
+ }
+ if (h.Uid > 0x1fffff || h.Gid > 0x1fffff) && h.Format == tar.FormatUSTAR {
+ h.Format = tar.FormatPAX
+ }
+ return nil, false, false, nil
+ }
+ for _, c := range copies {
+ var chmod func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error)
+ if c.Chmod != "" {
+ parsed, err := strconv.ParseInt(c.Chmod, 8, 16)
+ if err != nil {
+ return err
+ }
+ chmod = func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) {
+ mode := h.Mode &^ 0o777
+ mode |= parsed & 0o777
+ h.Mode = mode
+ return nil, false, false, nil
+ }
+ }
+ chownUid, chownGid = -1, -1
+ if c.Chown != "" {
+ var err error
+ chownUid, chownGid, err = e.getUser(c.Chown)
+ if err != nil {
+ return err
+ }
+ }
+ // TODO: reuse source
+ for _, src := range c.Src {
+ if src == "" {
+ src = "*"
+ }
+ assumeDstIsDirectory := len(c.Src) > 1
+ repeatThisSrc:
+ klog.V(4).Infof("Archiving %s download=%t fromFS=%t from=%s", src, c.Download, c.FromFS, c.From)
+ var r io.Reader
+ var closer io.Closer
+ var err error
+ if len(c.From) > 0 {
+ if !assumeDstIsDirectory {
+ var err error
+ if assumeDstIsDirectory, err = e.isContainerGlobMultiple(e.Client, c.From, src); err != nil {
+ return err
+ }
+ }
+ r, closer, err = e.archiveFromContainer(c.From, src, c.Dest, assumeDstIsDirectory)
+ } else {
+ r, closer, err = e.Archive(c.FromFS, src, c.Dest, c.Download, excludes)
+ }
+ if err != nil {
+ return err
+ }
+ asOwner := ""
+ if c.Chown != "" {
+ asOwner = fmt.Sprintf(" as %d:%d", chownUid, chownGid)
+ // the daemon would implicitly create missing
+ // directories with the wrong ownership, so
+ // check for any that don't exist and create
+ // them ourselves
+ missingParents, err := e.findMissingParents(container, c.Dest)
+ if err != nil {
+ return err
+ }
+ if len(missingParents) > 0 {
+ sort.Strings(missingParents)
+ klog.V(5).Infof("Uploading directories %v to %s%s", missingParents, container.ID, asOwner)
+ for _, missingParent := range missingParents {
+ if err := e.createOrReplaceContainerPathWithOwner(missingParent, chownUid, chownGid, nil); err != nil {
+ return err
+ }
+ }
+ }
+ filtered, err := transformArchive(r, false, chown)
+ if err != nil {
+ return err
+ }
+ r = filtered
+ }
+ if c.Chmod != "" {
+ filtered, err := transformArchive(r, false, chmod)
+ if err != nil {
+ return err
+ }
+ r = filtered
+ }
+ klog.V(5).Infof("Uploading to %s%s at %s", container.ID, asOwner, c.Dest)
+ if klog.V(6) {
+ logArchiveOutput(r, "Archive file for %s")
+ }
+ // add a workaround to allow us to notice if a
+ // dstNeedsToBeDirectoryError was returned while
+ // attempting to read the data we're uploading,
+ // indicating that we thought the content would be just
+ // one item, but it actually isn't
+ reader := &readErrorWrapper{Reader: r}
+ r = reader
+ err = e.Client.UploadToContainer(container.ID, docker.UploadToContainerOptions{
+ InputStream: r,
+ Path: "/",
+ })
+ if err := closer.Close(); err != nil {
+ klog.Errorf("Error while closing stream container copy stream %s: %v", container.ID, err)
+ }
+ if err != nil {
+ if errors.Is(reader.err, dstNeedsToBeDirectoryError) && !assumeDstIsDirectory {
+ assumeDstIsDirectory = true
+ goto repeatThisSrc
+ }
+ if apiErr, ok := err.(*docker.Error); ok && apiErr.Status == 404 {
+ klog.V(4).Infof("path %s did not exist in container %s: %v", src, container.ID, err)
+ }
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+type readErrorWrapper struct {
+ io.Reader
+ err error
+}
+
+func (r *readErrorWrapper) Read(p []byte) (n int, err error) {
+ n, r.err = r.Reader.Read(p)
+ return n, r.err
+}
+
+type closers []func() error
+
+func (c closers) Close() error {
+ var lastErr error
+ for _, fn := range c {
+ if err := fn(); err != nil {
+ lastErr = err
+ }
+ }
+ return lastErr
+}
+
+func (e *ClientExecutor) archiveFromContainer(from string, src, dst string, multipleSources bool) (io.Reader, io.Closer, error) {
+ var containerID string
+ if other, ok := e.Named[from]; ok {
+ if other.Container == nil {
+ return nil, nil, fmt.Errorf("the stage %q has not been built yet", from)
+ }
+ klog.V(5).Infof("Using container %s as input for archive request", other.Container.ID)
+ containerID = other.Container.ID
+ } else {
+ klog.V(5).Infof("Creating a container temporarily for image input from %q in %s", from, src)
+ _, err := e.LoadImage(from)
+ if err != nil {
+ return nil, nil, err
+ }
+ c, err := e.Client.CreateContainer(docker.CreateContainerOptions{
+ Config: &docker.Config{
+ Image: from,
+ },
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+ containerID = c.ID
+ e.Deferred = append([]func() error{func() error { return e.removeContainer(containerID) }}, e.Deferred...)
+ }
+
+ check := newDirectoryCheck(e.Client, e.Container.ID)
+ pr, pw := io.Pipe()
+ var archiveRoot string
+ fetch := func(pw *io.PipeWriter) {
+ klog.V(6).Infof("Download from container %s at path %s", containerID, archiveRoot)
+ err := e.Client.DownloadFromContainer(containerID, docker.DownloadFromContainerOptions{
+ OutputStream: pw,
+ Path: archiveRoot,
+ })
+ pw.CloseWithError(err)
+ }
+ ar, archiveRoot, err := archiveFromContainer(pr, src, dst, nil, check, fetch, multipleSources)
+ if err != nil {
+ pr.Close()
+ pw.Close()
+ return nil, nil, err
+ }
+ closer := newCloser(func() error {
+ err2 := pr.Close()
+ err3 := ar.Close()
+ if err3 != nil {
+ return err3
+ }
+ return err2
+ })
+ go fetch(pw)
+ return &readCloser{Reader: ar, Closer: closer}, pr, nil
+}
+
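+// isContainerGlobMultiple reports whether glob matches more than one entry in
+// the stage or image named by from, by archiving the matches and counting tar
+// headers; errors from the archive step are treated as "not multiple".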
+func (e *ClientExecutor) isContainerGlobMultiple(client *docker.Client, from, glob string) (bool, error) {
+ reader, closer, err := e.archiveFromContainer(from, glob, "/ignored", true)
+ if err != nil {
+ return false, nil
+ }
+
+ defer closer.Close()
+ tr := tar.NewReader(reader)
+
+ h, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ } else {
+ if apiErr, ok := err.(*docker.Error); ok && apiErr.Status == 404 {
+ klog.V(4).Infof("path %s did not exist in container %s: %v", glob, e.Container.ID, err)
+ err = nil
+ }
+ }
+ return false, err
+ }
+
+ klog.V(4).Infof("Retrieved first header from %s using glob %s: %#v", from, glob, h)
+
+ h, err = tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ return false, err
+ }
+
+ klog.V(4).Infof("Retrieved second header from %s using glob %s: %#v", from, glob, h)
+
+ // take the remainder of the input and discard it
+ go func() {
+ n, err := io.Copy(ioutil.Discard, reader)
+ if n > 0 || err != nil {
+ klog.V(6).Infof("Discarded %d bytes from end of from glob check, and got error: %v", n, err)
+ }
+ }()
+
+ return true, nil
+}
+
+func (e *ClientExecutor) Archive(fromFS bool, src, dst string, allowDownload bool, excludes []string) (io.Reader, io.Closer, error) {
+ var check DirectoryCheck
+ if e.Container != nil {
+ check = newDirectoryCheck(e.Client, e.Container.ID)
+ }
+ if isURL(src) {
+ if !allowDownload {
+ return nil, nil, fmt.Errorf("source can't be a URL")
+ }
+ klog.V(5).Infof("Archiving %s -> %s from URL", src, dst)
+ return archiveFromURL(src, dst, e.TempDir, check)
+ }
+ // the input is from the filesystem, use the source as the input
+ if fromFS {
+ klog.V(5).Infof("Archiving %s %s -> %s from a filesystem location", src, ".", dst)
+ return archiveFromDisk(src, ".", dst, allowDownload, excludes, check)
+ }
+ // if the context is in archive form, read from it without decompressing
+ if len(e.ContextArchive) > 0 {
+ klog.V(5).Infof("Archiving %s %s -> %s from context archive", e.ContextArchive, src, dst)
+ return archiveFromFile(e.ContextArchive, src, dst, excludes, check)
+ }
+ // if the context is a directory, we only allow relative includes
+ klog.V(5).Infof("Archiving %q %q -> %q from disk", e.Directory, src, dst)
+ return archiveFromDisk(e.Directory, src, dst, allowDownload, excludes, check)
+}
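+
+// In other words, a copy source is resolved in this order: a URL download
+// (when allowDownload permits it), an absolute filesystem path for transient
+// mounts (fromFS), the supplied context archive, and finally the build
+// context directory itself.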
+
+// ContainerVolumeTracker manages tracking archives of specific paths inside a container.
+type ContainerVolumeTracker struct {
+ paths map[string]string
+ errs []error
+}
+
+func NewContainerVolumeTracker() *ContainerVolumeTracker {
+ return &ContainerVolumeTracker{
+ paths: make(map[string]string),
+ }
+}
+
+// Empty returns true if the tracker is not watching any paths
+func (t *ContainerVolumeTracker) Empty() bool {
+ return t == nil || len(t.paths) == 0
+}
+
+// Add tracks path unless it is already being tracked.
+func (t *ContainerVolumeTracker) Add(path string) {
+ if _, ok := t.paths[path]; !ok {
+ t.paths[path] = ""
+ }
+}
+
+// Release removes any stored snapshots
+func (t *ContainerVolumeTracker) Release() []error {
+ if t == nil {
+ return nil
+ }
+ for path := range t.paths {
+ t.ReleasePath(path)
+ }
+ return t.errs
+}
+
+func (t *ContainerVolumeTracker) ReleasePath(path string) {
+ if t == nil {
+ return
+ }
+ if archivePath, ok := t.paths[path]; ok && len(archivePath) > 0 {
+ err := os.Remove(archivePath)
+ if err != nil && !os.IsNotExist(err) {
+ t.errs = append(t.errs, err)
+ }
+ klog.V(5).Infof("Releasing path %s (%v)", path, err)
+ t.paths[path] = ""
+ }
+}
+
+func (t *ContainerVolumeTracker) Invalidate(path string) {
+ if t == nil {
+ return
+ }
+ set := imagebuilder.VolumeSet{}
+ set.Add(path)
+ for path := range t.paths {
+ if set.Covers(path) {
+ t.ReleasePath(path)
+ }
+ }
+}
+
+// Save ensures that all paths tracked underneath this container are archived or
+// returns an error.
+func (t *ContainerVolumeTracker) Save(containerID, tempDir string, client *docker.Client) error {
+ if t == nil {
+ return nil
+ }
+ set := imagebuilder.VolumeSet{}
+ for dest := range t.paths {
+ set.Add(dest)
+ }
+ // remove archive paths that are covered by other paths
+ for dest := range t.paths {
+ if !set.Has(dest) {
+ t.ReleasePath(dest)
+ delete(t.paths, dest)
+ }
+ }
+ for dest, archivePath := range t.paths {
+ if len(archivePath) > 0 {
+ continue
+ }
+ archivePath, err := snapshotPath(dest, containerID, tempDir, client)
+ if err != nil {
+ return err
+ }
+ t.paths[dest] = archivePath
+ }
+ return nil
+}
+
+// filterTarPipe transforms a tar stream, calling fn on each header in the file.
+// If fn returns false, the entry is skipped. It loops until r.Next() fails and
+// returns the resulting error (io.EOF once the end of the archive is reached).
+func filterTarPipe(w *tar.Writer, r *tar.Reader, fn func(*tar.Header) bool) error {
+ for {
+ h, err := r.Next()
+ if err != nil {
+ return err
+ }
+ if fn(h) {
+ if err := w.WriteHeader(h); err != nil {
+ return err
+ }
+ if _, err := io.Copy(w, r); err != nil {
+ return err
+ }
+ } else {
+ if _, err := io.Copy(ioutil.Discard, r); err != nil {
+ return err
+ }
+ }
+ }
+}
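+
+// snapshotPath below uses filterTarPipe to strip the leading path element
+// from every entry, so an archive entry "dir/file" is rewritten to "file"
+// and entries whose names become empty are dropped.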
+
+// snapshotPath preserves the contents of path in container containerID as a temporary
+// archive, returning either an error or the path of the archived file.
+func snapshotPath(path, containerID, tempDir string, client *docker.Client) (string, error) {
+ f, err := ioutil.TempFile(tempDir, "archived-path")
+ if err != nil {
+ return "", err
+ }
+ klog.V(4).Infof("Snapshot %s for later use under %s", path, f.Name())
+
+ r, w := io.Pipe()
+ tr := tar.NewReader(r)
+ tw := tar.NewWriter(f)
+ go func() {
+ err := filterTarPipe(tw, tr, func(h *tar.Header) bool {
+ if i := strings.Index(h.Name, "/"); i != -1 {
+ h.Name = h.Name[i+1:]
+ }
+ return len(h.Name) > 0
+ })
+ if err == nil || errors.Is(err, io.EOF) {
+ tw.Flush()
+ w.Close()
+ klog.V(5).Infof("Snapshot rewritten from %s", path)
+ return
+ }
+ klog.V(5).Infof("Snapshot of %s failed: %v", path, err)
+ w.CloseWithError(err)
+ }()
+
+ if !strings.HasSuffix(path, "/") {
+ path += "/"
+ }
+ err = client.DownloadFromContainer(containerID, docker.DownloadFromContainerOptions{
+ Path: path,
+ OutputStream: w,
+ })
+ f.Close()
+ if err != nil {
+ os.Remove(f.Name())
+ return "", err
+ }
+ return f.Name(), nil
+}
+
+// Restore ensures the paths managed by t exactly match the container. This requires running
+// exec as a user that can delete contents from the container. It will return an error if
+// any client operation fails.
+func (t *ContainerVolumeTracker) Restore(containerID string, client *docker.Client) error {
+ if t == nil {
+ return nil
+ }
+ for dest, archivePath := range t.paths {
+ if len(archivePath) == 0 {
+ return fmt.Errorf("path %s does not have an archive and cannot be restored", dest)
+ }
+ klog.V(4).Infof("Restoring contents of %s from %s", dest, archivePath)
+ if !strings.HasSuffix(dest, "/") {
+ dest = dest + "/"
+ }
+ exec, err := client.CreateExec(docker.CreateExecOptions{
+ Container: containerID,
+ Cmd: []string{"/bin/sh", "-c", "rm -rf $@", "", dest + "*"},
+ User: "0",
+ })
+ if err != nil {
+ return fmt.Errorf("unable to setup clearing preserved path %s: %v", dest, err)
+ }
+ if err := client.StartExec(exec.ID, docker.StartExecOptions{}); err != nil {
+ return fmt.Errorf("unable to clear preserved path %s: %v", dest, err)
+ }
+ var status *docker.ExecInspect
+ for status == nil {
+ status, err = client.InspectExec(exec.ID)
+ if err != nil {
+ break
+ }
+ if !status.Running {
+ break
+ }
+ status = nil
+ }
+ if err != nil {
+ return fmt.Errorf("clearing preserved path %s did not succeed: %v", dest, err)
+ }
+ if status.ExitCode != 0 {
+ return fmt.Errorf("clearing preserved path %s failed with exit code %d", dest, status.ExitCode)
+ }
+ err = func() error {
+ f, err := os.Open(archivePath)
+ if err != nil {
+ return fmt.Errorf("unable to open archive %s for preserved path %s: %v", archivePath, dest, err)
+ }
+ defer f.Close()
+ if err := client.UploadToContainer(containerID, docker.UploadToContainerOptions{
+ InputStream: f,
+ Path: dest,
+ }); err != nil {
+ return fmt.Errorf("unable to upload preserved contents from %s to %s: %v", archivePath, dest, err)
+ }
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/dockerclient/conformance_test.go b/dockerclient/conformance_test.go
new file mode 100644
index 0000000..0c59e0c
--- /dev/null
+++ b/dockerclient/conformance_test.go
@@ -0,0 +1,1258 @@
+//go:build conformance
+// +build conformance
+
+package dockerclient
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/hex"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/fileutils"
+ docker "github.com/fsouza/go-dockerclient"
+ "github.com/openshift/imagebuilder/dockerfile/command"
+ "github.com/openshift/imagebuilder/dockerfile/parser"
+
+ "github.com/openshift/imagebuilder"
+)
+
+var compareLayers = flag.Bool("compare-layers", false, "If true, compare each generated layer for equivalence")
+
+type conformanceTest struct {
+ Name string
+ Version docker.BuilderVersion
+ Dockerfile string
+ Git string
+ Mounts []Mount
+ ContextDir string
+ Output []*regexp.Regexp
+ Args map[string]string
+ Ignore []ignoreFunc
+ PostClone func(dir string) error
+}
+
+func TestMount(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "dockerbuild-conformance-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ c, err := docker.NewClientFromEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ e := NewClientExecutor(c)
+ defer func() {
+ for _, err := range e.Release() {
+ t.Errorf("%v", err)
+ }
+ }()
+
+ out := &bytes.Buffer{}
+ e.Out, e.ErrOut = out, out
+ e.Tag = filepath.Base(tmpDir)
+ e.TransientMounts = []Mount{
+ {SourcePath: "testdata/volume/", DestinationPath: "/tmp/test"},
+ }
+ b := imagebuilder.NewBuilder(nil)
+ node, err := imagebuilder.ParseFile("testdata/Dockerfile.mount")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := e.Prepare(b, node, ""); err != nil {
+ t.Fatal(err)
+ }
+ if err := e.Execute(b, node); err != nil {
+ t.Fatal(err)
+ }
+
+ expected := `91 /tmp/test/Dockerfile 644 regular file 0 0
+4 /tmp/test/file 644 regular file 0 0
+5 /tmp/test/file2 644 regular file 0 0
+`
+
+ if out.String() != expected {
+ t.Errorf("Unexpected build output:\n%s", out.String())
+ }
+}
+
+func TestCopyFrom(t *testing.T) {
+ c, err := docker.NewClientFromEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testCases := []struct {
+ name string
+ create string
+ copy string
+ extra string
+ expect string
+ }{
+ {name: "copy file to root", create: "touch /a /b", copy: "/a /", expect: "[[ -f /a ]]"},
+ {name: "copy file to same file", create: "touch /a", copy: "/a /a", expect: "[[ -f /a ]]"},
+ {name: "copy file to workdir", create: "touch /a", extra: "WORKDIR /b", copy: "/a .", expect: "[[ -f /b/a ]]"},
+ {name: "copy file to workdir rename", create: "touch /a", extra: "WORKDIR /b", copy: "/a ./b", expect: "[[ -f /b/b ]]"},
+ {name: "copy folder contents to higher level", create: "mkdir -p /a/b && touch /a/b/1 /a/b/2", copy: "/a/b/ /b/", expect: "[[ -f /b/1 && -f /b/2 && ! -e /a ]]"},
+ {name: "copy wildcard folder contents to higher level", create: "mkdir -p /a/b && touch /a/b/1 /a/b/2", copy: "/a/b/* /b/", expect: "ls -al /b/1 /b/2 /b && ! ls -al /a /b/a /b/b"},
+ {name: "copy folder with dot contents to higher level", create: "mkdir -p /a/b && touch /a/b/1 /a/b/2", copy: "/a/b/. /b/", expect: "ls -al /b/1 /b/2 /b && ! ls -al /a /b/a /b/b"},
+ {name: "copy root file to different root name", create: "touch /b", copy: "/b /a", expect: "ls -al /a && ! ls -al /b"},
+ {name: "copy nested file to different root name", create: "mkdir -p /a && touch /a/b", copy: "/a/b /a", expect: "ls -al /a && ! ls -al /b"},
+ {name: "copy hard links to excluded file", create: "mkdir -p /a/b/c && touch /a/b/c/d && ln /a/b/c/d /a/b/d && ln /a/b/c/d /a/b/e", extra: "RUN mkdir -p /f/g", copy: "/a/b/d /a/b/e /f/g/", expect: "ls -al /f && ls -al /f/g && ls -al /f/g/d /f/g/e"},
+ {name: "copy file to deeper directory with explicit slash", create: "mkdir -p /a && touch /a/1", copy: "/a/1 /a/b/c/", expect: "ls -al /a/b/c/1 && ! ls -al /a/b/1"},
+ {name: "copy file to deeper directory without explicit slash", create: "mkdir -p /a && touch /a/1", copy: "/a/1 /a/b/c", expect: "ls -al /a/b/c && ! ls -al /a/b/1"},
+ {name: "copy directory to deeper directory without explicit slash", create: "mkdir -p /a && touch /a/1", copy: "/a /a/b/c", expect: "ls -al /a/b/c/1 && ! ls -al /a/b/1"},
+ {name: "copy item from directory that is a symbolic link", create: "mkdir -p /a && touch /a/1 && ln -s /a /b", copy: "b/1 /a/b/c", expect: "ls -al /a/b/c && ! ls -al /a/b/1"},
+ {name: "copy item from directory that is a symbolic link", create: "mkdir -p /a && touch /a/1 && ln -s a /c", copy: "/c/1 /a/b/c", expect: "ls -al /a/b/c && ! ls -al /a/b/1"},
+ {name: "copy directory to root without explicit slash", create: "mkdir -p /a && touch /a/1", copy: "a /a", expect: "ls -al /a/1 && ! ls -al /a/a"},
+ {name: "copy directory trailing to root without explicit slash", create: "mkdir -p /a && touch /a/1", copy: "a/. /a", expect: "ls -al /a/1 && ! ls -al /a/a"},
+ }
+ for i, testCase := range testCases {
+ name := fmt.Sprintf("%d", i)
+ if len(testCase.name) > 0 {
+ name = testCase.name
+ }
+ test := testCase
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+ e := NewClientExecutor(c)
+ defer func() {
+ for _, err := range e.Release() {
+ t.Errorf("%v", err)
+ }
+ }()
+
+ out := &bytes.Buffer{}
+ e.Out, e.ErrOut = out, out
+ b := imagebuilder.NewBuilder(nil)
+ dockerfile := fmt.Sprintf(`
+ FROM busybox AS base
+ RUN %s
+ FROM busybox
+ %s
+ COPY --from=base %s
+ RUN %s
+ `, test.create, test.extra, test.copy, test.expect,
+ )
+ t.Log(dockerfile)
+ node, err := imagebuilder.ParseDockerfile(strings.NewReader(dockerfile))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stages, err := imagebuilder.NewStages(node, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := e.Stages(b, stages, ""); err != nil {
+ t.Log(out.String())
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+func TestShell(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "dockerbuild-conformance-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ c, err := docker.NewClientFromEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ e := NewClientExecutor(c)
+ defer func() {
+ for _, err := range e.Release() {
+ t.Errorf("%v", err)
+ }
+ }()
+
+ out := &bytes.Buffer{}
+ e.Out, e.ErrOut = out, out
+ e.Directory = tmpDir
+ e.Tag = filepath.Base(tmpDir)
+ b := imagebuilder.NewBuilder(nil)
+ node, err := imagebuilder.ParseFile("testdata/Dockerfile.shell")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := e.Prepare(b, node, ""); err != nil {
+ t.Fatal(err)
+ }
+ if err := e.Execute(b, node); err != nil {
+ t.Fatal(err)
+ }
+
+ if !strings.Contains(out.String(), "+ env\n") {
+ t.Errorf("Unexpected build output:\n%s", out.String())
+ }
+}
+
+func TestMultiStageBase(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "dockerbuild-conformance-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ c, err := docker.NewClientFromEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ e := NewClientExecutor(c)
+ defer func() {
+ for _, err := range e.Release() {
+ t.Errorf("%v", err)
+ }
+ }()
+
+ out := &bytes.Buffer{}
+ e.Out, e.ErrOut = out, out
+ e.Directory = tmpDir
+ e.Tag = filepath.Base(tmpDir)
+ node, err := imagebuilder.ParseFile("testdata/Dockerfile.reusebase")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b := imagebuilder.NewBuilder(nil)
+ stages, err := imagebuilder.NewStages(node, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := e.Stages(b, stages, ""); err != nil {
+ t.Fatal(err)
+ }
+ if out.String() != "/1\n" {
+ t.Errorf("Unexpected build output:\n%s", out.String())
+ }
+}
+
+// TestConformance* compares the result of running the direct build against a
+// sequential docker build. A Dockerfile and git repo are loaded, then each step
+// in the file is run sequentially, committing after each step. The generated
+// image.Config and the resulting filesystems are compared. The next step reuses
+// the previously generated layer and performs an incremental diff. This ensures
+// that each step is functionally equivalent.
+//
+// Deviations:
+// * Builds run at different times
+// - Modification timestamps are ignored on files
+// - Some processes (gem install) result in files created in the image that
+// have different content because of that (timestamps in files). We treat
+// a file that is identical except for size within 10 bytes and neither old
+// or new is zero bytes to be identical.
+// - Docker container commit with ENV FOO=BAR and a Docker build with line
+// ENV FOO=BAR will generate an image with FOO=BAR in different positions
+// (commit places the variable first, build: last). We try to align the
+// generated environment variable to ensure they are equal.
+// - The parent image ID is ignored.
+//
+// TODO: .dockerignore
+// TODO: check context dir
+// TODO: ONBUILD
+// TODO: ensure that the final built image has the right UIDs
+func TestConformanceInternal(t *testing.T) {
+ pwd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ testCases := []conformanceTest{
+ {
+ Name: "directory",
+ ContextDir: "testdata/dir",
+ },
+ {
+ Name: "copy to dir",
+ ContextDir: "testdata/copy",
+ },
+ {
+ Name: "copy chown",
+ ContextDir: "testdata/copychown",
+ },
+ {
+ Name: "copy chmod",
+ Version: docker.BuilderBuildKit,
+ ContextDir: "testdata/copychmod",
+ },
+ {
+ Name: "copy empty 1",
+ ContextDir: "testdata/copyempty",
+ Dockerfile: "Dockerfile",
+ },
+ {
+ Name: "copy empty 2",
+ ContextDir: "testdata/copyempty",
+ Dockerfile: "Dockerfile2",
+ },
+ {
+ Name: "copy dir",
+ ContextDir: "testdata/copydir",
+ },
+ {
+ Name: "copy to renamed file",
+ ContextDir: "testdata/copyrename",
+ },
+ {
+ Name: "copy subdirectory 1",
+ ContextDir: "testdata/copyblahblub",
+ Dockerfile: "Dockerfile",
+ },
+ {
+ Name: "copy subdirectory 2",
+ ContextDir: "testdata/copyblahblub",
+ Dockerfile: "Dockerfile2",
+ },
+ {
+ Name: "copy subdirectory 3",
+ ContextDir: "testdata/copyblahblub",
+ Dockerfile: "Dockerfile3",
+ },
+ {
+ Name: "directory with slash",
+ ContextDir: "testdata/overlapdir",
+ Dockerfile: "Dockerfile.with_slash",
+ },
+ {
+ Name: "directory without slash",
+ ContextDir: "testdata/overlapdir",
+ Dockerfile: "Dockerfile.without_slash",
+ },
+ // TODO: Fix this test
+ // {
+ // ContextDir: "testdata/ignore",
+ // },
+ {
+ Name: "environment",
+ Dockerfile: "testdata/Dockerfile.env",
+ },
+ {
+ Name: "edgecases",
+ Dockerfile: "testdata/Dockerfile.edgecases",
+ },
+ {
+ Name: "exposed_default",
+ Dockerfile: "testdata/Dockerfile.exposedefault",
+ },
+ {
+ Name: "add",
+ Dockerfile: "testdata/Dockerfile.add",
+ },
+ {
+ Name: "add archives",
+ ContextDir: "testdata/add",
+ },
+ {
+ Name: "copy archives",
+ ContextDir: "testdata/add",
+ Dockerfile: "Dockerfile.copy",
+ },
+ {
+ Name: "add directories with archives",
+ ContextDir: "testdata/add",
+ Dockerfile: "Dockerfile.addall",
+ },
+ {
+ Name: "add directories with archives 2",
+ ContextDir: "testdata/add",
+ Dockerfile: "Dockerfile.addslash",
+ },
+ {
+ Name: "run with JSON",
+ Dockerfile: "testdata/Dockerfile.run.args",
+ Output: []*regexp.Regexp{
+ // docker outputs colorized output
+ regexp.MustCompile(`(?m)(\[0m|^)inner outer$`),
+ regexp.MustCompile(`(?m)(\[0m|^)first second$`),
+ regexp.MustCompile(`(?m)(\[0m|^)third fourth$`),
+ regexp.MustCompile(`(?m)(\[0m|^)fifth sixth$`),
+ },
+ },
+ {
+ Name: "shell",
+ Dockerfile: "testdata/Dockerfile.shell",
+ },
+ {
+ Name: "args",
+ Dockerfile: "testdata/Dockerfile.args",
+ Args: map[string]string{"BAR": "first"},
+ },
+ /*{ // uncomment when docker allows this
+ Dockerfile: "testdata/Dockerfile.args",
+ Args: map[string]string{"BAZ": "first"},
+ },*/
+ {
+ Name: "wildcard",
+ ContextDir: "testdata/wildcard",
+ },
+ {
+ Name: "wildcard leading path",
+ ContextDir: "./testdata/wildcard",
+ },
+ {
+ Name: "wildcard absolute path",
+ ContextDir: filepath.Join(pwd, "testdata", "wildcard"),
+ },
+ {
+ Name: "volume",
+ ContextDir: "testdata/volume",
+ },
+ {
+ Name: "volumerun",
+ ContextDir: "testdata/volumerun",
+ },
+ {
+ Name: "novolume",
+ Dockerfile: "testdata/Dockerfile.novolume",
+ },
+ {
+ Name: "novolumenorun",
+ Dockerfile: "testdata/Dockerfile.novolumenorun",
+ },
+ {
+ Name: "noworkdir",
+ Dockerfile: "testdata/Dockerfile.noworkdir",
+ },
+ {
+ Name: "volumeexists",
+ Dockerfile: "testdata/Dockerfile.volumeexists",
+ },
+ {
+ Name: "multistage 1",
+ ContextDir: "testdata",
+ Dockerfile: "Dockerfile.multistage",
+ },
+ {
+ Name: "multistage reuse base",
+ ContextDir: "testdata",
+ Dockerfile: "Dockerfile.reusebase",
+ },
+ {
+ Name: "multistage 2",
+ ContextDir: "testdata/multistage",
+ Dockerfile: "Dockerfile",
+ },
+ {
+ Name: "multistage copy",
+ ContextDir: "testdata/copyfrom",
+ },
+ {
+ Name: "multistageconfiginheritance",
+ ContextDir: "testdata/multistage",
+ Dockerfile: "Dockerfile.env",
+ },
+ {
+ Name: "nonroot-USER-before-WORKDIR-used",
+ Version: docker.BuilderBuildKit,
+ ContextDir: "testdata/user-workdir",
+ Dockerfile: "Dockerfile.used",
+ },
+ {
+ Name: "nonroot-USER-before-WORKDIR-notused",
+ Version: docker.BuilderBuildKit,
+ ContextDir: "testdata/user-workdir",
+ Dockerfile: "Dockerfile.notused",
+ },
+ }
+
+ for i, test := range testCases {
+ t.Run(test.Name, func(t *testing.T) {
+ c, err := docker.NewClientFromEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+ conformanceTester(t, c, test, i, *compareLayers)
+ })
+ }
+}
+
+// TestConformanceExternal applies external repo testing that may be more expensive or
+// change more frequently.
+func TestConformanceExternal(t *testing.T) {
+ testCases := []conformanceTest{
+ {
+ Name: "ownership change under COPY",
+ // Tests user ownership change under COPY
+ Git: "https://github.com/openshift/ruby-hello-world.git",
+ },
+ {
+ Name: "dockerfile custom location",
+ // Tests Non-default location dockerfile
+ Dockerfile: "Dockerfile.build",
+ Git: "https://github.com/docker-library/hello-world.git",
+ PostClone: func(dir string) error {
+ return os.Remove(filepath.Join(dir, ".dockerignore"))
+ },
+ },
+ {
+ Name: "copy and env interaction",
+ // Tests COPY and other complex interactions of ENV
+ ContextDir: "14/alpine",
+ Dockerfile: "Dockerfile",
+ Git: "https://github.com/docker-library/postgres.git",
+ Ignore: []ignoreFunc{
+ func(a, b *tar.Header) bool {
+ switch {
+ case (a != nil) == (b != nil):
+ return false
+ case a != nil:
+ return strings.HasPrefix(a.Name, "etc/ssl/certs/")
+ case b != nil:
+ return strings.HasPrefix(b.Name, "etc/ssl/certs/")
+ default:
+ return false
+ }
+ },
+ },
+ },
+ }
+
+ for i, test := range testCases {
+ t.Run(test.Name, func(t *testing.T) {
+ c, err := docker.NewClientFromEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+ conformanceTester(t, c, test, i, *compareLayers)
+ })
+ }
+}
+
+func TestTransientMount(t *testing.T) {
+ c, err := docker.NewClientFromEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ e := NewClientExecutor(c)
+ defer func() {
+ for _, err := range e.Release() {
+ t.Errorf("%v", err)
+ }
+ }()
+
+ e.AllowPull = true
+ e.Directory = "testdata"
+ e.TransientMounts = []Mount{
+ {SourcePath: "testdata/dir", DestinationPath: "/mountdir"},
+ {SourcePath: "testdata/Dockerfile.env", DestinationPath: "/mountfile"},
+ }
+ e.Tag = fmt.Sprintf("conformance%d", rand.Int63())
+
+ defer e.removeImage(e.Tag)
+
+ out := &bytes.Buffer{}
+ e.Out = out
+ b := imagebuilder.NewBuilder(nil)
+ node, err := imagebuilder.ParseDockerfile(bytes.NewBufferString("FROM busybox\nRUN ls /mountdir/subdir\nRUN cat /mountfile\n"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := e.Build(b, node, ""); err != nil {
+ t.Fatalf("unable to build image: %v", err)
+ }
+ if !strings.Contains(out.String(), "ENV name=value\n") {
+ t.Errorf("did not find expected output:\n%s", out.String())
+ }
+ if !strings.Contains(out.String(), "file2\n") {
+ t.Errorf("did not find expected output:\n%s", out.String())
+ }
+
+ result, err := testContainerOutput(c, e.Tag, []string{"/bin/sh", "-c", "ls -al /mountdir"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if strings.Contains(result, "subdir") {
+ t.Errorf("did not find expected output:\n%s", result)
+ }
+ result, err = testContainerOutput(c, e.Tag, []string{"/bin/sh", "-c", "cat /mountfile"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if strings.Contains(result, "ENV name=value\n") {
+ t.Errorf("did not find expected output:\n%s", result)
+ }
+}
+
+func testContainerOutput(c *docker.Client, tag string, command []string) (string, error) {
+ container, err := c.CreateContainer(docker.CreateContainerOptions{
+ Name: tag + "-test",
+ Config: &docker.Config{
+ Image: tag,
+ Entrypoint: command,
+ Cmd: nil,
+ },
+ })
+ if err != nil {
+ return "", err
+ }
+ defer c.RemoveContainer(docker.RemoveContainerOptions{ID: container.ID})
+ if err := c.StartContainer(container.ID, nil); err != nil {
+ return "", err
+ }
+ code, err := c.WaitContainer(container.ID)
+ if err != nil {
+ return "", err
+ }
+ if code != 0 {
+ return "", fmt.Errorf("unrecognized exit code: %d", code)
+ }
+ out := &bytes.Buffer{}
+ if err := c.Logs(docker.LogsOptions{Container: container.ID, Stdout: true, OutputStream: out}); err != nil {
+ return "", fmt.Errorf("unable to get logs: %v", err)
+ }
+ return out.String(), nil
+}
+
+func conformanceTester(t *testing.T, c *docker.Client, test conformanceTest, i int, deep bool) {
+ dockerfile := test.Dockerfile
+ if len(dockerfile) == 0 {
+ dockerfile = "Dockerfile"
+ }
+ tmpDir, err := ioutil.TempDir("", "dockerbuild-conformance-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ dir := tmpDir
+ contextDir := filepath.Join(dir, test.ContextDir)
+ dockerfilePath := filepath.Join(dir, test.ContextDir, dockerfile)
+
+ // clone repo or copy the Dockerfile
+ var input string
+ switch {
+ case len(test.Git) > 0:
+ input = test.Git
+ cmd := exec.Command("git", "clone", test.Git, dir)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Errorf("unable to clone %q: %v\n%s", test.Git, err, out)
+ return
+ }
+
+ if test.PostClone != nil {
+ if err := test.PostClone(dir); err != nil {
+ t.Errorf("unable to fixup clone: %v", err)
+ return
+ }
+ }
+ dir = contextDir
+
+ case len(test.ContextDir) > 0:
+ hardlinks := new(hardlinkChecker)
+ if err := filepath.Walk(filepath.Join("", test.ContextDir), func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ dest := filepath.Join(dir, path)
+ if info.IsDir() {
+ if err := os.MkdirAll(dest, info.Mode()); err != nil {
+ return err
+ }
+ return os.Chtimes(dest, info.ModTime(), info.ModTime())
+ }
+ if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+ linkTarget, err := os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ return os.Symlink(linkTarget, dest)
+ }
+ if info.Mode().IsRegular() {
+ if hardlinkTarget, ok := hardlinks.Check(info, dest); ok {
+ return os.Link(hardlinkTarget, dest)
+ }
+ if _, err := fileutils.CopyFile(path, dest); err != nil {
+ return err
+ }
+ if err := os.Chmod(dest, info.Mode()&os.ModePerm); err != nil {
+ return err
+ }
+ return os.Chtimes(dest, info.ModTime(), info.ModTime())
+ }
+ return fmt.Errorf("%s: %w", dest, syscall.ENOTSUP)
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ contextDir = filepath.Join(dir, test.ContextDir)
+ dockerfilePath = filepath.Join(contextDir, "Dockerfile")
+ if len(test.Dockerfile) > 0 {
+ dockerfilePath = filepath.Join(contextDir, test.Dockerfile)
+ }
+ dir = contextDir
+ input = dockerfilePath
+
+ default:
+ dockerfilePath = filepath.Join(dir, "Dockerfile")
+ input = dockerfilePath
+ if _, err := fileutils.CopyFile(filepath.Join("", dockerfile), dockerfilePath); err != nil {
+ t.Fatal(err)
+ }
+ dockerfile = "Dockerfile"
+ dir = contextDir
+ }
+
+ // read the dockerfile
+ data, err := ioutil.ReadFile(dockerfilePath)
+ if err != nil {
+ t.Errorf("%d: unable to read Dockerfile %q: %v", i, input, err)
+ return
+ }
+ node, err := imagebuilder.ParseDockerfile(bytes.NewBuffer(data))
+ if err != nil {
+ t.Errorf("%d: can't parse Dockerfile %q: %v", i, input, err)
+ return
+ }
+ builder := imagebuilder.NewBuilder(nil)
+ stages, err := imagebuilder.NewStages(node, builder)
+ if err != nil {
+ t.Errorf("%d: error parsing Dockerfile %q: %v", i, input, err)
+ return
+ }
+ nameFormat := "conformance-dockerbuild-%d-%s-%d-%d"
+
+ var toDelete []string
+
+ ignoreSmallFileChange := func(a, b *tar.Header) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ diff := a.Size - b.Size
+ if differOnlyByFileSize(a, b, 10) {
+ t.Logf("WARNING: %s differs only in size by %d bytes, probably a timestamp value change", a.Name, diff)
+ return true
+ }
+ return false
+ }
+
+ dockerOut := &bytes.Buffer{}
+ imageOut := &bytes.Buffer{}
+ exclude, _ := imagebuilder.ParseDockerignore(contextDir)
+
+ if deep {
+		// dockerfileWithFrom returns the contents of a new Dockerfile with a different
+		// FROM as the first line, and any --from= arguments in COPY or ADD instructions
+		// replaced with the names of images that we expect to have created at the end
+		// of the stages that built them
+ dockerfileWithFrom := func(from string, steps []*parser.Node, currentStageIndex int) (string, error) {
+ lines := []string{}
+ lines = append(lines, fmt.Sprintf("FROM %s", from))
+ for _, step := range steps {
+ switch strings.ToUpper(step.Value) {
+ case strings.ToUpper(command.Add), strings.ToUpper(command.Copy):
+ line := strings.ToUpper(step.Value)
+ for _, flag := range step.Flags {
+ // replace --from=stageName|stageNumber with --from=stageFinalImage
+ if strings.HasPrefix(flag, "--from=") {
+ stageLabel := strings.TrimPrefix(flag, "--from=")
+ if b, ok := stages.ByName(stageLabel); ok {
+ otherStage := fmt.Sprintf(nameFormat, i, "docker", b.Position, len(b.Node.Children))
+ flag = "--from=" + otherStage
+ } else if stageIndex, err := strconv.Atoi(stageLabel); err == nil {
+ if stageIndex >= currentStageIndex {
+ return "", fmt.Errorf("%q references not-yet-built stage", step.Original)
+ }
+ b := stages[stageIndex]
+ otherStage := fmt.Sprintf(nameFormat, i, "docker", b.Position, len(b.Node.Children))
+ flag = "--from=" + otherStage
+ }
+ }
+ line = line + " " + flag
+ }
+ next := step.Next
+ for next != nil {
+ line = line + " " + next.Value
+ next = next.Next
+ }
+ lines = append(lines, line)
+ default:
+ lines = append(lines, step.Original)
+ }
+ }
+ return strings.Join(lines, "\n"), nil
+ }
+
+ // execute each stage on both Docker build and the direct
+ // builder, comparing as we go
+ for j := range stages {
+ // execute thru each step in this stage on both Docker
+ // build and the direct builder, comparing as we go
+ stageBase, err := builder.From(stages[j].Node)
+ if err != nil {
+ t.Fatalf("%d: %v", j, err)
+ }
+ // if the base is the result of a previous stage,
+ // resolve it to that stage's final image here
+ if b, ok := stages.ByName(stageBase); ok {
+ stageBase = fmt.Sprintf(nameFormat, i, "docker", b.Position, len(b.Node.Children))
+ }
+ steps := stages[j].Node.Children
+ for k := range steps {
+ // construct the Dockerfile
+ testFile, err := dockerfileWithFrom(stageBase, steps[0:k+1], j)
+ if err != nil {
+ t.Fatalf("%d: unable to reconstruct Dockerfile %q: %v", i, dockerfilePath, err)
+ }
+
+ nameDirect := fmt.Sprintf(nameFormat, i, "direct", j, k+1)
+ nameDocker := fmt.Sprintf(nameFormat, i, "docker", j, k+1)
+
+ // run docker build for this stage thru this step
+ if err := ioutil.WriteFile(dockerfilePath, []byte(testFile), 0600); err != nil {
+ t.Fatalf("%d: unable to update Dockerfile %q: %v", i, dockerfilePath, err)
+ }
+ in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}, ExcludePatterns: exclude})
+ if err != nil {
+ t.Fatalf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
+ }
+ var args []docker.BuildArg
+ for k, v := range test.Args {
+ args = append(args, docker.BuildArg{Name: k, Value: v})
+ }
+ if err := c.BuildImage(docker.BuildImageOptions{
+ Name: nameDocker,
+ Dockerfile: dockerfile,
+ RmTmpContainer: true,
+ ForceRmTmpContainer: true,
+ InputStream: in,
+ OutputStream: dockerOut,
+ BuildArgs: args,
+ NoCache: len(test.Output) > 0,
+ Version: test.Version,
+ }); err != nil {
+ in.Close()
+ data, _ := ioutil.ReadFile(dockerfilePath)
+					t.Fatalf("%d: unable to build Docker image %q: %v\n%s\n%s", i, input, err, string(data), dockerOut)
+ }
+ in.Close()
+ toDelete = append([]string{nameDocker}, toDelete...)
+
+ // run direct build of this stage thru this step
+ e := NewClientExecutor(c)
+ defer func() {
+ for _, err := range e.Release() {
+ t.Errorf("%v", err)
+ }
+ }()
+ e.Out, e.ErrOut = imageOut, imageOut
+ e.Directory = dir
+ e.Tag = nameDirect
+ b := imagebuilder.NewBuilder(test.Args)
+ node, err := imagebuilder.ParseDockerfile(bytes.NewBufferString(testFile))
+ if err != nil {
+ t.Fatalf("%d: %v", i, err)
+ }
+ if err := e.Build(b, node, ""); err != nil {
+					t.Fatalf("%d: failed to build through step %d/%d in dockerfile %q (%s): %v\n%s", i, j, k, dockerfilePath, steps[k].Original, err, imageOut)
+ }
+ toDelete = append([]string{nameDirect}, toDelete...)
+
+ // only compare filesystem on layers that change the filesystem
+ mutation := steps[k].Value == command.Add || steps[k].Value == command.Copy || steps[k].Value == command.Run
+ // metadata must be strictly equal
+ if !equivalentImages(
+ t, c, nameDocker, nameDirect, mutation,
+ metadataEqual,
+ append(ignoreFuncs{ignoreSmallFileChange}, test.Ignore...)...,
+ ) {
+ data, _ := ioutil.ReadFile(dockerfilePath)
+ t.Logf("Dockerfile:\n%s", data)
+ t.Fatalf("%d: layered Docker build was not equivalent to direct layer image metadata %s", i, input)
+ }
+ }
+ }
+ } else {
+ // run docker build
+ in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}, ExcludePatterns: exclude})
+ if err != nil {
+ t.Errorf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
+ return
+ }
+ stageSteps := stages[len(stages)-1].Node.Children
+ nameDocker := fmt.Sprintf(nameFormat, i, "docker", len(stages)-1, len(stageSteps))
+ var args []docker.BuildArg
+ for k, v := range test.Args {
+ args = append(args, docker.BuildArg{Name: k, Value: v})
+ }
+ if err := c.BuildImage(docker.BuildImageOptions{
+ Name: nameDocker,
+ Dockerfile: dockerfile,
+ RmTmpContainer: true,
+ ForceRmTmpContainer: true,
+ InputStream: in,
+ OutputStream: dockerOut,
+ BuildArgs: args,
+ NoCache: len(test.Output) > 0,
+ Version: test.Version,
+ }); err != nil {
+ in.Close()
+			t.Errorf("%d: unable to build Docker image %q: %v\n%s", i, input, err, dockerOut)
+ return
+ }
+ in.Close()
+ toDelete = append([]string{nameDocker}, toDelete...)
+
+ // run direct build
+ b := imagebuilder.NewBuilder(test.Args)
+ node, err := imagebuilder.ParseDockerfile(bytes.NewBuffer(data))
+ if err != nil {
+ t.Fatalf("%d: %v", i, err)
+ }
+ stages, err := imagebuilder.NewStages(node, b)
+ if err != nil {
+ t.Errorf("%v", err)
+ return
+ }
+ if len(stages) == 0 {
+ t.Error("parsing Dockerfile produced no stages")
+ return
+ }
+ nameDirect := fmt.Sprintf(nameFormat, i, "direct", len(stages)-1, len(stageSteps))
+ e := NewClientExecutor(c)
+ defer func() {
+ for _, err := range e.Release() {
+ t.Errorf("%v", err)
+ }
+ }()
+ e.Out, e.ErrOut = imageOut, imageOut
+ e.Directory = dir
+ e.Tag = nameDirect
+ lastExecutor, err := e.Stages(b, stages, "")
+ if err != nil {
+ t.Errorf("%v", err)
+ return
+ }
+ if err := lastExecutor.Commit(stages[len(stages)-1].Builder); err != nil {
+ t.Errorf("%d: failed to build complete image in %q: %v\n%s", i, input, err, imageOut)
+ } else {
+ toDelete = append([]string{nameDirect}, toDelete...)
+ if !equivalentImages(
+ t, c, nameDocker, nameDirect, true,
+ // metadata should be loosely equivalent, but because we squash and because of limitations
+ // in docker commit, there are some differences
+ metadataLayerEquivalent,
+ append(ignoreFuncs{
+ ignoreSmallFileChange,
+ // the direct dockerfile contains all steps, the layered image is synthetic from our previous
+ // test and so only contains the last layer
+ ignoreDockerfileSize(dockerfile),
+ }, test.Ignore...)...,
+ ) {
+ t.Errorf("%d: full Docker build was not equivalent to squashed image metadata %s", i, input)
+ }
+ }
+ }
+
+ badOutput := false
+ for _, re := range test.Output {
+ if !re.MatchString(dockerOut.String()) {
+ t.Errorf("Docker did not output %v", re)
+ badOutput = true
+ }
+ if !re.MatchString(imageOut.String()) {
+ t.Errorf("Imagebuilder did not output %v", re)
+ badOutput = true
+ }
+ }
+ if badOutput {
+ t.Logf("Output mismatch:\nDocker:\n---\n%s\n---\nImagebuilder:\n---\n%s\n---", hex.Dump(dockerOut.Bytes()), hex.Dump(imageOut.Bytes()))
+ }
+
+ for _, s := range toDelete {
+ c.RemoveImageExtended(s, docker.RemoveImageOptions{Force: true})
+ }
+}
+
+// ignoreFunc returns true if the difference between the two tar headers can be ignored
+type ignoreFunc func(a, b *tar.Header) bool
+
+type ignoreFuncs []ignoreFunc
+
+func (fns ignoreFuncs) Ignore(a, b *tar.Header) bool {
+ for _, fn := range fns {
+ if fn(a, b) {
+ return true
+ }
+ }
+ return false
+}
+
+// metadataFunc returns true if the metadata is equivalent
+type metadataFunc func(a, b *docker.Config) bool
+
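+// normalizeOutputMetadata removes known, benign differences in the metadata
+// reported by different docker server versions before the configs are
+// compared.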
+func normalizeOutputMetadata(a, b *docker.Config) {
+ // old docker servers can report no args escaped
+ if !a.ArgsEscaped && b.ArgsEscaped {
+ b.ArgsEscaped = false
+ }
+ if a.Entrypoint == nil && len(b.Entrypoint) == 0 {
+ // we are forced to set Entrypoint [] to reset the entrypoint
+ b.Entrypoint = nil
+ }
+ if len(a.Labels) == 0 && len(b.Labels) == 0 {
+ a.Labels = nil
+ b.Labels = nil
+ }
+ // Serialization of OnBuild is omitempty, which means it may be nil or empty depending on
+ // docker version
+ if len(a.OnBuild) == len(b.OnBuild) && len(a.OnBuild) == 0 {
+ b.OnBuild = a.OnBuild
+ }
+}
+
+// metadataEqual checks that the metadata of two images is directly equivalent.
+func metadataEqual(a, b *docker.Config) bool {
+ // compare output metadata
+ a.Image, b.Image = "", ""
+ a.Hostname, b.Hostname = "", ""
+ e1, e2 := envMap(a.Env), envMap(b.Env)
+ if !reflect.DeepEqual(e1, e2) {
+ return false
+ }
+ normalizeOutputMetadata(a, b)
+ a.Env, b.Env = nil, nil
+ if !reflect.DeepEqual(a, b) {
+ return false
+ }
+ return true
+}
+
+// metadataLayerEquivalent returns true if the last layer of a is equivalent to b, assuming
+// that b is squashed over multiple layers, and a is not. b, for instance, will have an empty
+// slice entrypoint, while a would have a nil entrypoint.
+func metadataLayerEquivalent(a, b *docker.Config) bool {
+ normalizeOutputMetadata(a, b)
+ if len(a.OnBuild) == 1 && len(b.OnBuild) > 0 && a.OnBuild[0] == b.OnBuild[len(b.OnBuild)-1] {
+ // a layered file will only contain the last OnBuild statement
+ b.OnBuild = a.OnBuild
+ }
+ return metadataEqual(a, b)
+}
+
+// equivalentImages executes the provided checks against two docker images,
+// returning true if the images are equivalent and recording a test error
+// otherwise.
+func equivalentImages(t *testing.T, c *docker.Client, a, b string, testFilesystem bool, metadataFn metadataFunc, ignoreFns ...ignoreFunc) bool {
+ imageA, err := c.InspectImage(a)
+ if err != nil {
+ t.Errorf("can't get image %q: %v", a, err)
+ return false
+ }
+ imageB, err := c.InspectImage(b)
+ if err != nil {
+ t.Errorf("can't get image %q: %v", b, err)
+ return false
+ }
+
+ if !metadataFn(imageA.Config, imageB.Config) {
+ t.Errorf("generated image metadata did not match (%s, %s):\n%#v\n%#v", a, b, imageA.Config, imageB.Config)
+ return false
+ }
+
+ // for mutation commands, check the layer diff
+ if testFilesystem {
+ differs, onlyA, onlyB, err := compareImageFS(c, a, b)
+ if err != nil {
+ t.Errorf("can't calculate FS differences %q: %v", a, err)
+ return false
+ }
+ for k, v := range differs {
+ if ignoreFuncs(ignoreFns).Ignore(v[0].Header, v[1].Header) {
+ delete(differs, k)
+ continue
+ }
+ t.Errorf("%s and %s differ at %s:\n%#v\n%#v", a, b, k, v[0].Header, v[1].Header)
+ }
+ for k, v := range onlyA {
+ if ignoreFuncs(ignoreFns).Ignore(v.Header, nil) {
+ delete(onlyA, k)
+ continue
+ }
+ }
+ for k, v := range onlyB {
+ if ignoreFuncs(ignoreFns).Ignore(nil, v.Header) {
+ delete(onlyB, k)
+ continue
+ }
+ }
+ if len(onlyA)+len(onlyB)+len(differs) > 0 {
+ t.Errorf("a(%s)=%v b(%s)=%v diff=%v", a, onlyA, b, onlyB, differs)
+ return false
+ }
+ }
+ return true
+}
+
+// envMap returns a map from a list of environment variables.
+func envMap(env []string) map[string]string {
+ out := make(map[string]string)
+ for _, envVar := range env {
+ parts := strings.SplitN(envVar, "=", 2)
+ if len(parts) != 2 {
+ out[envVar] = ""
+ continue
+ }
+ out[parts[0]] = parts[1]
+ }
+ return out
+}
+
+// differOnlyByFileSize returns true iff the headers differ only by size, and
+// the difference is less than within bytes.
+func differOnlyByFileSize(a, b *tar.Header, within int64) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if a.Size == b.Size {
+ return false
+ }
+
+ diff := a.Size - b.Size
+ if diff < 0 {
+ diff = diff * -1
+ }
+ if diff < within && a.Size != 0 && b.Size != 0 {
+ a.Size = b.Size
+ if reflect.DeepEqual(a, b) {
+ return true
+ }
+ }
+ return false
+}
+
+// ignoreDockerfileSize returns an ignoreFunc that ignores size differences in
+// the Dockerfile itself, which are an artifact of this test rewriting it.
+func ignoreDockerfileSize(dockerfile string) ignoreFunc {
+ return func(a, b *tar.Header) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if !strings.HasSuffix(a.Name, dockerfile) {
+ return false
+ }
+ if a.Size != b.Size {
+ a.Size = b.Size
+ return reflect.DeepEqual(a, b)
+ }
+ return false
+ }
+}
+
+// compareImageFS exports the filesystems of two images and returns maps of
+// files that differ in any way (modification time excluded), exist only in
+// image A, or exist only in image B.
+func compareImageFS(c *docker.Client, a, b string) (differ map[string][]tarHeader, onlyA, onlyB map[string]tarHeader, err error) {
+ fsA, err := imageFSMetadata(c, a)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ fsB, err := imageFSMetadata(c, b)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ differ = make(map[string][]tarHeader)
+ onlyA = make(map[string]tarHeader)
+ onlyB = fsB
+ for k, v1 := range fsA {
+ v2, ok := fsB[k]
+ if !ok {
+ onlyA[k] = v1
+ continue
+ }
+ delete(onlyB, k)
+ // we ignore modification time differences
+ v1.ModTime = time.Time{}
+ v2.ModTime = time.Time{}
+ if !reflect.DeepEqual(v1, v2) {
+ differ[k] = []tarHeader{v1, v2}
+ }
+ }
+ return differ, onlyA, onlyB, nil
+}
+
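+// tarHeader wraps *tar.Header so that the comparison maps print compactly.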
+type tarHeader struct {
+ *tar.Header
+}
+
+func (h tarHeader) String() string {
+ th := h.Header
+ if th == nil {
+ return "nil"
+ }
+ return fmt.Sprintf("<%d %s>", th.Size, th.FileInfo().Mode())
+}
+
+// imageFSMetadata creates a container from the named image and reads the
+// filesystem metadata out of its export archive.
+func imageFSMetadata(c *docker.Client, name string) (map[string]tarHeader, error) {
+ container, err := c.CreateContainer(docker.CreateContainerOptions{Name: name + "-export", Config: &docker.Config{Image: name}})
+ if err != nil {
+ return nil, err
+ }
+ defer c.RemoveContainer(docker.RemoveContainerOptions{ID: container.ID, RemoveVolumes: true, Force: true})
+
+ ch := make(chan struct{})
+ result := make(map[string]tarHeader)
+ r, w := io.Pipe()
+ go func() {
+ defer close(ch)
+ out := tar.NewReader(r)
+ for {
+ h, err := out.Next()
+ if err != nil {
+ if err == io.EOF {
+ w.Close()
+ } else {
+ w.CloseWithError(err)
+ }
+ break
+ }
+ result[h.Name] = tarHeader{h}
+ }
+ }()
+	if err := c.ExportContainer(docker.ExportContainerOptions{ID: container.ID, OutputStream: w}); err != nil {
+		// close the pipe so the header-reading goroutine does not block forever
+		w.CloseWithError(err)
+		return nil, err
+	}
+ <-ch
+ return result, nil
+}
+
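+// hardlinkChecker records the first path seen for each (device, inode) pair
+// so that later hardlinks to the same file can be recreated as links rather
+// than copied again.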
+type hardlinkChecker struct {
+ known map[hardlinkCheckerKey]string
+}
+
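+// Check returns the path previously recorded for the file identified by info,
+// if one exists; otherwise it records name as the path for future lookups.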
+func (h *hardlinkChecker) Check(info os.FileInfo, name string) (string, bool) {
+ if h.known == nil {
+ h.known = make(map[hardlinkCheckerKey]string)
+ }
+ key := h.makeHardlinkCheckerKey(info)
+ if key != nil {
+ if name, ok := h.known[*key]; ok {
+ return name, ok
+ }
+ h.known[*key] = name
+ }
+ return "", false
+}
diff --git a/dockerclient/conformance_unix_test.go b/dockerclient/conformance_unix_test.go
new file mode 100644
index 0000000..8bc1d82
--- /dev/null
+++ b/dockerclient/conformance_unix_test.go
@@ -0,0 +1,21 @@
+//go:build conformance && !windows
+// +build conformance,!windows
+
+package dockerclient
+
+import (
+ "os"
+ "syscall"
+)
+
+type hardlinkCheckerKey struct {
+ device, inode uint64
+}
+
+func (h *hardlinkChecker) makeHardlinkCheckerKey(info os.FileInfo) *hardlinkCheckerKey {
+ sys := info.Sys()
+ if stat, ok := sys.(*syscall.Stat_t); ok && (stat.Nlink > 1) {
+ return &hardlinkCheckerKey{device: uint64(stat.Dev), inode: uint64(stat.Ino)}
+ }
+ return nil
+}
diff --git a/dockerclient/conformance_windows_test.go b/dockerclient/conformance_windows_test.go
new file mode 100644
index 0000000..04b394a
--- /dev/null
+++ b/dockerclient/conformance_windows_test.go
@@ -0,0 +1,15 @@
+//go:build conformance && windows
+// +build conformance,windows
+
+package dockerclient
+
+import (
+ "os"
+)
+
+type hardlinkCheckerKey struct {
+}
+
+func (h *hardlinkChecker) makeHardlinkCheckerKey(info os.FileInfo) *hardlinkCheckerKey {
+ return nil
+}
diff --git a/dockerclient/copyinfo.go b/dockerclient/copyinfo.go
new file mode 100644
index 0000000..1ed02f4
--- /dev/null
+++ b/dockerclient/copyinfo.go
@@ -0,0 +1,181 @@
+package dockerclient
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+)
+
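+// CopyInfo describes one source item matched by an ADD or COPY instruction:
+// its file metadata, its path relative to the context root, and whether it
+// was selected via an explicit directory reference.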
+type CopyInfo struct {
+ os.FileInfo
+ Path string
+ Decompress bool // deprecated, is never set and is ignored
+ FromDir bool
+}
+
+// CalcCopyInfo identifies the source files selected by a Dockerfile ADD or COPY instruction.
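+// Wildcards in origPath are expanded relative to rootPath when allowWildcards
+// is true; a minimal sketch of the expected use (paths are illustrative):
+//
+//	infos, err := CalcCopyInfo("subdir/*", "testdata/dir", true)
+//	// with this repository's testdata, infos[0].Path == "subdir/file2"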
+func CalcCopyInfo(origPath, rootPath string, allowWildcards bool) ([]CopyInfo, error) {
+ explicitDir := origPath == "." || origPath == "/" || strings.HasSuffix(origPath, "/.") || strings.HasSuffix(origPath, "/")
+ // all CopyInfo resulting from this call will have FromDir set to explicitDir
+ infos, err := calcCopyInfo(origPath, rootPath, allowWildcards, explicitDir)
+ if err != nil {
+ return nil, err
+ }
+ return infos, nil
+}
+
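+// calcCopyInfo does the work for CalcCopyInfo, recursing with wildcards
+// disabled for each matched path so that literal wildcard characters in file
+// names are not expanded a second time.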
+func calcCopyInfo(origPath, rootPath string, allowWildcards, explicitDir bool) ([]CopyInfo, error) {
+ origPath = trimLeadingPath(origPath)
+ if !filepath.IsAbs(rootPath) {
+ rootPath = trimLeadingPath(rootPath)
+ }
+ // Deal with wildcards
+ if allowWildcards && containsWildcards(origPath) {
+ matchPath := filepath.Join(rootPath, origPath)
+ var copyInfos []CopyInfo
+ if err := filepath.Walk(rootPath, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.Name() == "" {
+			// defensively skip entries with an empty name; it is not clear
+			// when this can happen, but such entries cannot be matched
+ return nil
+ }
+ if match, _ := filepath.Match(matchPath, path); !match {
+ return nil
+ }
+
+ // Note we set allowWildcards to false in case the name has
+ // a * in it
+ subInfos, err := calcCopyInfo(trimLeadingPath(strings.TrimPrefix(path, rootPath)), rootPath, false, explicitDir)
+ if err != nil {
+ return err
+ }
+ copyInfos = append(copyInfos, subInfos...)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return copyInfos, nil
+ }
+
+ // Must be a dir or a file
+ fi, err := os.Stat(filepath.Join(rootPath, origPath))
+ if err != nil {
+ return nil, err
+ }
+
+ // flatten the root directory so we can rebase it
+ if origPath == "." {
+ if !fi.IsDir() {
+ // we want to mount a single file as input
+ return []CopyInfo{{FileInfo: fi, Path: origPath, FromDir: false}}, nil
+ }
+ var copyInfos []CopyInfo
+ infos, err := ioutil.ReadDir(rootPath)
+ if err != nil {
+ return nil, err
+ }
+ for _, info := range infos {
+ copyInfos = append(copyInfos, CopyInfo{FileInfo: info, Path: info.Name(), FromDir: explicitDir})
+ }
+ return copyInfos, nil
+ }
+
+ origPath = trimTrailingDot(origPath)
+ return []CopyInfo{{FileInfo: fi, Path: origPath, FromDir: explicitDir}}, nil
+}
+
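+// DownloadURL fetches src into a freshly created temporary directory under
+// tempDir and returns the CopyInfo for the downloaded file along with that
+// directory, which the caller must remove. A minimal usage sketch (the URL is
+// illustrative):
+//
+//	infos, tmpDir, err := DownloadURL("https://example.com/LICENSE", "LICENSE", "")
+//	if err == nil {
+//		defer os.RemoveAll(tmpDir)
+//	}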
+func DownloadURL(src, dst, tempDir string) ([]CopyInfo, string, error) {
+ // get filename from URL
+ u, err := url.Parse(src)
+ if err != nil {
+ return nil, "", err
+ }
+ base := path.Base(u.Path)
+ if base == "." {
+ return nil, "", fmt.Errorf("cannot determine filename from url: %s", u)
+ }
+
+ resp, err := http.Get(src)
+ if err != nil {
+ return nil, "", err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode >= 400 {
+ return nil, "", fmt.Errorf("server returned a status code >= 400: %s", resp.Status)
+ }
+
+ tmpDir, err := ioutil.TempDir(tempDir, "dockerbuildurl-")
+ if err != nil {
+ return nil, "", err
+ }
+ tmpFileName := filepath.Join(tmpDir, base)
+ tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if err != nil {
+ os.RemoveAll(tmpDir)
+ return nil, "", err
+ }
+ if _, err := io.Copy(tmpFile, resp.Body); err != nil {
+ os.RemoveAll(tmpDir)
+ return nil, "", err
+ }
+ if err := tmpFile.Close(); err != nil {
+ os.RemoveAll(tmpDir)
+ return nil, "", err
+ }
+ info, err := os.Stat(tmpFileName)
+ if err != nil {
+ os.RemoveAll(tmpDir)
+ return nil, "", err
+ }
+ return []CopyInfo{{FileInfo: info, Path: base}}, tmpDir, nil
+}
+
+func trimLeadingPath(origPath string) string {
+ // Work in daemon-specific OS filepath semantics
+ origPath = filepath.FromSlash(origPath)
+ if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
+ origPath = origPath[1:]
+ }
+ origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))
+ return origPath
+}
+
+func trimTrailingSlash(origPath string) string {
+ if origPath == "/" {
+ return origPath
+ }
+ return strings.TrimSuffix(origPath, "/")
+}
+
+func trimTrailingDot(origPath string) string {
+ if strings.HasSuffix(origPath, string(os.PathSeparator)+".") {
+ return strings.TrimSuffix(origPath, ".")
+ }
+ return origPath
+}
+
+// containsWildcards checks whether the provided name has a wildcard.
+func containsWildcards(name string) bool {
+ for i := 0; i < len(name); i++ {
+ ch := name[i]
+ if ch == '\\' {
+ i++
+ } else if ch == '*' || ch == '?' || ch == '[' {
+ return true
+ }
+ }
+ return false
+}
+
+// isURL returns true if the string appears to be a URL.
+func isURL(s string) bool {
+ return strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://")
+}
diff --git a/dockerclient/copyinfo_test.go b/dockerclient/copyinfo_test.go
new file mode 100644
index 0000000..5698ff6
--- /dev/null
+++ b/dockerclient/copyinfo_test.go
@@ -0,0 +1,266 @@
+package dockerclient
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+func TestCalcCopyInfo(t *testing.T) {
+ nilErr := func(err error) bool { return err == nil }
+ tests := []struct {
+ origPath string
+ rootPath string
+ dstPath string
+ allowWildcards bool
+ errFn func(err error) bool
+ paths map[string]struct{}
+ excludes []string
+ rebaseNames map[string]string
+ check map[string]bool
+ download bool
+ }{
+ {
+ origPath: "subdir/*",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{"subdir/file2": {}},
+ },
+ {
+ origPath: "*",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "Dockerfile": {},
+ "file": {},
+ "subdir": {},
+ },
+ },
+ {
+ origPath: ".",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "Dockerfile": {},
+ "file": {},
+ "subdir": {},
+ },
+ },
+ {
+ origPath: "/.",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "Dockerfile": {},
+ "file": {},
+ "subdir": {},
+ },
+ },
+ {
+ origPath: "subdir/",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "subdir/": {},
+ },
+ },
+ {
+ origPath: "subdir",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "subdir": {},
+ },
+ },
+ {
+ origPath: ".",
+ dstPath: "copy",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "file": {},
+ "Dockerfile": {},
+ "subdir": {},
+ },
+ rebaseNames: map[string]string{
+ "file": "copy/file",
+ "Dockerfile": "copy/Dockerfile",
+ "subdir": "copy/subdir",
+ },
+ },
+ {
+ origPath: ".",
+ dstPath: "copy",
+ rootPath: "testdata/singlefile",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "Dockerfile": {},
+ },
+ rebaseNames: map[string]string{
+ "Dockerfile": "copy/Dockerfile",
+ },
+ },
+ {
+ origPath: "Dockerfile",
+ dstPath: "copy",
+ rootPath: "testdata/singlefile",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "Dockerfile": {},
+ },
+ rebaseNames: map[string]string{
+ "Dockerfile": "copy",
+ },
+ },
+ {
+ origPath: "Dockerfile",
+ dstPath: "copy",
+ check: map[string]bool{"copy": true},
+ rootPath: "testdata/singlefile",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "Dockerfile": {},
+ },
+ rebaseNames: map[string]string{
+ "Dockerfile": "copy/Dockerfile",
+ },
+ },
+ {
+ origPath: "existing/",
+ dstPath: ".",
+ rootPath: "testdata/overlapdir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "existing/": {},
+ },
+ rebaseNames: map[string]string{
+ "existing": ".",
+ },
+ },
+ {
+ origPath: "existing",
+ dstPath: ".",
+ rootPath: "testdata/overlapdir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "existing": {},
+ },
+ rebaseNames: map[string]string{
+ "existing": ".",
+ },
+ },
+ {
+ origPath: "existing",
+ dstPath: "/",
+ rootPath: "testdata/overlapdir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "existing": {},
+ },
+ rebaseNames: map[string]string{
+ "existing": "/",
+ },
+ },
+ {
+ origPath: "subdir/.",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "subdir/": {},
+ },
+ },
+ {
+ origPath: "testdata/dir/subdir/.",
+ rootPath: "",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "testdata/dir/subdir/": {},
+ },
+ },
+ {
+ origPath: "subdir/",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "subdir/": {},
+ },
+ },
+ {
+ origPath: "subdir/",
+ rootPath: "testdata/dir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "subdir/": {},
+ },
+ dstPath: "test/",
+ rebaseNames: map[string]string{
+ "subdir": "test",
+ },
+ },
+ {
+ origPath: "dir",
+ dstPath: "/dir",
+ check: map[string]bool{"dir": false},
+ rootPath: "testdata/copydir",
+ allowWildcards: true,
+ errFn: nilErr,
+ paths: map[string]struct{}{
+ "dir": {},
+ },
+ rebaseNames: map[string]string{
+ "dir": "dir",
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ infos, err := CalcCopyInfo(test.origPath, test.rootPath, test.allowWildcards)
+ if !test.errFn(err) {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if err != nil {
+ return
+ }
+ expect := make(map[string]struct{})
+ for k := range test.paths {
+ expect[k] = struct{}{}
+ }
+ for _, info := range infos {
+ if _, ok := expect[info.Path]; ok {
+ delete(expect, info.Path)
+ } else {
+ t.Errorf("did not expect path %s", info.Path)
+ }
+ }
+ if len(expect) > 0 {
+ t.Errorf("did not see paths: %#v", expect)
+ }
+
+ options, err := archiveOptionsFor("", infos, test.dstPath, test.excludes, test.download, testDirectoryCheck(test.check))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(test.rebaseNames, options.RebaseNames) {
+ t.Errorf("rebase names did not match:\n%#v\n%#v", test.rebaseNames, options.RebaseNames)
+ }
+ })
+ }
+}
diff --git a/dockerclient/directory.go b/dockerclient/directory.go
new file mode 100644
index 0000000..4a9364b
--- /dev/null
+++ b/dockerclient/directory.go
@@ -0,0 +1,87 @@
+package dockerclient
+
+import (
+ "archive/tar"
+ "context"
+ "io"
+ "io/ioutil"
+
+ docker "github.com/fsouza/go-dockerclient"
+ "k8s.io/klog"
+)
+
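+// DirectoryCheck reports whether a path inside a container refers to a
+// directory.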
+type DirectoryCheck interface {
+ IsDirectory(path string) (bool, error)
+}
+
+type directoryCheck struct {
+ containerID string
+ client *docker.Client
+}
+
+func newDirectoryCheck(client *docker.Client, containerID string) *directoryCheck {
+ return &directoryCheck{
+ containerID: containerID,
+ client: client,
+ }
+}
+
+func (c *directoryCheck) IsDirectory(path string) (bool, error) {
+ if path == "/" || path == "." || path == "./" {
+ return true, nil
+ }
+
+ dir, err := isContainerPathDirectory(c.client, c.containerID, path)
+ if err != nil {
+ return false, err
+ }
+
+ return dir, nil
+}
+
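+// isContainerPathDirectory streams path from the container as a tar archive,
+// reads only the first header, then cancels the download and reports whether
+// that header describes a directory. A 404 from the daemon is treated as
+// "not a directory" rather than as an error.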
+func isContainerPathDirectory(client *docker.Client, containerID, path string) (bool, error) {
+ pr, pw := io.Pipe()
+ defer pw.Close()
+ ctx, cancel := context.WithCancel(context.TODO())
+ go func() {
+ err := client.DownloadFromContainer(containerID, docker.DownloadFromContainerOptions{
+ OutputStream: pw,
+ Path: path,
+ Context: ctx,
+ })
+ if err != nil {
+ if apiErr, ok := err.(*docker.Error); ok && apiErr.Status == 404 {
+ klog.V(4).Infof("path %s did not exist in container %s: %v", path, containerID, err)
+ err = nil
+ }
+ if err != nil && err != context.Canceled {
+ klog.V(6).Infof("error while checking directory contents for container %s at path %s: %v", containerID, path, err)
+ }
+ }
+ pw.CloseWithError(err)
+ }()
+
+ tr := tar.NewReader(pr)
+
+ h, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ cancel()
+ return false, err
+ }
+
+ klog.V(4).Infof("Retrieved first header from container %s at path %s: %#v", containerID, path, h)
+
+ // take the remainder of the input and discard it
+ go func() {
+ cancel()
+ n, err := io.Copy(ioutil.Discard, pr)
+ if n > 0 || err != nil {
+ klog.V(6).Infof("Discarded %d bytes from end of container directory check, and got error: %v", n, err)
+ }
+ }()
+
+ return h.FileInfo().IsDir(), nil
+}
diff --git a/dockerclient/testdata/Dockerfile.add b/dockerclient/testdata/Dockerfile.add
new file mode 100644
index 0000000..f78180e
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.add
@@ -0,0 +1,11 @@
+FROM busybox
+ADD https://github.com/openshift/origin/raw/master/README.md README.md
+USER 1001
+ADD https://github.com/openshift/origin/raw/master/LICENSE .
+ADD https://github.com/openshift/origin/raw/master/LICENSE A
+ADD https://github.com/openshift/origin/raw/master/LICENSE ./a
+USER root
+RUN mkdir ./b
+ADD https://github.com/openshift/origin/raw/master/LICENSE ./b/a
+ADD https://github.com/openshift/origin/raw/master/LICENSE ./b/.
+ADD https://github.com/openshift/ruby-hello-world/archive/master.zip /tmp/
diff --git a/dockerclient/testdata/Dockerfile.args b/dockerclient/testdata/Dockerfile.args
new file mode 100644
index 0000000..64083ac
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.args
@@ -0,0 +1,7 @@
+FROM busybox
+
+ENV FOO="value" TEST=$BAR
+LABEL test="$FOO"
+ARG BAR
+ENV BAZ=$BAR
+RUN echo $BAR \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.badhealthcheck b/dockerclient/testdata/Dockerfile.badhealthcheck
new file mode 100644
index 0000000..bb7903d
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.badhealthcheck
@@ -0,0 +1,2 @@
+FROM debian
+HEALTHCHECK
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_1 b/dockerclient/testdata/Dockerfile.copyfrom_1
new file mode 100644
index 0000000..9ee8997
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_1
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN touch /a /b
+FROM busybox
+COPY --from=base /a /
+RUN ls -al /a \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_10 b/dockerclient/testdata/Dockerfile.copyfrom_10
new file mode 100644
index 0000000..986e67e
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_10
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a && touch /a/1
+FROM busybox
+COPY --from=base /a/1 /a/b/c
+RUN ls -al /a/b/c && ! ls -al /a/b/1 \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_11 b/dockerclient/testdata/Dockerfile.copyfrom_11
new file mode 100644
index 0000000..bfc8101
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_11
@@ -0,0 +1,6 @@
+FROM busybox as base
+RUN mkdir -p /a && touch /a/1
+RUN ln -s /a /b
+FROM busybox
+COPY --from=base /b/1 /a/b/c
+RUN ls -al /a/b/c && ! ls -al /a/b/1
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_12 b/dockerclient/testdata/Dockerfile.copyfrom_12
new file mode 100644
index 0000000..af06722
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_12
@@ -0,0 +1,6 @@
+FROM busybox as base
+RUN mkdir -p /a && touch /a/1
+RUN ln -s a /c
+FROM busybox
+COPY --from=base /c/1 /a/b/c
+RUN ls -al /a/b/c && ! ls -al /a/b/1
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_13 b/dockerclient/testdata/Dockerfile.copyfrom_13
new file mode 100644
index 0000000..42c5db8
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_13
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN touch /a
+FROM busybox
+COPY --from=0 /a /
+RUN ls -al /a
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_14 b/dockerclient/testdata/Dockerfile.copyfrom_14
new file mode 100644
index 0000000..9df1cb6
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_14
@@ -0,0 +1,5 @@
+FROM busybox
+RUN touch /a
+FROM busybox
+COPY --from=0 /a /
+RUN ls -al /a
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_2 b/dockerclient/testdata/Dockerfile.copyfrom_2
new file mode 100644
index 0000000..11f7f86
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_2
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN touch /a
+FROM busybox
+COPY --from=base /a /a
+RUN ls -al /a \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_3 b/dockerclient/testdata/Dockerfile.copyfrom_3
new file mode 100644
index 0000000..1e3e701
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_3
@@ -0,0 +1,6 @@
+FROM busybox as base
+RUN touch /a
+FROM busybox
+WORKDIR /b
+COPY --from=base /a .
+RUN ls -al /b/a \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_4 b/dockerclient/testdata/Dockerfile.copyfrom_4
new file mode 100644
index 0000000..e00201f
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_4
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a/b && touch /a/b/1 /a/b/2
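+// Ignore reports whether any function in the list considers the difference
+// between a and b ignorable.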
+FROM busybox
+COPY --from=base /a/b/ /b/
+RUN ls -al /b/1 /b/2 /b && ! ls -al /a
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_5 b/dockerclient/testdata/Dockerfile.copyfrom_5
new file mode 100644
index 0000000..c5e298d
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_5
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a/b && touch /a/b/1 /a/b/2
+FROM busybox
+COPY --from=base /a/b/* /b/
+RUN ls -al /b/1 /b/2 /b && ! ls -al /a /b/a /b/b
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_6 b/dockerclient/testdata/Dockerfile.copyfrom_6
new file mode 100644
index 0000000..0800646
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_6
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a/b && touch /a/b/1 /a/b/2
+FROM busybox
+COPY --from=base /a/b/. /b/
+RUN ls -al /b/1 /b/2 /b && ! ls -al /a /b/a /b/b
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_7 b/dockerclient/testdata/Dockerfile.copyfrom_7
new file mode 100644
index 0000000..6ee21bc
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_7
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN touch /b
+FROM busybox
+COPY --from=base /b /a
+RUN ls -al /a && ! ls -al /b \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_8 b/dockerclient/testdata/Dockerfile.copyfrom_8
new file mode 100644
index 0000000..47c7059
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_8
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a && touch /a/b
+FROM busybox
+COPY --from=base /a/b /a
+RUN ls -al /a && ! ls -al /b \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.copyfrom_9 b/dockerclient/testdata/Dockerfile.copyfrom_9
new file mode 100644
index 0000000..a8a83ce
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.copyfrom_9
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a && touch /a/1
+FROM busybox
+COPY --from=base /a/1 /a/b/c/
+RUN ls -al /a/b/c/1 && ! ls -al /a/b/1 \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.edgecases b/dockerclient/testdata/Dockerfile.edgecases
new file mode 100644
index 0000000..b811120
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.edgecases
@@ -0,0 +1,48 @@
+FROM busybox
+
+MAINTAINER docker <docker@docker.io>
+
+ONBUILD RUN ["echo", "test"]
+ONBUILD RUN echo test
+ONBUILD COPY . /
+
+
+# RUN Commands \
+# linebreak in comment \
+RUN ["ls", "-la"]
+RUN ["echo", "'1234'"]
+RUN echo "1234"
+RUN echo 1234
+RUN echo '1234' && \
+ echo "456" && \
+ echo 789
+RUN sh -c 'echo root:testpass \
+ > /tmp/passwd'
+RUN mkdir -p /test /test2 /test3/test
+
+# ENV \
+ENV SCUBA 1 DUBA 3
+ENV SCUBA "1 DUBA 3"
+
+# CMD \
+CMD ["echo", "test"]
+CMD echo test
+CMD echo "test"
+CMD echo 'test'
+CMD echo 'test' | wc -
+
+#EXPOSE\
+EXPOSE 3000
+EXPOSE 9000 5000 6000
+
+USER docker
+USER docker:root
+
+VOLUME ["/test"]
+VOLUME ["/test", "/test2"]
+VOLUME /test3
+
+WORKDIR /test
+
+ADD . /
+COPY . copy \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.env b/dockerclient/testdata/Dockerfile.env
new file mode 100644
index 0000000..3aa5be5
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.env
@@ -0,0 +1,22 @@
+FROM busybox
+ENV name value
+ENV name=value
+ENV name=value name2=value2
+ENV name="value value1"
+ENV name=value\ value2
+ENV name="value'quote space'value2"
+ENV name='value"double quote"value2'
+ENV name=value\ value2 name2=value2\ value3
+ENV name="a\"b"
+ENV name="a\'b"
+ENV name='a\'b''
+ENV name='a\"b'
+ENV name="''"
+# don't put anything after the next line - it must be the last line of the
+# Dockerfile and it must end with \
+ENV name=value \
+ name1=value1 \
+ name2="value2a \
+ value2b" \
+ name3="value3a\n\"value3b\"" \
+ name4="value4a\\nvalue4b" \ \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.envargconflict b/dockerclient/testdata/Dockerfile.envargconflict
new file mode 100644
index 0000000..762a820
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.envargconflict
@@ -0,0 +1,8 @@
+FROM ubuntu:18.04
+# The ARG should be ignored due to the
+# conflict with the ENV declaration.
+ARG USER_NAME=my_user_arg
+ENV USER_NAME=my_user_env
+RUN useradd -r -s /bin/false -m -d /home/${USER_NAME} ${USER_NAME}
+USER ${USER_NAME}
+WORKDIR /home/${USER_NAME}
diff --git a/dockerclient/testdata/Dockerfile.envsubst b/dockerclient/testdata/Dockerfile.envsubst
new file mode 100644
index 0000000..85741ce
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.envsubst
@@ -0,0 +1,4 @@
+FROM busybox
+
+ENV FOO="value"
+LABEL test="$FOO"
diff --git a/dockerclient/testdata/Dockerfile.escape b/dockerclient/testdata/Dockerfile.escape
new file mode 100644
index 0000000..12af91c
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.escape
@@ -0,0 +1,2 @@
+FROM busybox
+RUN stat -c %u / \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.exposedefault b/dockerclient/testdata/Dockerfile.exposedefault
new file mode 100644
index 0000000..d3d9862
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.exposedefault
@@ -0,0 +1,2 @@
+FROM busybox
+EXPOSE 3469 \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.healthcheck b/dockerclient/testdata/Dockerfile.healthcheck
new file mode 100644
index 0000000..97cea60
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.healthcheck
@@ -0,0 +1,7 @@
+FROM debian
+CMD /app/main.sh
+HEALTHCHECK CMD a b
+HEALTHCHECK --timeout=3s CMD ["foo"]
+HEALTHCHECK --start-period=8s --interval=5s --timeout=3s --retries=3 \
+ CMD /app/check.sh --quiet
+
diff --git a/dockerclient/testdata/Dockerfile.mount b/dockerclient/testdata/Dockerfile.mount
new file mode 100644
index 0000000..6417ece
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.mount
@@ -0,0 +1,2 @@
+FROM busybox
+RUN stat -c "%s %n %a %F %g %u" /tmp/test/*
diff --git a/dockerclient/testdata/Dockerfile.multiarg b/dockerclient/testdata/Dockerfile.multiarg
new file mode 100644
index 0000000..1d8567d
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.multiarg
@@ -0,0 +1,4 @@
+FROM alpine
+ARG multivalarg="a=1 b=2 c=3 d=4"
+ENV multival="${multivalarg}"
+RUN echo $multival
diff --git a/dockerclient/testdata/Dockerfile.multistage b/dockerclient/testdata/Dockerfile.multistage
new file mode 100644
index 0000000..638fd33
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.multistage
@@ -0,0 +1,24 @@
+FROM alpine as multistagebase
+COPY multistage/dir/a.txt /
+WORKDIR /tmp
+RUN touch /base.txt tmp.txt
+
+FROM multistagebase as second
+COPY dir/file /
+RUN touch /second.txt
+
+FROM alpine
+COPY --from=1 /second.txt /third.txt
+
+FROM alpine
+COPY --from=2 /third.txt /fourth.txt
+
+FROM alpine
+COPY --from=multistagebase /base.txt /fifth.txt
+COPY --from=multistagebase ./tmp/tmp.txt /tmp.txt
+# "golang" has a default working directory of /go, and /go/src is a directory
+COPY --from=golang go/src /src
+
+FROM multistagebase as final
+COPY copy/script /
+RUN touch /final.txt
diff --git a/dockerclient/testdata/Dockerfile.novolume b/dockerclient/testdata/Dockerfile.novolume
new file mode 100644
index 0000000..ed098b3
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.novolume
@@ -0,0 +1,5 @@
+FROM busybox
+RUN rm -fr /var/lib/not-in-this-image
+VOLUME /var/lib/not-in-this-image
+RUN mkdir -p /var/lib
+RUN touch /var/lib/file-not-in-image
diff --git a/dockerclient/testdata/Dockerfile.novolumenorun b/dockerclient/testdata/Dockerfile.novolumenorun
new file mode 100644
index 0000000..4cc7853
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.novolumenorun
@@ -0,0 +1,3 @@
+FROM busybox
+RUN rm -fr /var/lib/not-in-this-image
+VOLUME /var/lib/not-in-this-image
diff --git a/dockerclient/testdata/Dockerfile.noworkdir b/dockerclient/testdata/Dockerfile.noworkdir
new file mode 100644
index 0000000..6d1e22f
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.noworkdir
@@ -0,0 +1,4 @@
+FROM busybox
+WORKDIR /foo
+VOLUME [ "/foo" ]
+RUN echo
diff --git a/dockerclient/testdata/Dockerfile.reusebase b/dockerclient/testdata/Dockerfile.reusebase
new file mode 100644
index 0000000..5e90316
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.reusebase
@@ -0,0 +1,6 @@
+FROM centos:7 AS base
+RUN touch /1
+ENV LOCAL=/1
+
+FROM base
+RUN find $LOCAL \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.run.args b/dockerclient/testdata/Dockerfile.run.args
new file mode 100644
index 0000000..e09926c
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.run.args
@@ -0,0 +1,5 @@
+FROM busybox
+RUN echo first second
+RUN /bin/echo third fourth
+RUN ["/bin/echo", "fifth", "sixth"]
+RUN ["/bin/sh", "-c", "echo inner $1", "", "outer"] \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.shell b/dockerclient/testdata/Dockerfile.shell
new file mode 100644
index 0000000..514c7c9
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.shell
@@ -0,0 +1,3 @@
+FROM centos:7
+SHELL ["/bin/bash", "-xc"]
+RUN env \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.target b/dockerclient/testdata/Dockerfile.target
new file mode 100644
index 0000000..fe19862
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.target
@@ -0,0 +1,8 @@
+FROM ubuntu:latest
+RUN touch /1
+
+FROM alpine:latest AS mytarget
+RUN touch /2
+
+FROM busybox:latest AS mytarget2
+RUN touch /3
diff --git a/dockerclient/testdata/Dockerfile.unknown b/dockerclient/testdata/Dockerfile.unknown
new file mode 100644
index 0000000..5b856ac
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.unknown
@@ -0,0 +1,3 @@
+FROM busybox
+HEALTH NONE
+UNRECOGNIZED \ No newline at end of file
diff --git a/dockerclient/testdata/Dockerfile.unset b/dockerclient/testdata/Dockerfile.unset
new file mode 100644
index 0000000..fa122cd
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.unset
@@ -0,0 +1,5 @@
+FROM busybox
+
+ARG FOO
+ENV FOO=${FOO:?}
+LABEL test="$FOO"
diff --git a/dockerclient/testdata/Dockerfile.volumeexists b/dockerclient/testdata/Dockerfile.volumeexists
new file mode 100644
index 0000000..91d30fb
--- /dev/null
+++ b/dockerclient/testdata/Dockerfile.volumeexists
@@ -0,0 +1,5 @@
+FROM busybox
+RUN mkdir -p -m 0700 /var/lib/bespoke-directory
+RUN chown 1:1 /var/lib/bespoke-directory
+VOLUME /var/lib/bespoke-directory
+RUN touch /var/lib/bespoke-directory/emptyfile
diff --git a/dockerclient/testdata/add/Dockerfile b/dockerclient/testdata/add/Dockerfile
new file mode 100644
index 0000000..4b28683
--- /dev/null
+++ b/dockerclient/testdata/add/Dockerfile
@@ -0,0 +1,18 @@
+FROM centos:7
+ADD archived.txt /archived.txt
+ADD archived.txt /archived/
+ADD archived.tar /archived.tar
+ADD archived.tar /archived-tar/
+ADD archived.tar.gz /archived.tar.gz
+ADD archived.tar.gz /archived-gz/
+ADD archived.tar.bz2 /archived.tar.bz2
+ADD archived.tar.bz2 /archived-bz2/
+ADD archived.tar.xz /archived-xz/
+ADD archived.tar.xz /archived.tar.xz
+ADD archived.txt archived.tar.xz archived.tar.bz2 /archived-mixed/
+ADD archived.txt /archived.tar.xz ./archived.tar.bz2 /archived-mixed-path-variations/
+ADD archived.txt* /archived-globbed-plain/
+ADD archived.tar.gz* /archived-globbed-gz/
+ADD archived.tar.bz2* /archived-globbed-bz2/
+ADD archived.tar.xz* /archived-globbed-xz/
+ADD archived.* /archived-globbed/
diff --git a/dockerclient/testdata/add/Dockerfile.addall b/dockerclient/testdata/add/Dockerfile.addall
new file mode 100644
index 0000000..d4f32fc
--- /dev/null
+++ b/dockerclient/testdata/add/Dockerfile.addall
@@ -0,0 +1,2 @@
+FROM centos:7
+ADD . .
diff --git a/dockerclient/testdata/add/Dockerfile.addslash b/dockerclient/testdata/add/Dockerfile.addslash
new file mode 100644
index 0000000..89a4581
--- /dev/null
+++ b/dockerclient/testdata/add/Dockerfile.addslash
@@ -0,0 +1,2 @@
+FROM centos:7
+ADD / /
diff --git a/dockerclient/testdata/add/Dockerfile.copy b/dockerclient/testdata/add/Dockerfile.copy
new file mode 100644
index 0000000..177ba85
--- /dev/null
+++ b/dockerclient/testdata/add/Dockerfile.copy
@@ -0,0 +1,13 @@
+FROM centos:7
+COPY archived.txt /archived/
+COPY archived.txt /archived.txt
+COPY archived.tar /archived-tar/
+COPY archived.tar /archived.tar
+COPY archived.tar.gz /archived-gz/
+COPY archived.tar.gz /archived.tar.gz
+COPY archived.tar.bz2 /archived-bz2/
+COPY archived.tar.bz2 /archived.tar.bz2
+COPY archived.tar.xz /archived-xz/
+COPY archived.tar.xz /archived.tar.xz
+COPY archived.txt archived.tar.xz archived.tar.bz2 /archived-mixed/
+COPY archived.txt /archived.tar.xz ./archived.tar.bz2 /archived-mixed-path-variations/
diff --git a/dockerclient/testdata/add/archived-bz2.txt b/dockerclient/testdata/add/archived-bz2.txt
new file mode 100644
index 0000000..ef8fee6
--- /dev/null
+++ b/dockerclient/testdata/add/archived-bz2.txt
@@ -0,0 +1 @@
+help, i've been archived
diff --git a/dockerclient/testdata/add/archived-gz.txt b/dockerclient/testdata/add/archived-gz.txt
new file mode 100644
index 0000000..ef8fee6
--- /dev/null
+++ b/dockerclient/testdata/add/archived-gz.txt
@@ -0,0 +1 @@
+help, i've been archived
diff --git a/dockerclient/testdata/add/archived-xz.txt b/dockerclient/testdata/add/archived-xz.txt
new file mode 100644
index 0000000..ef8fee6
--- /dev/null
+++ b/dockerclient/testdata/add/archived-xz.txt
@@ -0,0 +1 @@
+help, i've been archived
diff --git a/dockerclient/testdata/add/archived.tar b/dockerclient/testdata/add/archived.tar
new file mode 100644
index 0000000..9222eec
--- /dev/null
+++ b/dockerclient/testdata/add/archived.tar
Binary files differ
diff --git a/dockerclient/testdata/add/archived.tar.bz2 b/dockerclient/testdata/add/archived.tar.bz2
new file mode 100644
index 0000000..e5707e4
--- /dev/null
+++ b/dockerclient/testdata/add/archived.tar.bz2
Binary files differ
diff --git a/dockerclient/testdata/add/archived.tar.gz b/dockerclient/testdata/add/archived.tar.gz
new file mode 100644
index 0000000..f534269
--- /dev/null
+++ b/dockerclient/testdata/add/archived.tar.gz
Binary files differ
diff --git a/dockerclient/testdata/add/archived.tar.xz b/dockerclient/testdata/add/archived.tar.xz
new file mode 100644
index 0000000..4c11f04
--- /dev/null
+++ b/dockerclient/testdata/add/archived.tar.xz
Binary files differ
diff --git a/dockerclient/testdata/add/archived.txt b/dockerclient/testdata/add/archived.txt
new file mode 100644
index 0000000..ef8fee6
--- /dev/null
+++ b/dockerclient/testdata/add/archived.txt
@@ -0,0 +1 @@
+help, i've been archived
diff --git a/dockerclient/testdata/copy/Dockerfile b/dockerclient/testdata/copy/Dockerfile
new file mode 100644
index 0000000..815de49
--- /dev/null
+++ b/dockerclient/testdata/copy/Dockerfile
@@ -0,0 +1,3 @@
+FROM centos:7
+COPY script /usr/bin
+RUN ls -al /usr/bin/script \ No newline at end of file
diff --git a/dockerclient/testdata/copy/script b/dockerclient/testdata/copy/script
new file mode 100644
index 0000000..c3c3f3f
--- /dev/null
+++ b/dockerclient/testdata/copy/script
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 0 \ No newline at end of file
diff --git a/dockerclient/testdata/copyblahblub/Dockerfile b/dockerclient/testdata/copyblahblub/Dockerfile
new file mode 100644
index 0000000..3c64a21
--- /dev/null
+++ b/dockerclient/testdata/copyblahblub/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+COPY firstdir/seconddir /var
+RUN ls -la /var
+RUN ls -la /var/dir-a
diff --git a/dockerclient/testdata/copyblahblub/Dockerfile2 b/dockerclient/testdata/copyblahblub/Dockerfile2
new file mode 100644
index 0000000..688b352
--- /dev/null
+++ b/dockerclient/testdata/copyblahblub/Dockerfile2
@@ -0,0 +1,4 @@
+FROM busybox
+COPY /firstdir/seconddir /var
+RUN ls -la /var
+RUN ls -la /var/dir-a
diff --git a/dockerclient/testdata/copyblahblub/Dockerfile3 b/dockerclient/testdata/copyblahblub/Dockerfile3
new file mode 100644
index 0000000..2ef7755
--- /dev/null
+++ b/dockerclient/testdata/copyblahblub/Dockerfile3
@@ -0,0 +1,4 @@
+FROM busybox
+COPY /firstdir/seconddir/ /var
+RUN ls -la /var
+RUN ls -la /var/dir-a
diff --git a/dockerclient/testdata/copyblahblub/firstdir/seconddir/dir-a/file-a b/dockerclient/testdata/copyblahblub/firstdir/seconddir/dir-a/file-a
new file mode 100644
index 0000000..2f76e89
--- /dev/null
+++ b/dockerclient/testdata/copyblahblub/firstdir/seconddir/dir-a/file-a
@@ -0,0 +1 @@
+file-a
diff --git a/dockerclient/testdata/copyblahblub/firstdir/seconddir/dir-b/file-b b/dockerclient/testdata/copyblahblub/firstdir/seconddir/dir-b/file-b
new file mode 100644
index 0000000..3b8ef5a
--- /dev/null
+++ b/dockerclient/testdata/copyblahblub/firstdir/seconddir/dir-b/file-b
@@ -0,0 +1 @@
+file-b
diff --git a/dockerclient/testdata/copychmod/Dockerfile b/dockerclient/testdata/copychmod/Dockerfile
new file mode 100644
index 0000000..632f4bd
--- /dev/null
+++ b/dockerclient/testdata/copychmod/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+ADD --chown=0:0 --chmod=0755 file /
+ADD --chown=0:0 --chmod=644 file2 /
+ADD --chown=0:0 --chmod=7755 file3 /
diff --git a/dockerclient/testdata/copychmod/file b/dockerclient/testdata/copychmod/file
new file mode 100644
index 0000000..1a010b1
--- /dev/null
+++ b/dockerclient/testdata/copychmod/file
@@ -0,0 +1 @@
+file \ No newline at end of file
diff --git a/dockerclient/testdata/copychmod/file2 b/dockerclient/testdata/copychmod/file2
new file mode 100644
index 0000000..30d67d4
--- /dev/null
+++ b/dockerclient/testdata/copychmod/file2
@@ -0,0 +1 @@
+file2 \ No newline at end of file
diff --git a/dockerclient/testdata/copychmod/file3 b/dockerclient/testdata/copychmod/file3
new file mode 100644
index 0000000..873fb8d
--- /dev/null
+++ b/dockerclient/testdata/copychmod/file3
@@ -0,0 +1 @@
+file3 \ No newline at end of file
diff --git a/dockerclient/testdata/copychown/Dockerfile b/dockerclient/testdata/copychown/Dockerfile
new file mode 100644
index 0000000..6d7649f
--- /dev/null
+++ b/dockerclient/testdata/copychown/Dockerfile
@@ -0,0 +1,20 @@
+FROM centos:7
+COPY --chown=1:2 script /usr/bin/script.12
+COPY --chown=1:adm script /usr/bin/script.1-adm
+COPY --chown=1 script /usr/bin/script.1
+COPY --chown=lp:adm script /usr/bin/script.lp-adm
+COPY --chown=2:mail script /usr/bin/script.2-mail
+COPY --chown=2 script /usr/bin/script.2
+COPY --chown=bin script /usr/bin/script.bin
+COPY --chown=lp script /usr/bin/script.lp
+COPY --chown=3 script script2 /usr/local/bin/
+RUN rm -fr /var/created-directory
+COPY --chown=2097152 script script2 /var/created/directory/
+RUN rm -fr /no-such-directory
+COPY --chown=3 script script2 /no-such-directory/
+RUN rm -fr /new-workdir
+WORKDIR /new-workdir/several/levels/deep
+COPY --chown=3 script script2 no-such-directory/
+WORKDIR ../deeper
+COPY --chown=3 script script2 no-such-directory-either/
+COPY --chown=3 script script2 ../no-such-subdirectory/
diff --git a/dockerclient/testdata/copychown/script b/dockerclient/testdata/copychown/script
new file mode 100644
index 0000000..c3c3f3f
--- /dev/null
+++ b/dockerclient/testdata/copychown/script
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 0 \ No newline at end of file
diff --git a/dockerclient/testdata/copychown/script2 b/dockerclient/testdata/copychown/script2
new file mode 100644
index 0000000..80f336a
--- /dev/null
+++ b/dockerclient/testdata/copychown/script2
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 1 \ No newline at end of file
diff --git a/dockerclient/testdata/copydir/Dockerfile b/dockerclient/testdata/copydir/Dockerfile
new file mode 100644
index 0000000..92c53fd
--- /dev/null
+++ b/dockerclient/testdata/copydir/Dockerfile
@@ -0,0 +1,3 @@
+FROM centos:7
+COPY dir /dir
+RUN ls -al /dir/file \ No newline at end of file
diff --git a/dockerclient/testdata/copydir/dir/file b/dockerclient/testdata/copydir/dir/file
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dockerclient/testdata/copydir/dir/file
diff --git a/dockerclient/testdata/copyempty/.script b/dockerclient/testdata/copyempty/.script
new file mode 100644
index 0000000..f52d057
--- /dev/null
+++ b/dockerclient/testdata/copyempty/.script
@@ -0,0 +1,2 @@
+#!/bin/bash
+: \ No newline at end of file
diff --git a/dockerclient/testdata/copyempty/Dockerfile b/dockerclient/testdata/copyempty/Dockerfile
new file mode 100644
index 0000000..8b8c87e
--- /dev/null
+++ b/dockerclient/testdata/copyempty/Dockerfile
@@ -0,0 +1,2 @@
+FROM centos:7
+COPY "" /usr/local/tmp/
diff --git a/dockerclient/testdata/copyempty/Dockerfile2 b/dockerclient/testdata/copyempty/Dockerfile2
new file mode 100644
index 0000000..b99b8ad
--- /dev/null
+++ b/dockerclient/testdata/copyempty/Dockerfile2
@@ -0,0 +1,2 @@
+FROM centos:7
+COPY script1 "" script2 /usr/local/tmp/
diff --git a/dockerclient/testdata/copyempty/script1 b/dockerclient/testdata/copyempty/script1
new file mode 100644
index 0000000..c3c3f3f
--- /dev/null
+++ b/dockerclient/testdata/copyempty/script1
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 0 \ No newline at end of file
diff --git a/dockerclient/testdata/copyempty/script2 b/dockerclient/testdata/copyempty/script2
new file mode 100644
index 0000000..80f336a
--- /dev/null
+++ b/dockerclient/testdata/copyempty/script2
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 1 \ No newline at end of file
diff --git a/dockerclient/testdata/copyfrom/Dockerfile b/dockerclient/testdata/copyfrom/Dockerfile
new file mode 100644
index 0000000..6634ec7
--- /dev/null
+++ b/dockerclient/testdata/copyfrom/Dockerfile
@@ -0,0 +1,15 @@
+FROM centos:7 as base
+RUN mkdir -p /a/blah && touch /a/blah/1 /a/blah/2
+RUN mkdir -m 711 /711 && touch /711/711.txt
+RUN mkdir -m 755 /755 && touch /755/755.txt
+RUN mkdir -m 777 /777 && touch /777/777.txt
+FROM centos:7
+COPY --from=base /a/blah/* /blah/
+RUN rm -fr /711 /755 /777
+COPY --from=0 /711 /711
+COPY --from=0 /755 /755
+COPY --from=0 /777 /777
+RUN mkdir /precreated /precreated/711 /precreated/755 /precreated/777
+COPY --from=0 /711 /precreated/711
+COPY --from=0 /755 /precreated/755
+COPY --from=0 /777 /precreated/777
diff --git a/dockerclient/testdata/copyrename/Dockerfile b/dockerclient/testdata/copyrename/Dockerfile
new file mode 100644
index 0000000..575bf2c
--- /dev/null
+++ b/dockerclient/testdata/copyrename/Dockerfile
@@ -0,0 +1,3 @@
+FROM centos:7
+COPY file1 /usr/bin/file2
+RUN ls -al /usr/bin/file2 && ! ls -al /usr/bin/file1 \ No newline at end of file
diff --git a/dockerclient/testdata/copyrename/file1 b/dockerclient/testdata/copyrename/file1
new file mode 100644
index 0000000..c3c3f3f
--- /dev/null
+++ b/dockerclient/testdata/copyrename/file1
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 0 \ No newline at end of file
diff --git a/dockerclient/testdata/dir/Dockerfile b/dockerclient/testdata/dir/Dockerfile
new file mode 100644
index 0000000..4164fec
--- /dev/null
+++ b/dockerclient/testdata/dir/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+COPY . /
+COPY . dir
+COPY subdir/ test/
diff --git a/dockerclient/testdata/dir/file b/dockerclient/testdata/dir/file
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dockerclient/testdata/dir/file
diff --git a/dockerclient/testdata/dir/subdir/file2 b/dockerclient/testdata/dir/subdir/file2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dockerclient/testdata/dir/subdir/file2
diff --git a/dockerclient/testdata/ignore/.dockerignore b/dockerclient/testdata/ignore/.dockerignore
new file mode 100644
index 0000000..80bf870
--- /dev/null
+++ b/dockerclient/testdata/ignore/.dockerignore
@@ -0,0 +1,2 @@
+file
+file2 \ No newline at end of file
diff --git a/dockerclient/testdata/ignore/Dockerfile b/dockerclient/testdata/ignore/Dockerfile
new file mode 100644
index 0000000..c329312
--- /dev/null
+++ b/dockerclient/testdata/ignore/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+COPY . /
diff --git a/dockerclient/testdata/ignore/file b/dockerclient/testdata/ignore/file
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dockerclient/testdata/ignore/file
diff --git a/dockerclient/testdata/ignore/file2 b/dockerclient/testdata/ignore/file2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dockerclient/testdata/ignore/file2
diff --git a/dockerclient/testdata/ignore/file3 b/dockerclient/testdata/ignore/file3
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dockerclient/testdata/ignore/file3
diff --git a/dockerclient/testdata/multistage/Dockerfile b/dockerclient/testdata/multistage/Dockerfile
new file mode 100644
index 0000000..42bf4e5
--- /dev/null
+++ b/dockerclient/testdata/multistage/Dockerfile
@@ -0,0 +1,16 @@
+FROM golang:1.9 as builder
+WORKDIR /tmp
+COPY . .
+RUN echo foo > /tmp/bar
+
+FROM busybox:latest AS modifier
+WORKDIR /tmp
+COPY --from=builder /tmp/bar /tmp/bar
+RUN echo foo2 >> /tmp/bar
+
+FROM busybox:latest
+WORKDIR /
+COPY --from=modifier /tmp/bar /bin/baz
+COPY dir /var/dir
+
+RUN echo /bin/baz
diff --git a/dockerclient/testdata/multistage/Dockerfile.arg-scope b/dockerclient/testdata/multistage/Dockerfile.arg-scope
new file mode 100644
index 0000000..1c3e088
--- /dev/null
+++ b/dockerclient/testdata/multistage/Dockerfile.arg-scope
@@ -0,0 +1,9 @@
+FROM alpine
+ARG SECRET
+RUN echo "$SECRET"
+
+FROM alpine
+ARG FOO=test
+ARG BAR=bartest
+RUN echo "$FOO:$BAR"
+RUN echo "$SECRET"
diff --git a/dockerclient/testdata/multistage/Dockerfile.env b/dockerclient/testdata/multistage/Dockerfile.env
new file mode 100644
index 0000000..609aeb4
--- /dev/null
+++ b/dockerclient/testdata/multistage/Dockerfile.env
@@ -0,0 +1,15 @@
+# Taken from #209
+
+FROM alpine AS base
+ENV FOO=foo
+RUN echo FOO=$FOO
+
+FROM base AS devel
+ENV BAR=bar
+RUN echo FOO=$FOO BAR=$BAR
+
+FROM devel AS devel2
+RUN echo FOO=$FOO BAR=$BAR
+
+FROM devel2 AS devel3
+RUN echo FOO=$FOO BAR=$BAR
diff --git a/dockerclient/testdata/multistage/Dockerfile.heading-arg b/dockerclient/testdata/multistage/Dockerfile.heading-arg
new file mode 100644
index 0000000..76ccffe
--- /dev/null
+++ b/dockerclient/testdata/multistage/Dockerfile.heading-arg
@@ -0,0 +1,18 @@
+ARG GO_VERSION=1.9
+ARG GO_IMAGE=golang
+FROM $GO_IMAGE:$GO_VERSION as builder
+ARG FOO
+WORKDIR /tmp
+COPY . .
+RUN echo foo > /tmp/bar
+
+FROM busybox:latest AS modifier
+WORKDIR /tmp
+COPY --from=builder /tmp/bar /tmp/bar
+RUN echo foo2 >> /tmp/bar
+
+FROM $GO_IMAGE:$GO_VERSION
+WORKDIR /
+COPY --from=modifier /tmp/bar /bin/baz
+
+RUN echo /bin/baz
diff --git a/dockerclient/testdata/multistage/Dockerfile.heading-redefine b/dockerclient/testdata/multistage/Dockerfile.heading-redefine
new file mode 100644
index 0000000..4d476f3
--- /dev/null
+++ b/dockerclient/testdata/multistage/Dockerfile.heading-redefine
@@ -0,0 +1,7 @@
+ARG FOO=latest
+FROM alpine
+RUN echo "$FOO"
+
+FROM centos:$FOO
+ARG FOO
+RUN echo "$FOO"
diff --git a/dockerclient/testdata/multistage/Dockerfile.ref b/dockerclient/testdata/multistage/Dockerfile.ref
new file mode 100644
index 0000000..1e3e418
--- /dev/null
+++ b/dockerclient/testdata/multistage/Dockerfile.ref
@@ -0,0 +1,6 @@
+FROM busybox:latest
+WORKDIR /
+COPY --from=nginx:latest /etc/nginx/nginx.conf /var/tmp/
+COPY dir /var/dir
+RUN cat /var/tmp/nginx.conf
+
diff --git a/dockerclient/testdata/multistage/Dockerfile.relative-copy_1 b/dockerclient/testdata/multistage/Dockerfile.relative-copy_1
new file mode 100644
index 0000000..8d02c58
--- /dev/null
+++ b/dockerclient/testdata/multistage/Dockerfile.relative-copy_1
@@ -0,0 +1,7 @@
+FROM busybox AS builder
+WORKDIR /usr
+RUN echo "test" > /usr/a.txt
+
+FROM busybox
+COPY --from=builder ./a.txt /other/
+RUN ls /other/a.txt \ No newline at end of file
diff --git a/dockerclient/testdata/multistage/Dockerfile.relative-copy_2 b/dockerclient/testdata/multistage/Dockerfile.relative-copy_2
new file mode 100644
index 0000000..611d2c9
--- /dev/null
+++ b/dockerclient/testdata/multistage/Dockerfile.relative-copy_2
@@ -0,0 +1,7 @@
+FROM busybox AS builder
+WORKDIR /usr
+RUN echo "test" > /usr/a.txt
+
+FROM busybox
+COPY --from=builder ./a.txt /b.txt
+RUN ls /b.txt \ No newline at end of file
diff --git a/dockerclient/testdata/multistage/dir/a.txt b/dockerclient/testdata/multistage/dir/a.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dockerclient/testdata/multistage/dir/a.txt
diff --git a/dockerclient/testdata/overlapdir/Dockerfile.with_slash b/dockerclient/testdata/overlapdir/Dockerfile.with_slash
new file mode 100644
index 0000000..81988db
--- /dev/null
+++ b/dockerclient/testdata/overlapdir/Dockerfile.with_slash
@@ -0,0 +1,2 @@
+FROM busybox
+COPY existing/ . \ No newline at end of file
diff --git a/dockerclient/testdata/overlapdir/Dockerfile.without_slash b/dockerclient/testdata/overlapdir/Dockerfile.without_slash
new file mode 100644
index 0000000..0833b15
--- /dev/null
+++ b/dockerclient/testdata/overlapdir/Dockerfile.without_slash
@@ -0,0 +1,2 @@
+FROM busybox
+COPY existing . \ No newline at end of file
diff --git a/dockerclient/testdata/overlapdir/existing/etc/file-in-existing-dir b/dockerclient/testdata/overlapdir/existing/etc/file-in-existing-dir
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dockerclient/testdata/overlapdir/existing/etc/file-in-existing-dir
diff --git a/dockerclient/testdata/singlefile/Dockerfile b/dockerclient/testdata/singlefile/Dockerfile
new file mode 100644
index 0000000..5bede27
--- /dev/null
+++ b/dockerclient/testdata/singlefile/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+COPY . copy \ No newline at end of file
diff --git a/dockerclient/testdata/user-workdir/Dockerfile.notused b/dockerclient/testdata/user-workdir/Dockerfile.notused
new file mode 100644
index 0000000..5d2403f
--- /dev/null
+++ b/dockerclient/testdata/user-workdir/Dockerfile.notused
@@ -0,0 +1,5 @@
+FROM alpine
+RUN adduser -D buildtest
+USER buildtest
+WORKDIR /bin/created
+WORKDIR /workdir/created/deep/below
diff --git a/dockerclient/testdata/user-workdir/Dockerfile.used b/dockerclient/testdata/user-workdir/Dockerfile.used
new file mode 100644
index 0000000..1035a1b
--- /dev/null
+++ b/dockerclient/testdata/user-workdir/Dockerfile.used
@@ -0,0 +1,7 @@
+FROM alpine
+RUN adduser -D buildtest
+USER buildtest
+WORKDIR /bin/created
+RUN ls -l /bin
+WORKDIR /workdir/created/deep/below
+RUN ls -l /workdir
diff --git a/dockerclient/testdata/volume/Dockerfile b/dockerclient/testdata/volume/Dockerfile
new file mode 100644
index 0000000..f7cc0d3
--- /dev/null
+++ b/dockerclient/testdata/volume/Dockerfile
@@ -0,0 +1,7 @@
+FROM busybox
+
+ADD file /var/www/
+VOLUME /var/www
+ADD file /var/
+VOLUME /var
+ADD file2 /var/ \ No newline at end of file
diff --git a/dockerclient/testdata/volume/file b/dockerclient/testdata/volume/file
new file mode 100644
index 0000000..1a010b1
--- /dev/null
+++ b/dockerclient/testdata/volume/file
@@ -0,0 +1 @@
+file \ No newline at end of file
diff --git a/dockerclient/testdata/volume/file2 b/dockerclient/testdata/volume/file2
new file mode 100644
index 0000000..30d67d4
--- /dev/null
+++ b/dockerclient/testdata/volume/file2
@@ -0,0 +1 @@
+file2 \ No newline at end of file
diff --git a/dockerclient/testdata/volumerun/Dockerfile b/dockerclient/testdata/volumerun/Dockerfile
new file mode 100644
index 0000000..2cc82f8
--- /dev/null
+++ b/dockerclient/testdata/volumerun/Dockerfile
@@ -0,0 +1,7 @@
+FROM busybox
+
+ADD file /var/www/
+VOLUME /var/www
+ADD file2 /var/www/
+RUN touch /var/www/file3
+ADD file4 /var/www/ \ No newline at end of file
diff --git a/dockerclient/testdata/volumerun/file b/dockerclient/testdata/volumerun/file
new file mode 100644
index 0000000..1a010b1
--- /dev/null
+++ b/dockerclient/testdata/volumerun/file
@@ -0,0 +1 @@
+file \ No newline at end of file
diff --git a/dockerclient/testdata/volumerun/file2 b/dockerclient/testdata/volumerun/file2
new file mode 100644
index 0000000..30d67d4
--- /dev/null
+++ b/dockerclient/testdata/volumerun/file2
@@ -0,0 +1 @@
+file2 \ No newline at end of file
diff --git a/dockerclient/testdata/volumerun/file4 b/dockerclient/testdata/volumerun/file4
new file mode 100644
index 0000000..eed6780
--- /dev/null
+++ b/dockerclient/testdata/volumerun/file4
@@ -0,0 +1 @@
+file4 \ No newline at end of file
diff --git a/dockerclient/testdata/wildcard/Dockerfile b/dockerclient/testdata/wildcard/Dockerfile
new file mode 100644
index 0000000..a13cc56
--- /dev/null
+++ b/dockerclient/testdata/wildcard/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+ENV DIR=/usr
+ADD dir2/*.b dir2/*.c $DIR/test/
diff --git a/dockerclient/testdata/wildcard/dir2/file.a b/dockerclient/testdata/wildcard/dir2/file.a
new file mode 100644
index 0000000..d26db9c
--- /dev/null
+++ b/dockerclient/testdata/wildcard/dir2/file.a
@@ -0,0 +1 @@
+file.a \ No newline at end of file
diff --git a/dockerclient/testdata/wildcard/dir2/file.b b/dockerclient/testdata/wildcard/dir2/file.b
new file mode 100644
index 0000000..5c3dc17
--- /dev/null
+++ b/dockerclient/testdata/wildcard/dir2/file.b
@@ -0,0 +1 @@
+file.b \ No newline at end of file
diff --git a/dockerclient/testdata/wildcard/dir2/file.c b/dockerclient/testdata/wildcard/dir2/file.c
new file mode 100644
index 0000000..e0c7bb2
--- /dev/null
+++ b/dockerclient/testdata/wildcard/dir2/file.c
@@ -0,0 +1 @@
+file.c \ No newline at end of file
diff --git a/dockerclient/testdata/wildcard/dir2/file2.b b/dockerclient/testdata/wildcard/dir2/file2.b
new file mode 100644
index 0000000..9e43652
--- /dev/null
+++ b/dockerclient/testdata/wildcard/dir2/file2.b
@@ -0,0 +1 @@
+file2.b \ No newline at end of file
diff --git a/dockerfile/NOTICE b/dockerfile/NOTICE
new file mode 100644
index 0000000..519a7e9
--- /dev/null
+++ b/dockerfile/NOTICE
@@ -0,0 +1,26 @@
+Source files in this directory and all sub-directories have been
+copied from github.com/docker/docker/builder/dockerfile and are
+Licensed under the Apache License Version 2.0.
+
+Note that the fork of github.com/docker/docker used commit
+b68221c37ee597950364788204546f9c9d0e46a1.
+
+Docker
+Copyright 2012-2017 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/dockerfile/command/command.go b/dockerfile/command/command.go
new file mode 100644
index 0000000..f23c687
--- /dev/null
+++ b/dockerfile/command/command.go
@@ -0,0 +1,46 @@
+// Package command contains the set of Dockerfile commands.
+package command
+
+// Define constants for the command strings
+const (
+ Add = "add"
+ Arg = "arg"
+ Cmd = "cmd"
+ Copy = "copy"
+ Entrypoint = "entrypoint"
+ Env = "env"
+ Expose = "expose"
+ From = "from"
+ Healthcheck = "healthcheck"
+ Label = "label"
+ Maintainer = "maintainer"
+ Onbuild = "onbuild"
+ Run = "run"
+ Shell = "shell"
+ StopSignal = "stopsignal"
+ User = "user"
+ Volume = "volume"
+ Workdir = "workdir"
+)
+
+// Commands is a list of all Dockerfile commands
+var Commands = map[string]struct{}{
+ Add: {},
+ Arg: {},
+ Cmd: {},
+ Copy: {},
+ Entrypoint: {},
+ Env: {},
+ Expose: {},
+ From: {},
+ Healthcheck: {},
+ Label: {},
+ Maintainer: {},
+ Onbuild: {},
+ Run: {},
+ Shell: {},
+ StopSignal: {},
+ User: {},
+ Volume: {},
+ Workdir: {},
+}
diff --git a/dockerfile/parser/dumper/main.go b/dockerfile/parser/dumper/main.go
new file mode 100644
index 0000000..4ad1f41
--- /dev/null
+++ b/dockerfile/parser/dumper/main.go
@@ -0,0 +1,32 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/openshift/imagebuilder/dockerfile/parser"
+)
+
+func main() {
+ var f *os.File
+ var err error
+
+ if len(os.Args) < 2 {
+ fmt.Println("please supply filename(s)")
+ os.Exit(1)
+ }
+
+ for _, fn := range os.Args[1:] {
+ f, err = os.Open(fn)
+ if err != nil {
+ panic(err)
+ }
+
+ result, err := parser.Parse(f)
+ // Close eagerly: a defer inside the loop would keep every file open
+ // until main returns.
+ f.Close()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(result.AST.Dump())
+ }
+}
diff --git a/dockerfile/parser/json_test.go b/dockerfile/parser/json_test.go
new file mode 100644
index 0000000..ebe90a5
--- /dev/null
+++ b/dockerfile/parser/json_test.go
@@ -0,0 +1,59 @@
+package parser
+
+import (
+ "testing"
+)
+
+var invalidJSONArraysOfStrings = []string{
+ `["a",42,"b"]`,
+ `["a",123.456,"b"]`,
+ `["a",{},"b"]`,
+ `["a",{"c": "d"},"b"]`,
+ `["a",["c"],"b"]`,
+ `["a",true,"b"]`,
+ `["a",false,"b"]`,
+ `["a",null,"b"]`,
+}
+
+var validJSONArraysOfStrings = map[string][]string{
+ `[]`: {},
+ `[""]`: {""},
+ `["a"]`: {"a"},
+ `["a","b"]`: {"a", "b"},
+ `[ "a", "b" ]`: {"a", "b"},
+ `[ "a", "b" ]`: {"a", "b"},
+ ` [ "a", "b" ] `: {"a", "b"},
+ `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"},
+}
+
+func TestJSONArraysOfStrings(t *testing.T) {
+ for json, expected := range validJSONArraysOfStrings {
+ d := NewDefaultDirective()
+
+ if node, _, err := parseJSON(json, d); err != nil {
+ t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err)
+ } else {
+ i := 0
+ for node != nil {
+ if i >= len(expected) {
+ t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json)
+ }
+ if node.Value != expected[i] {
+ t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i)
+ }
+ node = node.Next
+ i++
+ }
+ if i != len(expected) {
+ t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json)
+ }
+ }
+ }
+ for _, json := range invalidJSONArraysOfStrings {
+ d := NewDefaultDirective()
+
+ if _, _, err := parseJSON(json, d); err != errDockerfileNotStringArray {
+ t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json)
+ }
+ }
+}
diff --git a/dockerfile/parser/line_parsers.go b/dockerfile/parser/line_parsers.go
new file mode 100644
index 0000000..24d05dc
--- /dev/null
+++ b/dockerfile/parser/line_parsers.go
@@ -0,0 +1,398 @@
+package parser
+
+// line parsers are dispatch calls that parse a single unit of text into a
+// Node object which contains the whole statement. Dockerfiles have varied
+// (but not usually unique, see ONBUILD for a unique example) parsing rules
+// per-command, and these unify the processing in a way that makes it
+// manageable.
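+//
+// For example, for the line `ENV FOO=bar`, the evaluator dispatches the rest
+// of the line ("FOO=bar") to parseEnv, which returns the linked key/value
+// nodes ("FOO" -> "bar") that hang off the command's Node.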
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/openshift/imagebuilder/dockerfile/command"
+)
+
+var (
+ errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
+)
+
+const (
+ commandLabel = "LABEL"
+)
+
+// ignore the current argument. This will still leave a command parsed, but
+// will not incorporate the arguments into the AST.
+func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
+ return &Node{}, nil, nil
+}
+
+// used for onbuild. Could potentially be used for anything that represents a
+// statement with sub-statements.
+//
+// ONBUILD RUN foo bar -> (onbuild (run foo bar))
+func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+
+ child, err := newNodeFromLine(rest, d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &Node{Children: []*Node{child}}, nil, nil
+}
+
+// helper to parse words (i.e space delimited or quoted strings) in a statement.
+// The quotes are preserved as part of this function and they are stripped later
+// as part of processWords().
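+//
+// For example, with the default escape token ('\'):
+//
+//	parseWords(`foo bar "abc xyz"`, d) -> []string{`foo`, `bar`, `"abc xyz"`}
+//	parseWords(`foo\ bar`, d)          -> []string{`foo\ bar`}
+//
+// (These expectations mirror the cases exercised by TestParseWords in
+// parser_test.go.)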
+func parseWords(rest string, d *Directive) []string {
+ const (
+ inSpaces = iota // looking for start of a word
+ inWord
+ inQuote
+ )
+
+ words := []string{}
+ phase := inSpaces
+ word := ""
+ quote := '\000'
+ blankOK := false
+ var ch rune
+ var chWidth int
+
+ for pos := 0; pos <= len(rest); pos += chWidth {
+ if pos != len(rest) {
+ ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+ }
+
+ if phase == inSpaces { // Looking for start of word
+ if pos == len(rest) { // end of input
+ break
+ }
+ if unicode.IsSpace(ch) { // skip spaces
+ continue
+ }
+ phase = inWord // found it, fall through
+ }
+ if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ break
+ }
+ if phase == inWord {
+ if unicode.IsSpace(ch) {
+ phase = inSpaces
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ word = ""
+ blankOK = false
+ continue
+ }
+ if ch == '\'' || ch == '"' {
+ quote = ch
+ blankOK = true
+ phase = inQuote
+ }
+ if ch == d.escapeToken {
+ if pos+chWidth == len(rest) {
+ continue // just skip an escape token at end of line
+ }
+ // If we're not quoted and we see an escape token, then always just
+ // add the escape token plus the char to the word, even if the char
+ // is a quote.
+ word += string(ch)
+ pos += chWidth
+ ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+ }
+ word += string(ch)
+ continue
+ }
+ if phase == inQuote {
+ if ch == quote {
+ phase = inWord
+ }
+ // The escape token is special except for ' quotes - can't escape anything for '
+ if ch == d.escapeToken && quote != '\'' {
+ if pos+chWidth == len(rest) {
+ phase = inWord
+ continue // just skip the escape token at end
+ }
+ pos += chWidth
+ word += string(ch)
+ ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+ }
+ word += string(ch)
+ }
+ }
+
+ return words
+}
+
+// parse environment like statements. Note that this does *not* handle
+// variable interpolation, which will be handled in the evaluator.
+func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
+ // This is kind of tricky because we need to support the old
+ // variant: KEY name value
+ // as well as the new one: KEY name=value ...
+ // The trigger to know which one is being used will be whether we hit
+ // a space or = first. space ==> old, "=" ==> new
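+ //
+ // For example, `ENV FOO bar baz` (old form) yields ("FOO" -> "bar baz"),
+ // while `ENV FOO=bar BAZ=qux` (new form) yields
+ // ("FOO" -> "bar" -> "BAZ" -> "qux").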
+
+ words := parseWords(rest, d)
+ if len(words) == 0 {
+ return nil, nil
+ }
+
+ // Old format (KEY name value)
+ if !strings.Contains(words[0], "=") {
+ parts := tokenWhitespace.Split(rest, 2)
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("%s must have two arguments", key)
+ }
+ return newKeyValueNode(parts[0], parts[1]), nil
+ }
+
+ var rootNode *Node
+ var prevNode *Node
+ for _, word := range words {
+ if !strings.Contains(word, "=") {
+ return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
+ }
+
+ parts := strings.SplitN(word, "=", 2)
+ node := newKeyValueNode(parts[0], parts[1])
+ rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
+ }
+
+ return rootNode, nil
+}
+
+func newKeyValueNode(key, value string) *Node {
+ return &Node{
+ Value: key,
+ Next: &Node{Value: value},
+ }
+}
+
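+// appendKeyValueNode appends a key/value pair node to the chain, returning
+// the (possibly unchanged) root node and the new tail to append after.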
+func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
+ if rootNode == nil {
+ rootNode = node
+ }
+ if prevNode != nil {
+ prevNode.Next = node
+ }
+
+ prevNode = node.Next
+ return rootNode, prevNode
+}
+
+func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
+ node, err := parseNameVal(rest, "ENV", d)
+ return node, nil, err
+}
+
+func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
+ node, err := parseNameVal(rest, commandLabel, d)
+ return node, nil, err
+}
+
+// NodeFromLabels returns a Node for the injected labels
+func NodeFromLabels(labels map[string]string) *Node {
+ keys := []string{}
+ for key := range labels {
+ keys = append(keys, key)
+ }
+ // Sort the labels to have a repeatable order
+ sort.Strings(keys)
+
+ labelPairs := []string{}
+ var rootNode *Node
+ var prevNode *Node
+ for _, key := range keys {
+ value := labels[key]
+ labelPairs = append(labelPairs, fmt.Sprintf("%q='%s'", key, value))
+ // Value must be single quoted to prevent env variable expansion
+ // See https://github.com/docker/docker/issues/26027
+ node := newKeyValueNode(key, "'"+value+"'")
+ rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
+ }
+
+ return &Node{
+ Value: command.Label,
+ Original: commandLabel + " " + strings.Join(labelPairs, " "),
+ Next: rootNode,
+ }
+}
+
+// parses a statement containing one or more keyword definition(s) and/or
+// value assignments, like `name1 name2= name3="" name4=value`.
+// Note that this is a stricter format than the old format of assignment,
+// allowed by parseNameVal(), in a way that this only allows assignment of the
+// form `keyword=[<value>]` like `name2=`, `name3=""`, and `name4=value` above.
+// In addition, a keyword definition alone is of the form `keyword` like `name1`
+// above. And the assignments `name2=` and `name3=""` are equivalent and
+// assign an empty value to the respective keywords.
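+//
+// For example, `ARG name1 name2=value` yields the nodes
+// "name1" -> "name2=value".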
+func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
+ words := parseWords(rest, d)
+ if len(words) == 0 {
+ return nil, nil, nil
+ }
+
+ var (
+ rootnode *Node
+ prevNode *Node
+ )
+ for i, word := range words {
+ node := &Node{}
+ node.Value = word
+ if i == 0 {
+ rootnode = node
+ } else {
+ prevNode.Next = node
+ }
+ prevNode = node
+ }
+
+ return rootnode, nil, nil
+}
+
+// parses a whitespace-delimited set of arguments. The result is effectively a
+// linked list of string arguments.
+func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+
+ node := &Node{}
+ rootnode := node
+ prevnode := node
+ for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
+ prevnode = node
+ node.Value = str
+ node.Next = &Node{}
+ node = node.Next
+ }
+
+ // XXX to get around regexp.Split *always* providing an empty string at the
+ // end due to how our loop is constructed, nil out the last node in the
+ // chain.
+ prevnode.Next = nil
+
+ return rootnode, nil, nil
+}
+
+// parseString just takes the rest of the line as-is and returns it as a
+// single working node.
+func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+ n := &Node{}
+ n.Value = rest
+ return n, nil, nil
+}
+
+// parseJSON converts JSON arrays to an AST.
+func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+ rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
+ if !strings.HasPrefix(rest, "[") {
+ return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
+ }
+
+ var myJSON []interface{}
+ if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
+ return nil, nil, err
+ }
+
+ var top, prev *Node
+ for _, str := range myJSON {
+ s, ok := str.(string)
+ if !ok {
+ return nil, nil, errDockerfileNotStringArray
+ }
+
+ node := &Node{Value: s}
+ if prev == nil {
+ top = node
+ } else {
+ prev.Next = node
+ }
+ prev = node
+ }
+
+ return top, map[string]bool{"json": true}, nil
+}
+
+// parseMaybeJSON determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, returns the remaining text as a single
+// node.
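+//
+// For example, `CMD ["sh", "-c", "echo hi"]` parses as a JSON array (and the
+// node is marked with the "json" attribute), while `CMD echo hi` is kept as
+// the single node "echo hi".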
+func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+
+ node, attrs, err := parseJSON(rest, d)
+
+ if err == nil {
+ return node, attrs, nil
+ }
+ if err == errDockerfileNotStringArray {
+ return nil, nil, err
+ }
+
+ node = &Node{}
+ node.Value = rest
+ return node, nil, nil
+}
+
+// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, attempts to parse it as a whitespace
+// delimited string.
+func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
+ node, attrs, err := parseJSON(rest, d)
+
+ if err == nil {
+ return node, attrs, nil
+ }
+ if err == errDockerfileNotStringArray {
+ return nil, nil, err
+ }
+
+ return parseStringsWhitespaceDelimited(rest, d)
+}
+
+// parseHealthConfig parses the HEALTHCHECK command, which is like
+// parseMaybeJSON but has an extra leading type argument (such as CMD or NONE).
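+//
+// For example, `HEALTHCHECK CMD curl -f http://localhost/` yields a node with
+// Value "CMD" whose Next chain holds the command text.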
+func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
+ // Find end of first argument
+ var sep int
+ for ; sep < len(rest); sep++ {
+ if unicode.IsSpace(rune(rest[sep])) {
+ break
+ }
+ }
+ next := sep
+ for ; next < len(rest); next++ {
+ if !unicode.IsSpace(rune(rest[next])) {
+ break
+ }
+ }
+
+ if sep == 0 {
+ return nil, nil, nil
+ }
+
+ typ := rest[:sep]
+ cmd, attrs, err := parseMaybeJSON(rest[next:], d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &Node{Value: typ, Next: cmd}, attrs, err
+}
diff --git a/dockerfile/parser/line_parsers_test.go b/dockerfile/parser/line_parsers_test.go
new file mode 100644
index 0000000..cf0b21b
--- /dev/null
+++ b/dockerfile/parser/line_parsers_test.go
@@ -0,0 +1,74 @@
+package parser
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseNameValOldFormat(t *testing.T) {
+ directive := Directive{}
+ node, err := parseNameVal("foo bar", "LABEL", &directive)
+ assert.NoError(t, err)
+
+ expected := &Node{
+ Value: "foo",
+ Next: &Node{Value: "bar"},
+ }
+ assert.Equal(t, expected, node)
+}
+
+func TestParseNameValNewFormat(t *testing.T) {
+ directive := Directive{}
+ node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive)
+ assert.NoError(t, err)
+
+ expected := &Node{
+ Value: "foo",
+ Next: &Node{
+ Value: "bar",
+ Next: &Node{
+ Value: "thing",
+ Next: &Node{
+ Value: "star",
+ },
+ },
+ },
+ }
+ assert.Equal(t, expected, node)
+}
+
+func TestNodeFromLabels(t *testing.T) {
+ labels := map[string]string{
+ "foo": "bar",
+ "weird": "first' second",
+ }
+ expected := &Node{
+ Value: "label",
+ Original: `LABEL "foo"='bar' "weird"='first' second'`,
+ Next: &Node{
+ Value: "foo",
+ Next: &Node{
+ Value: "'bar'",
+ Next: &Node{
+ Value: "weird",
+ Next: &Node{
+ Value: "'first' second'",
+ },
+ },
+ },
+ },
+ }
+
+ node := NodeFromLabels(labels)
+ assert.Equal(t, expected, node)
+
+}
+
+func TestParseNameValWithoutVal(t *testing.T) {
+ directive := Directive{}
+ // In Config.Env, a variable without `=` is removed from the environment. (#31634)
+ // However, in Dockerfile, we don't allow "unsetting" an environment variable. (#11922)
+ _, err := parseNameVal("foo", "ENV", &directive)
+ assert.Error(t, err, "ENV must have two arguments")
+}
diff --git a/dockerfile/parser/parser.go b/dockerfile/parser/parser.go
new file mode 100644
index 0000000..f5bef44
--- /dev/null
+++ b/dockerfile/parser/parser.go
@@ -0,0 +1,355 @@
+// Package parser implements a parser and parse tree dumper for Dockerfiles.
+package parser
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "unicode"
+
+ sRegexp "github.com/containers/storage/pkg/regexp"
+ "github.com/containers/storage/pkg/system"
+ "github.com/openshift/imagebuilder/dockerfile/command"
+)
+
+// Node is a structure used to represent a parse tree.
+//
+// In the node there are three fields, Value, Next, and Children. Value is the
+// current token's string value. Next is always the next non-child token, and
+// children contains all the children. Here's an example:
+//
+// (value next (child child-next child-next-next) next-next)
+//
+// This data structure is frankly pretty lousy for handling complex languages,
+// but lucky for us the Dockerfile isn't very complicated. This structure
+// works a little more effectively than a "proper" parse tree for our needs.
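+//
+// For example, parsing the line `ENV FOO=bar` produces a node whose Value is
+// "env", with Next pointing at "FOO" and then "bar"; Dump renders it as
+// (env "FOO" "bar").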
+type Node struct {
+ Value string // actual content
+ Next *Node // the next item in the current sexp
+ Children []*Node // the children of this sexp
+ Attributes map[string]bool // special attributes for this node
+ Original string // original line used before parsing
+ Flags []string // only top Node should have this set
+ StartLine int // the line in the original dockerfile where the node begins
+ EndLine int // the line in the original dockerfile where the node ends
+}
+
+// Dump dumps the AST defined by `node` as a list of sexps.
+// Returns a string suitable for printing.
+func (node *Node) Dump() string {
+ str := ""
+ str += node.Value
+
+ if len(node.Flags) > 0 {
+ str += fmt.Sprintf(" %q", node.Flags)
+ }
+
+ for _, n := range node.Children {
+ str += "(" + n.Dump() + ")\n"
+ }
+
+ for n := node.Next; n != nil; n = n.Next {
+ if len(n.Children) > 0 {
+ str += " " + n.Dump()
+ } else {
+ str += " " + strconv.Quote(n.Value)
+ }
+ }
+
+ return strings.TrimSpace(str)
+}
+
+func (node *Node) lines(start, end int) {
+ node.StartLine = start
+ node.EndLine = end
+}
+
+// AddChild adds a new child node, and updates line information
+func (node *Node) AddChild(child *Node, startLine, endLine int) {
+ child.lines(startLine, endLine)
+ if node.StartLine < 0 {
+ node.StartLine = startLine
+ }
+ node.EndLine = endLine
+ node.Children = append(node.Children, child)
+}
+
+var (
+ dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error)
+ tokenWhitespace = sRegexp.Delayed(`[\t\v\f\r ]+`)
+ tokenEscapeCommand = sRegexp.Delayed(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
+ tokenPlatformCommand = sRegexp.Delayed(`^#[ \t]*platform[ \t]*=[ \t]*(?P<platform>.*)$`)
+ tokenComment = sRegexp.Delayed(`^#.*$`)
+)
+
+// DefaultEscapeToken is the default escape token
+const DefaultEscapeToken = '\\'
+
+// defaultPlatformToken is the platform assumed for the build if not explicitly provided
+var defaultPlatformToken = runtime.GOOS
+
+// Directive is the structure used during a build run to hold the state of
+// parsing directives.
+type Directive struct {
+ escapeToken rune // Current escape token
+ platformToken string // Current platform token
+ lineContinuationRegex *regexp.Regexp // Current line continuation regex
+ processingComplete bool // Whether we are done looking for directives
+ escapeSeen bool // Whether the escape directive has been seen
+ platformSeen bool // Whether the platform directive has been seen
+}
+
+// setEscapeToken sets the default token for escaping characters in a Dockerfile.
+func (d *Directive) setEscapeToken(s string) error {
+ if s != "`" && s != "\\" {
+ return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
+ }
+ d.escapeToken = rune(s[0])
+ d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
+ return nil
+}
+
+// setPlatformToken sets the default platform for pulling images in a Dockerfile.
+func (d *Directive) setPlatformToken(s string) error {
+ s = strings.ToLower(s)
+ valid := []string{runtime.GOOS}
+ if system.LCOWSupported() {
+ valid = append(valid, "linux")
+ }
+ for _, item := range valid {
+ if s == item {
+ d.platformToken = s
+ return nil
+ }
+ }
+ return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid)
+}
+
+// possibleParserDirective looks for the parser directives '# escape=<char>'
+// and '# platform=<string>'. Parser directives must precede any builder
+// instruction or other comments, and cannot be repeated.
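+//
+// For example, a Dockerfile beginning with the line
+//
+//	# escape=`
+//
+// switches the line continuation token from '\' to '`'.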
+func (d *Directive) possibleParserDirective(line string) error {
+ if d.processingComplete {
+ return nil
+ }
+
+ tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
+ if len(tecMatch) != 0 {
+ for i, n := range tokenEscapeCommand.SubexpNames() {
+ if n == "escapechar" {
+ if d.escapeSeen {
+ return errors.New("only one escape parser directive can be used")
+ }
+ d.escapeSeen = true
+ return d.setEscapeToken(tecMatch[i])
+ }
+ }
+ }
+
+ // TODO @jhowardmsft LCOW Support: Eventually this check can be removed,
+ // but only recognise a platform token if running in LCOW mode.
+ if system.LCOWSupported() {
+ tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line))
+ if len(tpcMatch) != 0 {
+ for i, n := range tokenPlatformCommand.SubexpNames() {
+ if n == "platform" {
+ if d.platformSeen {
+ return errors.New("only one platform parser directive can be used")
+ }
+ d.platformSeen = true
+ return d.setPlatformToken(tpcMatch[i])
+ }
+ }
+ }
+ }
+
+ d.processingComplete = true
+ return nil
+}
+
+// NewDefaultDirective returns a new Directive with the default escapeToken token
+func NewDefaultDirective() *Directive {
+ directive := Directive{}
+ directive.setEscapeToken(string(DefaultEscapeToken))
+ directive.setPlatformToken(defaultPlatformToken)
+ return &directive
+}
+
+func init() {
+ // Dispatch Table. see line_parsers.go for the parse functions.
+ // The command is parsed and mapped to the line parser. The line parser
+ // receives the arguments but not the command, and returns an AST after
+ // reformulating the arguments according to the rules in the parser
+ // functions. Errors are propagated up by Parse() and the resulting AST can
+ // be incorporated directly into the existing AST as a next.
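+ //
+ // For example, ADD and COPY accept either a JSON array or whitespace
+ // delimited words (parseMaybeJSONToList), while CMD, ENTRYPOINT, RUN, and
+ // SHELL accept either a JSON array or a single string (parseMaybeJSON).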
+ dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){
+ command.Add: parseMaybeJSONToList,
+ command.Arg: parseNameOrNameVal,
+ command.Cmd: parseMaybeJSON,
+ command.Copy: parseMaybeJSONToList,
+ command.Entrypoint: parseMaybeJSON,
+ command.Env: parseEnv,
+ command.Expose: parseStringsWhitespaceDelimited,
+ command.From: parseStringsWhitespaceDelimited,
+ command.Healthcheck: parseHealthConfig,
+ command.Label: parseLabel,
+ command.Maintainer: parseString,
+ command.Onbuild: parseSubCommand,
+ command.Run: parseMaybeJSON,
+ command.Shell: parseMaybeJSON,
+ command.StopSignal: parseString,
+ command.User: parseString,
+ command.Volume: parseMaybeJSONToList,
+ command.Workdir: parseString,
+ }
+}
+
+// newNodeFromLine splits the line into parts, and dispatches to a function
+// based on the command and command arguments. A Node is created from the
+// result of the dispatch.
+func newNodeFromLine(line string, directive *Directive) (*Node, error) {
+ cmd, flags, args, err := splitCommand(line)
+ if err != nil {
+ return nil, err
+ }
+
+ fn := dispatch[cmd]
+ // Ignore invalid Dockerfile instructions
+ if fn == nil {
+ fn = parseIgnore
+ }
+ next, attrs, err := fn(args, directive)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Node{
+ Value: cmd,
+ Original: line,
+ Flags: flags,
+ Next: next,
+ Attributes: attrs,
+ }, nil
+}
+
+// Result is the result of parsing a Dockerfile
+type Result struct {
+ AST *Node
+ EscapeToken rune
+ Platform string
+ Warnings []string
+}
+
+// PrintWarnings to the writer
+func (r *Result) PrintWarnings(out io.Writer) {
+ if len(r.Warnings) == 0 {
+ return
+ }
+ fmt.Fprint(out, strings.Join(r.Warnings, "\n")+"\n")
+}
+
+// Parse reads lines from a Reader, parses the lines into an AST and returns
+// the AST and escape token
+func Parse(rwc io.Reader) (*Result, error) {
+ d := NewDefaultDirective()
+ currentLine := 0
+ root := &Node{StartLine: -1}
+ scanner := bufio.NewScanner(rwc)
+ warnings := []string{}
+
+ var err error
+ for scanner.Scan() {
+ bytesRead := scanner.Bytes()
+ if currentLine == 0 {
+ // First line, strip the byte-order-marker if present
+ bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
+ }
+ bytesRead, err = processLine(d, bytesRead, true)
+ if err != nil {
+ return nil, err
+ }
+ currentLine++
+
+ startLine := currentLine
+ line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
+ if isEndOfLine && line == "" {
+ continue
+ }
+
+ var hasEmptyContinuationLine bool
+ for !isEndOfLine && scanner.Scan() {
+ bytesRead, err := processLine(d, scanner.Bytes(), false)
+ if err != nil {
+ return nil, err
+ }
+ currentLine++
+
+ if isEmptyContinuationLine(bytesRead) {
+ hasEmptyContinuationLine = true
+ continue
+ }
+
+ continuationLine := string(bytesRead)
+ continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
+ line += continuationLine
+ }
+
+ if hasEmptyContinuationLine {
+ warning := "[WARNING]: Empty continuation line found in:\n " + line
+ warnings = append(warnings, warning)
+ }
+
+ child, err := newNodeFromLine(line, d)
+ if err != nil {
+ return nil, err
+ }
+ root.AddChild(child, startLine, currentLine)
+ }
+
+ if len(warnings) > 0 {
+ warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
+ }
+ return &Result{
+ AST: root,
+ Warnings: warnings,
+ EscapeToken: d.escapeToken,
+ Platform: d.platformToken,
+ }, nil
+}
+
+func trimComments(src []byte) []byte {
+ return tokenComment.ReplaceAll(src, []byte{})
+}
+
+func trimWhitespace(src []byte) []byte {
+ return bytes.TrimLeftFunc(src, unicode.IsSpace)
+}
+
+func isEmptyContinuationLine(line []byte) bool {
+ return len(trimComments(trimWhitespace(line))) == 0
+}
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+func trimContinuationCharacter(line string, d *Directive) (string, bool) {
+ if d.lineContinuationRegex.MatchString(line) {
+ line = d.lineContinuationRegex.ReplaceAllString(line, "")
+ return line, false
+ }
+ return line, true
+}
+
+// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
+// to preserve whitespace on continuation lines. Why is that done?
+func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) {
+ if stripLeftWhitespace {
+ token = trimWhitespace(token)
+ }
+ return trimComments(token), d.possibleParserDirective(string(token))
+}
diff --git a/dockerfile/parser/parser_test.go b/dockerfile/parser/parser_test.go
new file mode 100644
index 0000000..32d24f5
--- /dev/null
+++ b/dockerfile/parser/parser_test.go
@@ -0,0 +1,154 @@
+package parser
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const testDir = "testfiles"
+const negativeTestDir = "testfiles-negative"
+const testFileLineInfo = "testfile-line/Dockerfile"
+
+func getDirs(t *testing.T, dir string) []string {
+ f, err := os.Open(dir)
+ require.NoError(t, err)
+ defer f.Close()
+
+ dirs, err := f.Readdirnames(0)
+ require.NoError(t, err)
+ return dirs
+}
+
+func TestParseErrorCases(t *testing.T) {
+ for _, dir := range getDirs(t, negativeTestDir) {
+ dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile")
+
+ df, err := os.Open(dockerfile)
+ require.NoError(t, err, dockerfile)
+ defer df.Close()
+
+ _, err = Parse(df)
+ assert.Error(t, err, dockerfile)
+ }
+}
+
+func TestParseCases(t *testing.T) {
+ for _, dir := range getDirs(t, testDir) {
+ dockerfile := filepath.Join(testDir, dir, "Dockerfile")
+ resultfile := filepath.Join(testDir, dir, "result")
+
+ df, err := os.Open(dockerfile)
+ require.NoError(t, err, dockerfile)
+ defer df.Close()
+
+ result, err := Parse(df)
+ require.NoError(t, err, dockerfile)
+
+ content, err := ioutil.ReadFile(resultfile)
+ require.NoError(t, err, resultfile)
+
+ if runtime.GOOS == "windows" {
+ // CRLF --> CR to match Unix behavior
+ content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1)
+ }
+ assert.Equal(t, result.AST.Dump()+"\n", string(content), "In "+dockerfile)
+ }
+}
+
+func TestParseWords(t *testing.T) {
+ tests := []map[string][]string{
+ {
+ "input": {"foo"},
+ "expect": {"foo"},
+ },
+ {
+ "input": {"foo bar"},
+ "expect": {"foo", "bar"},
+ },
+ {
+ "input": {"foo\\ bar"},
+ "expect": {"foo\\ bar"},
+ },
+ {
+ "input": {"foo=bar"},
+ "expect": {"foo=bar"},
+ },
+ {
+ "input": {"foo bar 'abc xyz'"},
+ "expect": {"foo", "bar", "'abc xyz'"},
+ },
+ {
+ "input": {`foo bar "abc xyz"`},
+ "expect": {"foo", "bar", `"abc xyz"`},
+ },
+ {
+ "input": {"àöû"},
+ "expect": {"àöû"},
+ },
+ {
+ "input": {`föo bàr "âbc xÿz"`},
+ "expect": {"föo", "bàr", `"âbc xÿz"`},
+ },
+ }
+
+ for _, test := range tests {
+ words := parseWords(test["input"][0], NewDefaultDirective())
+ assert.Equal(t, test["expect"], words)
+ }
+}
+
+func TestParseIncludesLineNumbers(t *testing.T) {
+ df, err := os.Open(testFileLineInfo)
+ require.NoError(t, err)
+ defer df.Close()
+
+ result, err := Parse(df)
+ require.NoError(t, err)
+
+ ast := result.AST
+ assert.Equal(t, 5, ast.StartLine)
+ assert.Equal(t, 31, ast.EndLine)
+ assert.Len(t, ast.Children, 3)
+ expected := [][]int{
+ {5, 5},
+ {11, 12},
+ {17, 31},
+ }
+ for i, child := range ast.Children {
+ msg := fmt.Sprintf("Child %d", i)
+ assert.Equal(t, expected[i], []int{child.StartLine, child.EndLine}, msg)
+ }
+}
+
+func TestParseWarnsOnEmptyContinuationLine(t *testing.T) {
+ dockerfile := bytes.NewBufferString(`
+FROM alpine:3.6
+
+RUN something \
+
+ following \
+
+ more
+
+RUN another \
+
+ thing
+ `)
+
+ result, err := Parse(dockerfile)
+ require.NoError(t, err)
+ warnings := result.Warnings
+ assert.Len(t, warnings, 3)
+ assert.Contains(t, warnings[0], "Empty continuation line found in")
+ assert.Contains(t, warnings[0], "RUN something following more")
+ assert.Contains(t, warnings[1], "RUN another thing")
+ assert.Contains(t, warnings[2], "will become errors in a future release")
+}
diff --git a/dockerfile/parser/split_command.go b/dockerfile/parser/split_command.go
new file mode 100644
index 0000000..171f454
--- /dev/null
+++ b/dockerfile/parser/split_command.go
@@ -0,0 +1,118 @@
+package parser
+
+import (
+ "strings"
+ "unicode"
+)
+
+// splitCommand takes a single line of text and parses out the cmd and args,
+// which are used for dispatching to more exact parsing functions.
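+//
+// For example, `COPY --from=builder /tmp/bar /bin/baz` splits into the
+// command "copy", the flags ["--from=builder"], and the args
+// "/tmp/bar /bin/baz".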
+func splitCommand(line string) (string, []string, string, error) {
+ var args string
+ var flags []string
+
+ // Make sure we get the same results irrespective of leading/trailing spaces
+ cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
+ cmd := strings.ToLower(cmdline[0])
+
+ if len(cmdline) == 2 {
+ var err error
+ args, flags, err = extractBuilderFlags(cmdline[1])
+ if err != nil {
+ return "", nil, "", err
+ }
+ }
+
+ return cmd, flags, strings.TrimSpace(args), nil
+}
+
+// extractBuilderFlags parses the builder flags (the leading "--" words) and
+// returns the remaining part of the line together with any flags found.
+func extractBuilderFlags(line string) (string, []string, error) {
+
+ const (
+ inSpaces = iota // looking for start of a word
+ inWord
+ inQuote
+ )
+
+ words := []string{}
+ phase := inSpaces
+ word := ""
+ quote := '\000'
+ blankOK := false
+ var ch rune
+
+ for pos := 0; pos <= len(line); pos++ {
+ if pos != len(line) {
+ ch = rune(line[pos])
+ }
+
+ if phase == inSpaces { // Looking for start of word
+ if pos == len(line) { // end of input
+ break
+ }
+ if unicode.IsSpace(ch) { // skip spaces
+ continue
+ }
+
+ // Only keep going if the next word starts with --
+ if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
+ return line[pos:], words, nil
+ }
+
+ phase = inWord // found something with "--", fall through
+ }
+ if (phase == inWord || phase == inQuote) && (pos == len(line)) {
+ if word != "--" && (blankOK || len(word) > 0) {
+ words = append(words, word)
+ }
+ break
+ }
+ if phase == inWord {
+ if unicode.IsSpace(ch) {
+ phase = inSpaces
+ if word == "--" {
+ return line[pos:], words, nil
+ }
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ word = ""
+ blankOK = false
+ continue
+ }
+ if ch == '\'' || ch == '"' {
+ quote = ch
+ blankOK = true
+ phase = inQuote
+ continue
+ }
+ if ch == '\\' {
+ if pos+1 == len(line) {
+ continue // just skip \ at end
+ }
+ pos++
+ ch = rune(line[pos])
+ }
+ word += string(ch)
+ continue
+ }
+ if phase == inQuote {
+ if ch == quote {
+ phase = inWord
+ continue
+ }
+ if ch == '\\' {
+ if pos+1 == len(line) {
+ phase = inWord
+ continue // just skip \ at end
+ }
+ pos++
+ ch = rune(line[pos])
+ }
+ word += string(ch)
+ }
+ }
+
+ return "", words, nil
+}
diff --git a/dockerfile/parser/testfile-line/Dockerfile b/dockerfile/parser/testfile-line/Dockerfile
new file mode 100644
index 0000000..c7601c9
--- /dev/null
+++ b/dockerfile/parser/testfile-line/Dockerfile
@@ -0,0 +1,35 @@
+# ESCAPE=\
+
+
+
+FROM brimstone/ubuntu:14.04
+
+
+# TORUN -v /var/run/docker.sock:/var/run/docker.sock
+
+
+ENV GOPATH \
+/go
+
+
+
+# Install the packages we need, clean up after them and us
+RUN apt-get update \
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
+
+
+ && apt-get install -y --no-install-recommends git golang ca-certificates \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists \
+
+ && go get -v github.com/brimstone/consuldock \
+ && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \
+
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
+ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
+ && rm /tmp/dpkg.* \
+ && rm -rf $GOPATH
+
+
+
+
diff --git a/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile
new file mode 100644
index 0000000..1d65578
--- /dev/null
+++ b/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+
+ENV PATH
diff --git a/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile
new file mode 100644
index 0000000..d1be459
--- /dev/null
+++ b/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile
@@ -0,0 +1 @@
+CMD [ "echo", [ "nested json" ] ]
diff --git a/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile
new file mode 100644
index 0000000..035b4e8
--- /dev/null
+++ b/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile
@@ -0,0 +1,11 @@
+FROM ubuntu:14.04
+LABEL maintainer Seongyeol Lim <seongyeol37@gmail.com>
+
+COPY . /go/src/github.com/docker/docker
+ADD . /
+ADD null /
+COPY nullfile /tmp
+ADD [ "vimrc", "/tmp" ]
+COPY [ "bashrc", "/tmp" ]
+COPY [ "test file", "/tmp" ]
+ADD [ "test file", "/tmp/test file" ]
diff --git a/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result
new file mode 100644
index 0000000..d1f71ec
--- /dev/null
+++ b/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result
@@ -0,0 +1,10 @@
+(from "ubuntu:14.04")
+(label "maintainer" "Seongyeol Lim <seongyeol37@gmail.com>")
+(copy "." "/go/src/github.com/docker/docker")
+(add "." "/")
+(add "null" "/")
+(copy "nullfile" "/tmp")
+(add "vimrc" "/tmp")
+(copy "bashrc" "/tmp")
+(copy "test file" "/tmp")
+(add "test file" "/tmp/test file")
diff --git a/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile
new file mode 100644
index 0000000..9c0952a
--- /dev/null
+++ b/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile
@@ -0,0 +1,26 @@
+#escape=\
+FROM brimstone/ubuntu:14.04
+
+LABEL maintainer brimstone@the.narro.ws
+
+# TORUN -v /var/run/docker.sock:/var/run/docker.sock
+
+ENV GOPATH /go
+
+# Set our command
+ENTRYPOINT ["/usr/local/bin/consuldock"]
+
+# Install the packages we need, clean up after them and us
+RUN apt-get update \
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
+ && apt-get install -y --no-install-recommends git golang ca-certificates \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists \
+
+ && go get -v github.com/brimstone/consuldock \
+ && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \
+
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
+ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
+ && rm /tmp/dpkg.* \
+ && rm -rf $GOPATH
diff --git a/dockerfile/parser/testfiles/brimstone-consuldock/result b/dockerfile/parser/testfiles/brimstone-consuldock/result
new file mode 100644
index 0000000..3b45db6
--- /dev/null
+++ b/dockerfile/parser/testfiles/brimstone-consuldock/result
@@ -0,0 +1,5 @@
+(from "brimstone/ubuntu:14.04")
+(label "maintainer" "brimstone@the.narro.ws")
+(env "GOPATH" "/go")
+(entrypoint "/usr/local/bin/consuldock")
+(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
diff --git a/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile
new file mode 100644
index 0000000..25ae352
--- /dev/null
+++ b/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile
@@ -0,0 +1,52 @@
+FROM brimstone/ubuntu:14.04
+
+CMD []
+
+ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"]
+
+EXPOSE 8500 8600 8400 8301 8302
+
+RUN apt-get update \
+ && apt-get install -y unzip wget \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists
+
+RUN cd /tmp \
+ && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
+ -O web_ui.zip \
+ && unzip web_ui.zip \
+ && mv dist /webui \
+ && rm web_ui.zip
+
+RUN apt-get update \
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
+ && apt-get install -y --no-install-recommends unzip wget \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists \
+
+ && cd /tmp \
+ && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
+ -O web_ui.zip \
+ && unzip web_ui.zip \
+ && mv dist /webui \
+ && rm web_ui.zip \
+
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
+ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
+ && rm /tmp/dpkg.*
+
+ENV GOPATH /go
+
+RUN apt-get update \
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
+ && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists \
+
+ && go get -v github.com/hashicorp/consul \
+ && mv $GOPATH/bin/consul /usr/bin/consul \
+
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
+ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
+ && rm /tmp/dpkg.* \
+ && rm -rf $GOPATH
diff --git a/dockerfile/parser/testfiles/brimstone-docker-consul/result b/dockerfile/parser/testfiles/brimstone-docker-consul/result
new file mode 100644
index 0000000..16492e5
--- /dev/null
+++ b/dockerfile/parser/testfiles/brimstone-docker-consul/result
@@ -0,0 +1,9 @@
+(from "brimstone/ubuntu:14.04")
+(cmd)
+(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
+(expose "8500" "8600" "8400" "8301" "8302")
+(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists")
+(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip")
+(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*")
+(env "GOPATH" "/go")
+(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
diff --git a/dockerfile/parser/testfiles/continue-at-eof/Dockerfile b/dockerfile/parser/testfiles/continue-at-eof/Dockerfile
new file mode 100644
index 0000000..a8ec369
--- /dev/null
+++ b/dockerfile/parser/testfiles/continue-at-eof/Dockerfile
@@ -0,0 +1,3 @@
+FROM alpine:3.5
+
+RUN something \ \ No newline at end of file
diff --git a/dockerfile/parser/testfiles/continue-at-eof/result b/dockerfile/parser/testfiles/continue-at-eof/result
new file mode 100644
index 0000000..14e4f09
--- /dev/null
+++ b/dockerfile/parser/testfiles/continue-at-eof/result
@@ -0,0 +1,2 @@
+(from "alpine:3.5")
+(run "something")
diff --git a/dockerfile/parser/testfiles/continueIndent/Dockerfile b/dockerfile/parser/testfiles/continueIndent/Dockerfile
new file mode 100644
index 0000000..42b324e
--- /dev/null
+++ b/dockerfile/parser/testfiles/continueIndent/Dockerfile
@@ -0,0 +1,36 @@
+FROM ubuntu:14.04
+
+RUN echo hello\
+ world\
+ goodnight \
+ moon\
+ light\
+ning
+RUN echo hello \
+ world
+RUN echo hello \
+world
+RUN echo hello \
+goodbye\
+frog
+RUN echo hello \
+world
+RUN echo hi \
+ \
+ world \
+\
+ good\
+\
+night
+RUN echo goodbye\
+frog
+RUN echo good\
+bye\
+frog
+
+RUN echo hello \
+# this is a comment
+
+# this is a comment with a blank line surrounding it
+
+this is some more useful stuff
diff --git a/dockerfile/parser/testfiles/continueIndent/result b/dockerfile/parser/testfiles/continueIndent/result
new file mode 100644
index 0000000..268ae07
--- /dev/null
+++ b/dockerfile/parser/testfiles/continueIndent/result
@@ -0,0 +1,10 @@
+(from "ubuntu:14.04")
+(run "echo hello world goodnight moon lightning")
+(run "echo hello world")
+(run "echo hello world")
+(run "echo hello goodbyefrog")
+(run "echo hello world")
+(run "echo hi world goodnight")
+(run "echo goodbyefrog")
+(run "echo goodbyefrog")
+(run "echo hello this is some more useful stuff")
diff --git a/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile
new file mode 100644
index 0000000..8ccb71a
--- /dev/null
+++ b/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile
@@ -0,0 +1,54 @@
+FROM cpuguy83/ubuntu
+ENV NAGIOS_HOME /opt/nagios
+ENV NAGIOS_USER nagios
+ENV NAGIOS_GROUP nagios
+ENV NAGIOS_CMDUSER nagios
+ENV NAGIOS_CMDGROUP nagios
+ENV NAGIOSADMIN_USER nagiosadmin
+ENV NAGIOSADMIN_PASS nagios
+ENV APACHE_RUN_USER nagios
+ENV APACHE_RUN_GROUP nagios
+ENV NAGIOS_TIMEZONE UTC
+
+RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list
+RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx
+RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP )
+RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )
+
+ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz
+RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf
+ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/
+RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install
+
+RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars
+RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default
+
+RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo
+
+RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf
+
+RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs
+RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg
+RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg
+RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf
+
+RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \
+ sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg
+RUN cp /etc/services /var/spool/postfix/etc/
+
+RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix
+ADD nagios.init /etc/sv/nagios/run
+ADD apache.init /etc/sv/apache/run
+ADD postfix.init /etc/sv/postfix/run
+ADD postfix.stop /etc/sv/postfix/finish
+
+ADD start.sh /usr/local/bin/start_nagios
+
+ENV APACHE_LOCK_DIR /var/run
+ENV APACHE_LOG_DIR /var/log/apache2
+
+EXPOSE 80
+
+VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"]
+
+CMD ["/usr/local/bin/start_nagios"]
diff --git a/dockerfile/parser/testfiles/cpuguy83-nagios/result b/dockerfile/parser/testfiles/cpuguy83-nagios/result
new file mode 100644
index 0000000..25dd3dd
--- /dev/null
+++ b/dockerfile/parser/testfiles/cpuguy83-nagios/result
@@ -0,0 +1,40 @@
+(from "cpuguy83/ubuntu")
+(env "NAGIOS_HOME" "/opt/nagios")
+(env "NAGIOS_USER" "nagios")
+(env "NAGIOS_GROUP" "nagios")
+(env "NAGIOS_CMDUSER" "nagios")
+(env "NAGIOS_CMDGROUP" "nagios")
+(env "NAGIOSADMIN_USER" "nagiosadmin")
+(env "NAGIOSADMIN_PASS" "nagios")
+(env "APACHE_RUN_USER" "nagios")
+(env "APACHE_RUN_GROUP" "nagios")
+(env "NAGIOS_TIMEZONE" "UTC")
+(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list")
+(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx")
+(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )")
+(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )")
+(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz")
+(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf")
+(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/")
+(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install")
+(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars")
+(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default")
+(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo")
+(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf")
+(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs")
+(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
+(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
+(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf")
+(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg")
+(run "cp /etc/services /var/spool/postfix/etc/")
+(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix")
+(add "nagios.init" "/etc/sv/nagios/run")
+(add "apache.init" "/etc/sv/apache/run")
+(add "postfix.init" "/etc/sv/postfix/run")
+(add "postfix.stop" "/etc/sv/postfix/finish")
+(add "start.sh" "/usr/local/bin/start_nagios")
+(env "APACHE_LOCK_DIR" "/var/run")
+(env "APACHE_LOG_DIR" "/var/log/apache2")
+(expose "80")
+(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")
+(cmd "/usr/local/bin/start_nagios")
diff --git a/dockerfile/parser/testfiles/docker/Dockerfile b/dockerfile/parser/testfiles/docker/Dockerfile
new file mode 100644
index 0000000..5153453
--- /dev/null
+++ b/dockerfile/parser/testfiles/docker/Dockerfile
@@ -0,0 +1,102 @@
+# This file describes the standard way to build Docker, using docker
+#
+# Usage:
+#
+# # Assemble the full dev environment. This is slow the first time.
+# docker build -t docker .
+#
+# # Mount your source in an interactive container for quick testing:
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
+#
+# # Run the test suite:
+# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
+#
+# # Publish a release:
+# docker run --privileged \
+# -e AWS_S3_BUCKET=baz \
+# -e AWS_ACCESS_KEY=foo \
+# -e AWS_SECRET_KEY=bar \
+# -e GPG_PASSPHRASE=gloubiboulga \
+# docker hack/release.sh
+#
+# Note: AppArmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
+#
+
+FROM ubuntu:14.04
+LABEL maintainer Tianon Gravi <admwiggin@gmail.com> (@tianon)
+
+# Packaged dependencies
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
+ apt-utils \
+ aufs-tools \
+ automake \
+ btrfs-tools \
+ build-essential \
+ curl \
+ dpkg-sig \
+ git \
+ iptables \
+ libapparmor-dev \
+ libcap-dev \
+ mercurial \
+ pandoc \
+ parallel \
+ reprepro \
+ ruby1.9.1 \
+ ruby1.9.1-dev \
+ s3cmd=1.1.0* \
+ --no-install-recommends
+
+# Get lvm2 source for compiling statically
+RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
+# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
+# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly
+
+# Compile and install lvm2
+RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
+# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
+
+# Install Go
+RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz
+ENV PATH /usr/local/go/bin:$PATH
+ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
+RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
+
+# Compile Go for cross compilation
+ENV DOCKER_CROSSPLATFORMS \
+ linux/386 linux/arm \
+ darwin/amd64 darwin/386 \
+ freebsd/amd64 freebsd/386 freebsd/arm
+# (set an explicit GOARM of 5 for maximum compatibility)
+ENV GOARM 5
+RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
+
+# Grab Go's cover tool for dead-simple code coverage testing
+RUN go get golang.org/x/tools/cmd/cover
+
+# TODO replace FPM with some very minimal debhelper stuff
+RUN gem install --no-rdoc --no-ri fpm --version 1.0.2
+
+# Get the "busybox" image source so we can build locally instead of pulling
+RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
+
+# Setup s3cmd config
+RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg
+
+# Set user.email so crosbymichael's in-container merge commits go smoothly
+RUN git config --global user.email 'docker-dummy@example.com'
+
+# Add an unprivileged user to be used for tests which need it
+RUN groupadd -r docker
+RUN useradd --create-home --gid docker unprivilegeduser
+
+VOLUME /var/lib/docker
+WORKDIR /go/src/github.com/docker/docker
+ENV DOCKER_BUILDTAGS apparmor selinux
+
+# Wrap all commands in the "docker-in-docker" script to allow nested containers
+ENTRYPOINT ["hack/dind"]
+
+# Upload docker source
+COPY . /go/src/github.com/docker/docker
diff --git a/dockerfile/parser/testfiles/docker/result b/dockerfile/parser/testfiles/docker/result
new file mode 100644
index 0000000..0c2f229
--- /dev/null
+++ b/dockerfile/parser/testfiles/docker/result
@@ -0,0 +1,24 @@
+(from "ubuntu:14.04")
+(label "maintainer" "Tianon Gravi <admwiggin@gmail.com> (@tianon)")
+(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends")
+(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103")
+(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper")
+(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz")
+(env "PATH" "/usr/local/go/bin:$PATH")
+(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor")
+(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1")
+(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm")
+(env "GOARM" "5")
+(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
+(run "go get golang.org/x/tools/cmd/cover")
+(run "gem install --no-rdoc --no-ri fpm --version 1.0.2")
+(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox")
+(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg")
+(run "git config --global user.email 'docker-dummy@example.com'")
+(run "groupadd -r docker")
+(run "useradd --create-home --gid docker unprivilegeduser")
+(volume "/var/lib/docker")
+(workdir "/go/src/github.com/docker/docker")
+(env "DOCKER_BUILDTAGS" "apparmor selinux")
+(entrypoint "hack/dind")
+(copy "." "/go/src/github.com/docker/docker")
diff --git a/dockerfile/parser/testfiles/env/Dockerfile b/dockerfile/parser/testfiles/env/Dockerfile
new file mode 100644
index 0000000..08fa18a
--- /dev/null
+++ b/dockerfile/parser/testfiles/env/Dockerfile
@@ -0,0 +1,23 @@
+FROM ubuntu
+ENV name value
+ENV name=value
+ENV name=value name2=value2
+ENV name="value value1"
+ENV name=value\ value2
+ENV name="value'quote space'value2"
+ENV name='value"double quote"value2'
+ENV name=value\ value2 name2=value2\ value3
+ENV name="a\"b"
+ENV name="a\'b"
+ENV name='a\'b'
+ENV name='a\'b''
+ENV name='a\"b'
+ENV name="''"
+# don't put anything after the next line - it must be the last line of the
+# Dockerfile and it must end with \
+ENV name=value \
+ name1=value1 \
+ name2="value2a \
+ value2b" \
+ name3="value3a\n\"value3b\"" \
+ name4="value4a\\nvalue4b" \
diff --git a/dockerfile/parser/testfiles/env/result b/dockerfile/parser/testfiles/env/result
new file mode 100644
index 0000000..ba0a6dd
--- /dev/null
+++ b/dockerfile/parser/testfiles/env/result
@@ -0,0 +1,16 @@
+(from "ubuntu")
+(env "name" "value")
+(env "name" "value")
+(env "name" "value" "name2" "value2")
+(env "name" "\"value value1\"")
+(env "name" "value\\ value2")
+(env "name" "\"value'quote space'value2\"")
+(env "name" "'value\"double quote\"value2'")
+(env "name" "value\\ value2" "name2" "value2\\ value3")
+(env "name" "\"a\\\"b\"")
+(env "name" "\"a\\'b\"")
+(env "name" "'a\\'b'")
+(env "name" "'a\\'b''")
+(env "name" "'a\\\"b'")
+(env "name" "\"''\"")
+(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"")
diff --git a/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/dockerfile/parser/testfiles/escape-after-comment/Dockerfile
new file mode 100644
index 0000000..18e9a47
--- /dev/null
+++ b/dockerfile/parser/testfiles/escape-after-comment/Dockerfile
@@ -0,0 +1,9 @@
+# Comment here. Should not be looking for the following parser directive.
+# Hence the following line will be ignored, and the subsequent backslash
+# continuation will be the default.
+# escape = `
+
+FROM image
+LABEL maintainer foo@bar.com
+ENV GOPATH \
+\go
\ No newline at end of file
diff --git a/dockerfile/parser/testfiles/escape-after-comment/result b/dockerfile/parser/testfiles/escape-after-comment/result
new file mode 100644
index 0000000..9ab119c
--- /dev/null
+++ b/dockerfile/parser/testfiles/escape-after-comment/result
@@ -0,0 +1,3 @@
+(from "image")
+(label "maintainer" "foo@bar.com")
+(env "GOPATH" "\\go")
diff --git a/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/dockerfile/parser/testfiles/escape-nonewline/Dockerfile
new file mode 100644
index 0000000..366ee3c
--- /dev/null
+++ b/dockerfile/parser/testfiles/escape-nonewline/Dockerfile
@@ -0,0 +1,7 @@
+# escape = ``
+# There is no white space line after the directives. This still succeeds, but goes
+# against best practices.
+FROM image
+LABEL maintainer foo@bar.com
+ENV GOPATH `
+\go
\ No newline at end of file
diff --git a/dockerfile/parser/testfiles/escape-nonewline/result b/dockerfile/parser/testfiles/escape-nonewline/result
new file mode 100644
index 0000000..9ab119c
--- /dev/null
+++ b/dockerfile/parser/testfiles/escape-nonewline/result
@@ -0,0 +1,3 @@
+(from "image")
+(label "maintainer" "foo@bar.com")
+(env "GOPATH" "\\go")
diff --git a/dockerfile/parser/testfiles/escape/Dockerfile b/dockerfile/parser/testfiles/escape/Dockerfile
new file mode 100644
index 0000000..a515af1
--- /dev/null
+++ b/dockerfile/parser/testfiles/escape/Dockerfile
@@ -0,0 +1,6 @@
+#escape = `
+
+FROM image
+LABEL maintainer foo@bar.com
+ENV GOPATH `
+\go
\ No newline at end of file
diff --git a/dockerfile/parser/testfiles/escape/result b/dockerfile/parser/testfiles/escape/result
new file mode 100644
index 0000000..9ab119c
--- /dev/null
+++ b/dockerfile/parser/testfiles/escape/result
@@ -0,0 +1,3 @@
+(from "image")
+(label "maintainer" "foo@bar.com")
+(env "GOPATH" "\\go")
diff --git a/dockerfile/parser/testfiles/escapes/Dockerfile b/dockerfile/parser/testfiles/escapes/Dockerfile
new file mode 100644
index 0000000..0306239
--- /dev/null
+++ b/dockerfile/parser/testfiles/escapes/Dockerfile
@@ -0,0 +1,14 @@
+FROM ubuntu:14.04
+LABEL maintainer Erik \\Hollensbe <erik@hollensbe.org>\"
+
+RUN apt-get \update && \
+ apt-get \"install znc -y
+ADD \conf\\" /.znc
+
+RUN foo \
+
+bar \
+
+baz
+
+CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ]
diff --git a/dockerfile/parser/testfiles/escapes/result b/dockerfile/parser/testfiles/escapes/result
new file mode 100644
index 0000000..98e3e3b
--- /dev/null
+++ b/dockerfile/parser/testfiles/escapes/result
@@ -0,0 +1,6 @@
+(from "ubuntu:14.04")
+(label "maintainer" "Erik \\\\Hollensbe <erik@hollensbe.org>\\\"")
+(run "apt-get \\update && apt-get \\\"install znc -y")
+(add "\\conf\\\\\"" "/.znc")
+(run "foo bar baz")
+(cmd "/usr\\\"/bin/znc" "-f" "-r")
diff --git a/dockerfile/parser/testfiles/flags/Dockerfile b/dockerfile/parser/testfiles/flags/Dockerfile
new file mode 100644
index 0000000..2418e0f
--- /dev/null
+++ b/dockerfile/parser/testfiles/flags/Dockerfile
@@ -0,0 +1,10 @@
+FROM scratch
+COPY foo /tmp/
+COPY --user=me foo /tmp/
+COPY --doit=true foo /tmp/
+COPY --user=me --doit=true foo /tmp/
+COPY --doit=true -- foo /tmp/
+COPY -- foo /tmp/
+CMD --doit [ "a", "b" ]
+CMD --doit=true -- [ "a", "b" ]
+CMD --doit -- [ ]
diff --git a/dockerfile/parser/testfiles/flags/result b/dockerfile/parser/testfiles/flags/result
new file mode 100644
index 0000000..4578f4c
--- /dev/null
+++ b/dockerfile/parser/testfiles/flags/result
@@ -0,0 +1,10 @@
+(from "scratch")
+(copy "foo" "/tmp/")
+(copy ["--user=me"] "foo" "/tmp/")
+(copy ["--doit=true"] "foo" "/tmp/")
+(copy ["--user=me" "--doit=true"] "foo" "/tmp/")
+(copy ["--doit=true"] "foo" "/tmp/")
+(copy "foo" "/tmp/")
+(cmd ["--doit"] "a" "b")
+(cmd ["--doit=true"] "a" "b")
+(cmd ["--doit"])
diff --git a/dockerfile/parser/testfiles/health/Dockerfile b/dockerfile/parser/testfiles/health/Dockerfile
new file mode 100644
index 0000000..081e442
--- /dev/null
+++ b/dockerfile/parser/testfiles/health/Dockerfile
@@ -0,0 +1,10 @@
+FROM debian
+ADD check.sh main.sh /app/
+CMD /app/main.sh
+HEALTHCHECK
+HEALTHCHECK --interval=5s --timeout=3s --retries=3 \
+ CMD /app/check.sh --quiet
+HEALTHCHECK CMD
+HEALTHCHECK CMD a b
+HEALTHCHECK --timeout=3s CMD ["foo"]
+HEALTHCHECK CONNECT TCP 7000
diff --git a/dockerfile/parser/testfiles/health/result b/dockerfile/parser/testfiles/health/result
new file mode 100644
index 0000000..092924f
--- /dev/null
+++ b/dockerfile/parser/testfiles/health/result
@@ -0,0 +1,9 @@
+(from "debian")
+(add "check.sh" "main.sh" "/app/")
+(cmd "/app/main.sh")
+(healthcheck)
+(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet")
+(healthcheck "CMD")
+(healthcheck "CMD" "a b")
+(healthcheck ["--timeout=3s"] "CMD" "foo")
+(healthcheck "CONNECT" "TCP 7000")
diff --git a/dockerfile/parser/testfiles/influxdb/Dockerfile b/dockerfile/parser/testfiles/influxdb/Dockerfile
new file mode 100644
index 0000000..587fb9b
--- /dev/null
+++ b/dockerfile/parser/testfiles/influxdb/Dockerfile
@@ -0,0 +1,15 @@
+FROM ubuntu:14.04
+
+RUN apt-get update && apt-get install wget -y
+RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb
+RUN dpkg -i influxdb_latest_amd64.deb
+RUN rm -r /opt/influxdb/shared
+
+VOLUME /opt/influxdb/shared
+
+CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml
+
+EXPOSE 8083
+EXPOSE 8086
+EXPOSE 8090
+EXPOSE 8099
diff --git a/dockerfile/parser/testfiles/influxdb/result b/dockerfile/parser/testfiles/influxdb/result
new file mode 100644
index 0000000..0998e87
--- /dev/null
+++ b/dockerfile/parser/testfiles/influxdb/result
@@ -0,0 +1,11 @@
+(from "ubuntu:14.04")
+(run "apt-get update && apt-get install wget -y")
+(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb")
+(run "dpkg -i influxdb_latest_amd64.deb")
+(run "rm -r /opt/influxdb/shared")
+(volume "/opt/influxdb/shared")
+(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml")
+(expose "8083")
+(expose "8086")
+(expose "8090")
+(expose "8099")
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile
new file mode 100644
index 0000000..39fe27d
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile
@@ -0,0 +1 @@
+CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]"
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result
new file mode 100644
index 0000000..afc220c
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result
@@ -0,0 +1 @@
+(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"")
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile
new file mode 100644
index 0000000..eaae081
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile
@@ -0,0 +1 @@
+CMD '["echo", "Well, JSON in a string is JSON too?"]'
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result
new file mode 100644
index 0000000..484804e
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result
@@ -0,0 +1 @@
+(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'")
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile
new file mode 100644
index 0000000..c3ac63c
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile
@@ -0,0 +1 @@
+CMD ['echo','single quotes are invalid JSON']
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result
new file mode 100644
index 0000000..6147891
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result
@@ -0,0 +1 @@
+(cmd "['echo','single quotes are invalid JSON']")
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile
new file mode 100644
index 0000000..5fd4afa
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile
@@ -0,0 +1 @@
+CMD ["echo", "Please, close the brackets when you're done"
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result
new file mode 100644
index 0000000..1ffbb8f
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result
@@ -0,0 +1 @@
+(cmd "[\"echo\", \"Please, close the brackets when you're done\"")
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile
new file mode 100644
index 0000000..30cc4bb
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile
@@ -0,0 +1 @@
+CMD ["echo", "look ma, no quote!]
diff --git a/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result
new file mode 100644
index 0000000..3204814
--- /dev/null
+++ b/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result
@@ -0,0 +1 @@
+(cmd "[\"echo\", \"look ma, no quote!]")
diff --git a/dockerfile/parser/testfiles/json/Dockerfile b/dockerfile/parser/testfiles/json/Dockerfile
new file mode 100644
index 0000000..a586917
--- /dev/null
+++ b/dockerfile/parser/testfiles/json/Dockerfile
@@ -0,0 +1,8 @@
+CMD []
+CMD [""]
+CMD ["a"]
+CMD ["a","b"]
+CMD [ "a", "b" ]
+CMD [ "a", "b" ]
+CMD [ "a", "b" ]
+CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]
diff --git a/dockerfile/parser/testfiles/json/result b/dockerfile/parser/testfiles/json/result
new file mode 100644
index 0000000..c6553e6
--- /dev/null
+++ b/dockerfile/parser/testfiles/json/result
@@ -0,0 +1,8 @@
+(cmd)
+(cmd "")
+(cmd "a")
+(cmd "a" "b")
+(cmd "a" "b")
+(cmd "a" "b")
+(cmd "a" "b")
+(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00")
diff --git a/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile
new file mode 100644
index 0000000..728ec9a
--- /dev/null
+++ b/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile
@@ -0,0 +1,7 @@
+FROM ubuntu:14.04
+LABEL maintainer James Turnbull "james@example.com"
+ENV REFRESHED_AT 2014-06-01
+RUN apt-get update
+RUN apt-get -y install redis-server redis-tools
+EXPOSE 6379
+ENTRYPOINT [ "/usr/bin/redis-server" ]
diff --git a/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result
new file mode 100644
index 0000000..e774bc4
--- /dev/null
+++ b/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result
@@ -0,0 +1,7 @@
+(from "ubuntu:14.04")
+(label "maintainer" "James Turnbull \"james@example.com\"")
+(env "REFRESHED_AT" "2014-06-01")
+(run "apt-get update")
+(run "apt-get -y install redis-server redis-tools")
+(expose "6379")
+(entrypoint "/usr/bin/redis-server")
diff --git a/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile
new file mode 100644
index 0000000..27f28cb
--- /dev/null
+++ b/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile
@@ -0,0 +1,48 @@
+FROM busybox:buildroot-2014.02
+
+LABEL maintainer docker <docker@docker.io>
+
+ONBUILD RUN ["echo", "test"]
+ONBUILD RUN echo test
+ONBUILD COPY . /
+
+
+# RUN Commands \
+# linebreak in comment \
+RUN ["ls", "-la"]
+RUN ["echo", "'1234'"]
+RUN echo "1234"
+RUN echo 1234
+RUN echo '1234' && \
+ echo "456" && \
+ echo 789
+RUN sh -c 'echo root:testpass \
+ > /tmp/passwd'
+RUN mkdir -p /test /test2 /test3/test
+
+# ENV \
+ENV SCUBA 1 DUBA 3
+ENV SCUBA "1 DUBA 3"
+
+# CMD \
+CMD ["echo", "test"]
+CMD echo test
+CMD echo "test"
+CMD echo 'test'
+CMD echo 'test' | wc -
+
+#EXPOSE\
+EXPOSE 3000
+EXPOSE 9000 5000 6000
+
+USER docker
+USER docker:root
+
+VOLUME ["/test"]
+VOLUME ["/test", "/test2"]
+VOLUME /test3
+
+WORKDIR /test
+
+ADD . /
+COPY . copy
diff --git a/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result
new file mode 100644
index 0000000..8a499ff
--- /dev/null
+++ b/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result
@@ -0,0 +1,29 @@
+(from "busybox:buildroot-2014.02")
+(label "maintainer" "docker <docker@docker.io>")
+(onbuild (run "echo" "test"))
+(onbuild (run "echo test"))
+(onbuild (copy "." "/"))
+(run "ls" "-la")
+(run "echo" "'1234'")
+(run "echo \"1234\"")
+(run "echo 1234")
+(run "echo '1234' && echo \"456\" && echo 789")
+(run "sh -c 'echo root:testpass > /tmp/passwd'")
+(run "mkdir -p /test /test2 /test3/test")
+(env "SCUBA" "1 DUBA 3")
+(env "SCUBA" "\"1 DUBA 3\"")
+(cmd "echo" "test")
+(cmd "echo test")
+(cmd "echo \"test\"")
+(cmd "echo 'test'")
+(cmd "echo 'test' | wc -")
+(expose "3000")
+(expose "9000" "5000" "6000")
+(user "docker")
+(user "docker:root")
+(volume "/test")
+(volume "/test" "/test2")
+(volume "/test3")
+(workdir "/test")
+(add "." "/")
+(copy "." "copy")
diff --git a/dockerfile/parser/testfiles/mail/Dockerfile b/dockerfile/parser/testfiles/mail/Dockerfile
new file mode 100644
index 0000000..f64c116
--- /dev/null
+++ b/dockerfile/parser/testfiles/mail/Dockerfile
@@ -0,0 +1,16 @@
+FROM ubuntu:14.04
+
+RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y
+ADD .muttrc /
+ADD .offlineimaprc /
+ADD .tmux.conf /
+ADD mutt /.mutt
+ADD vim /.vim
+ADD vimrc /.vimrc
+ADD crontab /etc/crontab
+RUN chmod 644 /etc/crontab
+RUN mkdir /Mail
+RUN mkdir /.offlineimap
+RUN echo "export TERM=screen-256color" >/.zshenv
+
+CMD setsid cron; tmux -2
diff --git a/dockerfile/parser/testfiles/mail/result b/dockerfile/parser/testfiles/mail/result
new file mode 100644
index 0000000..a0efcf0
--- /dev/null
+++ b/dockerfile/parser/testfiles/mail/result
@@ -0,0 +1,14 @@
+(from "ubuntu:14.04")
+(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y")
+(add ".muttrc" "/")
+(add ".offlineimaprc" "/")
+(add ".tmux.conf" "/")
+(add "mutt" "/.mutt")
+(add "vim" "/.vim")
+(add "vimrc" "/.vimrc")
+(add "crontab" "/etc/crontab")
+(run "chmod 644 /etc/crontab")
+(run "mkdir /Mail")
+(run "mkdir /.offlineimap")
+(run "echo \"export TERM=screen-256color\" >/.zshenv")
+(cmd "setsid cron; tmux -2")
diff --git a/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/dockerfile/parser/testfiles/multiple-volumes/Dockerfile
new file mode 100644
index 0000000..57bb597
--- /dev/null
+++ b/dockerfile/parser/testfiles/multiple-volumes/Dockerfile
@@ -0,0 +1,3 @@
+FROM foo
+
+VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs
diff --git a/dockerfile/parser/testfiles/multiple-volumes/result b/dockerfile/parser/testfiles/multiple-volumes/result
new file mode 100644
index 0000000..18dbdee
--- /dev/null
+++ b/dockerfile/parser/testfiles/multiple-volumes/result
@@ -0,0 +1,2 @@
+(from "foo")
+(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")
diff --git a/dockerfile/parser/testfiles/mumble/Dockerfile b/dockerfile/parser/testfiles/mumble/Dockerfile
new file mode 100644
index 0000000..5b9ec06
--- /dev/null
+++ b/dockerfile/parser/testfiles/mumble/Dockerfile
@@ -0,0 +1,7 @@
+FROM ubuntu:14.04
+
+RUN apt-get update && apt-get install libcap2-bin mumble-server -y
+
+ADD ./mumble-server.ini /etc/mumble-server.ini
+
+CMD /usr/sbin/murmurd
diff --git a/dockerfile/parser/testfiles/mumble/result b/dockerfile/parser/testfiles/mumble/result
new file mode 100644
index 0000000..a0036a9
--- /dev/null
+++ b/dockerfile/parser/testfiles/mumble/result
@@ -0,0 +1,4 @@
+(from "ubuntu:14.04")
+(run "apt-get update && apt-get install libcap2-bin mumble-server -y")
+(add "./mumble-server.ini" "/etc/mumble-server.ini")
+(cmd "/usr/sbin/murmurd")
diff --git a/dockerfile/parser/testfiles/nginx/Dockerfile b/dockerfile/parser/testfiles/nginx/Dockerfile
new file mode 100644
index 0000000..0a35e2c
--- /dev/null
+++ b/dockerfile/parser/testfiles/nginx/Dockerfile
@@ -0,0 +1,14 @@
+FROM ubuntu:14.04
+LABEL maintainer Erik Hollensbe <erik@hollensbe.org>
+
+RUN apt-get update && apt-get install nginx-full -y
+RUN rm -rf /etc/nginx
+ADD etc /etc/nginx
+RUN chown -R root:root /etc/nginx
+RUN /usr/sbin/nginx -qt
+RUN mkdir /www
+
+CMD ["/usr/sbin/nginx"]
+
+VOLUME /www
+EXPOSE 80
diff --git a/dockerfile/parser/testfiles/nginx/result b/dockerfile/parser/testfiles/nginx/result
new file mode 100644
index 0000000..a895fad
--- /dev/null
+++ b/dockerfile/parser/testfiles/nginx/result
@@ -0,0 +1,11 @@
+(from "ubuntu:14.04")
+(label "maintainer" "Erik Hollensbe <erik@hollensbe.org>")
+(run "apt-get update && apt-get install nginx-full -y")
+(run "rm -rf /etc/nginx")
+(add "etc" "/etc/nginx")
+(run "chown -R root:root /etc/nginx")
+(run "/usr/sbin/nginx -qt")
+(run "mkdir /www")
+(cmd "/usr/sbin/nginx")
+(volume "/www")
+(expose "80")
diff --git a/dockerfile/parser/testfiles/tf2/Dockerfile b/dockerfile/parser/testfiles/tf2/Dockerfile
new file mode 100644
index 0000000..72b79bd
--- /dev/null
+++ b/dockerfile/parser/testfiles/tf2/Dockerfile
@@ -0,0 +1,23 @@
+FROM ubuntu:12.04
+
+EXPOSE 27015
+EXPOSE 27005
+EXPOSE 26901
+EXPOSE 27020
+
+RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y
+RUN mkdir -p /steam
+RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam
+ADD ./script /steam/script
+RUN /steam/steamcmd.sh +runscript /steam/script
+RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf
+RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf
+ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg
+ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg
+ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg
+RUN rm -r /steam/tf2/tf/addons/sourcemod/configs
+ADD ./configs /steam/tf2/tf/addons/sourcemod/configs
+RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en
+RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en
+
+CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill
diff --git a/dockerfile/parser/testfiles/tf2/result b/dockerfile/parser/testfiles/tf2/result
new file mode 100644
index 0000000..d4f94cd
--- /dev/null
+++ b/dockerfile/parser/testfiles/tf2/result
@@ -0,0 +1,20 @@
+(from "ubuntu:12.04")
+(expose "27015")
+(expose "27005")
+(expose "26901")
+(expose "27020")
+(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y")
+(run "mkdir -p /steam")
+(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam")
+(add "./script" "/steam/script")
+(run "/steam/steamcmd.sh +runscript /steam/script")
+(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf")
+(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf")
+(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg")
+(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg")
+(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg")
+(run "rm -r /steam/tf2/tf/addons/sourcemod/configs")
+(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs")
+(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en")
+(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en")
+(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill")
diff --git a/dockerfile/parser/testfiles/weechat/Dockerfile b/dockerfile/parser/testfiles/weechat/Dockerfile
new file mode 100644
index 0000000..4842088
--- /dev/null
+++ b/dockerfile/parser/testfiles/weechat/Dockerfile
@@ -0,0 +1,9 @@
+FROM ubuntu:14.04
+
+RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y
+
+ADD .weechat /.weechat
+ADD .tmux.conf /
+RUN echo "export TERM=screen-256color" >/.zshenv
+
+CMD zsh -c weechat
diff --git a/dockerfile/parser/testfiles/weechat/result b/dockerfile/parser/testfiles/weechat/result
new file mode 100644
index 0000000..c3abb4c
--- /dev/null
+++ b/dockerfile/parser/testfiles/weechat/result
@@ -0,0 +1,6 @@
+(from "ubuntu:14.04")
+(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y")
+(add ".weechat" "/.weechat")
+(add ".tmux.conf" "/")
+(run "echo \"export TERM=screen-256color\" >/.zshenv")
+(cmd "zsh -c weechat")
diff --git a/dockerfile/parser/testfiles/znc/Dockerfile b/dockerfile/parser/testfiles/znc/Dockerfile
new file mode 100644
index 0000000..626b126
--- /dev/null
+++ b/dockerfile/parser/testfiles/znc/Dockerfile
@@ -0,0 +1,7 @@
+FROM ubuntu:14.04
+LABEL maintainer Erik Hollensbe <erik@hollensbe.org>
+
+RUN apt-get update && apt-get install znc -y
+ADD conf /.znc
+
+CMD [ "/usr/bin/znc", "-f", "-r" ]
diff --git a/dockerfile/parser/testfiles/znc/result b/dockerfile/parser/testfiles/znc/result
new file mode 100644
index 0000000..bfc7f65
--- /dev/null
+++ b/dockerfile/parser/testfiles/znc/result
@@ -0,0 +1,5 @@
+(from "ubuntu:14.04")
+(label "maintainer" "Erik Hollensbe <erik@hollensbe.org>")
+(run "apt-get update && apt-get install znc -y")
+(add "conf" "/.znc")
+(cmd "/usr/bin/znc" "-f" "-r")
diff --git a/evaluator.go b/evaluator.go
new file mode 100644
index 0000000..b05f6c6
--- /dev/null
+++ b/evaluator.go
@@ -0,0 +1,163 @@
+package imagebuilder
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/openshift/imagebuilder/dockerfile/command"
+ "github.com/openshift/imagebuilder/dockerfile/parser"
+)
+
+// ParseDockerfile parses the provided stream as a canonical Dockerfile
+func ParseDockerfile(r io.Reader) (*parser.Node, error) {
+ result, err := parser.Parse(r)
+ if err != nil {
+ return nil, err
+ }
+ return result.AST, nil
+}
+
+// Environment variable interpolation will happen on these statements only.
+var replaceEnvAllowed = map[string]bool{
+ command.Env: true,
+ command.Label: true,
+ command.Add: true,
+ command.Copy: true,
+ command.Workdir: true,
+ command.Expose: true,
+ command.Volume: true,
+ command.User: true,
+ command.StopSignal: true,
+ command.Arg: true,
+}
+
+// Certain commands are allowed to have their args split into more
+// words after env var replacements. Meaning:
+//
+// ENV foo="123 456"
+// EXPOSE $foo
+//
+// should result in the same thing as:
+//
+// EXPOSE 123 456
+//
+// and not treat "123 456" as a single word.
+// Note that EXPOSE "$foo" and EXPOSE $foo are not the same thing:
+// quotes cause the value to still be treated as a single word.
+var allowWordExpansion = map[string]bool{
+ command.Expose: true,
+}
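+
+// As a sketch of the distinction (assuming an env list containing
+// foo="123 456"): ProcessWords("$foo", env) yields the two words
+// ["123", "456"], while ProcessWord("$foo", env) yields the single
+// word "123 456".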
+
+// Step represents the input Env and the output command after all
+// post-processing of the command arguments is done.
+type Step struct {
+ Env []string
+
+ Command string
+ Args []string
+ Flags []string
+ Attrs map[string]bool
+ Message string
+ Original string
+}
+
+// Resolve transforms a parsed Dockerfile line into a command to execute,
+// resolving any arguments.
+//
+// Almost all nodes will have this structure:
+// Child[Node, Node, Node] where Child is from parser.Node.Children and each
+// node comes from parser.Node.Next. This forms a "line" with a statement and
+// arguments and we process them in this normalized form by hitting
+// evaluateTable with the leaf nodes of the command and the Builder object.
+//
+// ONBUILD is a special case; in this case the parser will emit:
+// Child[Node, Child[Node, Node...]] where the first node is the literal
+// "onbuild" and the child entrypoint is the command of the ONBUILD statement,
+// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
+// deal with that, at least until it becomes more of a general concern with new
+// features.
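+//
+// For instance, the testfiles above parse `ONBUILD RUN echo test` to
+// (onbuild (run "echo test")); that reaches this function roughly as
+// Child[Node("onbuild"), Child[Node("run"), Node("echo test")]].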
+func (b *Step) Resolve(ast *parser.Node) error {
+ cmd := ast.Value
+ upperCasedCmd := strings.ToUpper(cmd)
+
+	// Ensure the user is given a decent error message if the platform
+	// on which the daemon is running does not support a builder command.
+ if err := platformSupports(strings.ToLower(cmd)); err != nil {
+ return err
+ }
+
+ attrs := ast.Attributes
+ original := ast.Original
+ flags := ast.Flags
+ strList := []string{}
+ msg := upperCasedCmd
+
+ if len(ast.Flags) > 0 {
+ msg += " " + strings.Join(ast.Flags, " ")
+ }
+
+ if cmd == "onbuild" {
+ if ast.Next == nil {
+ return fmt.Errorf("ONBUILD requires at least one argument")
+ }
+ ast = ast.Next.Children[0]
+ strList = append(strList, ast.Value)
+ msg += " " + ast.Value
+
+ if len(ast.Flags) > 0 {
+ msg += " " + strings.Join(ast.Flags, " ")
+ }
+
+ }
+
+	// Count the number of nodes that we are going to traverse first so we
+	// can pre-create the argument and message arrays. This speeds up the
+	// allocation of those lists considerably when there are many arguments.
+ cursor := ast
+ var n int
+ for cursor.Next != nil {
+ cursor = cursor.Next
+ n++
+ }
+ msgList := make([]string, n)
+
+ var i int
+ envs := b.Env
+ for ast.Next != nil {
+ ast = ast.Next
+ str := ast.Value
+ if replaceEnvAllowed[cmd] {
+ var err error
+ var words []string
+
+ if allowWordExpansion[cmd] {
+ words, err = ProcessWords(str, envs)
+ if err != nil {
+ return err
+ }
+ strList = append(strList, words...)
+ } else {
+ str, err = ProcessWord(str, envs)
+ if err != nil {
+ return err
+ }
+ strList = append(strList, str)
+ }
+ } else {
+ strList = append(strList, str)
+ }
+ msgList[i] = ast.Value
+ i++
+ }
+
+ msg += " " + strings.Join(msgList, " ")
+
+ b.Message = msg
+ b.Command = cmd
+ b.Args = strList
+ b.Original = original
+ b.Attrs = attrs
+ b.Flags = flags
+ return nil
+}
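+
+// A minimal sketch of driving ParseDockerfile and Step.Resolve together from
+// a separate program; the Dockerfile path and the seeded Env value below are
+// illustrative assumptions, not part of this package:
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"os"
+//
+//		"github.com/openshift/imagebuilder"
+//	)
+//
+//	func main() {
+//		f, err := os.Open("Dockerfile")
+//		if err != nil {
+//			panic(err)
+//		}
+//		defer f.Close()
+//		node, err := imagebuilder.ParseDockerfile(f)
+//		if err != nil {
+//			panic(err)
+//		}
+//		// Each child of the root node is one Dockerfile instruction.
+//		for _, child := range node.Children {
+//			step := imagebuilder.Step{Env: []string{"foo=123 456"}}
+//			if err := step.Resolve(child); err != nil {
+//				panic(err)
+//			}
+//			fmt.Println(step.Command, step.Args)
+//		}
+//	}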
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..65328a7
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,49 @@
+module github.com/openshift/imagebuilder
+
+go 1.19
+
+require (
+ github.com/containerd/containerd v1.7.0
+ github.com/containers/storage v1.46.1
+ github.com/docker/distribution v2.8.1+incompatible
+ github.com/docker/docker v23.0.6+incompatible
+ github.com/fsouza/go-dockerclient v1.9.7
+ github.com/sirupsen/logrus v1.9.0
+ github.com/stretchr/testify v1.8.2
+ k8s.io/klog v1.0.0
+)
+
+require (
+ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+ github.com/Microsoft/go-winio v0.6.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/protobuf v1.5.2 // indirect
+ github.com/klauspost/compress v1.16.4 // indirect
+ github.com/klauspost/pgzip v1.2.5 // indirect
+ github.com/kr/text v0.2.0 // indirect
+ github.com/moby/patternmatcher v0.5.0 // indirect
+ github.com/moby/sys/mountinfo v0.6.2 // indirect
+ github.com/moby/sys/sequential v0.5.0 // indirect
+ github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
+ github.com/morikuni/aec v1.0.0 // indirect
+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
+ github.com/opencontainers/runc v1.1.5 // indirect
+ github.com/opencontainers/runtime-spec v1.1.0-rc.1 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
+ github.com/ulikunitz/xz v0.5.11 // indirect
+ golang.org/x/mod v0.8.0 // indirect
+ golang.org/x/sys v0.7.0 // indirect
+ golang.org/x/tools v0.5.0 // indirect
+ google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
+ google.golang.org/grpc v1.53.0 // indirect
+ google.golang.org/protobuf v1.28.1 // indirect
+ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..fe94d43
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,190 @@
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
+github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg=
+github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc=
+github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
+github.com/containers/storage v1.46.1 h1:GcAe8J0Y6T2CF70fXPojUpnme6zXamuzGCrNujVtIGE=
+github.com/containers/storage v1.46.1/go.mod h1:81vNDX4h+nXJ2o0D6Yqy6JGXDYJGVpHZpz0nr09iJuQ=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
+github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fsouza/go-dockerclient v1.9.7 h1:FlIrT71E62zwKgRvCvWGdxRD+a/pIy+miY/n3MXgfuw=
+github.com/fsouza/go-dockerclient v1.9.7/go.mod h1:vx9C32kE2D15yDSOMCDaAEIARZpDQDFBHeqL3MgQy/U=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU=
+github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
+github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
+github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs=
+github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.1.0-rc.1 h1:wHa9jroFfKGQqFHj0I1fMRKLl0pfj+ynAqBxo3v6u9w=
+github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
+golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
diff --git a/imagebuilder.spec b/imagebuilder.spec
new file mode 100644
index 0000000..8f26e33
--- /dev/null
+++ b/imagebuilder.spec
@@ -0,0 +1,63 @@
+
+#debuginfo not supported with Go
+%global debug_package %{nil}
+
+# modifying the Go binaries breaks the DWARF debugging
+%global __os_install_post %{_rpmconfigdir}/brp-compress
+
+%{!?commit: %global commit HEAD }
+
+#
+# Customize from here.
+#
+
+%global golang_version 1.8.1
+%{!?version: %global version 1.2.5}
+%{!?release: %global release 1}
+%global package_name imagebuilder
+%global product_name Container Image Builder
+%global import_path github.com/openshift/imagebuilder
+
+Name: %{package_name}
+Version: %{version}
+Release: %{release}%{?dist}
+Summary:        Builds Dockerfiles using the Docker client
+License: ASL 2.0
+URL: https://%{import_path}
+
+Source0: https://%{import_path}/archive/%{commit}/%{name}-%{version}.tar.gz
+BuildRequires: golang >= %{golang_version}
+
+### AUTO-BUNDLED-GEN-ENTRY-POINT
+
+%description
+Builds Dockerfiles using the Docker client
+
+%prep
+GOPATH=$RPM_BUILD_DIR/go
+rm -rf $GOPATH
+mkdir -p $GOPATH/{src/github.com/openshift,bin,pkg}
+%setup -q -c -n imagebuilder
+cd ..
+mv imagebuilder $GOPATH/src/github.com/openshift/imagebuilder
+ln -s $GOPATH/src/github.com/openshift/imagebuilder imagebuilder
+
+%build
+export GOPATH=$RPM_BUILD_DIR/go
+cd $GOPATH/src/github.com/openshift/imagebuilder
+go install ./cmd/imagebuilder
+
+%install
+
+install -d %{buildroot}%{_bindir}
+install -p -m 755 $RPM_BUILD_DIR/go/bin/imagebuilder %{buildroot}%{_bindir}/imagebuilder
+
+%files
+%doc README.md
+%license LICENSE
+%{_bindir}/imagebuilder
+
+%pre
+
+%changelog
+
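Note that the %{!?commit: ...}-style guards in this spec only define each macro when it is not already set, so commit, version, and release can be overridden at build time, for example with rpmbuild -ba --define 'version 1.2.6' --define 'release 2' imagebuilder.spec.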
diff --git a/imageprogress/progress.go b/imageprogress/progress.go
new file mode 100644
index 0000000..a374edb
--- /dev/null
+++ b/imageprogress/progress.go
@@ -0,0 +1,315 @@
+package imageprogress
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containers/storage/pkg/regexp"
+)
+
+const (
+ defaultProgressTimeThreshhold = 30 * time.Second
+ defaultStableThreshhold = 10
+)
+
+// progressLine is a structure representation of a Docker pull progress line
+type progressLine struct {
+ ID string `json:"id"`
+ Status string `json:"status"`
+ Detail *progressDetail `json:"progressDetail"`
+ Error string `json:"error"`
+}
+
+// progressDetail is the progressDetail structure in a Docker pull progress line
+type progressDetail struct {
+ Current int64 `json:"current"`
+ Total int64 `json:"total"`
+}
+
+// layerDetail is layer information associated with a specific layerStatus
+type layerDetail struct {
+ Count int
+ Current int64
+ Total int64
+}
+
+// layerStatus is one of the possible statuses for a layer detected by
+// the progress writer
+type layerStatus int
+
+const (
+ statusPending layerStatus = iota
+ statusDownloading
+ statusExtracting
+ statusComplete
+ statusPushing
+)
+
+// layerStatusFromDockerString translates a string in a Docker status
+// line to a layerStatus
+func layerStatusFromDockerString(dockerStatus string) layerStatus {
+ switch dockerStatus {
+ case "Pushing":
+ return statusPushing
+ case "Downloading":
+ return statusDownloading
+ case "Extracting", "Verifying Checksum", "Download complete":
+ return statusExtracting
+ case "Pull complete", "Already exists", "Pushed", "Layer already exists":
+ return statusComplete
+ default:
+ return statusPending
+ }
+}
+
+type report map[layerStatus]*layerDetail
+
+func (r report) count(status layerStatus) int {
+ detail, ok := r[status]
+ if !ok {
+ return 0
+ }
+ return detail.Count
+}
+
+func (r report) percentProgress(status layerStatus) float32 {
+ detail, ok := r[status]
+ if !ok {
+ return 0
+ }
+ if detail.Total == 0 {
+ return 0
+ }
+ pct := float32(detail.Current) / float32(detail.Total) * 100.0
+ if pct > 100.0 {
+ pct = 100.0
+ }
+ if pct < 0.0 {
+ pct = 0.0
+ }
+ return pct
+}
+
+func (r report) totalCount() int {
+ cnt := 0
+ for _, detail := range r {
+ cnt += detail.Count
+ }
+ return cnt
+}
+
+// String is used for test output
+func (r report) String() string {
+ result := &bytes.Buffer{}
+ fmt.Fprintf(result, "{")
+ for k := range r {
+ var status string
+ switch k {
+ case statusPending:
+ status = "pending"
+ case statusDownloading:
+ status = "downloading"
+ case statusExtracting:
+ status = "extracting"
+ case statusComplete:
+ status = "complete"
+ case statusPushing:
+ status = "pushing"
+ }
+ fmt.Fprintf(result, "%s:{Count: %d, Current: %d, Total: %d}, ", status, r[k].Count, r[k].Current, r[k].Total)
+ }
+ fmt.Fprintf(result, "}")
+ return result.String()
+}
+
+// newWriter creates a writer that periodically reports
+// on pull/push progress of a Docker image. It only reports when the state of the
+// different layers has changed and uses time thresholds to limit the
+// rate of the reports.
+func newWriter(reportFn func(report), layersChangedFn func(report, report) bool) io.WriteCloser {
+ writer := &imageProgressWriter{
+ mutex: &sync.Mutex{},
+ layerStatus: map[string]*progressLine{},
+ reportFn: reportFn,
+ layersChangedFn: layersChangedFn,
+ progressTimeThreshhold: defaultProgressTimeThreshhold,
+ stableThreshhold: defaultStableThreshhold,
+ }
+ return writer
+}
+
+type imageProgressWriter struct {
+ mutex *sync.Mutex
+ internalWriter io.WriteCloser
+ readingGoroutineStatus <-chan error // Exists if internalWriter != nil
+ layerStatus map[string]*progressLine
+ lastLayerCount int
+ stableLines int
+ stableThreshhold int
+ progressTimeThreshhold time.Duration
+ lastReport report
+ lastReportTime time.Time
+ reportFn func(report)
+ layersChangedFn func(report, report) bool
+}
+
+func (w *imageProgressWriter) Write(data []byte) (int, error) {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ if w.internalWriter == nil {
+ var pipeIn *io.PipeReader
+ statusChannel := make(chan error, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+ pipeIn, w.internalWriter = io.Pipe()
+ w.readingGoroutineStatus = statusChannel
+ go w.readingGoroutine(statusChannel, pipeIn)
+ }
+ return w.internalWriter.Write(data)
+}
+
+func (w *imageProgressWriter) Close() error {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+
+ if w.internalWriter == nil {
+ return nil
+ }
+
+ err := w.internalWriter.Close() // As of Go 1.9 and 1.10, PipeWriter.Close() always returns nil
+ readingErr := <-w.readingGoroutineStatus
+ if err == nil && readingErr != nil {
+ err = readingErr
+ }
+ return err
+}
+
+func (w *imageProgressWriter) readingGoroutine(statusChannel chan<- error, pipeIn *io.PipeReader) {
+ err := errors.New("Internal error: unexpected panic in imageProgressWriter.readingGoroutine")
+ defer func() { statusChannel <- err }()
+ defer func() {
+ if err != nil {
+ pipeIn.CloseWithError(err)
+ } else {
+ pipeIn.Close()
+ }
+ }()
+
+ err = w.readProgress(pipeIn)
+ // err is nil on reaching EOF
+}
+
+func (w *imageProgressWriter) readProgress(pipeIn *io.PipeReader) error {
+ decoder := json.NewDecoder(pipeIn)
+ for {
+ line := &progressLine{}
+ err := decoder.Decode(line)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ err = w.processLine(line)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (w *imageProgressWriter) processLine(line *progressLine) error {
+
+ if err := getError(line); err != nil {
+ return err
+ }
+
+ // determine if it's a line we want to process
+ if !isLayerStatus(line) {
+ return nil
+ }
+
+ w.layerStatus[line.ID] = line
+
+ // if the number of layers has not stabilized yet, return and wait for more
+ // progress
+ if !w.isStableLayerCount() {
+ return nil
+ }
+
+ r := createReport(w.layerStatus)
+
+ // check if the count of layers in each state has changed
+ if w.layersChangedFn(w.lastReport, r) {
+ w.lastReport = r
+ w.lastReportTime = time.Now()
+ w.reportFn(r)
+ return nil
+ }
+ // If layer counts haven't changed, but enough time has passed (30 sec by default),
+ // at least report on download/push progress
+ if time.Since(w.lastReportTime) > w.progressTimeThreshhold {
+ w.lastReport = r
+ w.lastReportTime = time.Now()
+ w.reportFn(r)
+ }
+ return nil
+}
+
+func (w *imageProgressWriter) isStableLayerCount() bool {
+ // If the number of layers has changed since last status, we're not stable
+ if w.lastLayerCount != len(w.layerStatus) {
+ w.lastLayerCount = len(w.layerStatus)
+ w.stableLines = 0
+ return false
+ }
+ // Only proceed after we've received status for the same number
+ // of layers at least stableThreshhold times. If not, they're still increasing
+ w.stableLines++
+ return w.stableLines >= w.stableThreshhold
+}
+
+var layerIDRegexp = regexp.Delayed("^[a-f0-9]*$")
+
+func isLayerStatus(line *progressLine) bool {
+ // ignore status lines with no layer id
+ if len(line.ID) == 0 {
+ return false
+ }
+ // ignore layer ids that are not hex string
+ if !layerIDRegexp.MatchString(line.ID) {
+ return false
+ }
+ // ignore retrying status
+ if strings.HasPrefix(line.Status, "Retrying") {
+ return false
+ }
+ return true
+}
+
+func getError(line *progressLine) error {
+ if len(line.Error) > 0 {
+ return errors.New(line.Error)
+ }
+ return nil
+}
+
+func createReport(dockerProgress map[string]*progressLine) report {
+ r := report{}
+ for _, line := range dockerProgress {
+ layerStatus := layerStatusFromDockerString(line.Status)
+ detail, exists := r[layerStatus]
+ if !exists {
+ detail = &layerDetail{}
+ r[layerStatus] = detail
+ }
+ detail.Count++
+ if line.Detail != nil {
+ detail.Current += line.Detail.Current
+ detail.Total += line.Detail.Total
+ }
+ }
+ return r
+}
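To make the aggregation above concrete, here is a minimal sketch (progressLine, createReport and the status helpers are unexported, so this would live in a test inside the imageprogress package; the layer IDs are made up):

    lines := map[string]*progressLine{
    	"aaaa": {ID: "aaaa", Status: "Downloading", Detail: &progressDetail{Current: 10, Total: 100}},
    	"bbbb": {ID: "bbbb", Status: "Downloading", Detail: &progressDetail{Current: 30, Total: 100}},
    	"cccc": {ID: "cccc", Status: "Pull complete"},
    }
    r := createReport(lines)
    fmt.Println(r.count(statusDownloading))           // 2
    fmt.Println(r.count(statusComplete))              // 1
    fmt.Println(r.percentProgress(statusDownloading)) // 20 (40 of 200 bytes)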
diff --git a/imageprogress/progress_test.go b/imageprogress/progress_test.go
new file mode 100644
index 0000000..239f36f
--- /dev/null
+++ b/imageprogress/progress_test.go
@@ -0,0 +1,216 @@
+package imageprogress
+
+import (
+ "encoding/json"
+ "io"
+ "reflect"
+ "strconv"
+ "testing"
+)
+
+func TestReports(t *testing.T) {
+ tests := []struct {
+ name string
+ gen func(*progressGenerator)
+ errExpected bool
+ expected report
+ }{
+ {
+ name: "simple report",
+ gen: func(p *progressGenerator) {
+ p.status("1", "Extracting")
+ p.status("2", "Downloading")
+ p.status("1", "Downloading")
+ p.status("2", "Pull complete")
+ },
+ expected: report{
+ statusDownloading: &layerDetail{Count: 1},
+ statusComplete: &layerDetail{Count: 1},
+ },
+ },
+ {
+ name: "ignore invalid layer id",
+ gen: func(p *progressGenerator) {
+ p.status("1", "Downloading")
+ p.status("hello", "testing")
+ p.status("1", "Downloading")
+ },
+ expected: report{
+ statusDownloading: &layerDetail{Count: 1},
+ },
+ },
+ {
+ name: "ignore retrying status",
+ gen: func(p *progressGenerator) {
+ p.status("1", "Downloading")
+ p.status("2", "Pull complete")
+ p.status("1", "Downloading")
+ p.status("3", "Retrying")
+ },
+ expected: report{
+ statusDownloading: &layerDetail{Count: 1},
+ statusComplete: &layerDetail{Count: 1},
+ },
+ },
+ {
+ name: "detect error",
+ gen: func(p *progressGenerator) {
+ p.status("1", "Downloading")
+ p.err("an error")
+ },
+ errExpected: true,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ pipeIn, pipeOut := io.Pipe()
+ go func() {
+ p := newProgressGenerator(pipeOut)
+ test.gen(p)
+ pipeOut.Close()
+ }()
+ var lastReport report
+ w := newWriter(
+ func(r report) {
+ lastReport = r
+ },
+ func(a report, b report) bool {
+ return true
+ },
+ )
+ w.(*imageProgressWriter).stableThreshhold = 0
+ _, err := io.Copy(w, pipeIn)
+ if err == nil {
+ err = w.Close()
+ }
+ if err != nil {
+ if !test.errExpected {
+ t.Fatalf("%s: unexpected: %v", test.name, err)
+ }
+ return
+ }
+ if test.errExpected {
+ t.Fatalf("%s: did not get expected error", test.name)
+ }
+ // w.Close() has already waited for the writer's decoding goroutine, so lastReport is final here
+ if !compareReport(lastReport, test.expected) {
+ t.Errorf("%s: unexpected report, got: %v, expected: %v", test.name, lastReport, test.expected)
+ }
+ })
+ }
+}
+
+func TestErrorOnCopy(t *testing.T) {
+ // Producer pipe
+ genIn, genOut := io.Pipe()
+ p := newProgressGenerator(genOut)
+
+ // generate some data
+ go func() {
+ for i := 0; i < 100; i++ {
+ p.status("1", "Downloading")
+ p.status("2", "Downloading")
+ p.status("3", "Downloading")
+ }
+ p.err("data error")
+ genOut.Close()
+ }()
+
+ w := newWriter(func(r report) {}, func(a, b report) bool { return true })
+
+ // Ensure that the error is propagated to the copy
+ _, err := io.Copy(w, genIn)
+ if err == nil {
+ err = w.Close()
+ }
+ if err == nil {
+ t.Errorf("Did not get an error when copying to writer")
+ }
+ if err.Error() != "data error" {
+ t.Errorf("Did not get expected error: %v", err)
+ }
+}
+
+func TestStableLayerCount(t *testing.T) {
+
+ tests := []struct {
+ name string
+ lastLayerCount int
+ layerStatusCount int
+ stableThreshhold int
+ callCount int
+ expectStable bool
+ }{
+ {
+ name: "increasing layer count",
+ lastLayerCount: 3,
+ layerStatusCount: 4,
+ callCount: 1,
+ expectStable: false,
+ },
+ {
+ name: "has not met stable threshhold",
+ lastLayerCount: 3,
+ layerStatusCount: 3,
+ callCount: 2,
+ stableThreshhold: 3,
+ expectStable: false,
+ },
+ {
+ name: "met stable threshhold",
+ lastLayerCount: 3,
+ layerStatusCount: 3,
+ callCount: 4,
+ stableThreshhold: 3,
+ expectStable: true,
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ w := newWriter(func(report) {}, func(a, b report) bool { return true }).(*imageProgressWriter)
+ w.lastLayerCount = test.lastLayerCount
+ w.layerStatus = map[string]*progressLine{}
+ w.stableThreshhold = test.stableThreshhold
+ for i := 0; i < test.layerStatusCount; i++ {
+ w.layerStatus[strconv.Itoa(i)] = &progressLine{}
+ }
+ var result bool
+ for i := 0; i < test.callCount; i++ {
+ result = w.isStableLayerCount()
+ }
+ if result != test.expectStable {
+ t.Errorf("%s: expected %v, got %v", test.name, test.expectStable, result)
+ }
+ })
+ }
+}
+
+func compareReport(a, b report) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for k := range a {
+ if _, ok := b[k]; !ok {
+ return false
+ }
+ if !reflect.DeepEqual(*a[k], *b[k]) {
+ return false
+ }
+ }
+ return true
+}
+
+type progressGenerator json.Encoder
+
+func newProgressGenerator(w io.Writer) *progressGenerator {
+ return (*progressGenerator)(json.NewEncoder(w))
+}
+
+func (p *progressGenerator) status(id, status string) {
+ (*json.Encoder)(p).Encode(&progressLine{ID: id, Status: status})
+}
+
+func (p *progressGenerator) err(msg string) {
+ (*json.Encoder)(p).Encode(&progressLine{Error: msg})
+}
diff --git a/imageprogress/pull.go b/imageprogress/pull.go
new file mode 100644
index 0000000..79016b5
--- /dev/null
+++ b/imageprogress/pull.go
@@ -0,0 +1,45 @@
+package imageprogress
+
+import (
+ "fmt"
+ "io"
+)
+
+// NewPullWriter creates a writer that periodically reports
+// on pull progress of a Docker image. It only reports when the state of the
+// different layers has changed and uses time thresholds to limit the
+// rate of the reports.
+func NewPullWriter(printFn func(string)) io.WriteCloser {
+ return newWriter(pullReporter(printFn), pullLayersChanged)
+}
+
+func pullReporter(printFn func(string)) func(report) {
+ extracting := false
+ return func(r report) {
+ if extracting {
+ return
+ }
+ if r.count(statusDownloading) == 0 &&
+ r.count(statusPending) == 0 &&
+ r.count(statusExtracting) > 0 {
+
+ printFn(fmt.Sprintf("Pulled %[1]d/%[1]d layers, 100%% complete", r.totalCount()))
+ printFn("Extracting")
+ extracting = true
+ return
+ }
+
+ completeCount := r.count(statusComplete) + r.count(statusExtracting)
+ var pctComplete float32 = 0.0
+ pctComplete += float32(completeCount) / float32(r.totalCount())
+ pctComplete += float32(r.count(statusDownloading)) / float32(r.totalCount()) * r.percentProgress(statusDownloading) / 100.0
+ pctComplete *= 100.0
+ printFn(fmt.Sprintf("Pulled %d/%d layers, %.0f%% complete", completeCount, r.totalCount(), pctComplete))
+ }
+}
+
+func pullLayersChanged(older, newer report) bool {
+ olderCompleteCount := older.count(statusComplete) + older.count(statusExtracting)
+ newerCompleteCount := newer.count(statusComplete) + newer.count(statusExtracting)
+ return olderCompleteCount != newerCompleteCount
+}
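A minimal usage sketch for the exported API (the reportPull helper and the resp stream are assumptions for illustration; resp would carry Docker's JSON progress stream, e.g. the response body of a pull request):

    package example

    import (
    	"fmt"
    	"io"
    	"os"

    	"github.com/openshift/imagebuilder/imageprogress"
    )

    // reportPull copies a Docker pull progress stream into the writer.
    func reportPull(resp io.Reader) error {
    	w := imageprogress.NewPullWriter(func(s string) { fmt.Fprintln(os.Stderr, s) })
    	if _, err := io.Copy(w, resp); err != nil {
    		w.Close()
    		return err
    	}
    	// Close waits for the decoding goroutine and surfaces any error
    	// carried in the progress stream itself.
    	return w.Close()
    }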
diff --git a/imageprogress/push.go b/imageprogress/push.go
new file mode 100644
index 0000000..464b910
--- /dev/null
+++ b/imageprogress/push.go
@@ -0,0 +1,29 @@
+package imageprogress
+
+import (
+ "fmt"
+ "io"
+)
+
+// NewPushWriter creates a writer that periodically reports
+// on push progress of a Docker image. It only reports when the state of the
+// different layers has changed and uses time thresholds to limit the
+// rate of the reports.
+func NewPushWriter(printFn func(string)) io.WriteCloser {
+ return newWriter(pushReporter(printFn), pushLayersChanged)
+}
+
+func pushReporter(printFn func(string)) func(report) {
+ return func(r report) {
+ var pctComplete float32 = 0.0
+ pctComplete += float32(r.count(statusComplete)) / float32(r.totalCount())
+ pctComplete += float32(r.count(statusPushing)) / float32(r.totalCount()) * r.percentProgress(statusPushing) / 100.0
+ pctComplete *= 100.0
+
+ printFn(fmt.Sprintf("Pushed %d/%d layers, %.0f%% complete", r.count(statusComplete), r.totalCount(), pctComplete))
+ }
+}
+
+func pushLayersChanged(older, newer report) bool {
+ return older.count(statusComplete) != newer.count(statusComplete)
+}
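As a worked example of the percentage arithmetic above: with four layers total, two complete and two pushing at 50% of their combined bytes, the report is (2/4 + 2/4 × 0.5) × 100 = 75, printed as "Pushed 2/4 layers, 75% complete".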
diff --git a/internals.go b/internals.go
new file mode 100644
index 0000000..d4b9539
--- /dev/null
+++ b/internals.go
@@ -0,0 +1,120 @@
+package imagebuilder
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// hasEnvName returns true if the provided environment contains the named ENV var.
+func hasEnvName(env []string, name string) bool {
+ for _, e := range env {
+ if strings.HasPrefix(e, name+"=") {
+ return true
+ }
+ }
+ return false
+}
+
+// platformSupports is a short-term function to give users a helpful error
+// message if a Dockerfile uses a command not supported on the platform.
+func platformSupports(command string) error {
+ if runtime.GOOS != "windows" {
+ return nil
+ }
+ switch command {
+ case "expose", "user", "stopsignal", "arg":
+ return fmt.Errorf("The daemon on this platform does not support the command '%s'", command)
+ }
+ return nil
+}
+
+func handleJSONArgs(args []string, attributes map[string]bool) []string {
+ if len(args) == 0 {
+ return []string{}
+ }
+
+ if attributes != nil && attributes["json"] {
+ return args
+ }
+
+ // literal string command, not an exec array
+ return []string{strings.Join(args, " ")}
+}
+
+func hasSlash(input string) bool {
+ return strings.HasSuffix(input, string(os.PathSeparator)) || strings.HasSuffix(input, string(os.PathSeparator)+".")
+}
+
+// makeAbsolute ensures that the provided path is absolute.
+func makeAbsolute(dest, workingDir string) string {
+ // Twiddle the destination when it's a relative path - meaning, make it
+ // relative to the WORKDIR
+ if dest == "." {
+ if !hasSlash(workingDir) {
+ workingDir += string(os.PathSeparator)
+ }
+ dest = workingDir
+ }
+
+ if !filepath.IsAbs(dest) {
+ hasSlash := hasSlash(dest)
+ dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest)
+
+ // Make sure we preserve any trailing slash
+ if hasSlash {
+ dest += string(os.PathSeparator)
+ }
+ }
+ return dest
+}
+
+// parseOptInterval returns the duration of f.Value, or 0 if the value is
+// empty. An error is returned if a value is given but is not positive.
+func parseOptInterval(f *flag.Flag) (time.Duration, error) {
+ if f == nil {
+ return 0, fmt.Errorf("No flag defined")
+ }
+ s := f.Value.String()
+ if s == "" {
+ return 0, nil
+ }
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return 0, err
+ }
+ if d <= 0 {
+ return 0, fmt.Errorf("Interval %#v must be positive", f.Name)
+ }
+ return d, nil
+}
+
+// mergeEnv merges two lists of environment variables, avoiding duplicates.
+func mergeEnv(defaults, overrides []string) []string {
+ s := make([]string, 0, len(defaults)+len(overrides))
+ index := make(map[string]int)
+ for _, envSpec := range append(defaults, overrides...) {
+ envVar := strings.SplitN(envSpec, "=", 2)
+ if i, ok := index[envVar[0]]; ok {
+ s[i] = envSpec
+ continue
+ }
+ s = append(s, envSpec)
+ index[envVar[0]] = len(s) - 1
+ }
+ return s
+}
+
+// envMapAsSlice returns the contents of a map[string]string as a slice of keys
+// and values joined with "=".
+func envMapAsSlice(m map[string]string) []string {
+ s := make([]string, 0, len(m))
+ for k, v := range m {
+ s = append(s, k+"="+v)
+ }
+ return s
+}
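A short sketch of parseOptInterval (it is unexported, so this belongs in a test in this package; the flag name mirrors HEALTHCHECK's --interval option but is only illustrative):

    fs := flag.NewFlagSet("healthcheck", flag.ContinueOnError)
    fs.String("interval", "", "time between checks")
    _ = fs.Parse([]string{"-interval=30s"})
    d, err := parseOptInterval(fs.Lookup("interval"))
    fmt.Println(d, err) // 30s <nil>; "-interval=-5s" would yield the "must be positive" error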
diff --git a/internals_test.go b/internals_test.go
new file mode 100644
index 0000000..bdc96e6
--- /dev/null
+++ b/internals_test.go
@@ -0,0 +1,77 @@
+package imagebuilder
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "testing"
+)
+
+func TestMergeEnv(t *testing.T) {
+ tests := [][3][]string{
+ {
+ []string{"A=B", "B=C", "C=D"},
+ nil,
+ []string{"A=B", "B=C", "C=D"},
+ },
+ {
+ nil,
+ []string{"A=B", "B=C", "C=D"},
+ []string{"A=B", "B=C", "C=D"},
+ },
+ {
+ []string{"A=B", "B=C", "C=D", "E=F"},
+ []string{"B=O", "F=G"},
+ []string{"A=B", "B=O", "C=D", "E=F", "F=G"},
+ },
+ }
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ result := mergeEnv(test[0], test[1])
+ if len(result) != len(test[2]) {
+ t.Fatalf("expected %v, got %v", test[2], result)
+ }
+ for i := range result {
+ if result[i] != test[2][i] {
+ t.Fatalf("expected %v, got %v", test[2], result)
+ }
+ }
+ })
+ }
+}
+
+func TestEnvMapAsSlice(t *testing.T) {
+ tests := [][2][]string{
+ {
+ []string{"A=B", "B=C", "C=D"},
+ []string{"A=B", "B=C", "C=D"},
+ },
+ {
+ []string{"A=B", "B=C", "C=D", "E=F", "B=O", "F=G"},
+ []string{"A=B", "B=O", "C=D", "E=F", "F=G"},
+ },
+ {
+ []string{"A=B", "C=D", "B=C", "E=F", "F=G", "B=O"},
+ []string{"A=B", "B=O", "C=D", "E=F", "F=G"},
+ },
+ }
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ m := make(map[string]string)
+ for _, spec := range test[0] {
+ s := strings.SplitN(spec, "=", 2)
+ m[s[0]] = s[1]
+ }
+ result := envMapAsSlice(m)
+ sort.Strings(result)
+ if len(result) != len(test[1]) {
+ t.Fatalf("expected %v, got %v", test[1], result)
+ }
+ for i := range result {
+ if result[i] != test[1][i] {
+ t.Fatalf("expected %v, got %v", test[1], result)
+ }
+ }
+ })
+ }
+}
diff --git a/shell_parser.go b/shell_parser.go
new file mode 100644
index 0000000..9a0b6c3
--- /dev/null
+++ b/shell_parser.go
@@ -0,0 +1,332 @@
+package imagebuilder
+
+// This takes a single word and an array of env variables and processes
+// all quotes (" and ') as well as $xxx and ${xxx} env variable tokens.
+// It tries to mimic bash's word processing.
+// It doesn't support all flavors of the ${xx:...} format, but new ones
+// can be added by extending the "special ${} format processing" section.
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "text/scanner"
+ "unicode"
+)
+
+type shellWord struct {
+ word string
+ scanner scanner.Scanner
+ envs []string
+ pos int
+}
+
+// ProcessWord will use the 'env' list of environment variables,
+// and replace any env var references in 'word'.
+func ProcessWord(word string, env []string) (string, error) {
+ sw := &shellWord{
+ word: word,
+ envs: env,
+ pos: 0,
+ }
+ sw.scanner.Init(strings.NewReader(word))
+ word, _, err := sw.process()
+ return word, err
+}
+
+// ProcessWords will use the 'env' list of environment variables,
+// and replace any env var references in 'word' then it will also
+// return a slice of strings which represents the 'word'
+// split up based on spaces - taking into account quotes. Note that
+// this splitting is done **after** the env var substitutions are done.
+// Note, each one is trimmed to remove leading and trailing spaces (unless
+// they are quoted), whereas ProcessWord retains the spaces between words.
+func ProcessWords(word string, env []string) ([]string, error) {
+ sw := &shellWord{
+ word: word,
+ envs: env,
+ pos: 0,
+ }
+ sw.scanner.Init(strings.NewReader(word))
+ _, words, err := sw.process()
+ return words, err
+}
+
+func (sw *shellWord) process() (string, []string, error) {
+ return sw.processStopOn(scanner.EOF)
+}
+
+type wordsStruct struct {
+ word string
+ words []string
+ inWord bool
+}
+
+func (w *wordsStruct) addChar(ch rune) {
+ if unicode.IsSpace(ch) && w.inWord {
+ if len(w.word) != 0 {
+ w.words = append(w.words, w.word)
+ w.word = ""
+ w.inWord = false
+ }
+ } else if !unicode.IsSpace(ch) {
+ w.addRawChar(ch)
+ }
+}
+
+func (w *wordsStruct) addRawChar(ch rune) {
+ w.word += string(ch)
+ w.inWord = true
+}
+
+func (w *wordsStruct) addString(str string) {
+ var scan scanner.Scanner
+ scan.Init(strings.NewReader(str))
+ for scan.Peek() != scanner.EOF {
+ w.addChar(scan.Next())
+ }
+}
+
+func (w *wordsStruct) addRawString(str string) {
+ w.word += str
+ w.inWord = true
+}
+
+func (w *wordsStruct) getWords() []string {
+ if len(w.word) > 0 {
+ w.words = append(w.words, w.word)
+
+ // Just in case we're called again by mistake
+ w.word = ""
+ w.inWord = false
+ }
+ return w.words
+}
+
+// Process the word, starting at 'pos', and stop when we get to the
+// end of the word or the 'stopChar' character
+func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) {
+ var result string
+ var words wordsStruct
+
+ var charFuncMapping = map[rune]func() (string, error){
+ '\'': sw.processSingleQuote,
+ '"': sw.processDoubleQuote,
+ '$': sw.processDollar,
+ }
+
+ for sw.scanner.Peek() != scanner.EOF {
+ ch := sw.scanner.Peek()
+
+ if stopChar != scanner.EOF && ch == stopChar {
+ sw.scanner.Next()
+ return result, words.getWords(), nil
+ }
+ if fn, ok := charFuncMapping[ch]; ok {
+ // Call special processing func for certain chars
+ tmp, err := fn()
+ if err != nil {
+ return "", []string{}, err
+ }
+ result += tmp
+
+ if ch == rune('$') {
+ words.addString(tmp)
+ } else {
+ words.addRawString(tmp)
+ }
+ } else {
+ // Not special, just add it to the result
+ ch = sw.scanner.Next()
+
+ if ch == '\\' {
+ // '\' escapes, except end of line
+
+ ch = sw.scanner.Next()
+
+ if ch == scanner.EOF {
+ break
+ }
+
+ words.addRawChar(ch)
+ } else {
+ words.addChar(ch)
+ }
+
+ result += string(ch)
+ }
+ }
+
+ if stopChar != scanner.EOF {
+ return "", []string{}, fmt.Errorf("unexpected end of statement while looking for matching %s", string(stopChar))
+ }
+
+ return result, words.getWords(), nil
+}
+
+func (sw *shellWord) processSingleQuote() (string, error) {
+ // All chars between single quotes are taken as-is
+ // Note, you can't escape '
+ var result string
+
+ sw.scanner.Next()
+
+ for {
+ ch := sw.scanner.Next()
+ if ch == '\'' {
+ break
+ }
+ if ch == scanner.EOF {
+ return "", errors.New("unexpected end of statement while looking for matching single-quote")
+ }
+ result += string(ch)
+ }
+
+ return result, nil
+}
+
+func (sw *shellWord) processDoubleQuote() (string, error) {
+ // All chars up to the next " are taken as-is, even ', except any $ chars
+ // But you can escape " with a \
+ var result string
+
+ sw.scanner.Next()
+
+ for {
+ ch := sw.scanner.Peek()
+ if ch == '"' {
+ sw.scanner.Next()
+ break
+ }
+ if ch == scanner.EOF {
+ return "", errors.New("unexpected end of statement while looking for matching double-quote")
+ }
+ if ch == '$' {
+ tmp, err := sw.processDollar()
+ if err != nil {
+ return "", err
+ }
+ result += tmp
+ } else {
+ ch = sw.scanner.Next()
+ if ch == '\\' {
+ chNext := sw.scanner.Peek()
+
+ if chNext == scanner.EOF {
+ // Ignore \ at end of word
+ continue
+ }
+
+ if chNext == '"' || chNext == '$' || chNext == '\\' {
+ // \" and \$ and \\ can be escaped, all other \'s are left as-is
+ ch = sw.scanner.Next()
+ }
+ }
+ result += string(ch)
+ }
+ }
+
+ return result, nil
+}
+
+func (sw *shellWord) processDollar() (string, error) {
+ sw.scanner.Next()
+ ch := sw.scanner.Peek()
+ if ch == '{' {
+ sw.scanner.Next()
+ name := sw.processName()
+ ch = sw.scanner.Peek()
+ if ch == '}' {
+ // Normal ${xx} case
+ sw.scanner.Next()
+ return sw.getEnv(name), nil
+ }
+ if ch == ':' {
+ // Special ${xx:...} format processing
+ // Yes it allows for recursive $'s in the ... spot
+
+ sw.scanner.Next() // skip over :
+ modifier := sw.scanner.Next()
+
+ word, _, err := sw.processStopOn('}')
+ if err != nil {
+ return "", err
+ }
+
+ // Grab the current value of the variable in question so we
+ // can use it to determine what to do based on the modifier
+ newValue := sw.getEnv(name)
+
+ switch modifier {
+ case '+':
+ if newValue != "" {
+ newValue = word
+ }
+ return newValue, nil
+
+ case '-':
+ if newValue == "" {
+ newValue = word
+ }
+ return newValue, nil
+ case '?':
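+ // Note: this differs from bash's ${var:?word}, which errors as soon as
+ // the variable is unset; here 'word' first serves as a fallback value
+ // and an error is raised only when that fallback is also empty.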
+ if newValue == "" {
+ newValue = word
+ }
+ if newValue == "" {
+ return "", fmt.Errorf("Failed to process `%s`: %s is not allowed to be unset", sw.word, name)
+ }
+ return newValue, nil
+ default:
+ return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word)
+ }
+ }
+ return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word)
+ }
+ // $xxx case
+ name := sw.processName()
+ if name == "" {
+ return "$", nil
+ }
+ return sw.getEnv(name), nil
+}
+
+func (sw *shellWord) processName() string {
+ // Read in a name (alphanumeric or _)
+ // If it starts with a digit, consume and return just that first digit,
+ // mimicking positional parameters such as $1
+ var name string
+
+ for sw.scanner.Peek() != scanner.EOF {
+ ch := sw.scanner.Peek()
+ if len(name) == 0 && unicode.IsDigit(ch) {
+ ch = sw.scanner.Next()
+ return string(ch)
+ }
+ if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
+ break
+ }
+ ch = sw.scanner.Next()
+ name += string(ch)
+ }
+
+ return name
+}
+
+func (sw *shellWord) getEnv(name string) string {
+ for _, env := range sw.envs {
+ i := strings.Index(env, "=")
+ if i < 0 {
+ if name == env {
+ // Should probably never get here, but just in case treat
+ // it like "var" and "var=" are the same
+ return ""
+ }
+ continue
+ }
+ if name != env[:i] {
+ continue
+ }
+ return env[i+1:]
+ }
+ return ""
+}
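Since ProcessWord and ProcessWords are exported, a small external sketch can show substitution and word splitting side by side:

    package example

    import (
    	"fmt"

    	"github.com/openshift/imagebuilder"
    )

    func demo() {
    	env := []string{"NAME=world"}
    	// ${GREETING:-hello} falls back to "hello" because GREETING is unset.
    	s, _ := imagebuilder.ProcessWord("${GREETING:-hello} $NAME", env)
    	fmt.Println(s) // hello world

    	words, _ := imagebuilder.ProcessWords("${GREETING:-hello} $NAME", env)
    	fmt.Println(words) // [hello world] (two words, split after substitution)
    }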
diff --git a/signal/README.md b/signal/README.md
new file mode 100644
index 0000000..2b237a5
--- /dev/null
+++ b/signal/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file
diff --git a/signal/signal.go b/signal/signal.go
new file mode 100644
index 0000000..4649396
--- /dev/null
+++ b/signal/signal.go
@@ -0,0 +1,25 @@
+// Package signal provides helper functions for dealing with signals across
+// various operating systems.
+package signal
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// CheckSignal checks that a string names a valid signal: either a
+// non-zero number, or a signal name present in SignalMap (upper or lower
+// case, with or without the SIG prefix). It returns an error otherwise.
+func CheckSignal(rawSignal string) error {
+ s, err := strconv.Atoi(rawSignal)
+ if err == nil {
+ if s == 0 {
+ return fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return nil
+ }
+ if _, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]; !ok {
+ return fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return nil
+}
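A quick sketch of what CheckSignal accepts and rejects:

    package example

    import (
    	"fmt"

    	"github.com/openshift/imagebuilder/signal"
    )

    func demo() {
    	fmt.Println(signal.CheckSignal("SIGTERM")) // <nil>
    	fmt.Println(signal.CheckSignal("term"))    // <nil> (case-insensitive, SIG prefix optional)
    	fmt.Println(signal.CheckSignal("9"))       // <nil> (any non-zero number passes)
    	fmt.Println(signal.CheckSignal("0"))       // Invalid signal: 0
    	fmt.Println(signal.CheckSignal("BOGUS"))   // Invalid signal: BOGUS
    }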
diff --git a/signal/signals.go b/signal/signals.go
new file mode 100644
index 0000000..41d6fbd
--- /dev/null
+++ b/signal/signals.go
@@ -0,0 +1,79 @@
+package signal
+
+// SignalMap is a map of supported signals.
+var SignalMap = map[string]struct{}{
+ "ABRT": {},
+ "ALRM": {},
+ "BUS": {},
+ "CHLD": {},
+ "CLD": {},
+ "CONT": {},
+ "FPE": {},
+ "HUP": {},
+ "ILL": {},
+ "INT": {},
+ "IO": {},
+ "IOT": {},
+ "KILL": {},
+ "PIPE": {},
+ "POLL": {},
+ "PROF": {},
+ "PWR": {},
+ "QUIT": {},
+ "SEGV": {},
+ "STKFLT": {},
+ "STOP": {},
+ "SYS": {},
+ "TERM": {},
+ "TRAP": {},
+ "TSTP": {},
+ "TTIN": {},
+ "TTOU": {},
+ "UNUSED": {},
+ "URG": {},
+ "USR1": {},
+ "USR2": {},
+ "VTALRM": {},
+ "WINCH": {},
+ "XCPU": {},
+ "XFSZ": {},
+ "RTMIN": {},
+ "RTMIN+1": {},
+ "RTMIN+2": {},
+ "RTMIN+3": {},
+ "RTMIN+4": {},
+ "RTMIN+5": {},
+ "RTMIN+6": {},
+ "RTMIN+7": {},
+ "RTMIN+8": {},
+ "RTMIN+9": {},
+ "RTMIN+10": {},
+ "RTMIN+11": {},
+ "RTMIN+12": {},
+ "RTMIN+13": {},
+ "RTMIN+14": {},
+ "RTMIN+15": {},
+ "RTMAX-14": {},
+ "RTMAX-13": {},
+ "RTMAX-12": {},
+ "RTMAX-11": {},
+ "RTMAX-10": {},
+ "RTMAX-9": {},
+ "RTMAX-8": {},
+ "RTMAX-7": {},
+ "RTMAX-6": {},
+ "RTMAX-5": {},
+ "RTMAX-4": {},
+ "RTMAX-3": {},
+ "RTMAX-2": {},
+ "RTMAX-1": {},
+ "RTMAX": {},
+
+ "BUG": {},
+ "EMT": {},
+ "INFO": {},
+
+ "BUF": {},
+ "LWP": {},
+ "THR": {},
+}
diff --git a/strslice/strslice.go b/strslice/strslice.go
new file mode 100644
index 0000000..bad493f
--- /dev/null
+++ b/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
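Finally, a sketch of the dual-form decoding StrSlice provides, accepting both a JSON string and a JSON array (the two shapes CMD/ENTRYPOINT values take in image configuration):

    package example

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/openshift/imagebuilder/strslice"
    )

    func demo() {
    	var cmd strslice.StrSlice

    	_ = json.Unmarshal([]byte(`"/bin/sh -c date"`), &cmd)
    	fmt.Println(cmd) // [/bin/sh -c date] (one element)

    	_ = json.Unmarshal([]byte(`["/bin/sh", "-c", "date"]`), &cmd)
    	fmt.Println(cmd) // [/bin/sh -c date] (three elements)
    }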